repo_name
string
path
string
copies
string
size
string
content
string
license
string
mp3deviant721/boeffla-kernel-cm-bacon-mod
fs/afs/cell.c
10569
10547
/* AFS cell and server record management * * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/key.h> #include <linux/ctype.h> #include <linux/dns_resolver.h> #include <linux/sched.h> #include <keys/rxrpc-type.h> #include "internal.h" DECLARE_RWSEM(afs_proc_cells_sem); LIST_HEAD(afs_proc_cells); static LIST_HEAD(afs_cells); static DEFINE_RWLOCK(afs_cells_lock); static DECLARE_RWSEM(afs_cells_sem); /* add/remove serialisation */ static DECLARE_WAIT_QUEUE_HEAD(afs_cells_freeable_wq); static struct afs_cell *afs_cell_root; /* * allocate a cell record and fill in its name, VL server address list and * allocate an anonymous key */ static struct afs_cell *afs_cell_alloc(const char *name, unsigned namelen, char *vllist) { struct afs_cell *cell; struct key *key; char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp, *next; char *dvllist = NULL, *_vllist = NULL; char delimiter = ':'; int ret; _enter("%*.*s,%s", namelen, namelen, name ?: "", vllist); BUG_ON(!name); /* TODO: want to look up "this cell" in the cache */ if (namelen > AFS_MAXCELLNAME) { _leave(" = -ENAMETOOLONG"); return ERR_PTR(-ENAMETOOLONG); } /* allocate and initialise a cell record */ cell = kzalloc(sizeof(struct afs_cell) + namelen + 1, GFP_KERNEL); if (!cell) { _leave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } memcpy(cell->name, name, namelen); cell->name[namelen] = 0; atomic_set(&cell->usage, 1); INIT_LIST_HEAD(&cell->link); rwlock_init(&cell->servers_lock); INIT_LIST_HEAD(&cell->servers); init_rwsem(&cell->vl_sem); INIT_LIST_HEAD(&cell->vl_list); spin_lock_init(&cell->vl_lock); /* if the ip address is invalid, try dns query */ if (!vllist || 
strlen(vllist) < 7) { ret = dns_query("afsdb", name, namelen, "ipv4", &dvllist, NULL); if (ret < 0) { if (ret == -ENODATA || ret == -EAGAIN || ret == -ENOKEY) /* translate these errors into something * userspace might understand */ ret = -EDESTADDRREQ; _leave(" = %d", ret); return ERR_PTR(ret); } _vllist = dvllist; /* change the delimiter for user-space reply */ delimiter = ','; } else { _vllist = vllist; } /* fill in the VL server list from the rest of the string */ do { unsigned a, b, c, d; next = strchr(_vllist, delimiter); if (next) *next++ = 0; if (sscanf(_vllist, "%u.%u.%u.%u", &a, &b, &c, &d) != 4) goto bad_address; if (a > 255 || b > 255 || c > 255 || d > 255) goto bad_address; cell->vl_addrs[cell->vl_naddrs++].s_addr = htonl((a << 24) | (b << 16) | (c << 8) | d); } while (cell->vl_naddrs < AFS_CELL_MAX_ADDRS && (_vllist = next)); /* create a key to represent an anonymous user */ memcpy(keyname, "afs@", 4); dp = keyname + 4; cp = cell->name; do { *dp++ = toupper(*cp); } while (*cp++); key = rxrpc_get_null_key(keyname); if (IS_ERR(key)) { _debug("no key"); ret = PTR_ERR(key); goto error; } cell->anonymous_key = key; _debug("anon key %p{%x}", cell->anonymous_key, key_serial(cell->anonymous_key)); _leave(" = %p", cell); return cell; bad_address: printk(KERN_ERR "kAFS: bad VL server IP address\n"); ret = -EINVAL; error: key_put(cell->anonymous_key); kfree(dvllist); kfree(cell); _leave(" = %d", ret); return ERR_PTR(ret); } /* * afs_cell_crate() - create a cell record * @name: is the name of the cell. * @namsesz: is the strlen of the cell name. * @vllist: is a colon separated list of IP addresses in "a.b.c.d" format. * @retref: is T to return the cell reference when the cell exists. 
*/ struct afs_cell *afs_cell_create(const char *name, unsigned namesz, char *vllist, bool retref) { struct afs_cell *cell; int ret; _enter("%*.*s,%s", namesz, namesz, name ?: "", vllist); down_write(&afs_cells_sem); read_lock(&afs_cells_lock); list_for_each_entry(cell, &afs_cells, link) { if (strncasecmp(cell->name, name, namesz) == 0) goto duplicate_name; } read_unlock(&afs_cells_lock); cell = afs_cell_alloc(name, namesz, vllist); if (IS_ERR(cell)) { _leave(" = %ld", PTR_ERR(cell)); up_write(&afs_cells_sem); return cell; } /* add a proc directory for this cell */ ret = afs_proc_cell_setup(cell); if (ret < 0) goto error; #ifdef CONFIG_AFS_FSCACHE /* put it up for caching (this never returns an error) */ cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index, &afs_cell_cache_index_def, cell); #endif /* add to the cell lists */ write_lock(&afs_cells_lock); list_add_tail(&cell->link, &afs_cells); write_unlock(&afs_cells_lock); down_write(&afs_proc_cells_sem); list_add_tail(&cell->proc_link, &afs_proc_cells); up_write(&afs_proc_cells_sem); up_write(&afs_cells_sem); _leave(" = %p", cell); return cell; error: up_write(&afs_cells_sem); key_put(cell->anonymous_key); kfree(cell); _leave(" = %d", ret); return ERR_PTR(ret); duplicate_name: if (retref && !IS_ERR(cell)) afs_get_cell(cell); read_unlock(&afs_cells_lock); up_write(&afs_cells_sem); if (retref) { _leave(" = %p", cell); return cell; } _leave(" = -EEXIST"); return ERR_PTR(-EEXIST); } /* * set the root cell information * - can be called with a module parameter string * - can be called from a write to /proc/fs/afs/rootcell */ int afs_cell_init(char *rootcell) { struct afs_cell *old_root, *new_root; char *cp; _enter(""); if (!rootcell) { /* module is loaded with no parameters, or built statically. * - in the future we might initialize cell DB here. 
*/ _leave(" = 0 [no root]"); return 0; } cp = strchr(rootcell, ':'); if (!cp) _debug("kAFS: no VL server IP addresses specified"); else *cp++ = 0; /* allocate a cell record for the root cell */ new_root = afs_cell_create(rootcell, strlen(rootcell), cp, false); if (IS_ERR(new_root)) { _leave(" = %ld", PTR_ERR(new_root)); return PTR_ERR(new_root); } /* install the new cell */ write_lock(&afs_cells_lock); old_root = afs_cell_root; afs_cell_root = new_root; write_unlock(&afs_cells_lock); afs_put_cell(old_root); _leave(" = 0"); return 0; } /* * lookup a cell record */ struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz, bool dns_cell) { struct afs_cell *cell; _enter("\"%*.*s\",", namesz, namesz, name ?: ""); down_read(&afs_cells_sem); read_lock(&afs_cells_lock); if (name) { /* if the cell was named, look for it in the cell record list */ list_for_each_entry(cell, &afs_cells, link) { if (strncmp(cell->name, name, namesz) == 0) { afs_get_cell(cell); goto found; } } cell = ERR_PTR(-ENOENT); if (dns_cell) goto create_cell; found: ; } else { cell = afs_cell_root; if (!cell) { /* this should not happen unless user tries to mount * when root cell is not set. Return an impossibly * bizarre errno to alert the user. Things like * ENOENT might be "more appropriate" but they happen * for other reasons. 
*/ cell = ERR_PTR(-EDESTADDRREQ); } else { afs_get_cell(cell); } } read_unlock(&afs_cells_lock); up_read(&afs_cells_sem); _leave(" = %p", cell); return cell; create_cell: read_unlock(&afs_cells_lock); up_read(&afs_cells_sem); cell = afs_cell_create(name, namesz, NULL, true); _leave(" = %p", cell); return cell; } #if 0 /* * try and get a cell record */ struct afs_cell *afs_get_cell_maybe(struct afs_cell *cell) { write_lock(&afs_cells_lock); if (cell && !list_empty(&cell->link)) afs_get_cell(cell); else cell = NULL; write_unlock(&afs_cells_lock); return cell; } #endif /* 0 */ /* * destroy a cell record */ void afs_put_cell(struct afs_cell *cell) { if (!cell) return; _enter("%p{%d,%s}", cell, atomic_read(&cell->usage), cell->name); ASSERTCMP(atomic_read(&cell->usage), >, 0); /* to prevent a race, the decrement and the dequeue must be effectively * atomic */ write_lock(&afs_cells_lock); if (likely(!atomic_dec_and_test(&cell->usage))) { write_unlock(&afs_cells_lock); _leave(""); return; } ASSERT(list_empty(&cell->servers)); ASSERT(list_empty(&cell->vl_list)); write_unlock(&afs_cells_lock); wake_up(&afs_cells_freeable_wq); _leave(" [unused]"); } /* * destroy a cell record * - must be called with the afs_cells_sem write-locked * - cell->link should have been broken by the caller */ static void afs_cell_destroy(struct afs_cell *cell) { _enter("%p{%d,%s}", cell, atomic_read(&cell->usage), cell->name); ASSERTCMP(atomic_read(&cell->usage), >=, 0); ASSERT(list_empty(&cell->link)); /* wait for everyone to stop using the cell */ if (atomic_read(&cell->usage) > 0) { DECLARE_WAITQUEUE(myself, current); _debug("wait for cell %s", cell->name); set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&afs_cells_freeable_wq, &myself); while (atomic_read(&cell->usage) > 0) { schedule(); set_current_state(TASK_UNINTERRUPTIBLE); } remove_wait_queue(&afs_cells_freeable_wq, &myself); set_current_state(TASK_RUNNING); } _debug("cell dead"); ASSERTCMP(atomic_read(&cell->usage), ==, 0); 
ASSERT(list_empty(&cell->servers)); ASSERT(list_empty(&cell->vl_list)); afs_proc_cell_remove(cell); down_write(&afs_proc_cells_sem); list_del_init(&cell->proc_link); up_write(&afs_proc_cells_sem); #ifdef CONFIG_AFS_FSCACHE fscache_relinquish_cookie(cell->cache, 0); #endif key_put(cell->anonymous_key); kfree(cell); _leave(" [destroyed]"); } /* * purge in-memory cell database on module unload or afs_init() failure * - the timeout daemon is stopped before calling this */ void afs_cell_purge(void) { struct afs_cell *cell; _enter(""); afs_put_cell(afs_cell_root); down_write(&afs_cells_sem); while (!list_empty(&afs_cells)) { cell = NULL; /* remove the next cell from the front of the list */ write_lock(&afs_cells_lock); if (!list_empty(&afs_cells)) { cell = list_entry(afs_cells.next, struct afs_cell, link); list_del_init(&cell->link); } write_unlock(&afs_cells_lock); if (cell) { _debug("PURGING CELL %s (%d)", cell->name, atomic_read(&cell->usage)); /* now the cell should be left with no references */ afs_cell_destroy(cell); } } up_write(&afs_cells_sem); _leave(""); }
gpl-2.0
sudosurootdev/kernel_samsung_u8500
arch/mn10300/unit-asb2364/unit-init.c
12105
3490
/* ASB2364 initialisation * * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/param.h> #include <linux/init.h> #include <linux/device.h> #include <linux/delay.h> #include <asm/io.h> #include <asm/setup.h> #include <asm/processor.h> #include <asm/irq.h> #include <asm/intctl-regs.h> #include <asm/serial-regs.h> #include <unit/fpga-regs.h> #include <unit/serial.h> #include <unit/smsc911x.h> #define TTYS0_SERIAL_IER __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_IER * 2, u8) #define LAN_IRQ_CFG __SYSREG(SMSC911X_BASE + 0x54, u32) #define LAN_INT_EN __SYSREG(SMSC911X_BASE + 0x5c, u32) /* * initialise some of the unit hardware before gdbstub is set up */ asmlinkage void __init unit_init(void) { /* Make sure we aren't going to get unexpected interrupts */ TTYS0_SERIAL_IER = 0; SC0RXICR = 0; SC0TXICR = 0; SC1RXICR = 0; SC1TXICR = 0; SC2RXICR = 0; SC2TXICR = 0; /* Attempt to reset the FPGA attached peripherals */ ASB2364_FPGA_REG_RESET_LAN = 0x0000; SyncExBus(); ASB2364_FPGA_REG_RESET_UART = 0x0000; SyncExBus(); ASB2364_FPGA_REG_RESET_I2C = 0x0000; SyncExBus(); ASB2364_FPGA_REG_RESET_USB = 0x0000; SyncExBus(); ASB2364_FPGA_REG_RESET_AV = 0x0000; SyncExBus(); /* set up the external interrupts */ /* XIRQ[0]: NAND RXBY */ /* SET_XIRQ_TRIGGER(0, XIRQ_TRIGGER_LOWLEVEL); */ /* XIRQ[1]: LAN, UART, I2C, USB, PCI, FPGA */ SET_XIRQ_TRIGGER(1, XIRQ_TRIGGER_LOWLEVEL); /* XIRQ[2]: Extend Slot 1-9 */ /* SET_XIRQ_TRIGGER(2, XIRQ_TRIGGER_LOWLEVEL); */ #if defined(CONFIG_EXT_SERIAL_IRQ_LEVEL) && \ defined(CONFIG_ETHERNET_IRQ_LEVEL) && \ (CONFIG_EXT_SERIAL_IRQ_LEVEL != CONFIG_ETHERNET_IRQ_LEVEL) # error CONFIG_EXT_SERIAL_IRQ_LEVEL != 
CONFIG_ETHERNET_IRQ_LEVEL #endif #if defined(CONFIG_EXT_SERIAL_IRQ_LEVEL) set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL)); #elif defined(CONFIG_ETHERNET_IRQ_LEVEL) set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_ETHERNET_IRQ_LEVEL)); #endif } /* * initialise the rest of the unit hardware after gdbstub is ready */ asmlinkage void __init unit_setup(void) { /* Release the reset on the SMSC911X so that it is ready by the time we * need it */ ASB2364_FPGA_REG_RESET_LAN = 0x0001; SyncExBus(); ASB2364_FPGA_REG_RESET_UART = 0x0001; SyncExBus(); ASB2364_FPGA_REG_RESET_I2C = 0x0001; SyncExBus(); ASB2364_FPGA_REG_RESET_USB = 0x0001; SyncExBus(); ASB2364_FPGA_REG_RESET_AV = 0x0001; SyncExBus(); /* Make sure the ethernet chipset isn't going to give us an interrupt * storm from stuff it was doing pre-reset */ LAN_IRQ_CFG = 0; LAN_INT_EN = 0; } /* * initialise the external interrupts used by a unit of this type */ void __init unit_init_IRQ(void) { unsigned int extnum; for (extnum = 0 ; extnum < NR_XIRQS ; extnum++) { switch (GET_XIRQ_TRIGGER(extnum)) { /* LEVEL triggered interrupts should be made * post-ACK'able as they hold their lines until * serviced */ case XIRQ_TRIGGER_HILEVEL: case XIRQ_TRIGGER_LOWLEVEL: mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum)); break; default: break; } } #define IRQCTL __SYSREG(0xd5000090, u32) IRQCTL |= 0x02; irq_fpga_init(); }
gpl-2.0
BorqsIndia/polaris-kernel
fs/nfs/symlink.c
12617
1661
/* * linux/fs/nfs/symlink.c * * Copyright (C) 1992 Rick Sladkey * * Optimization changes Copyright (C) 1994 Florian La Roche * * Jun 7 1999, cache symlink lookups in the page cache. -DaveM * * nfs symlink handling code */ #include <linux/time.h> #include <linux/errno.h> #include <linux/sunrpc/clnt.h> #include <linux/nfs.h> #include <linux/nfs2.h> #include <linux/nfs_fs.h> #include <linux/pagemap.h> #include <linux/stat.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/namei.h> /* Symlink caching in the page cache is even more simplistic * and straight-forward than readdir caching. */ static int nfs_symlink_filler(struct inode *inode, struct page *page) { int error; error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE); if (error < 0) goto error; SetPageUptodate(page); unlock_page(page); return 0; error: SetPageError(page); unlock_page(page); return -EIO; } static void *nfs_follow_link(struct dentry *dentry, struct nameidata *nd) { struct inode *inode = dentry->d_inode; struct page *page; void *err; err = ERR_PTR(nfs_revalidate_mapping(inode, inode->i_mapping)); if (err) goto read_failed; page = read_cache_page(&inode->i_data, 0, (filler_t *)nfs_symlink_filler, inode); if (IS_ERR(page)) { err = page; goto read_failed; } nd_set_link(nd, kmap(page)); return page; read_failed: nd_set_link(nd, err); return NULL; } /* * symlinks can't do much... */ const struct inode_operations nfs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = nfs_follow_link, .put_link = page_put_link, .getattr = nfs_getattr, .setattr = nfs_setattr, };
gpl-2.0
guiwanglin/android_kernel_oneplus_msm8994
drivers/misc/sgi-xp/xp_main.c
13897
7985
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. */ /* * Cross Partition (XP) base. * * XP provides a base from which its users can interact * with XPC, yet not be dependent on XPC. * */ #include <linux/module.h> #include <linux/device.h> #include "xp.h" /* define the XP debug device structures to be used with dev_dbg() et al */ struct device_driver xp_dbg_name = { .name = "xp" }; struct device xp_dbg_subname = { .init_name = "", /* set to "" */ .driver = &xp_dbg_name }; struct device *xp = &xp_dbg_subname; /* max #of partitions possible */ short xp_max_npartitions; EXPORT_SYMBOL_GPL(xp_max_npartitions); short xp_partition_id; EXPORT_SYMBOL_GPL(xp_partition_id); u8 xp_region_size; EXPORT_SYMBOL_GPL(xp_region_size); unsigned long (*xp_pa) (void *addr); EXPORT_SYMBOL_GPL(xp_pa); unsigned long (*xp_socket_pa) (unsigned long gpa); EXPORT_SYMBOL_GPL(xp_socket_pa); enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa, const unsigned long src_gpa, size_t len); EXPORT_SYMBOL_GPL(xp_remote_memcpy); int (*xp_cpu_to_nasid) (int cpuid); EXPORT_SYMBOL_GPL(xp_cpu_to_nasid); enum xp_retval (*xp_expand_memprotect) (unsigned long phys_addr, unsigned long size); EXPORT_SYMBOL_GPL(xp_expand_memprotect); enum xp_retval (*xp_restrict_memprotect) (unsigned long phys_addr, unsigned long size); EXPORT_SYMBOL_GPL(xp_restrict_memprotect); /* * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level * users of XPC. */ struct xpc_registration xpc_registrations[XPC_MAX_NCHANNELS]; EXPORT_SYMBOL_GPL(xpc_registrations); /* * Initialize the XPC interface to indicate that XPC isn't loaded. 
*/ static enum xp_retval xpc_notloaded(void) { return xpNotLoaded; } struct xpc_interface xpc_interface = { (void (*)(int))xpc_notloaded, (void (*)(int))xpc_notloaded, (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded, (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func, void *))xpc_notloaded, (void (*)(short, int, void *))xpc_notloaded, (enum xp_retval(*)(short, void *))xpc_notloaded }; EXPORT_SYMBOL_GPL(xpc_interface); /* * XPC calls this when it (the XPC module) has been loaded. */ void xpc_set_interface(void (*connect) (int), void (*disconnect) (int), enum xp_retval (*send) (short, int, u32, void *, u16), enum xp_retval (*send_notify) (short, int, u32, void *, u16, xpc_notify_func, void *), void (*received) (short, int, void *), enum xp_retval (*partid_to_nasids) (short, void *)) { xpc_interface.connect = connect; xpc_interface.disconnect = disconnect; xpc_interface.send = send; xpc_interface.send_notify = send_notify; xpc_interface.received = received; xpc_interface.partid_to_nasids = partid_to_nasids; } EXPORT_SYMBOL_GPL(xpc_set_interface); /* * XPC calls this when it (the XPC module) is being unloaded. */ void xpc_clear_interface(void) { xpc_interface.connect = (void (*)(int))xpc_notloaded; xpc_interface.disconnect = (void (*)(int))xpc_notloaded; xpc_interface.send = (enum xp_retval(*)(short, int, u32, void *, u16)) xpc_notloaded; xpc_interface.send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func, void *))xpc_notloaded; xpc_interface.received = (void (*)(short, int, void *)) xpc_notloaded; xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *)) xpc_notloaded; } EXPORT_SYMBOL_GPL(xpc_clear_interface); /* * Register for automatic establishment of a channel connection whenever * a partition comes up. * * Arguments: * * ch_number - channel # to register for connection. 
* func - function to call for asynchronous notification of channel * state changes (i.e., connection, disconnection, error) and * the arrival of incoming messages. * key - pointer to optional user-defined value that gets passed back * to the user on any callouts made to func. * payload_size - size in bytes of the XPC message's payload area which * contains a user-defined message. The user should make * this large enough to hold their largest message. * nentries - max #of XPC message entries a message queue can contain. * The actual number, which is determined when a connection * is established and may be less then requested, will be * passed to the user via the xpConnected callout. * assigned_limit - max number of kthreads allowed to be processing * messages (per connection) at any given instant. * idle_limit - max number of kthreads allowed to be idle at any given * instant. */ enum xp_retval xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, u16 nentries, u32 assigned_limit, u32 idle_limit) { struct xpc_registration *registration; DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS); DBUG_ON(payload_size == 0 || nentries == 0); DBUG_ON(func == NULL); DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit); if (XPC_MSG_SIZE(payload_size) > XPC_MSG_MAX_SIZE) return xpPayloadTooBig; registration = &xpc_registrations[ch_number]; if (mutex_lock_interruptible(&registration->mutex) != 0) return xpInterrupted; /* if XPC_CHANNEL_REGISTERED(ch_number) */ if (registration->func != NULL) { mutex_unlock(&registration->mutex); return xpAlreadyRegistered; } /* register the channel for connection */ registration->entry_size = XPC_MSG_SIZE(payload_size); registration->nentries = nentries; registration->assigned_limit = assigned_limit; registration->idle_limit = idle_limit; registration->key = key; registration->func = func; mutex_unlock(&registration->mutex); xpc_interface.connect(ch_number); return xpSuccess; } EXPORT_SYMBOL_GPL(xpc_connect); 
/* * Remove the registration for automatic connection of the specified channel * when a partition comes up. * * Before returning this xpc_disconnect() will wait for all connections on the * specified channel have been closed/torndown. So the caller can be assured * that they will not be receiving any more callouts from XPC to their * function registered via xpc_connect(). * * Arguments: * * ch_number - channel # to unregister. */ void xpc_disconnect(int ch_number) { struct xpc_registration *registration; DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS); registration = &xpc_registrations[ch_number]; /* * We've decided not to make this a down_interruptible(), since we * figured XPC's users will just turn around and call xpc_disconnect() * again anyways, so we might as well wait, if need be. */ mutex_lock(&registration->mutex); /* if !XPC_CHANNEL_REGISTERED(ch_number) */ if (registration->func == NULL) { mutex_unlock(&registration->mutex); return; } /* remove the connection registration for the specified channel */ registration->func = NULL; registration->key = NULL; registration->nentries = 0; registration->entry_size = 0; registration->assigned_limit = 0; registration->idle_limit = 0; xpc_interface.disconnect(ch_number); mutex_unlock(&registration->mutex); return; } EXPORT_SYMBOL_GPL(xpc_disconnect); int __init xp_init(void) { enum xp_retval ret; int ch_number; /* initialize the connection registration mutex */ for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++) mutex_init(&xpc_registrations[ch_number].mutex); if (is_shub()) ret = xp_init_sn2(); else if (is_uv()) ret = xp_init_uv(); else ret = 0; if (ret != xpSuccess) return ret; return 0; } module_init(xp_init); void __exit xp_exit(void) { if (is_shub()) xp_exit_sn2(); else if (is_uv()) xp_exit_uv(); } module_exit(xp_exit); MODULE_AUTHOR("Silicon Graphics, Inc."); MODULE_DESCRIPTION("Cross Partition (XP) base"); MODULE_LICENSE("GPL");
gpl-2.0
tmshlvck/omnia-linux
drivers/acpi/acpica/tbfind.c
74
3484
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: tbfind - find table * * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "actables.h" #define _COMPONENT ACPI_TABLES ACPI_MODULE_NAME("tbfind") /******************************************************************************* * * FUNCTION: acpi_tb_find_table * * PARAMETERS: signature - String with ACPI table signature * oem_id - String with the table OEM ID * oem_table_id - String with the OEM Table ID * table_index - Where the table index is returned * * RETURN: Status and table index * * DESCRIPTION: Find an ACPI table (in the RSDT/XSDT) that matches the * Signature, OEM ID and OEM Table ID. Returns an index that can * be used to get the table header or entire table. * ******************************************************************************/ acpi_status acpi_tb_find_table(char *signature, char *oem_id, char *oem_table_id, u32 *table_index) { acpi_status status = AE_OK; struct acpi_table_header header; u32 i; ACPI_FUNCTION_TRACE(tb_find_table); /* Validate the input table signature */ if (!acpi_ut_valid_nameseg(signature)) { return_ACPI_STATUS(AE_BAD_SIGNATURE); } /* Don't allow the OEM strings to be too long */ if ((strlen(oem_id) > ACPI_OEM_ID_SIZE) || (strlen(oem_table_id) > ACPI_OEM_TABLE_ID_SIZE)) { return_ACPI_STATUS(AE_AML_STRING_LIMIT); } /* Normalize the input strings */ memset(&header, 0, sizeof(struct acpi_table_header)); ACPI_COPY_NAMESEG(header.signature, signature); strncpy(header.oem_id, oem_id, ACPI_OEM_ID_SIZE); strncpy(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE); /* Search for the table */ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) { if 
(memcmp(&(acpi_gbl_root_table_list.tables[i].signature), header.signature, ACPI_NAMESEG_SIZE)) { /* Not the requested table */ continue; } /* Table with matching signature has been found */ if (!acpi_gbl_root_table_list.tables[i].pointer) { /* Table is not currently mapped, map it */ status = acpi_tb_validate_table(&acpi_gbl_root_table_list. tables[i]); if (ACPI_FAILURE(status)) { goto unlock_and_exit; } if (!acpi_gbl_root_table_list.tables[i].pointer) { continue; } } /* Check for table match on all IDs */ if (!memcmp (acpi_gbl_root_table_list.tables[i].pointer->signature, header.signature, ACPI_NAMESEG_SIZE) && (!oem_id[0] || !memcmp (acpi_gbl_root_table_list. tables[i]. pointer->oem_id, header.oem_id, ACPI_OEM_ID_SIZE)) && (!oem_table_id[0] || !memcmp(acpi_gbl_root_table_list.tables[i].pointer-> oem_table_id, header.oem_table_id, ACPI_OEM_TABLE_ID_SIZE))) { *table_index = i; ACPI_DEBUG_PRINT((ACPI_DB_TABLES, "Found table [%4.4s]\n", header.signature)); goto unlock_and_exit; } } status = AE_NOT_FOUND; unlock_and_exit: (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); return_ACPI_STATUS(status); }
gpl-2.0
Flipkart/linux
mm/page_owner.c
74
7041
/*
 * Page owner tracking: records the allocation order, GFP mask and a stack
 * trace for each allocated page in its page_ext, and exposes the records
 * through a debugfs file. Activated by the "page_owner=on" boot parameter.
 */
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/bootmem.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include "internal.h"

/* Disabled by default; flipped to false by the "page_owner=on" boot param */
static bool page_owner_disabled = true;
/* Set once init completed; gates the debugfs read path */
bool page_owner_inited __read_mostly;

static void init_early_allocated_pages(void);

/* Parse the "page_owner=" boot parameter; only the value "on" enables us */
static int early_page_owner_param(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		page_owner_disabled = false;

	return 0;
}
early_param("page_owner", early_page_owner_param);

/* page_ext callback: do we need page_ext space allocated at all? */
static bool need_page_owner(void)
{
	if (page_owner_disabled)
		return false;

	return true;
}

/*
 * page_ext callback: mark ourselves initialized and retro-actively tag
 * pages that were allocated before page_ext became available.
 */
static void init_page_owner(void)
{
	if (page_owner_disabled)
		return;

	page_owner_inited = true;
	init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
	.need = need_page_owner,
	.init = init_page_owner,
};

/* Clear the owner bit on every page of a freed 2^order block */
void __reset_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext;

	for (i = 0; i < (1 << order); i++) {
		page_ext = lookup_page_ext(page + i);
		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
	}
}

/*
 * Record order, gfp_mask and the allocation stack trace for a page.
 * skip = 3 drops the innermost tracing frames so the trace starts at
 * the allocator's caller.
 */
void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
{
	struct page_ext *page_ext;
	struct stack_trace *trace;

	page_ext = lookup_page_ext(page);
	trace = &page_ext->trace;
	trace->nr_entries = 0;
	trace->max_entries = ARRAY_SIZE(page_ext->trace_entries);
	trace->entries = &page_ext->trace_entries[0];
	trace->skip = 3;
	save_stack_trace(&page_ext->trace);

	page_ext->order = order;
	page_ext->gfp_mask = gfp_mask;

	/* Set the owner bit last, after the record is complete */
	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}

/*
 * Format one page-owner record into a kernel buffer and copy it to
 * userspace. Returns the number of bytes copied, -ENOMEM on allocation
 * failure or when the record does not fit in 'count' bytes, -EFAULT on
 * copy_to_user failure.
 */
static ssize_t print_page_owner(char __user *buf, size_t count,
				unsigned long pfn, struct page *page,
				struct page_ext *page_ext)
{
	int ret;
	int pageblock_mt, page_mt;
	char *kbuf;

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask 0x%x\n",
			page_ext->order, page_ext->gfp_mask);
	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pfnblock_migratetype(page, pfn);
	page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu Block %lu type %d %s Flags %s%s%s%s%s%s%s%s%s%s%s%s\n",
			pfn,
			pfn >> pageblock_order,
			pageblock_mt,
			pageblock_mt != page_mt ? "Fallback" : " ",
			PageLocked(page) ? "K" : " ",
			PageError(page) ? "E" : " ",
			PageReferenced(page) ? "R" : " ",
			PageUptodate(page) ? "U" : " ",
			PageDirty(page) ? "D" : " ",
			PageLRU(page) ? "L" : " ",
			PageActive(page) ? "A" : " ",
			PageSlab(page) ? "S" : " ",
			PageWriteback(page) ? "W" : " ",
			PageCompound(page) ? "C" : " ",
			PageSwapCache(page) ? "B" : " ",
			PageMappedToDisk(page) ? "M" : " ");
	if (ret >= count)
		goto err;

	/* Append the recorded allocation stack trace */
	ret += snprint_stack_trace(kbuf + ret, count - ret,
					&page_ext->trace, 0);
	if (ret >= count)
		goto err;

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

/*
 * debugfs read: scan PFNs starting at the one encoded in *ppos, find the
 * next page with an owner record, print it and advance *ppos. Returns 0
 * at end of scan (EOF).
 */
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count,
		loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;

	if (!page_owner_inited)
		return -EINVAL;

	page = NULL;
	/* *ppos is an offset from min_low_pfn, not a byte offset */
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			/* Free page: skip over the whole buddy block */
			unsigned long freepage_order = page_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page, page_ext);
	}

	return 0;
}

/*
 * Tag every already-allocated page in a zone with a zero (unknown) owner
 * record so that later scans do not mistake early allocations for free
 * pages. Caller holds the zone lock.
 */
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count = 0;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		page = pfn_to_page(pfn);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			/*
			 * We are safe to check buddy flag and order, because
			 * this is init stage and only single thread runs.
			 */
			if (PageBuddy(page)) {
				pfn += (1UL << page_order(page)) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);

			/* Maybe overlapping zone: already tagged, skip */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			/* Found early allocated page */
			set_page_owner(page, 0, 0);
			count++;
		}
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

/* Run init_pages_in_zone() over every populated zone of one node */
static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		init_pages_in_zone(pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}

/* Retro-actively tag pages allocated before page_ext was ready */
static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	drain_all_pages(NULL);
	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
	.read = read_page_owner,
};

/* Create the debugfs entry exposing the per-page owner records */
static int __init pageowner_init(void)
{
	struct dentry *dentry;

	if (!page_owner_inited) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
			NULL, &proc_page_owner_operations);
	/* NOTE(review): debugfs_create_file historically returns NULL (not
	 * an ERR_PTR) on failure in some kernel versions — confirm this
	 * IS_ERR check matches the debugfs API of this tree. */
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
module_init(pageowner_init)
gpl-2.0
sktjdgns1189/android_kernel_samsung_SHW-M130L
drivers/usb/serial/option.c
330
60852
/* USB Driver for GSM modems Copyright (C) 2005 Matthias Urlichs <smurf@smurf.noris.de> This driver is free software; you can redistribute it and/or modify it under the terms of Version 2 of the GNU General Public License as published by the Free Software Foundation. Portions copied from the Keyspan driver by Hugh Blemings <hugh@blemings.org> History: see the git log. Work sponsored by: Sigos GmbH, Germany <info@sigos.de> This driver exists because the "normal" serial driver doesn't work too well with GSM modems. Issues: - data loss -- one single Receive URB is not nearly enough - nonstandard flow (Option devices) control - controlling the baud rate doesn't make sense This driver is named "option" because the most common device it's used for is a PC-Card (with an internal OHCI-USB interface, behind which the GSM interface sits), made by Option Inc. Some of the "one port" devices actually exhibit multiple USB instances on the USB bus. This is not a bug, these ports are used for different device features. 
*/ #define DRIVER_VERSION "v0.7.2" #define DRIVER_AUTHOR "Matthias Urlichs <smurf@smurf.noris.de>" #define DRIVER_DESC "USB Driver for GSM modems" #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include "usb-wwan.h" /* Function prototypes */ static int option_probe(struct usb_serial *serial, const struct usb_device_id *id); static int option_send_setup(struct usb_serial_port *port); static void option_instat_callback(struct urb *urb); /* Vendor and product IDs */ #define OPTION_VENDOR_ID 0x0AF0 #define OPTION_PRODUCT_COLT 0x5000 #define OPTION_PRODUCT_RICOLA 0x6000 #define OPTION_PRODUCT_RICOLA_LIGHT 0x6100 #define OPTION_PRODUCT_RICOLA_QUAD 0x6200 #define OPTION_PRODUCT_RICOLA_QUAD_LIGHT 0x6300 #define OPTION_PRODUCT_RICOLA_NDIS 0x6050 #define OPTION_PRODUCT_RICOLA_NDIS_LIGHT 0x6150 #define OPTION_PRODUCT_RICOLA_NDIS_QUAD 0x6250 #define OPTION_PRODUCT_RICOLA_NDIS_QUAD_LIGHT 0x6350 #define OPTION_PRODUCT_COBRA 0x6500 #define OPTION_PRODUCT_COBRA_BUS 0x6501 #define OPTION_PRODUCT_VIPER 0x6600 #define OPTION_PRODUCT_VIPER_BUS 0x6601 #define OPTION_PRODUCT_GT_MAX_READY 0x6701 #define OPTION_PRODUCT_FUJI_MODEM_LIGHT 0x6721 #define OPTION_PRODUCT_FUJI_MODEM_GT 0x6741 #define OPTION_PRODUCT_FUJI_MODEM_EX 0x6761 #define OPTION_PRODUCT_KOI_MODEM 0x6800 #define OPTION_PRODUCT_SCORPION_MODEM 0x6901 #define OPTION_PRODUCT_ETNA_MODEM 0x7001 #define OPTION_PRODUCT_ETNA_MODEM_LITE 0x7021 #define OPTION_PRODUCT_ETNA_MODEM_GT 0x7041 #define OPTION_PRODUCT_ETNA_MODEM_EX 0x7061 #define OPTION_PRODUCT_ETNA_KOI_MODEM 0x7100 #define OPTION_PRODUCT_GTM380_MODEM 0x7201 #define HUAWEI_VENDOR_ID 0x12D1 #define HUAWEI_PRODUCT_E600 0x1001 #define HUAWEI_PRODUCT_E220 0x1003 #define HUAWEI_PRODUCT_E220BIS 0x1004 #define HUAWEI_PRODUCT_E1401 0x1401 #define HUAWEI_PRODUCT_E1402 0x1402 
#define HUAWEI_PRODUCT_E1403 0x1403 #define HUAWEI_PRODUCT_E1404 0x1404 #define HUAWEI_PRODUCT_E1405 0x1405 #define HUAWEI_PRODUCT_E1406 0x1406 #define HUAWEI_PRODUCT_E1407 0x1407 #define HUAWEI_PRODUCT_E1408 0x1408 #define HUAWEI_PRODUCT_E1409 0x1409 #define HUAWEI_PRODUCT_E140A 0x140A #define HUAWEI_PRODUCT_E140B 0x140B #define HUAWEI_PRODUCT_E140C 0x140C #define HUAWEI_PRODUCT_E140D 0x140D #define HUAWEI_PRODUCT_E140E 0x140E #define HUAWEI_PRODUCT_E140F 0x140F #define HUAWEI_PRODUCT_E1410 0x1410 #define HUAWEI_PRODUCT_E1411 0x1411 #define HUAWEI_PRODUCT_E1412 0x1412 #define HUAWEI_PRODUCT_E1413 0x1413 #define HUAWEI_PRODUCT_E1414 0x1414 #define HUAWEI_PRODUCT_E1415 0x1415 #define HUAWEI_PRODUCT_E1416 0x1416 #define HUAWEI_PRODUCT_E1417 0x1417 #define HUAWEI_PRODUCT_E1418 0x1418 #define HUAWEI_PRODUCT_E1419 0x1419 #define HUAWEI_PRODUCT_E141A 0x141A #define HUAWEI_PRODUCT_E141B 0x141B #define HUAWEI_PRODUCT_E141C 0x141C #define HUAWEI_PRODUCT_E141D 0x141D #define HUAWEI_PRODUCT_E141E 0x141E #define HUAWEI_PRODUCT_E141F 0x141F #define HUAWEI_PRODUCT_E1420 0x1420 #define HUAWEI_PRODUCT_E1421 0x1421 #define HUAWEI_PRODUCT_E1422 0x1422 #define HUAWEI_PRODUCT_E1423 0x1423 #define HUAWEI_PRODUCT_E1424 0x1424 #define HUAWEI_PRODUCT_E1425 0x1425 #define HUAWEI_PRODUCT_E1426 0x1426 #define HUAWEI_PRODUCT_E1427 0x1427 #define HUAWEI_PRODUCT_E1428 0x1428 #define HUAWEI_PRODUCT_E1429 0x1429 #define HUAWEI_PRODUCT_E142A 0x142A #define HUAWEI_PRODUCT_E142B 0x142B #define HUAWEI_PRODUCT_E142C 0x142C #define HUAWEI_PRODUCT_E142D 0x142D #define HUAWEI_PRODUCT_E142E 0x142E #define HUAWEI_PRODUCT_E142F 0x142F #define HUAWEI_PRODUCT_E1430 0x1430 #define HUAWEI_PRODUCT_E1431 0x1431 #define HUAWEI_PRODUCT_E1432 0x1432 #define HUAWEI_PRODUCT_E1433 0x1433 #define HUAWEI_PRODUCT_E1434 0x1434 #define HUAWEI_PRODUCT_E1435 0x1435 #define HUAWEI_PRODUCT_E1436 0x1436 #define HUAWEI_PRODUCT_E1437 0x1437 #define HUAWEI_PRODUCT_E1438 0x1438 #define HUAWEI_PRODUCT_E1439 0x1439 #define 
HUAWEI_PRODUCT_E143A 0x143A #define HUAWEI_PRODUCT_E143B 0x143B #define HUAWEI_PRODUCT_E143C 0x143C #define HUAWEI_PRODUCT_E143D 0x143D #define HUAWEI_PRODUCT_E143E 0x143E #define HUAWEI_PRODUCT_E143F 0x143F #define HUAWEI_PRODUCT_K4505 0x1464 #define HUAWEI_PRODUCT_K3765 0x1465 #define HUAWEI_PRODUCT_E14AC 0x14AC #define HUAWEI_PRODUCT_ETS1220 0x1803 #define QUANTA_VENDOR_ID 0x0408 #define QUANTA_PRODUCT_Q101 0xEA02 #define QUANTA_PRODUCT_Q111 0xEA03 #define QUANTA_PRODUCT_GLX 0xEA04 #define QUANTA_PRODUCT_GKE 0xEA05 #define QUANTA_PRODUCT_GLE 0xEA06 #define NOVATELWIRELESS_VENDOR_ID 0x1410 /* YISO PRODUCTS */ #define YISO_VENDOR_ID 0x0EAB #define YISO_PRODUCT_U893 0xC893 /* MERLIN EVDO PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_V640 0x1100 #define NOVATELWIRELESS_PRODUCT_V620 0x1110 #define NOVATELWIRELESS_PRODUCT_V740 0x1120 #define NOVATELWIRELESS_PRODUCT_V720 0x1130 /* MERLIN HSDPA/HSPA PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_U730 0x1400 #define NOVATELWIRELESS_PRODUCT_U740 0x1410 #define NOVATELWIRELESS_PRODUCT_U870 0x1420 #define NOVATELWIRELESS_PRODUCT_XU870 0x1430 #define NOVATELWIRELESS_PRODUCT_X950D 0x1450 /* EXPEDITE PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_EV620 0x2100 #define NOVATELWIRELESS_PRODUCT_ES720 0x2110 #define NOVATELWIRELESS_PRODUCT_E725 0x2120 #define NOVATELWIRELESS_PRODUCT_ES620 0x2130 #define NOVATELWIRELESS_PRODUCT_EU730 0x2400 #define NOVATELWIRELESS_PRODUCT_EU740 0x2410 #define NOVATELWIRELESS_PRODUCT_EU870D 0x2420 /* OVATION PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 #define NOVATELWIRELESS_PRODUCT_U727 0x5010 #define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100 #define NOVATELWIRELESS_PRODUCT_MC760 0x6000 #define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002 /* FUTURE NOVATEL PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001 #define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0X7000 #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0X7001 #define 
NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0X8000 #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0X8001 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0X9000 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0X9001 #define NOVATELWIRELESS_PRODUCT_GLOBAL 0XA001 /* AMOI PRODUCTS */ #define AMOI_VENDOR_ID 0x1614 #define AMOI_PRODUCT_H01 0x0800 #define AMOI_PRODUCT_H01A 0x7002 #define AMOI_PRODUCT_H02 0x0802 #define AMOI_PRODUCT_SKYPEPHONE_S2 0x0407 #define DELL_VENDOR_ID 0x413C /* Dell modems */ #define DELL_PRODUCT_5700_MINICARD 0x8114 #define DELL_PRODUCT_5500_MINICARD 0x8115 #define DELL_PRODUCT_5505_MINICARD 0x8116 #define DELL_PRODUCT_5700_EXPRESSCARD 0x8117 #define DELL_PRODUCT_5510_EXPRESSCARD 0x8118 #define DELL_PRODUCT_5700_MINICARD_SPRINT 0x8128 #define DELL_PRODUCT_5700_MINICARD_TELUS 0x8129 #define DELL_PRODUCT_5720_MINICARD_VZW 0x8133 #define DELL_PRODUCT_5720_MINICARD_SPRINT 0x8134 #define DELL_PRODUCT_5720_MINICARD_TELUS 0x8135 #define DELL_PRODUCT_5520_MINICARD_CINGULAR 0x8136 #define DELL_PRODUCT_5520_MINICARD_GENERIC_L 0x8137 #define DELL_PRODUCT_5520_MINICARD_GENERIC_I 0x8138 #define DELL_PRODUCT_5730_MINICARD_SPRINT 0x8180 #define DELL_PRODUCT_5730_MINICARD_TELUS 0x8181 #define DELL_PRODUCT_5730_MINICARD_VZW 0x8182 #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da #define KYOCERA_PRODUCT_KPC680 0x180a #define ANYDATA_VENDOR_ID 0x16d5 #define ANYDATA_PRODUCT_ADU_620UW 0x6202 #define ANYDATA_PRODUCT_ADU_E100A 0x6501 #define ANYDATA_PRODUCT_ADU_500A 0x6502 #define AXESSTEL_VENDOR_ID 0x1726 #define AXESSTEL_PRODUCT_MV110H 0x1000 #define BANDRICH_VENDOR_ID 0x1A8D #define BANDRICH_PRODUCT_C100_1 0x1002 #define BANDRICH_PRODUCT_C100_2 0x1003 #define BANDRICH_PRODUCT_1004 0x1004 #define BANDRICH_PRODUCT_1005 0x1005 #define BANDRICH_PRODUCT_1006 0x1006 #define BANDRICH_PRODUCT_1007 0x1007 #define BANDRICH_PRODUCT_1008 0x1008 #define BANDRICH_PRODUCT_1009 0x1009 #define BANDRICH_PRODUCT_100A 0x100a #define 
BANDRICH_PRODUCT_100B 0x100b #define BANDRICH_PRODUCT_100C 0x100c #define BANDRICH_PRODUCT_100D 0x100d #define BANDRICH_PRODUCT_100E 0x100e #define BANDRICH_PRODUCT_100F 0x100f #define BANDRICH_PRODUCT_1010 0x1010 #define BANDRICH_PRODUCT_1011 0x1011 #define BANDRICH_PRODUCT_1012 0x1012 #define AMOI_VENDOR_ID 0x1614 #define AMOI_PRODUCT_9508 0x0800 #define QUALCOMM_VENDOR_ID 0x05C6 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6008 0x6008 #define CMOTECH_PRODUCT_6280 0x6280 #define TELIT_VENDOR_ID 0x1bc7 #define TELIT_PRODUCT_UC864E 0x1003 #define TELIT_PRODUCT_UC864G 0x1004 /* ZTE PRODUCTS */ #define ZTE_VENDOR_ID 0x19d2 #define ZTE_PRODUCT_MF622 0x0001 #define ZTE_PRODUCT_MF628 0x0015 #define ZTE_PRODUCT_MF626 0x0031 #define ZTE_PRODUCT_CDMA_TECH 0xfffe #define ZTE_PRODUCT_AC8710 0xfff1 #define ZTE_PRODUCT_AC2726 0xfff5 #define ZTE_PRODUCT_AC8710T 0xffff /* ZTE PRODUCTS -- alternate vendor ID */ #define ZTE_VENDOR_ID2 0x1d6b #define ZTE_PRODUCT_MF_330 0x0002 #define BENQ_VENDOR_ID 0x04a5 #define BENQ_PRODUCT_H10 0x4068 #define DLINK_VENDOR_ID 0x1186 #define DLINK_PRODUCT_DWM_652 0x3e04 #define DLINK_PRODUCT_DWM_652_U5 0xce16 #define DLINK_PRODUCT_DWM_652_U5A 0xce1e #define QISDA_VENDOR_ID 0x1da5 #define QISDA_PRODUCT_H21_4512 0x4512 #define QISDA_PRODUCT_H21_4523 0x4523 #define QISDA_PRODUCT_H20_4515 0x4515 #define QISDA_PRODUCT_H20_4518 0x4518 #define QISDA_PRODUCT_H20_4519 0x4519 /* TLAYTECH PRODUCTS */ #define TLAYTECH_VENDOR_ID 0x20B9 #define TLAYTECH_PRODUCT_TEU800 0x1682 /* TOSHIBA PRODUCTS */ #define TOSHIBA_VENDOR_ID 0x0930 #define TOSHIBA_PRODUCT_HSDPA_MINICARD 0x1302 #define TOSHIBA_PRODUCT_G450 0x0d45 #define ALINK_VENDOR_ID 0x1e0e #define ALINK_PRODUCT_3GU 0x9200 /* ALCATEL PRODUCTS */ #define ALCATEL_VENDOR_ID 0x1bbb #define ALCATEL_PRODUCT_X060S 0x0000 #define PIRELLI_VENDOR_ID 0x1266 #define PIRELLI_PRODUCT_C100_1 0x1002 #define PIRELLI_PRODUCT_C100_2 0x1003 #define PIRELLI_PRODUCT_1004 0x1004 #define PIRELLI_PRODUCT_1005 0x1005 #define 
PIRELLI_PRODUCT_1006 0x1006 #define PIRELLI_PRODUCT_1007 0x1007 #define PIRELLI_PRODUCT_1008 0x1008 #define PIRELLI_PRODUCT_1009 0x1009 #define PIRELLI_PRODUCT_100A 0x100a #define PIRELLI_PRODUCT_100B 0x100b #define PIRELLI_PRODUCT_100C 0x100c #define PIRELLI_PRODUCT_100D 0x100d #define PIRELLI_PRODUCT_100E 0x100e #define PIRELLI_PRODUCT_100F 0x100f #define PIRELLI_PRODUCT_1011 0x1011 #define PIRELLI_PRODUCT_1012 0x1012 /* Airplus products */ #define AIRPLUS_VENDOR_ID 0x1011 #define AIRPLUS_PRODUCT_MCD650 0x3198 /* Longcheer/Longsung vendor ID; makes whitelabel devices that * many other vendors like 4G Systems, Alcatel, ChinaBird, * Mobidata, etc sell under their own brand names. */ #define LONGCHEER_VENDOR_ID 0x1c9e /* 4G Systems products */ /* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick * * It seems to contain a Qualcomm QSC6240/6290 chipset */ #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 /* Haier products */ #define HAIER_VENDOR_ID 0x201e #define HAIER_PRODUCT_CE100 0x2009 #define CINTERION_VENDOR_ID 0x0681 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c #define OLIVETTI_PRODUCT_OLICARD100 0xc000 /* Celot products */ #define CELOT_VENDOR_ID 0x211f #define CELOT_PRODUCT_CT680M 0x6801 /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { OPTION_BLACKLIST_NONE = 0, OPTION_BLACKLIST_SENDSETUP = 1, OPTION_BLACKLIST_RESERVED_IF = 2 }; struct option_blacklist_info { const u32 infolen; /* number of interface numbers on blacklist */ const u8 *ifaceinfo; /* pointer to the array holding the numbers */ enum option_blacklist_reason reason; }; static const u8 four_g_w14_no_sendsetup[] = { 0, 1 }; static const struct option_blacklist_info four_g_w14_blacklist = { .infolen = ARRAY_SIZE(four_g_w14_no_sendsetup), .ifaceinfo = four_g_w14_no_sendsetup, .reason = OPTION_BLACKLIST_SENDSETUP }; static const struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { 
USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_LIGHT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_QUAD) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_QUAD_LIGHT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS_LIGHT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS_QUAD) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS_QUAD_LIGHT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COBRA) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COBRA_BUS) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_VIPER) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_VIPER_BUS) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_GT_MAX_READY) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_MODEM_LIGHT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_MODEM_GT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_MODEM_EX) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_KOI_MODEM) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_SCORPION_MODEM) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_LITE) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_GT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_EX) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_KOI_MODEM) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_GTM380_MODEM) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_Q101) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_Q111) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1402, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1404, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1407, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140A, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140B, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140C, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140D, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140E, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140F, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1413, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1414, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1415, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1416, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141A, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141B, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141C, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141D, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141E, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141F, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1420, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1421, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1422, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1423, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1424, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1425, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1426, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1427, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1428, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1429, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142A, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142B, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142C, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142D, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142E, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142F, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1430, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1431, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1432, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1433, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1434, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1435, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1436, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1437, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1438, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1439, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143A, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143B, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143C, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_9508) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, /* Novatel Merlin EX720/V740/X720 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) }, /* Novatel Merlin V720/S720/PC720 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) }, /* Novatel U730/U740 (VF version) */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) }, /* Novatel U740 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) }, /* Novatel U870 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) }, /* Novatel Merlin XU870 HSDPA/3G */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) }, /* Novatel X950D */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, /* Novatel EV620/ES620 CDMA/EV-DO */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, /* Novatel ES620/ES720/U720/USB720 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, /* Novatel E725/E726 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) }, /* Novatel Merlin ES620 SM Bus */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, /* Novatel EU730 and Vodafone EU740 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 
NOVATELWIRELESS_PRODUCT_EU740) }, /* Novatel non-Vodafone EU740 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, /* Novatel EVDO product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) }, /* Novatel HSPA product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, /* Novatel EVDO Embedded product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, /* Novatel HSPA Embedded product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL) }, /* Novatel Global product */ { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H02) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_SKYPEPHONE_S2) }, { 
USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5500_MINICARD) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5505_MINICARD) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_EXPRESSCARD) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO ExpressCard == Novatel Merlin XV620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5510_EXPRESSCARD) }, /* Dell Wireless 5510 Mobile Broadband HSDPA ExpressCard == Novatel Merlin XU870 HSDPA/3G */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD_SPRINT) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite E720 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD_TELUS) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite ET620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_VZW) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_SPRINT) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_TELUS) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_CINGULAR) }, /* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_L) }, /* Dell Wireless HSDPA 5520 */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_I) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE(DELL_VENDOR_ID, 
DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, { USB_DEVICE(YISO_VENDOR_ID, YISO_PRODUCT_U893) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1004) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1005) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1006) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1007) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1008) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1009) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100A) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100B) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100C) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100D) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100E) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100F) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1010) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1011) }, { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012) }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, 
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0006, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0007, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0008, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0009, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000a, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000b, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000c, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000d, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000e, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000f, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) 
}, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0145, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0146, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0148, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0149, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0150, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0154, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1060, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1061, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1062, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1063, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1064, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1065, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1066, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1067, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1068, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1069, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1070, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1071, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1072, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1073, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1074, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1075, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1076, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1077, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1078, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1079, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1080, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1081, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1082, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1083, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1084, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1085, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1086, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1087, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1088, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1089, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1090, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1091, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1092, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1093, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1094, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1095, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1096, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1097, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1098, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1099, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1100, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1101, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1102, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1103, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1104, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1105, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1106, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1107, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1108, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1109, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1110, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1111, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1112, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1113, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1114, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1115, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1116, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1117, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1118, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1119, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1120, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1121, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1122, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1123, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1124, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1125, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1126, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1127, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1128, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1129, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1130, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1131, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1132, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1133, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1134, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1135, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1136, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1137, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1138, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1139, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1140, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1141, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1142, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1143, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1144, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1145, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1146, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1147, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1148, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1149, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1150, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1151, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1152, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1153, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1154, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1155, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1156, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1157, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1158, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1159, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1160, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1161, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1162, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1163, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1164, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1165, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1166, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1167, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1168, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1260, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1261, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1262, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1263, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1264, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1265, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1266, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1279, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1280, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1281, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1282, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1283, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1284, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1285, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1286, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1287, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1288, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1289, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1290, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1291, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1292, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1293, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1294, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1295, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1296, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1297, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) }, { USB_DEVICE(ZTE_VENDOR_ID2, ZTE_PRODUCT_MF_330) }, { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */ { 
USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5A) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4518) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4519) }, { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) }, { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), .driver_info = (kernel_ulong_t)&four_g_w14_blacklist }, { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, /* Pirelli */ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1004)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1005)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1006)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1007)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1008)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1009)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100A)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100B) }, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100C) }, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100D) }, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100E) }, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) }, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)}, { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) }, { 
USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); static struct usb_driver option_driver = { .name = "option", .probe = usb_serial_probe, .disconnect = usb_serial_disconnect, #ifdef CONFIG_PM .suspend = usb_serial_suspend, .resume = usb_serial_resume, .supports_autosuspend = 1, #endif .id_table = option_ids, .no_dynamic_id = 1, }; /* The card has three separate interfaces, which the serial driver * recognizes separately, thus num_port=1. */ static struct usb_serial_driver option_1port_device = { .driver = { .owner = THIS_MODULE, .name = "option1", }, .description = "GSM modem (1-port)", .usb_driver = &option_driver, .id_table = option_ids, .num_ports = 1, .probe = option_probe, .open = usb_wwan_open, .close = usb_wwan_close, .dtr_rts = usb_wwan_dtr_rts, .write = usb_wwan_write, .write_room = usb_wwan_write_room, .chars_in_buffer = usb_wwan_chars_in_buffer, .set_termios = usb_wwan_set_termios, .tiocmget = usb_wwan_tiocmget, .tiocmset = usb_wwan_tiocmset, .attach = usb_wwan_startup, .disconnect = usb_wwan_disconnect, .release = usb_wwan_release, .read_int_callback = option_instat_callback, #ifdef CONFIG_PM .suspend = usb_wwan_suspend, .resume = usb_wwan_resume, #endif }; static int debug; /* per port private data */ #define N_IN_URB 4 #define N_OUT_URB 4 #define IN_BUFLEN 4096 #define OUT_BUFLEN 4096 struct option_port_private { /* Input endpoints and buffer for this port */ struct urb *in_urbs[N_IN_URB]; u8 *in_buffer[N_IN_URB]; /* Output endpoints and buffer for this port */ struct urb *out_urbs[N_OUT_URB]; u8 *out_buffer[N_OUT_URB]; unsigned long out_busy; /* Bit vector of URBs in use */ int opened; struct usb_anchor delayed; /* Settings for the port */ int rts_state; /* Handshaking pins (outputs) */ int dtr_state; int cts_state; /* Handshaking pins (inputs) */ int dsr_state; int dcd_state; int 
ri_state; unsigned long tx_start_time[N_OUT_URB]; }; /* Functions used by new usb-serial code. */ static int __init option_init(void) { int retval; retval = usb_serial_register(&option_1port_device); if (retval) goto failed_1port_device_register; retval = usb_register(&option_driver); if (retval) goto failed_driver_register; printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" DRIVER_DESC "\n"); return 0; failed_driver_register: usb_serial_deregister(&option_1port_device); failed_1port_device_register: return retval; } static void __exit option_exit(void) { usb_deregister(&option_driver); usb_serial_deregister(&option_1port_device); } module_init(option_init); module_exit(option_exit); static int option_probe(struct usb_serial *serial, const struct usb_device_id *id) { struct usb_wwan_intf_private *data; /* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */ if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID && serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 && serial->interface->cur_altsetting->desc.bInterfaceClass == 0x8) return -ENODEV; /* Bandrich modem and AT command interface is 0xff */ if ((serial->dev->descriptor.idVendor == BANDRICH_VENDOR_ID || serial->dev->descriptor.idVendor == PIRELLI_VENDOR_ID) && serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff) return -ENODEV; /* Don't bind network interfaces on Huawei K3765 & K4505 */ if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID && (serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 || serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505) && serial->interface->cur_altsetting->desc.bInterfaceNumber == 1) return -ENODEV; data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL); if (!data) return -ENOMEM; data->send_setup = option_send_setup; spin_lock_init(&data->susp_lock); data->private = (void *)id->driver_info; return 0; } static enum option_blacklist_reason is_blacklisted(const u8 ifnum, const struct 
option_blacklist_info *blacklist) { const u8 *info; int i; if (blacklist) { info = blacklist->ifaceinfo; for (i = 0; i < blacklist->infolen; i++) { if (info[i] == ifnum) return blacklist->reason; } } return OPTION_BLACKLIST_NONE; } static void option_instat_callback(struct urb *urb) { int err; int status = urb->status; struct usb_serial_port *port = urb->context; struct option_port_private *portdata = usb_get_serial_port_data(port); dbg("%s", __func__); dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata); if (status == 0) { struct usb_ctrlrequest *req_pkt = (struct usb_ctrlrequest *)urb->transfer_buffer; if (!req_pkt) { dbg("%s: NULL req_pkt", __func__); return; } if ((req_pkt->bRequestType == 0xA1) && (req_pkt->bRequest == 0x20)) { int old_dcd_state; unsigned char signals = *((unsigned char *) urb->transfer_buffer + sizeof(struct usb_ctrlrequest)); dbg("%s: signal x%x", __func__, signals); old_dcd_state = portdata->dcd_state; portdata->cts_state = 1; portdata->dcd_state = ((signals & 0x01) ? 1 : 0); portdata->dsr_state = ((signals & 0x02) ? 1 : 0); portdata->ri_state = ((signals & 0x08) ? 1 : 0); if (old_dcd_state && !portdata->dcd_state) { struct tty_struct *tty = tty_port_tty_get(&port->port); if (tty && !C_CLOCAL(tty)) tty_hangup(tty); tty_kref_put(tty); } } else { dbg("%s: type %x req %x", __func__, req_pkt->bRequestType, req_pkt->bRequest); } } else err("%s: error %d", __func__, status); /* Resubmit urb so we continue receiving IRQ data */ if (status != -ESHUTDOWN && status != -ENOENT) { err = usb_submit_urb(urb, GFP_ATOMIC); if (err) dbg("%s: resubmit intr urb failed. (%d)", __func__, err); } } /** send RTS/DTR state to the port. * * This is exactly the same as SET_CONTROL_LINE_STATE from the PSTN * CDC. 
*/ static int option_send_setup(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct usb_wwan_intf_private *intfdata = (struct usb_wwan_intf_private *) serial->private; struct option_port_private *portdata; int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber; int val = 0; dbg("%s", __func__); if (is_blacklisted(ifNum, (struct option_blacklist_info *) intfdata->private) == OPTION_BLACKLIST_SENDSETUP) { dbg("No send_setup on blacklisted interface #%d\n", ifNum); return -EIO; } portdata = usb_get_serial_port_data(port); if (portdata->dtr_state) val |= 0x01; if (portdata->rts_state) val |= 0x02; return usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 0x22, 0x21, val, ifNum, NULL, 0, USB_CTRL_SET_TIMEOUT); } MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug messages");
gpl-2.0
cvpcs/android_kernel_omap
drivers/char/ppdev.c
586
19542
/* * linux/drivers/char/ppdev.c * * This is the code behind /dev/parport* -- it allows a user-space * application to use the parport subsystem. * * Copyright (C) 1998-2000, 2002 Tim Waugh <tim@cyberelk.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * A /dev/parportx device node represents an arbitrary device * on port 'x'. The following operations are possible: * * open do nothing, set up default IEEE 1284 protocol to be COMPAT * close release port and unregister device (if necessary) * ioctl * EXCL register device exclusively (may fail) * CLAIM (register device first time) parport_claim_or_block * RELEASE parport_release * SETMODE set the IEEE 1284 protocol to use for read/write * SETPHASE set the IEEE 1284 phase of a particular mode. Not to be * confused with ioctl(fd, SETPHASER, &stun). ;-) * DATADIR data_forward / data_reverse * WDATA write_data * RDATA read_data * WCONTROL write_control * RCONTROL read_control * FCONTROL frob_control * RSTATUS read_status * NEGOT parport_negotiate * YIELD parport_yield_blocking * WCTLONIRQ on interrupt, set control lines * CLRIRQ clear (and return) interrupt count * SETTIME sets device timeout (struct timeval) * GETTIME gets device timeout (struct timeval) * GETMODES gets hardware supported modes (unsigned int) * GETMODE gets the current IEEE1284 mode * GETPHASE gets the current IEEE1284 phase * GETFLAGS gets current (user-visible) flags * SETFLAGS sets current (user-visible) flags * read/write read or write in current IEEE 1284 protocol * select wait for interrupt (in readfds) * * Changes: * Added SETTIME/GETTIME ioctl, Fred Barnes, 1999. 
* * Arnaldo Carvalho de Melo <acme@conectiva.com.br> 2000/08/25 * - On error, copy_from_user and copy_to_user do not return -EFAULT, * They return the positive number of bytes *not* copied due to address * space errors. * * Added GETMODES/GETMODE/GETPHASE ioctls, Fred Barnes <frmb2@ukc.ac.uk>, 03/01/2001. * Added GETFLAGS/SETFLAGS ioctls, Fred Barnes, 04/2001 */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/device.h> #include <linux/ioctl.h> #include <linux/parport.h> #include <linux/ctype.h> #include <linux/poll.h> #include <linux/major.h> #include <linux/ppdev.h> #include <linux/smp_lock.h> #include <linux/uaccess.h> #define PP_VERSION "ppdev: user-space parallel port driver" #define CHRDEV "ppdev" struct pp_struct { struct pardevice * pdev; wait_queue_head_t irq_wait; atomic_t irqc; unsigned int flags; int irqresponse; unsigned char irqctl; struct ieee1284_info state; struct ieee1284_info saved_state; long default_inactivity; }; /* pp_struct.flags bitfields */ #define PP_CLAIMED (1<<0) #define PP_EXCL (1<<1) /* Other constants */ #define PP_INTERRUPT_TIMEOUT (10 * HZ) /* 10s */ #define PP_BUFFER_SIZE 1024 #define PARDEVICE_MAX 8 /* ROUND_UP macro from fs/select.c */ #define ROUND_UP(x,y) (((x)+(y)-1)/(y)) static inline void pp_enable_irq (struct pp_struct *pp) { struct parport *port = pp->pdev->port; port->ops->enable_irq (port); } static ssize_t pp_read (struct file * file, char __user * buf, size_t count, loff_t * ppos) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct pp_struct *pp = file->private_data; char * kbuffer; ssize_t bytes_read = 0; struct parport *pport; int mode; if (!(pp->flags & PP_CLAIMED)) { /* Don't have the port claimed */ pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } /* Trivial case. 
*/ if (count == 0) return 0; kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); if (!kbuffer) { return -ENOMEM; } pport = pp->pdev->port; mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); parport_set_timeout (pp->pdev, (file->f_flags & O_NONBLOCK) ? PARPORT_INACTIVITY_O_NONBLOCK : pp->default_inactivity); while (bytes_read == 0) { ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE); if (mode == IEEE1284_MODE_EPP) { /* various specials for EPP mode */ int flags = 0; size_t (*fn)(struct parport *, void *, size_t, int); if (pp->flags & PP_W91284PIC) { flags |= PARPORT_W91284PIC; } if (pp->flags & PP_FASTREAD) { flags |= PARPORT_EPP_FAST; } if (pport->ieee1284.mode & IEEE1284_ADDR) { fn = pport->ops->epp_read_addr; } else { fn = pport->ops->epp_read_data; } bytes_read = (*fn)(pport, kbuffer, need, flags); } else { bytes_read = parport_read (pport, kbuffer, need); } if (bytes_read != 0) break; if (file->f_flags & O_NONBLOCK) { bytes_read = -EAGAIN; break; } if (signal_pending (current)) { bytes_read = -ERESTARTSYS; break; } cond_resched(); } parport_set_timeout (pp->pdev, pp->default_inactivity); if (bytes_read > 0 && copy_to_user (buf, kbuffer, bytes_read)) bytes_read = -EFAULT; kfree (kbuffer); pp_enable_irq (pp); return bytes_read; } static ssize_t pp_write (struct file * file, const char __user * buf, size_t count, loff_t * ppos) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct pp_struct *pp = file->private_data; char * kbuffer; ssize_t bytes_written = 0; ssize_t wrote; int mode; struct parport *pport; if (!(pp->flags & PP_CLAIMED)) { /* Don't have the port claimed */ pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); if (!kbuffer) { return -ENOMEM; } pport = pp->pdev->port; mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); parport_set_timeout (pp->pdev, (file->f_flags & O_NONBLOCK) ? 
PARPORT_INACTIVITY_O_NONBLOCK : pp->default_inactivity); while (bytes_written < count) { ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE); if (copy_from_user (kbuffer, buf + bytes_written, n)) { bytes_written = -EFAULT; break; } if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) { /* do a fast EPP write */ if (pport->ieee1284.mode & IEEE1284_ADDR) { wrote = pport->ops->epp_write_addr (pport, kbuffer, n, PARPORT_EPP_FAST); } else { wrote = pport->ops->epp_write_data (pport, kbuffer, n, PARPORT_EPP_FAST); } } else { wrote = parport_write (pp->pdev->port, kbuffer, n); } if (wrote <= 0) { if (!bytes_written) { bytes_written = wrote; } break; } bytes_written += wrote; if (file->f_flags & O_NONBLOCK) { if (!bytes_written) bytes_written = -EAGAIN; break; } if (signal_pending (current)) { if (!bytes_written) { bytes_written = -EINTR; } break; } cond_resched(); } parport_set_timeout (pp->pdev, pp->default_inactivity); kfree (kbuffer); pp_enable_irq (pp); return bytes_written; } static void pp_irq (void *private) { struct pp_struct *pp = private; if (pp->irqresponse) { parport_write_control (pp->pdev->port, pp->irqctl); pp->irqresponse = 0; } atomic_inc (&pp->irqc); wake_up_interruptible (&pp->irq_wait); } static int register_device (int minor, struct pp_struct *pp) { struct parport *port; struct pardevice * pdev = NULL; char *name; int fl; name = kmalloc (strlen (CHRDEV) + 3, GFP_KERNEL); if (name == NULL) return -ENOMEM; sprintf (name, CHRDEV "%x", minor); port = parport_find_number (minor); if (!port) { printk (KERN_WARNING "%s: no associated port!\n", name); kfree (name); return -ENXIO; } fl = (pp->flags & PP_EXCL) ? 
PARPORT_FLAG_EXCL : 0; pdev = parport_register_device (port, name, NULL, NULL, pp_irq, fl, pp); parport_put_port (port); if (!pdev) { printk (KERN_WARNING "%s: failed to register device!\n", name); kfree (name); return -ENXIO; } pp->pdev = pdev; pr_debug("%s: registered pardevice\n", name); return 0; } static enum ieee1284_phase init_phase (int mode) { switch (mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR)) { case IEEE1284_MODE_NIBBLE: case IEEE1284_MODE_BYTE: return IEEE1284_PH_REV_IDLE; } return IEEE1284_PH_FWD_IDLE; } static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct pp_struct *pp = file->private_data; struct parport * port; void __user *argp = (void __user *)arg; /* First handle the cases that don't take arguments. */ switch (cmd) { case PPCLAIM: { struct ieee1284_info *info; int ret; if (pp->flags & PP_CLAIMED) { pr_debug(CHRDEV "%x: you've already got it!\n", minor); return -EINVAL; } /* Deferred device registration. */ if (!pp->pdev) { int err = register_device (minor, pp); if (err) { return err; } } ret = parport_claim_or_block (pp->pdev); if (ret < 0) return ret; pp->flags |= PP_CLAIMED; /* For interrupt-reporting to work, we need to be * informed of each interrupt. */ pp_enable_irq (pp); /* We may need to fix up the state machine. */ info = &pp->pdev->port->ieee1284; pp->saved_state.mode = info->mode; pp->saved_state.phase = info->phase; info->mode = pp->state.mode; info->phase = pp->state.phase; pp->default_inactivity = parport_set_timeout (pp->pdev, 0); parport_set_timeout (pp->pdev, pp->default_inactivity); return 0; } case PPEXCL: if (pp->pdev) { pr_debug(CHRDEV "%x: too late for PPEXCL; " "already registered\n", minor); if (pp->flags & PP_EXCL) /* But it's not really an error. */ return 0; /* There's no chance of making the driver happy. */ return -EINVAL; } /* Just remember to register the device exclusively * when we finally do the registration. 
*/ pp->flags |= PP_EXCL; return 0; case PPSETMODE: { int mode; if (copy_from_user (&mode, argp, sizeof (mode))) return -EFAULT; /* FIXME: validate mode */ pp->state.mode = mode; pp->state.phase = init_phase (mode); if (pp->flags & PP_CLAIMED) { pp->pdev->port->ieee1284.mode = mode; pp->pdev->port->ieee1284.phase = pp->state.phase; } return 0; } case PPGETMODE: { int mode; if (pp->flags & PP_CLAIMED) { mode = pp->pdev->port->ieee1284.mode; } else { mode = pp->state.mode; } if (copy_to_user (argp, &mode, sizeof (mode))) { return -EFAULT; } return 0; } case PPSETPHASE: { int phase; if (copy_from_user (&phase, argp, sizeof (phase))) { return -EFAULT; } /* FIXME: validate phase */ pp->state.phase = phase; if (pp->flags & PP_CLAIMED) { pp->pdev->port->ieee1284.phase = phase; } return 0; } case PPGETPHASE: { int phase; if (pp->flags & PP_CLAIMED) { phase = pp->pdev->port->ieee1284.phase; } else { phase = pp->state.phase; } if (copy_to_user (argp, &phase, sizeof (phase))) { return -EFAULT; } return 0; } case PPGETMODES: { unsigned int modes; port = parport_find_number (minor); if (!port) return -ENODEV; modes = port->modes; if (copy_to_user (argp, &modes, sizeof (modes))) { return -EFAULT; } return 0; } case PPSETFLAGS: { int uflags; if (copy_from_user (&uflags, argp, sizeof (uflags))) { return -EFAULT; } pp->flags &= ~PP_FLAGMASK; pp->flags |= (uflags & PP_FLAGMASK); return 0; } case PPGETFLAGS: { int uflags; uflags = pp->flags & PP_FLAGMASK; if (copy_to_user (argp, &uflags, sizeof (uflags))) { return -EFAULT; } return 0; } } /* end switch() */ /* Everything else requires the port to be claimed, so check * that now. 
*/ if ((pp->flags & PP_CLAIMED) == 0) { pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } port = pp->pdev->port; switch (cmd) { struct ieee1284_info *info; unsigned char reg; unsigned char mask; int mode; int ret; struct timeval par_timeout; long to_jiffies; case PPRSTATUS: reg = parport_read_status (port); if (copy_to_user (argp, &reg, sizeof (reg))) return -EFAULT; return 0; case PPRDATA: reg = parport_read_data (port); if (copy_to_user (argp, &reg, sizeof (reg))) return -EFAULT; return 0; case PPRCONTROL: reg = parport_read_control (port); if (copy_to_user (argp, &reg, sizeof (reg))) return -EFAULT; return 0; case PPYIELD: parport_yield_blocking (pp->pdev); return 0; case PPRELEASE: /* Save the state machine's state. */ info = &pp->pdev->port->ieee1284; pp->state.mode = info->mode; pp->state.phase = info->phase; info->mode = pp->saved_state.mode; info->phase = pp->saved_state.phase; parport_release (pp->pdev); pp->flags &= ~PP_CLAIMED; return 0; case PPWCONTROL: if (copy_from_user (&reg, argp, sizeof (reg))) return -EFAULT; parport_write_control (port, reg); return 0; case PPWDATA: if (copy_from_user (&reg, argp, sizeof (reg))) return -EFAULT; parport_write_data (port, reg); return 0; case PPFCONTROL: if (copy_from_user (&mask, argp, sizeof (mask))) return -EFAULT; if (copy_from_user (&reg, 1 + (unsigned char __user *) arg, sizeof (reg))) return -EFAULT; parport_frob_control (port, mask, reg); return 0; case PPDATADIR: if (copy_from_user (&mode, argp, sizeof (mode))) return -EFAULT; if (mode) port->ops->data_reverse (port); else port->ops->data_forward (port); return 0; case PPNEGOT: if (copy_from_user (&mode, argp, sizeof (mode))) return -EFAULT; switch ((ret = parport_negotiate (port, mode))) { case 0: break; case -1: /* handshake failed, peripheral not IEEE 1284 */ ret = -EIO; break; case 1: /* handshake succeeded, peripheral rejected mode */ ret = -ENXIO; break; } pp_enable_irq (pp); return ret; case PPWCTLONIRQ: if (copy_from_user 
(&reg, argp, sizeof (reg))) return -EFAULT; /* Remember what to set the control lines to, for next * time we get an interrupt. */ pp->irqctl = reg; pp->irqresponse = 1; return 0; case PPCLRIRQ: ret = atomic_read (&pp->irqc); if (copy_to_user (argp, &ret, sizeof (ret))) return -EFAULT; atomic_sub (ret, &pp->irqc); return 0; case PPSETTIME: if (copy_from_user (&par_timeout, argp, sizeof(struct timeval))) { return -EFAULT; } /* Convert to jiffies, place in pp->pdev->timeout */ if ((par_timeout.tv_sec < 0) || (par_timeout.tv_usec < 0)) { return -EINVAL; } to_jiffies = ROUND_UP(par_timeout.tv_usec, 1000000/HZ); to_jiffies += par_timeout.tv_sec * (long)HZ; if (to_jiffies <= 0) { return -EINVAL; } pp->pdev->timeout = to_jiffies; return 0; case PPGETTIME: to_jiffies = pp->pdev->timeout; par_timeout.tv_sec = to_jiffies / HZ; par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ); if (copy_to_user (argp, &par_timeout, sizeof(struct timeval))) return -EFAULT; return 0; default: pr_debug(CHRDEV "%x: What? (cmd=0x%x)\n", minor, cmd); return -EINVAL; } /* Keep the compiler happy */ return 0; } static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret; lock_kernel(); ret = pp_do_ioctl(file, cmd, arg); unlock_kernel(); return ret; } static int pp_open (struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); struct pp_struct *pp; cycle_kernel_lock(); if (minor >= PARPORT_MAX) return -ENXIO; pp = kmalloc (sizeof (struct pp_struct), GFP_KERNEL); if (!pp) return -ENOMEM; pp->state.mode = IEEE1284_MODE_COMPAT; pp->state.phase = init_phase (pp->state.mode); pp->flags = 0; pp->irqresponse = 0; atomic_set (&pp->irqc, 0); init_waitqueue_head (&pp->irq_wait); /* Defer the actual device registration until the first claim. * That way, we know whether or not the driver wants to have * exclusive access to the port (PPEXCL). 
*/ pp->pdev = NULL; file->private_data = pp; return 0; } static int pp_release (struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); struct pp_struct *pp = file->private_data; int compat_negot; compat_negot = 0; if (!(pp->flags & PP_CLAIMED) && pp->pdev && (pp->state.mode != IEEE1284_MODE_COMPAT)) { struct ieee1284_info *info; /* parport released, but not in compatibility mode */ parport_claim_or_block (pp->pdev); pp->flags |= PP_CLAIMED; info = &pp->pdev->port->ieee1284; pp->saved_state.mode = info->mode; pp->saved_state.phase = info->phase; info->mode = pp->state.mode; info->phase = pp->state.phase; compat_negot = 1; } else if ((pp->flags & PP_CLAIMED) && pp->pdev && (pp->pdev->port->ieee1284.mode != IEEE1284_MODE_COMPAT)) { compat_negot = 2; } if (compat_negot) { parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT); pr_debug(CHRDEV "%x: negotiated back to compatibility " "mode because user-space forgot\n", minor); } if (pp->flags & PP_CLAIMED) { struct ieee1284_info *info; info = &pp->pdev->port->ieee1284; pp->state.mode = info->mode; pp->state.phase = info->phase; info->mode = pp->saved_state.mode; info->phase = pp->saved_state.phase; parport_release (pp->pdev); if (compat_negot != 1) { pr_debug(CHRDEV "%x: released pardevice " "because user-space forgot\n", minor); } } if (pp->pdev) { const char *name = pp->pdev->name; parport_unregister_device (pp->pdev); kfree (name); pp->pdev = NULL; pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); } kfree (pp); return 0; } /* No kernel lock held - fine */ static unsigned int pp_poll (struct file * file, poll_table * wait) { struct pp_struct *pp = file->private_data; unsigned int mask = 0; poll_wait (file, &pp->irq_wait, wait); if (atomic_read (&pp->irqc)) mask |= POLLIN | POLLRDNORM; return mask; } static struct class *ppdev_class; static const struct file_operations pp_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = pp_read, .write = pp_write, .poll = pp_poll, 
.unlocked_ioctl = pp_ioctl, .open = pp_open, .release = pp_release, }; static void pp_attach(struct parport *port) { device_create(ppdev_class, port->dev, MKDEV(PP_MAJOR, port->number), NULL, "parport%d", port->number); } static void pp_detach(struct parport *port) { device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number)); } static struct parport_driver pp_driver = { .name = CHRDEV, .attach = pp_attach, .detach = pp_detach, }; static int __init ppdev_init (void) { int err = 0; if (register_chrdev (PP_MAJOR, CHRDEV, &pp_fops)) { printk (KERN_WARNING CHRDEV ": unable to get major %d\n", PP_MAJOR); return -EIO; } ppdev_class = class_create(THIS_MODULE, CHRDEV); if (IS_ERR(ppdev_class)) { err = PTR_ERR(ppdev_class); goto out_chrdev; } if (parport_register_driver(&pp_driver)) { printk (KERN_WARNING CHRDEV ": unable to register with parport\n"); goto out_class; } printk (KERN_INFO PP_VERSION "\n"); goto out; out_class: class_destroy(ppdev_class); out_chrdev: unregister_chrdev(PP_MAJOR, CHRDEV); out: return err; } static void __exit ppdev_cleanup (void) { /* Clean up all parport stuff */ parport_unregister_driver(&pp_driver); class_destroy(ppdev_class); unregister_chrdev (PP_MAJOR, CHRDEV); } module_init(ppdev_init); module_exit(ppdev_cleanup); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(PP_MAJOR);
gpl-2.0
markgross/kernel
drivers/video/backlight/l4f00242t03.c
1354
6990
/* * l4f00242t03.c -- support for Epson L4F00242T03 LCD * * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved. * * Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com> * Inspired by Marek Vasut work in l4f00242t03.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/device.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/gpio.h> #include <linux/lcd.h> #include <linux/slab.h> #include <linux/regulator/consumer.h> #include <linux/spi/spi.h> #include <linux/spi/l4f00242t03.h> struct l4f00242t03_priv { struct spi_device *spi; struct lcd_device *ld; int lcd_state; struct regulator *io_reg; struct regulator *core_reg; }; static void l4f00242t03_reset(unsigned int gpio) { pr_debug("l4f00242t03_reset.\n"); gpio_set_value(gpio, 1); mdelay(100); gpio_set_value(gpio, 0); mdelay(10); /* tRES >= 100us */ gpio_set_value(gpio, 1); mdelay(20); } #define param(x) ((x) | 0x100) static void l4f00242t03_lcd_init(struct spi_device *spi) { struct l4f00242t03_pdata *pdata = dev_get_platdata(&spi->dev); struct l4f00242t03_priv *priv = spi_get_drvdata(spi); const u16 cmd[] = { 0x36, param(0), 0x3A, param(0x60) }; int ret; dev_dbg(&spi->dev, "initializing LCD\n"); ret = regulator_set_voltage(priv->io_reg, 1800000, 1800000); if (ret) { dev_err(&spi->dev, "failed to set the IO regulator voltage.\n"); return; } ret = regulator_enable(priv->io_reg); if (ret) { dev_err(&spi->dev, "failed to enable the IO regulator.\n"); return; } ret = regulator_set_voltage(priv->core_reg, 2800000, 2800000); if (ret) { dev_err(&spi->dev, "failed to set the core regulator voltage.\n"); regulator_disable(priv->io_reg); return; } ret = regulator_enable(priv->core_reg); if (ret) { dev_err(&spi->dev, "failed to enable the core 
regulator.\n"); regulator_disable(priv->io_reg); return; } l4f00242t03_reset(pdata->reset_gpio); gpio_set_value(pdata->data_enable_gpio, 1); msleep(60); spi_write(spi, (const u8 *)cmd, ARRAY_SIZE(cmd) * sizeof(u16)); } static void l4f00242t03_lcd_powerdown(struct spi_device *spi) { struct l4f00242t03_pdata *pdata = dev_get_platdata(&spi->dev); struct l4f00242t03_priv *priv = spi_get_drvdata(spi); dev_dbg(&spi->dev, "Powering down LCD\n"); gpio_set_value(pdata->data_enable_gpio, 0); regulator_disable(priv->io_reg); regulator_disable(priv->core_reg); } static int l4f00242t03_lcd_power_get(struct lcd_device *ld) { struct l4f00242t03_priv *priv = lcd_get_data(ld); return priv->lcd_state; } static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power) { struct l4f00242t03_priv *priv = lcd_get_data(ld); struct spi_device *spi = priv->spi; const u16 slpout = 0x11; const u16 dison = 0x29; const u16 slpin = 0x10; const u16 disoff = 0x28; if (power <= FB_BLANK_NORMAL) { if (priv->lcd_state <= FB_BLANK_NORMAL) { /* Do nothing, the LCD is running */ } else if (priv->lcd_state < FB_BLANK_POWERDOWN) { dev_dbg(&spi->dev, "Resuming LCD\n"); spi_write(spi, (const u8 *)&slpout, sizeof(u16)); msleep(60); spi_write(spi, (const u8 *)&dison, sizeof(u16)); } else { /* priv->lcd_state == FB_BLANK_POWERDOWN */ l4f00242t03_lcd_init(spi); priv->lcd_state = FB_BLANK_VSYNC_SUSPEND; l4f00242t03_lcd_power_set(priv->ld, power); } } else if (power < FB_BLANK_POWERDOWN) { if (priv->lcd_state <= FB_BLANK_NORMAL) { /* Send the display in standby */ dev_dbg(&spi->dev, "Standby the LCD\n"); spi_write(spi, (const u8 *)&disoff, sizeof(u16)); msleep(60); spi_write(spi, (const u8 *)&slpin, sizeof(u16)); } else if (priv->lcd_state < FB_BLANK_POWERDOWN) { /* Do nothing, the LCD is already in standby */ } else { /* priv->lcd_state == FB_BLANK_POWERDOWN */ l4f00242t03_lcd_init(spi); priv->lcd_state = FB_BLANK_UNBLANK; l4f00242t03_lcd_power_set(ld, power); } } else { /* power == FB_BLANK_POWERDOWN */ 
if (priv->lcd_state != FB_BLANK_POWERDOWN) { /* Clear the screen before shutting down */ spi_write(spi, (const u8 *)&disoff, sizeof(u16)); msleep(60); l4f00242t03_lcd_powerdown(spi); } } priv->lcd_state = power; return 0; } static struct lcd_ops l4f_ops = { .set_power = l4f00242t03_lcd_power_set, .get_power = l4f00242t03_lcd_power_get, }; static int l4f00242t03_probe(struct spi_device *spi) { struct l4f00242t03_priv *priv; struct l4f00242t03_pdata *pdata = dev_get_platdata(&spi->dev); int ret; if (pdata == NULL) { dev_err(&spi->dev, "Uninitialized platform data.\n"); return -EINVAL; } priv = devm_kzalloc(&spi->dev, sizeof(struct l4f00242t03_priv), GFP_KERNEL); if (priv == NULL) return -ENOMEM; spi_set_drvdata(spi, priv); spi->bits_per_word = 9; spi_setup(spi); priv->spi = spi; ret = devm_gpio_request_one(&spi->dev, pdata->reset_gpio, GPIOF_OUT_INIT_HIGH, "lcd l4f00242t03 reset"); if (ret) { dev_err(&spi->dev, "Unable to get the lcd l4f00242t03 reset gpio.\n"); return ret; } ret = devm_gpio_request_one(&spi->dev, pdata->data_enable_gpio, GPIOF_OUT_INIT_LOW, "lcd l4f00242t03 data enable"); if (ret) { dev_err(&spi->dev, "Unable to get the lcd l4f00242t03 data en gpio.\n"); return ret; } priv->io_reg = devm_regulator_get(&spi->dev, "vdd"); if (IS_ERR(priv->io_reg)) { dev_err(&spi->dev, "%s: Unable to get the IO regulator\n", __func__); return PTR_ERR(priv->io_reg); } priv->core_reg = devm_regulator_get(&spi->dev, "vcore"); if (IS_ERR(priv->core_reg)) { dev_err(&spi->dev, "%s: Unable to get the core regulator\n", __func__); return PTR_ERR(priv->core_reg); } priv->ld = devm_lcd_device_register(&spi->dev, "l4f00242t03", &spi->dev, priv, &l4f_ops); if (IS_ERR(priv->ld)) return PTR_ERR(priv->ld); /* Init the LCD */ l4f00242t03_lcd_init(spi); priv->lcd_state = FB_BLANK_VSYNC_SUSPEND; l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_UNBLANK); dev_info(&spi->dev, "Epson l4f00242t03 lcd probed.\n"); return 0; } static int l4f00242t03_remove(struct spi_device *spi) { struct 
l4f00242t03_priv *priv = spi_get_drvdata(spi); l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN); return 0; } static void l4f00242t03_shutdown(struct spi_device *spi) { struct l4f00242t03_priv *priv = spi_get_drvdata(spi); if (priv) l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN); } static struct spi_driver l4f00242t03_driver = { .driver = { .name = "l4f00242t03", .owner = THIS_MODULE, }, .probe = l4f00242t03_probe, .remove = l4f00242t03_remove, .shutdown = l4f00242t03_shutdown, }; module_spi_driver(l4f00242t03_driver); MODULE_AUTHOR("Alberto Panizzo <maramaopercheseimorto@gmail.com>"); MODULE_DESCRIPTION("EPSON L4F00242T03 LCD"); MODULE_LICENSE("GPL v2");
gpl-2.0
CyanogenMod/htc-kernel-incrediblec
net/ipv4/netfilter/nf_nat_proto_unknown.c
1610
1529
/* The "unknown" protocol. This is what is used for protocols we * don't understand. It's returned by ip_ct_find_proto(). */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/init.h> #include <linux/netfilter.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_rule.h> #include <net/netfilter/nf_nat_protocol.h> static bool unknown_in_range(const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type manip_type, const union nf_conntrack_man_proto *min, const union nf_conntrack_man_proto *max) { return true; } static bool unknown_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { /* Sorry: we can't help you; if it's not unique, we can't frob anything. */ return false; } static bool unknown_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype) { return true; } const struct nf_nat_protocol nf_nat_unknown_protocol = { /* .me isn't set: getting a ref to this cannot fail. */ .manip_pkt = unknown_manip_pkt, .in_range = unknown_in_range, .unique_tuple = unknown_unique_tuple, };
gpl-2.0
facchinm/linux
drivers/infiniband/ulp/ipoib/ipoib_netlink.c
2122
5091
/* * Copyright (c) 2012 Mellanox Technologies. - All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/netdevice.h> #include <linux/if_arp.h> /* For ARPHRD_xxx */ #include <linux/module.h> #include <net/rtnetlink.h> #include "ipoib.h" static const struct nla_policy ipoib_policy[IFLA_IPOIB_MAX + 1] = { [IFLA_IPOIB_PKEY] = { .type = NLA_U16 }, [IFLA_IPOIB_MODE] = { .type = NLA_U16 }, [IFLA_IPOIB_UMCAST] = { .type = NLA_U16 }, }; static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); u16 val; if (nla_put_u16(skb, IFLA_IPOIB_PKEY, priv->pkey)) goto nla_put_failure; val = test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); if (nla_put_u16(skb, IFLA_IPOIB_MODE, val)) goto nla_put_failure; val = test_bit(IPOIB_FLAG_UMCAST, &priv->flags); if (nla_put_u16(skb, IFLA_IPOIB_UMCAST, val)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static int ipoib_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { u16 mode, umcast; int ret = 0; if (data[IFLA_IPOIB_MODE]) { mode = nla_get_u16(data[IFLA_IPOIB_MODE]); if (mode == IPOIB_MODE_DATAGRAM) ret = ipoib_set_mode(dev, "datagram\n"); else if (mode == IPOIB_MODE_CONNECTED) ret = ipoib_set_mode(dev, "connected\n"); else ret = -EINVAL; if (ret < 0) goto out_err; } if (data[IFLA_IPOIB_UMCAST]) { umcast = nla_get_u16(data[IFLA_IPOIB_UMCAST]); ipoib_set_umcast(dev, umcast); } out_err: return ret; } static int ipoib_new_child_link(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct net_device *pdev; struct ipoib_dev_priv *ppriv; u16 child_pkey; int err; if (!tb[IFLA_LINK]) return -EINVAL; pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); if (!pdev || pdev->type != ARPHRD_INFINIBAND) return -ENODEV; ppriv = netdev_priv(pdev); if (test_bit(IPOIB_FLAG_SUBINTERFACE, &ppriv->flags)) { ipoib_warn(ppriv, "child creation disallowed for child devices\n"); return -EINVAL; } if (!data || !data[IFLA_IPOIB_PKEY]) { ipoib_dbg(ppriv, "no pkey specified, using parent 
pkey\n"); child_pkey = ppriv->pkey; } else child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]); if (child_pkey == 0 || child_pkey == 0x8000) return -EINVAL; /* * Set the full membership bit, so that we join the right * broadcast group, etc. */ child_pkey |= 0x8000; err = __ipoib_vlan_add(ppriv, netdev_priv(dev), child_pkey, IPOIB_RTNL_CHILD); if (!err && data) err = ipoib_changelink(dev, tb, data); return err; } static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head *head) { struct ipoib_dev_priv *priv, *ppriv; priv = netdev_priv(dev); ppriv = netdev_priv(priv->parent); down_write(&ppriv->vlan_rwsem); unregister_netdevice_queue(dev, head); list_del(&priv->list); up_write(&ppriv->vlan_rwsem); } static size_t ipoib_get_size(const struct net_device *dev) { return nla_total_size(2) + /* IFLA_IPOIB_PKEY */ nla_total_size(2) + /* IFLA_IPOIB_MODE */ nla_total_size(2); /* IFLA_IPOIB_UMCAST */ } static struct rtnl_link_ops ipoib_link_ops __read_mostly = { .kind = "ipoib", .maxtype = IFLA_IPOIB_MAX, .policy = ipoib_policy, .priv_size = sizeof(struct ipoib_dev_priv), .setup = ipoib_setup, .newlink = ipoib_new_child_link, .changelink = ipoib_changelink, .dellink = ipoib_unregister_child_dev, .get_size = ipoib_get_size, .fill_info = ipoib_fill_info, }; int __init ipoib_netlink_init(void) { return rtnl_link_register(&ipoib_link_ops); } void __exit ipoib_netlink_fini(void) { rtnl_link_unregister(&ipoib_link_ops); } MODULE_ALIAS_RTNL_LINK("ipoib");
gpl-2.0
meizuosc/m75
kernel/arch/sparc/kernel/ptrace_64.c
2122
25585
/* ptrace.c: Sparc process tracing support. * * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net) * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) * * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson, * and David Mosberger. * * Added Linux support -miguel (weird, eh?, the original code was meant * to emulate SunOS). */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/smp.h> #include <linux/security.h> #include <linux/seccomp.h> #include <linux/audit.h> #include <linux/signal.h> #include <linux/regset.h> #include <linux/tracehook.h> #include <trace/syscall.h> #include <linux/compat.h> #include <linux/elf.h> #include <asm/asi.h> #include <asm/pgtable.h> #include <asm/uaccess.h> #include <asm/psrcompat.h> #include <asm/visasm.h> #include <asm/spitfire.h> #include <asm/page.h> #include <asm/cpudata.h> #include <asm/cacheflush.h> #define CREATE_TRACE_POINTS #include <trace/events/syscalls.h> #include "entry.h" /* #define ALLOW_INIT_TRACING */ /* * Called by kernel/ptrace.c when detaching.. * * Make sure single step bits etc are not set. */ void ptrace_disable(struct task_struct *child) { /* nothing to do */ } /* To get the necessary page struct, access_process_vm() first calls * get_user_pages(). This has done a flush_dcache_page() on the * accessed page. Then our caller (copy_{to,from}_user_page()) did * to memcpy to read/write the data from that page. 
 *
 * Now, the only thing we have to do is:
 * 1) flush the D-cache if it's possible that an illegal alias
 *    has been created
 * 2) flush the I-cache if this is pre-cheetah and we did a write
 */
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	BUG_ON(len > PAGE_SIZE);

	/* On sun4v (hypervisor) the caches are kept coherent for us,
	 * so there is nothing to flush.
	 */
	if (tlb_type == hypervisor)
		return;

	/* Keep us on this CPU while we poke at per-cpu cache geometry. */
	preempt_disable();

#ifdef DCACHE_ALIASING_POSSIBLE
	/* If bit 13 of the kernel address we used to access the
	 * user page is the same as the virtual address that page
	 * is mapped to in the user's address space, we can skip the
	 * D-cache flush.
	 */
	if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
		/* Flush by physical address, one D-cache line at a time. */
		unsigned long start = __pa(kaddr);
		unsigned long end = start + len;
		unsigned long dcache_line_size;

		dcache_line_size = local_cpu_data().dcache_line_size;

		if (tlb_type == spitfire) {
			for (; start < end; start += dcache_line_size)
				spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
		} else {
			/* Align down to a line boundary, then invalidate
			 * each line via the D-cache invalidate ASI.
			 */
			start &= ~(dcache_line_size - 1);
			for (; start < end; start += dcache_line_size)
				__asm__ __volatile__(
					"stxa %%g0, [%0] %1\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (start),
					"i" (ASI_DCACHE_INVALIDATE));
		}
	}
#endif
	/* Pre-cheetah chips need the I-cache flushed by hand after a
	 * write (see the comment above the function).
	 */
	if (write && tlb_type == spitfire) {
		unsigned long start = (unsigned long) kaddr;
		unsigned long end = start + len;
		unsigned long icache_line_size;

		icache_line_size = local_cpu_data().icache_line_size;

		for (; start < end; start += icache_line_size)
			flushi(start);
	}

	preempt_enable();
}

/* Copy @len bytes from @uaddr in @target's address space into @kbuf.
 * Uses a direct copy_from_user() when the target is the current task,
 * otherwise goes through access_process_vm().  Returns 0 or -EFAULT;
 * a short read via access_process_vm() is treated as failure.
 */
static int get_from_target(struct task_struct *target, unsigned long uaddr,
			   void *kbuf, int len)
{
	if (target == current) {
		if (copy_from_user(kbuf, (void __user *) uaddr, len))
			return -EFAULT;
	} else {
		int len2 = access_process_vm(target, uaddr, kbuf, len, 0);
		if (len2 != len)
			return -EFAULT;
	}
	return 0;
}

/* Mirror of get_from_target(): copy @len bytes from @kbuf out to @uaddr
 * in @target's address space.  Returns 0 or -EFAULT; short writes fail.
 */
static int set_to_target(struct task_struct *target, unsigned long uaddr,
			 void *kbuf, int len)
{
	if (target == current) {
		if (copy_to_user((void __user *) uaddr, kbuf, len))
			return -EFAULT;
	} else {
		int len2 =
		    access_process_vm(target, uaddr, kbuf, len, 1);
		if (len2 != len)
			return -EFAULT;
	}
	return 0;
}

/* Fetch the register window saved on @target's user stack (at the
 * frame pointer, %i6) into @wbuf.  A task running with a 32-bit stack
 * stores a 32-bit window layout, which is widened element-by-element
 * into the 64-bit reg_window; a 64-bit stack frame lives at %i6 plus
 * STACK_BIAS and can be copied directly.  Returns 0 or -EFAULT.
 */
static int regwindow64_get(struct task_struct *target,
			   const struct pt_regs *regs,
			   struct reg_window *wbuf)
{
	unsigned long rw_addr = regs->u_regs[UREG_I6];

	if (!test_thread_64bit_stack(rw_addr)) {
		struct reg_window32 win32;
		int i;

		if (get_from_target(target, rw_addr, &win32, sizeof(win32)))
			return -EFAULT;
		/* Widen the 32-bit locals and ins into the 64-bit buffer. */
		for (i = 0; i < 8; i++)
			wbuf->locals[i] = win32.locals[i];
		for (i = 0; i < 8; i++)
			wbuf->ins[i] = win32.ins[i];
	} else {
		rw_addr += STACK_BIAS;
		if (get_from_target(target, rw_addr, wbuf, sizeof(*wbuf)))
			return -EFAULT;
	}
	return 0;
}

/* Write-side counterpart of regwindow64_get(): store @wbuf back to the
 * register window save area on @target's user stack, narrowing to the
 * 32-bit layout when the task uses a 32-bit stack.  Returns 0 or -EFAULT.
 */
static int regwindow64_set(struct task_struct *target,
			   const struct pt_regs *regs,
			   struct reg_window *wbuf)
{
	unsigned long rw_addr = regs->u_regs[UREG_I6];

	if (!test_thread_64bit_stack(rw_addr)) {
		struct reg_window32 win32;
		int i;

		/* Narrow to 32-bit slots (high halves are discarded). */
		for (i = 0; i < 8; i++)
			win32.locals[i] = wbuf->locals[i];
		for (i = 0; i < 8; i++)
			win32.ins[i] = wbuf->ins[i];
		if (set_to_target(target, rw_addr, &win32, sizeof(win32)))
			return -EFAULT;
	} else {
		rw_addr += STACK_BIAS;
		if (set_to_target(target, rw_addr, wbuf, sizeof(*wbuf)))
			return -EFAULT;
	}
	return 0;
}

/* Indices into the regset arrays registered below. */
enum sparc_regset {
	REGSET_GENERAL,
	REGSET_FP,
};

/* regset "get" for the 64-bit general registers.  Layout (in u64 slots):
 * 0-15 = %g0-%g7/%o0-%o7 from pt_regs, 16-31 = the stack register
 * window (%l0-%l7/%i0-%i7), 32-34 = TSTATE/TPC/TNPC, 35 = Y,
 * remainder zero-filled.
 */
static int genregs64_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int ret;

	/* Make sure the current task's windows are spilled to the
	 * stack before we read them from there.
	 */
	if (target == current)
		flushw_user();

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs->u_regs,
				  0, 16 * sizeof(u64));
	if (!ret && count && pos < (32 * sizeof(u64))) {
		struct reg_window window;

		if (regwindow64_get(target, regs, &window))
			return -EFAULT;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &window,
					  16 * sizeof(u64),
					  32 * sizeof(u64));
	}
	if (!ret) {
		/* TSTATE, TPC, TNPC */
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &regs->tstate,
					  32 * sizeof(u64),
					  35 * sizeof(u64));
	}
	if (!ret) {
		unsigned long y = regs->y;

		ret =
user_regset_copyout(&pos, &count, &kbuf, &ubuf, &y, 35 * sizeof(u64), 36 * sizeof(u64)); } if (!ret) { ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 36 * sizeof(u64), -1); } return ret; } static int genregs64_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { struct pt_regs *regs = task_pt_regs(target); int ret; if (target == current) flushw_user(); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs->u_regs, 0, 16 * sizeof(u64)); if (!ret && count && pos < (32 * sizeof(u64))) { struct reg_window window; if (regwindow64_get(target, regs, &window)) return -EFAULT; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &window, 16 * sizeof(u64), 32 * sizeof(u64)); if (!ret && regwindow64_set(target, regs, &window)) return -EFAULT; } if (!ret && count > 0) { unsigned long tstate; /* TSTATE */ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tstate, 32 * sizeof(u64), 33 * sizeof(u64)); if (!ret) { /* Only the condition codes and the "in syscall" * state can be modified in the %tstate register. 
*/ tstate &= (TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL); regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL); regs->tstate |= tstate; } } if (!ret) { /* TPC, TNPC */ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs->tpc, 33 * sizeof(u64), 35 * sizeof(u64)); } if (!ret) { unsigned long y; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &y, 35 * sizeof(u64), 36 * sizeof(u64)); if (!ret) regs->y = y; } if (!ret) ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 36 * sizeof(u64), -1); return ret; } static int fpregs64_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { const unsigned long *fpregs = task_thread_info(target)->fpregs; unsigned long fprs, fsr, gsr; int ret; if (target == current) save_and_clear_fpu(); fprs = task_thread_info(target)->fpsaved[0]; if (fprs & FPRS_DL) ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, fpregs, 0, 16 * sizeof(u64)); else ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 0, 16 * sizeof(u64)); if (!ret) { if (fprs & FPRS_DU) ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, fpregs + 16, 16 * sizeof(u64), 32 * sizeof(u64)); else ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 16 * sizeof(u64), 32 * sizeof(u64)); } if (fprs & FPRS_FEF) { fsr = task_thread_info(target)->xfsr[0]; gsr = task_thread_info(target)->gsr[0]; } else { fsr = gsr = 0; } if (!ret) ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fsr, 32 * sizeof(u64), 33 * sizeof(u64)); if (!ret) ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &gsr, 33 * sizeof(u64), 34 * sizeof(u64)); if (!ret) ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fprs, 34 * sizeof(u64), 35 * sizeof(u64)); if (!ret) ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 35 * sizeof(u64), -1); return ret; } static int fpregs64_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const 
void *kbuf, const void __user *ubuf) { unsigned long *fpregs = task_thread_info(target)->fpregs; unsigned long fprs; int ret; if (target == current) save_and_clear_fpu(); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, fpregs, 0, 32 * sizeof(u64)); if (!ret) ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, task_thread_info(target)->xfsr, 32 * sizeof(u64), 33 * sizeof(u64)); if (!ret) ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, task_thread_info(target)->gsr, 33 * sizeof(u64), 34 * sizeof(u64)); fprs = task_thread_info(target)->fpsaved[0]; if (!ret && count > 0) { ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fprs, 34 * sizeof(u64), 35 * sizeof(u64)); } fprs |= (FPRS_FEF | FPRS_DL | FPRS_DU); task_thread_info(target)->fpsaved[0] = fprs; if (!ret) ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 35 * sizeof(u64), -1); return ret; } static const struct user_regset sparc64_regsets[] = { /* Format is: * G0 --> G7 * O0 --> O7 * L0 --> L7 * I0 --> I7 * TSTATE, TPC, TNPC, Y */ [REGSET_GENERAL] = { .core_note_type = NT_PRSTATUS, .n = 36, .size = sizeof(u64), .align = sizeof(u64), .get = genregs64_get, .set = genregs64_set }, /* Format is: * F0 --> F63 * FSR * GSR * FPRS */ [REGSET_FP] = { .core_note_type = NT_PRFPREG, .n = 35, .size = sizeof(u64), .align = sizeof(u64), .get = fpregs64_get, .set = fpregs64_set }, }; static const struct user_regset_view user_sparc64_view = { .name = "sparc64", .e_machine = EM_SPARCV9, .regsets = sparc64_regsets, .n = ARRAY_SIZE(sparc64_regsets) }; #ifdef CONFIG_COMPAT static int genregs32_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { const struct pt_regs *regs = task_pt_regs(target); compat_ulong_t __user *reg_window; compat_ulong_t *k = kbuf; compat_ulong_t __user *u = ubuf; compat_ulong_t reg; if (target == current) flushw_user(); pos /= sizeof(reg); count /= sizeof(reg); if (kbuf) { for (; count > 0 && pos < 16; 
count--) *k++ = regs->u_regs[pos++]; reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6]; reg_window -= 16; if (target == current) { for (; count > 0 && pos < 32; count--) { if (get_user(*k++, &reg_window[pos++])) return -EFAULT; } } else { for (; count > 0 && pos < 32; count--) { if (access_process_vm(target, (unsigned long) &reg_window[pos], k, sizeof(*k), 0) != sizeof(*k)) return -EFAULT; k++; pos++; } } } else { for (; count > 0 && pos < 16; count--) { if (put_user((compat_ulong_t) regs->u_regs[pos++], u++)) return -EFAULT; } reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6]; reg_window -= 16; if (target == current) { for (; count > 0 && pos < 32; count--) { if (get_user(reg, &reg_window[pos++]) || put_user(reg, u++)) return -EFAULT; } } else { for (; count > 0 && pos < 32; count--) { if (access_process_vm(target, (unsigned long) &reg_window[pos], &reg, sizeof(reg), 0) != sizeof(reg)) return -EFAULT; if (access_process_vm(target, (unsigned long) u, &reg, sizeof(reg), 1) != sizeof(reg)) return -EFAULT; pos++; u++; } } } while (count > 0) { switch (pos) { case 32: /* PSR */ reg = tstate_to_psr(regs->tstate); break; case 33: /* PC */ reg = regs->tpc; break; case 34: /* NPC */ reg = regs->tnpc; break; case 35: /* Y */ reg = regs->y; break; case 36: /* WIM */ case 37: /* TBR */ reg = 0; break; default: goto finish; } if (kbuf) *k++ = reg; else if (put_user(reg, u++)) return -EFAULT; pos++; count--; } finish: pos *= sizeof(reg); count *= sizeof(reg); return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 38 * sizeof(reg), -1); } static int genregs32_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { struct pt_regs *regs = task_pt_regs(target); compat_ulong_t __user *reg_window; const compat_ulong_t *k = kbuf; const compat_ulong_t __user *u = ubuf; compat_ulong_t reg; if (target == current) flushw_user(); pos /= sizeof(reg); count /= sizeof(reg); 
if (kbuf) { for (; count > 0 && pos < 16; count--) regs->u_regs[pos++] = *k++; reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6]; reg_window -= 16; if (target == current) { for (; count > 0 && pos < 32; count--) { if (put_user(*k++, &reg_window[pos++])) return -EFAULT; } } else { for (; count > 0 && pos < 32; count--) { if (access_process_vm(target, (unsigned long) &reg_window[pos], (void *) k, sizeof(*k), 1) != sizeof(*k)) return -EFAULT; k++; pos++; } } } else { for (; count > 0 && pos < 16; count--) { if (get_user(reg, u++)) return -EFAULT; regs->u_regs[pos++] = reg; } reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6]; reg_window -= 16; if (target == current) { for (; count > 0 && pos < 32; count--) { if (get_user(reg, u++) || put_user(reg, &reg_window[pos++])) return -EFAULT; } } else { for (; count > 0 && pos < 32; count--) { if (access_process_vm(target, (unsigned long) u, &reg, sizeof(reg), 0) != sizeof(reg)) return -EFAULT; if (access_process_vm(target, (unsigned long) &reg_window[pos], &reg, sizeof(reg), 1) != sizeof(reg)) return -EFAULT; pos++; u++; } } } while (count > 0) { unsigned long tstate; if (kbuf) reg = *k++; else if (get_user(reg, u++)) return -EFAULT; switch (pos) { case 32: /* PSR */ tstate = regs->tstate; tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL); tstate |= psr_to_tstate_icc(reg); if (reg & PSR_SYSCALL) tstate |= TSTATE_SYSCALL; regs->tstate = tstate; break; case 33: /* PC */ regs->tpc = reg; break; case 34: /* NPC */ regs->tnpc = reg; break; case 35: /* Y */ regs->y = reg; break; case 36: /* WIM */ case 37: /* TBR */ break; default: goto finish; } pos++; count--; } finish: pos *= sizeof(reg); count *= sizeof(reg); return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 38 * sizeof(reg), -1); } static int fpregs32_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { const unsigned long *fpregs = 
task_thread_info(target)->fpregs; compat_ulong_t enabled; unsigned long fprs; compat_ulong_t fsr; int ret = 0; if (target == current) save_and_clear_fpu(); fprs = task_thread_info(target)->fpsaved[0]; if (fprs & FPRS_FEF) { fsr = task_thread_info(target)->xfsr[0]; enabled = 1; } else { fsr = 0; enabled = 0; } ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, fpregs, 0, 32 * sizeof(u32)); if (!ret) ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 32 * sizeof(u32), 33 * sizeof(u32)); if (!ret) ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fsr, 33 * sizeof(u32), 34 * sizeof(u32)); if (!ret) { compat_ulong_t val; val = (enabled << 8) | (8 << 16); ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &val, 34 * sizeof(u32), 35 * sizeof(u32)); } if (!ret) ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 35 * sizeof(u32), -1); return ret; } static int fpregs32_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { unsigned long *fpregs = task_thread_info(target)->fpregs; unsigned long fprs; int ret; if (target == current) save_and_clear_fpu(); fprs = task_thread_info(target)->fpsaved[0]; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, fpregs, 0, 32 * sizeof(u32)); if (!ret) user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 32 * sizeof(u32), 33 * sizeof(u32)); if (!ret && count > 0) { compat_ulong_t fsr; unsigned long val; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fsr, 33 * sizeof(u32), 34 * sizeof(u32)); if (!ret) { val = task_thread_info(target)->xfsr[0]; val &= 0xffffffff00000000UL; val |= fsr; task_thread_info(target)->xfsr[0] = val; } } fprs |= (FPRS_FEF | FPRS_DL); task_thread_info(target)->fpsaved[0] = fprs; if (!ret) ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 34 * sizeof(u32), -1); return ret; } static const struct user_regset sparc32_regsets[] = { /* Format is: * G0 --> G7 * O0 --> O7 * L0 --> L7 * I0 
--> I7 * PSR, PC, nPC, Y, WIM, TBR */ [REGSET_GENERAL] = { .core_note_type = NT_PRSTATUS, .n = 38, .size = sizeof(u32), .align = sizeof(u32), .get = genregs32_get, .set = genregs32_set }, /* Format is: * F0 --> F31 * empty 32-bit word * FSR (32--bit word) * FPU QUEUE COUNT (8-bit char) * FPU QUEUE ENTRYSIZE (8-bit char) * FPU ENABLED (8-bit char) * empty 8-bit char * FPU QUEUE (64 32-bit ints) */ [REGSET_FP] = { .core_note_type = NT_PRFPREG, .n = 99, .size = sizeof(u32), .align = sizeof(u32), .get = fpregs32_get, .set = fpregs32_set }, }; static const struct user_regset_view user_sparc32_view = { .name = "sparc", .e_machine = EM_SPARC, .regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets) }; #endif /* CONFIG_COMPAT */ const struct user_regset_view *task_user_regset_view(struct task_struct *task) { #ifdef CONFIG_COMPAT if (test_tsk_thread_flag(task, TIF_32BIT)) return &user_sparc32_view; #endif return &user_sparc64_view; } #ifdef CONFIG_COMPAT struct compat_fps { unsigned int regs[32]; unsigned int fsr; unsigned int flags; unsigned int extra; unsigned int fpqd; struct compat_fq { unsigned int insnaddr; unsigned int insn; } fpq[16]; }; long compat_arch_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t caddr, compat_ulong_t cdata) { const struct user_regset_view *view = task_user_regset_view(current); compat_ulong_t caddr2 = task_pt_regs(current)->u_regs[UREG_I4]; struct pt_regs32 __user *pregs; struct compat_fps __user *fps; unsigned long addr2 = caddr2; unsigned long addr = caddr; unsigned long data = cdata; int ret; pregs = (struct pt_regs32 __user *) addr; fps = (struct compat_fps __user *) addr; switch (request) { case PTRACE_PEEKUSR: ret = (addr != 0) ? 
-EIO : 0; break; case PTRACE_GETREGS: ret = copy_regset_to_user(child, view, REGSET_GENERAL, 32 * sizeof(u32), 4 * sizeof(u32), &pregs->psr); if (!ret) ret = copy_regset_to_user(child, view, REGSET_GENERAL, 1 * sizeof(u32), 15 * sizeof(u32), &pregs->u_regs[0]); break; case PTRACE_SETREGS: ret = copy_regset_from_user(child, view, REGSET_GENERAL, 32 * sizeof(u32), 4 * sizeof(u32), &pregs->psr); if (!ret) ret = copy_regset_from_user(child, view, REGSET_GENERAL, 1 * sizeof(u32), 15 * sizeof(u32), &pregs->u_regs[0]); break; case PTRACE_GETFPREGS: ret = copy_regset_to_user(child, view, REGSET_FP, 0 * sizeof(u32), 32 * sizeof(u32), &fps->regs[0]); if (!ret) ret = copy_regset_to_user(child, view, REGSET_FP, 33 * sizeof(u32), 1 * sizeof(u32), &fps->fsr); if (!ret) { if (__put_user(0, &fps->flags) || __put_user(0, &fps->extra) || __put_user(0, &fps->fpqd) || clear_user(&fps->fpq[0], 32 * sizeof(unsigned int))) ret = -EFAULT; } break; case PTRACE_SETFPREGS: ret = copy_regset_from_user(child, view, REGSET_FP, 0 * sizeof(u32), 32 * sizeof(u32), &fps->regs[0]); if (!ret) ret = copy_regset_from_user(child, view, REGSET_FP, 33 * sizeof(u32), 1 * sizeof(u32), &fps->fsr); break; case PTRACE_READTEXT: case PTRACE_READDATA: ret = ptrace_readdata(child, addr, (char __user *)addr2, data); if (ret == data) ret = 0; else if (ret >= 0) ret = -EIO; break; case PTRACE_WRITETEXT: case PTRACE_WRITEDATA: ret = ptrace_writedata(child, (char __user *) addr2, addr, data); if (ret == data) ret = 0; else if (ret >= 0) ret = -EIO; break; default: if (request == PTRACE_SPARC_DETACH) request = PTRACE_DETACH; ret = compat_ptrace_request(child, request, addr, data); break; } return ret; } #endif /* CONFIG_COMPAT */ struct fps { unsigned int regs[64]; unsigned long fsr; }; long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { const struct user_regset_view *view = task_user_regset_view(current); unsigned long addr2 = task_pt_regs(current)->u_regs[UREG_I4]; 
struct pt_regs __user *pregs; struct fps __user *fps; void __user *addr2p; int ret; pregs = (struct pt_regs __user *) addr; fps = (struct fps __user *) addr; addr2p = (void __user *) addr2; switch (request) { case PTRACE_PEEKUSR: ret = (addr != 0) ? -EIO : 0; break; case PTRACE_GETREGS64: ret = copy_regset_to_user(child, view, REGSET_GENERAL, 1 * sizeof(u64), 15 * sizeof(u64), &pregs->u_regs[0]); if (!ret) { /* XXX doesn't handle 'y' register correctly XXX */ ret = copy_regset_to_user(child, view, REGSET_GENERAL, 32 * sizeof(u64), 4 * sizeof(u64), &pregs->tstate); } break; case PTRACE_SETREGS64: ret = copy_regset_from_user(child, view, REGSET_GENERAL, 1 * sizeof(u64), 15 * sizeof(u64), &pregs->u_regs[0]); if (!ret) { /* XXX doesn't handle 'y' register correctly XXX */ ret = copy_regset_from_user(child, view, REGSET_GENERAL, 32 * sizeof(u64), 4 * sizeof(u64), &pregs->tstate); } break; case PTRACE_GETFPREGS64: ret = copy_regset_to_user(child, view, REGSET_FP, 0 * sizeof(u64), 33 * sizeof(u64), fps); break; case PTRACE_SETFPREGS64: ret = copy_regset_from_user(child, view, REGSET_FP, 0 * sizeof(u64), 33 * sizeof(u64), fps); break; case PTRACE_READTEXT: case PTRACE_READDATA: ret = ptrace_readdata(child, addr, addr2p, data); if (ret == data) ret = 0; else if (ret >= 0) ret = -EIO; break; case PTRACE_WRITETEXT: case PTRACE_WRITEDATA: ret = ptrace_writedata(child, addr2p, addr, data); if (ret == data) ret = 0; else if (ret >= 0) ret = -EIO; break; default: if (request == PTRACE_SPARC_DETACH) request = PTRACE_DETACH; ret = ptrace_request(child, request, addr, data); break; } return ret; } asmlinkage int syscall_trace_enter(struct pt_regs *regs) { int ret = 0; /* do the secure computing check first */ secure_computing_strict(regs->u_regs[UREG_G1]); if (test_thread_flag(TIF_SYSCALL_TRACE)) ret = tracehook_report_syscall_entry(regs); if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_enter(regs, regs->u_regs[UREG_G1]); 
	/* Report the entry to the audit subsystem, selecting the 32- or
	 * 64-bit personality based on TIF_32BIT.  %g1 holds the syscall
	 * number, %i0-%i3 the first four arguments.
	 */
	audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
			     AUDIT_ARCH_SPARC :
			     AUDIT_ARCH_SPARC64),
			    regs->u_regs[UREG_G1],
			    regs->u_regs[UREG_I0],
			    regs->u_regs[UREG_I1],
			    regs->u_regs[UREG_I2],
			    regs->u_regs[UREG_I3]);

	return ret;
}

/* Exit-side counterpart of syscall_trace_enter(): notify audit, fire
 * the sys_exit tracepoint if enabled, then report to the ptrace
 * tracer via tracehook.
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->u_regs[UREG_G1]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}
gpl-2.0
viaembedded/springboard-kernel-bsp
arch/s390/kernel/mem_detect.c
2890
1501
/* * Copyright IBM Corp. 2008, 2009 * * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> */ #include <linux/kernel.h> #include <linux/module.h> #include <asm/ipl.h> #include <asm/sclp.h> #include <asm/setup.h> #define ADDR2G (1ULL << 31) static void find_memory_chunks(struct mem_chunk chunk[]) { unsigned long long memsize, rnmax, rzm; unsigned long addr = 0, size; int i = 0, type; rzm = sclp_get_rzm(); rnmax = sclp_get_rnmax(); memsize = rzm * rnmax; if (!rzm) rzm = 1ULL << 17; if (sizeof(long) == 4) { rzm = min(ADDR2G, rzm); memsize = memsize ? min(ADDR2G, memsize) : ADDR2G; } do { size = 0; type = tprot(addr); do { size += rzm; if (memsize && addr + size >= memsize) break; } while (type == tprot(addr + size)); if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) { chunk[i].addr = addr; chunk[i].size = size; chunk[i].type = type; i++; } addr += size; } while (addr < memsize && i < MEMORY_CHUNKS); } void detect_memory_layout(struct mem_chunk chunk[]) { unsigned long flags, cr0; memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk)); /* Disable IRQs, DAT and low address protection so tprot does the * right thing and we don't get scheduled away with low address * protection disabled. */ flags = __arch_local_irq_stnsm(0xf8); __ctl_store(cr0, 0, 0); __ctl_clear_bit(0, 28); find_memory_chunks(chunk); __ctl_load(cr0, 0, 0); arch_local_irq_restore(flags); } EXPORT_SYMBOL(detect_memory_layout);
gpl-2.0
EPDCenter/android_kernel_bq_dc_v1
fs/cifs/dns_resolve.c
2890
2864
/* * fs/cifs/dns_resolve.c * * Copyright (c) 2007 Igor Mammedov * Author(s): Igor Mammedov (niallain@gmail.com) * Steve French (sfrench@us.ibm.com) * Wang Lei (wang840925@gmail.com) * David Howells (dhowells@redhat.com) * * Contains the CIFS DFS upcall routines used for hostname to * IP address translation. * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/slab.h> #include <linux/dns_resolver.h> #include "dns_resolve.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" /** * dns_resolve_server_name_to_ip - Resolve UNC server name to ip address. * @unc: UNC path specifying the server * @ip_addr: Where to return the IP address. * * The IP address will be returned in string form, and the caller is * responsible for freeing it. * * Returns length of result on success, -ve on error. 
*/ int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) { struct sockaddr_storage ss; const char *hostname, *sep; char *name; int len, rc; if (!ip_addr || !unc) return -EINVAL; len = strlen(unc); if (len < 3) { cFYI(1, "%s: unc is too short: %s", __func__, unc); return -EINVAL; } /* Discount leading slashes for cifs */ len -= 2; hostname = unc + 2; /* Search for server name delimiter */ sep = memchr(hostname, '\\', len); if (sep) len = sep - hostname; else cFYI(1, "%s: probably server name is whole unc: %s", __func__, unc); /* Try to interpret hostname as an IPv4 or IPv6 address */ rc = cifs_convert_address((struct sockaddr *)&ss, hostname, len); if (rc > 0) goto name_is_IP_address; /* Perform the upcall */ rc = dns_query(NULL, hostname, len, NULL, ip_addr, NULL); if (rc < 0) cERROR(1, "%s: unable to resolve: %*.*s", __func__, len, len, hostname); else cFYI(1, "%s: resolved: %*.*s to %s", __func__, len, len, hostname, *ip_addr); return rc; name_is_IP_address: name = kmalloc(len + 1, GFP_KERNEL); if (!name) return -ENOMEM; memcpy(name, hostname, len); name[len] = 0; cFYI(1, "%s: unc is IP, skipping dns upcall: %s", __func__, name); *ip_addr = name; return 0; }
gpl-2.0
golden-guy/android_kernel_samsung_golden
drivers/net/wireless/iwlegacy/iwl-4965-sta.c
3146
21524
/****************************************************************************** * * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #include <net/mac80211.h> #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-sta.h" #include "iwl-4965.h" static struct iwl_link_quality_cmd * iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id) { int i, r; struct iwl_link_quality_cmd *link_cmd; u32 rate_flags = 0; __le32 rate_n_flags; link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL); if (!link_cmd) { IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n"); return NULL; } /* Set up the rate scaling to start at selected rate, fall back * all the way down to 1M in IEEE order, and then spin on 1M */ if (priv->band == IEEE80211_BAND_5GHZ) r = IWL_RATE_6M_INDEX; else r = IWL_RATE_1M_INDEX; if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) rate_flags |= RATE_MCS_CCK_MSK; rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) << RATE_MCS_ANT_POS; rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp, rate_flags); for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) link_cmd->rs_table[i].rate_n_flags = rate_n_flags; link_cmd->general_params.single_stream_ant_msk = iwl4965_first_antenna(priv->hw_params.valid_tx_ant); link_cmd->general_params.dual_stream_ant_msk = priv->hw_params.valid_tx_ant & ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant); if (!link_cmd->general_params.dual_stream_ant_msk) { link_cmd->general_params.dual_stream_ant_msk = ANT_AB; } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) { link_cmd->general_params.dual_stream_ant_msk = priv->hw_params.valid_tx_ant; } link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; link_cmd->agg_params.agg_time_limit = cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); link_cmd->sta_id = sta_id; return link_cmd; } /* * iwl4965_add_bssid_station - Add the special IBSS BSSID station * * Function sleeps. 
*/ int iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, const u8 *addr, u8 *sta_id_r) { int ret; u8 sta_id; struct iwl_link_quality_cmd *link_cmd; unsigned long flags; if (sta_id_r) *sta_id_r = IWL_INVALID_STATION; ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id); if (ret) { IWL_ERR(priv, "Unable to add station %pM\n", addr); return ret; } if (sta_id_r) *sta_id_r = sta_id; spin_lock_irqsave(&priv->sta_lock, flags); priv->stations[sta_id].used |= IWL_STA_LOCAL; spin_unlock_irqrestore(&priv->sta_lock, flags); /* Set up default rate scaling table in device's station table */ link_cmd = iwl4965_sta_alloc_lq(priv, sta_id); if (!link_cmd) { IWL_ERR(priv, "Unable to initialize rate scaling for station %pM.\n", addr); return -ENOMEM; } ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true); if (ret) IWL_ERR(priv, "Link quality command failed (%d)\n", ret); spin_lock_irqsave(&priv->sta_lock, flags); priv->stations[sta_id].lq = link_cmd; spin_unlock_irqrestore(&priv->sta_lock, flags); return 0; } static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, bool send_if_empty) { int i, not_empty = 0; u8 buff[sizeof(struct iwl_wep_cmd) + sizeof(struct iwl_wep_key) * WEP_KEYS_MAX]; struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff; size_t cmd_size = sizeof(struct iwl_wep_cmd); struct iwl_host_cmd cmd = { .id = ctx->wep_key_cmd, .data = wep_cmd, .flags = CMD_SYNC, }; might_sleep(); memset(wep_cmd, 0, cmd_size + (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX)); for (i = 0; i < WEP_KEYS_MAX ; i++) { wep_cmd->key[i].key_index = i; if (ctx->wep_keys[i].key_size) { wep_cmd->key[i].key_offset = i; not_empty = 1; } else { wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET; } wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size; memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key, ctx->wep_keys[i].key_size); } wep_cmd->global_key_type = WEP_KEY_WEP_TYPE; wep_cmd->num_keys = WEP_KEYS_MAX; 
cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX; cmd.len = cmd_size; if (not_empty || send_if_empty) return iwl_legacy_send_cmd(priv, &cmd); else return 0; } int iwl4965_restore_default_wep_keys(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { lockdep_assert_held(&priv->mutex); return iwl4965_static_wepkey_cmd(priv, ctx, false); } int iwl4965_remove_default_wep_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct ieee80211_key_conf *keyconf) { int ret; lockdep_assert_held(&priv->mutex); IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", keyconf->keyidx); memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0])); if (iwl_legacy_is_rfkill(priv)) { IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n"); /* but keys in device are clear anyway so return success */ return 0; } ret = iwl4965_static_wepkey_cmd(priv, ctx, 1); IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", keyconf->keyidx, ret); return ret; } int iwl4965_set_default_wep_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct ieee80211_key_conf *keyconf) { int ret; lockdep_assert_held(&priv->mutex); if (keyconf->keylen != WEP_KEY_LEN_128 && keyconf->keylen != WEP_KEY_LEN_64) { IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen); return -EINVAL; } keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; keyconf->hw_key_idx = HW_KEY_DEFAULT; priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher; ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen; memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key, keyconf->keylen); ret = iwl4965_static_wepkey_cmd(priv, ctx, false); IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n", keyconf->keylen, keyconf->keyidx, ret); return ret; } static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct ieee80211_key_conf *keyconf, u8 sta_id) { unsigned long flags; __le16 key_flags = 0; struct iwl_legacy_addsta_cmd 
sta_cmd; lockdep_assert_held(&priv->mutex); keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK); key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); key_flags &= ~STA_KEY_FLG_INVALID; if (keyconf->keylen == WEP_KEY_LEN_128) key_flags |= STA_KEY_FLG_KEY_SIZE_MSK; if (sta_id == ctx->bcast_sta_id) key_flags |= STA_KEY_MULTICAST_MSK; spin_lock_irqsave(&priv->sta_lock, flags); priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx; memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen); memcpy(&priv->stations[sta_id].sta.key.key[3], keyconf->key, keyconf->keylen); if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC) priv->stations[sta_id].sta.key.key_offset = iwl_legacy_get_free_ucode_key_index(priv); /* else, we are overriding an existing key => no need to allocated room * in uCode. 
*/ WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, "no space for a new key"); priv->stations[sta_id].sta.key.key_flags = key_flags; priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd)); spin_unlock_irqrestore(&priv->sta_lock, flags); return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); } static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct ieee80211_key_conf *keyconf, u8 sta_id) { unsigned long flags; __le16 key_flags = 0; struct iwl_legacy_addsta_cmd sta_cmd; lockdep_assert_held(&priv->mutex); key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); key_flags &= ~STA_KEY_FLG_INVALID; if (sta_id == ctx->bcast_sta_id) key_flags |= STA_KEY_MULTICAST_MSK; keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; spin_lock_irqsave(&priv->sta_lock, flags); priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen); memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen); if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC) priv->stations[sta_id].sta.key.key_offset = iwl_legacy_get_free_ucode_key_index(priv); /* else, we are overriding an existing key => no need to allocated room * in uCode. 
*/ WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, "no space for a new key"); priv->stations[sta_id].sta.key.key_flags = key_flags; priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd)); spin_unlock_irqrestore(&priv->sta_lock, flags); return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); } static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct ieee80211_key_conf *keyconf, u8 sta_id) { unsigned long flags; int ret = 0; __le16 key_flags = 0; key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); key_flags &= ~STA_KEY_FLG_INVALID; if (sta_id == ctx->bcast_sta_id) key_flags |= STA_KEY_MULTICAST_MSK; keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; spin_lock_irqsave(&priv->sta_lock, flags); priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; priv->stations[sta_id].keyinfo.keylen = 16; if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC) priv->stations[sta_id].sta.key.key_offset = iwl_legacy_get_free_ucode_key_index(priv); /* else, we are overriding an existing key => no need to allocated room * in uCode. 
*/ WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, "no space for a new key"); priv->stations[sta_id].sta.key.key_flags = key_flags; /* This copy is acutally not needed: we get the key with each TX */ memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16); spin_unlock_irqrestore(&priv->sta_lock, flags); return ret; } void iwl4965_update_tkip_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct ieee80211_key_conf *keyconf, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key) { u8 sta_id; unsigned long flags; int i; if (iwl_legacy_scan_cancel(priv)) { /* cancel scan failed, just live w/ bad key and rely briefly on SW decryption */ return; } sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta); if (sta_id == IWL_INVALID_STATION) return; spin_lock_irqsave(&priv->sta_lock, flags); priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32; for (i = 0; i < 5; i++) priv->stations[sta_id].sta.key.tkip_rx_ttak[i] = cpu_to_le16(phase1key[i]); priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); spin_unlock_irqrestore(&priv->sta_lock, flags); } int iwl4965_remove_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct ieee80211_key_conf *keyconf, u8 sta_id) { unsigned long flags; u16 key_flags; u8 keyidx; struct iwl_legacy_addsta_cmd sta_cmd; lockdep_assert_held(&priv->mutex); ctx->key_mapping_keys--; spin_lock_irqsave(&priv->sta_lock, flags); key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags); keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3; IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id); if (keyconf->keyidx != keyidx) { /* We need to remove a key with index different that the one * in the uCode. 
This means that the key we need to remove has * been replaced by another one with different index. * Don't do anything and return ok */ spin_unlock_irqrestore(&priv->sta_lock, flags); return 0; } if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) { IWL_WARN(priv, "Removing wrong key %d 0x%x\n", keyconf->keyidx, key_flags); spin_unlock_irqrestore(&priv->sta_lock, flags); return 0; } if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset, &priv->ucode_key_table)) IWL_ERR(priv, "index %d not used in uCode key table.\n", priv->stations[sta_id].sta.key.key_offset); memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl4965_keyinfo)); priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID; priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET; priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; if (iwl_legacy_is_rfkill(priv)) { IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled.\n"); spin_unlock_irqrestore(&priv->sta_lock, flags); return 0; } memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd)); spin_unlock_irqrestore(&priv->sta_lock, flags); return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); } int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct ieee80211_key_conf *keyconf, u8 sta_id) { int ret; lockdep_assert_held(&priv->mutex); ctx->key_mapping_keys++; keyconf->hw_key_idx = HW_KEY_DYNAMIC; switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_CCMP: ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx, keyconf, sta_id); break; case WLAN_CIPHER_SUITE_TKIP: ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx, keyconf, sta_id); break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: ret = iwl4965_set_wep_dynamic_key_info(priv, ctx, keyconf, sta_id); break; 
default: IWL_ERR(priv, "Unknown alg: %s cipher = %x\n", __func__, keyconf->cipher); ret = -EINVAL; } IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n", keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret); return ret; } /** * iwl4965_alloc_bcast_station - add broadcast station into driver's station table. * * This adds the broadcast station into the driver's station table * and marks it driver active, so that it will be restored to the * device at the next best time. */ int iwl4965_alloc_bcast_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { struct iwl_link_quality_cmd *link_cmd; unsigned long flags; u8 sta_id; spin_lock_irqsave(&priv->sta_lock, flags); sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr, false, NULL); if (sta_id == IWL_INVALID_STATION) { IWL_ERR(priv, "Unable to prepare broadcast station\n"); spin_unlock_irqrestore(&priv->sta_lock, flags); return -EINVAL; } priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE; priv->stations[sta_id].used |= IWL_STA_BCAST; spin_unlock_irqrestore(&priv->sta_lock, flags); link_cmd = iwl4965_sta_alloc_lq(priv, sta_id); if (!link_cmd) { IWL_ERR(priv, "Unable to initialize rate scaling for bcast station.\n"); return -ENOMEM; } spin_lock_irqsave(&priv->sta_lock, flags); priv->stations[sta_id].lq = link_cmd; spin_unlock_irqrestore(&priv->sta_lock, flags); return 0; } /** * iwl4965_update_bcast_station - update broadcast station's LQ command * * Only used by iwl4965. Placed here to have all bcast station management * code together. 
 */
static int iwl4965_update_bcast_station(struct iwl_priv *priv,
					struct iwl_rxon_context *ctx)
{
	unsigned long flags;
	struct iwl_link_quality_cmd *link_cmd;
	u8 sta_id = ctx->bcast_sta_id;

	/* build a fresh LQ command before taking the lock (may allocate) */
	link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
	if (!link_cmd) {
		IWL_ERR(priv, "Unable to initialize rate scaling for bcast station.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	/* kfree(NULL) would be a no-op; the test only gates the debug message */
	if (priv->stations[sta_id].lq)
		kfree(priv->stations[sta_id].lq);
	else
		IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n");
	priv->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return 0;
}

/* Refresh the broadcast station LQ command for every RXON context;
 * stops at (and returns) the first failure. */
int iwl4965_update_bcast_stations(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;
	int ret = 0;

	for_each_context(priv, ctx) {
		ret = iwl4965_update_bcast_station(priv, ctx);
		if (ret)
			break;
	}

	return ret;
}

/**
 * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
 */
int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
{
	unsigned long flags;
	struct iwl_legacy_addsta_cmd sta_cmd;

	lockdep_assert_held(&priv->mutex);

	/* Remove "disable" flag, to enable Tx for this TID */
	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
	priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* snapshot the command so it can be sent after dropping the lock */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}

/* Tell the uCode to accept an RX block-ack session for this station/TID,
 * starting at sequence number ssn.  Caller must hold priv->mutex. */
int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
			int tid, u16 ssn)
{
	unsigned long flags;
	int sta_id;
	struct iwl_legacy_addsta_cmd sta_cmd;

	lockdep_assert_held(&priv->mutex);

	sta_id = iwl_legacy_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION)
		return -ENXIO;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].sta.station_flags_msk = 0;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
	priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
	priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* snapshot under the lock, send after dropping it */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}

/* Tell the uCode to stop the RX block-ack session for this station/TID.
 * Caller must hold priv->mutex. */
int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
		       int tid)
{
	unsigned long flags;
	int sta_id;
	struct iwl_legacy_addsta_cmd sta_cmd;

	lockdep_assert_held(&priv->mutex);

	sta_id = iwl_legacy_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].sta.station_flags_msk = 0;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
	priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}

/* Program the number of frames the station may send while asleep
 * (power-save).  Sent CMD_ASYNC: we are in atomic context under sta_lock. */
void
iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
	priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
	priv->stations[sta_id].sta.sta.modify_mask =
					STA_MODIFY_SLEEP_TX_COUNT_MSK;
	priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
	spin_unlock_irqrestore(&priv->sta_lock, flags);
}
gpl-2.0
Amperific/kernel_tuna_4.3
arch/ia64/kernel/unwind.c
3146
63914
/* * Copyright (C) 1999-2004 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com> * - Change pt_regs_off() to make it less dependent on pt_regs structure. */ /* * This file implements call frame unwind support for the Linux * kernel. Parsing and processing the unwind information is * time-consuming, so this implementation translates the unwind * descriptors into unwind scripts. These scripts are very simple * (basically a sequence of assignments) and efficient to execute. * They are cached for later re-use. Each script is specific for a * given instruction pointer address and the set of predicate values * that the script depends on (most unwind descriptors are * unconditional and scripts often do not depend on predicates at * all). This code is based on the unwind conventions described in * the "IA-64 Software Conventions and Runtime Architecture" manual. * * SMP conventions: * o updates to the global unwind data (in structure "unw") are serialized * by the unw.lock spinlock * o each unwind script has its own read-write lock; a thread must acquire * a read lock before executing a script and must acquire a write lock * before modifying a script * o if both the unw.lock spinlock and a script's read-write lock must be * acquired, then the read-write lock must be acquired first. 
*/ #include <linux/module.h> #include <linux/bootmem.h> #include <linux/elf.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <asm/unwind.h> #include <asm/delay.h> #include <asm/page.h> #include <asm/ptrace.h> #include <asm/ptrace_offsets.h> #include <asm/rse.h> #include <asm/sections.h> #include <asm/system.h> #include <asm/uaccess.h> #include "entry.h" #include "unwind_i.h" #define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */ #define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE) #define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1) #define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE) #define UNW_STATS 0 /* WARNING: this disabled interrupts for long time-spans!! */ #ifdef UNW_DEBUG static unsigned int unw_debug_level = UNW_DEBUG; # define UNW_DEBUG_ON(n) unw_debug_level >= n /* Do not code a printk level, not all debug lines end in newline */ # define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__) # undef inline # define inline #else /* !UNW_DEBUG */ # define UNW_DEBUG_ON(n) 0 # define UNW_DPRINT(n, ...) #endif /* UNW_DEBUG */ #if UNW_STATS # define STAT(x...) x #else # define STAT(x...) 
#endif #define alloc_reg_state() kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC) #define free_reg_state(usr) kfree(usr) #define alloc_labeled_state() kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC) #define free_labeled_state(usr) kfree(usr) typedef unsigned long unw_word; typedef unsigned char unw_hash_index_t; static struct { spinlock_t lock; /* spinlock for unwind data */ /* list of unwind tables (one per load-module) */ struct unw_table *tables; unsigned long r0; /* constant 0 for r0 */ /* table of registers that prologues can save (and order in which they're saved): */ const unsigned char save_order[8]; /* maps a preserved register index (preg_index) to corresponding switch_stack offset: */ unsigned short sw_off[sizeof(struct unw_frame_info) / 8]; unsigned short lru_head; /* index of lead-recently used script */ unsigned short lru_tail; /* index of most-recently used script */ /* index into unw_frame_info for preserved register i */ unsigned short preg_index[UNW_NUM_REGS]; short pt_regs_offsets[32]; /* unwind table for the kernel: */ struct unw_table kernel_table; /* unwind table describing the gate page (kernel code that is mapped into user space): */ size_t gate_table_size; unsigned long *gate_table; /* hash table that maps instruction pointer to script index: */ unsigned short hash[UNW_HASH_SIZE]; /* script cache: */ struct unw_script cache[UNW_CACHE_SIZE]; # ifdef UNW_DEBUG const char *preg_name[UNW_NUM_REGS]; # endif # if UNW_STATS struct { struct { int lookups; int hinted_hits; int normal_hits; int collision_chain_traversals; } cache; struct { unsigned long build_time; unsigned long run_time; unsigned long parse_time; int builds; int news; int collisions; int runs; } script; struct { unsigned long init_time; unsigned long unwind_time; int inits; int unwinds; } api; } stat; # endif } unw = { .tables = &unw.kernel_table, .lock = __SPIN_LOCK_UNLOCKED(unw.lock), .save_order = { UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR, UNW_REG_UNAT, 
UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR }, .preg_index = { offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */ offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */ offsetof(struct unw_frame_info, bsp_loc)/8, offsetof(struct unw_frame_info, bspstore_loc)/8, offsetof(struct unw_frame_info, pfs_loc)/8, offsetof(struct unw_frame_info, rnat_loc)/8, offsetof(struct unw_frame_info, psp)/8, offsetof(struct unw_frame_info, rp_loc)/8, offsetof(struct unw_frame_info, r4)/8, offsetof(struct unw_frame_info, r5)/8, offsetof(struct unw_frame_info, r6)/8, offsetof(struct unw_frame_info, r7)/8, offsetof(struct unw_frame_info, unat_loc)/8, offsetof(struct unw_frame_info, pr_loc)/8, offsetof(struct unw_frame_info, lc_loc)/8, offsetof(struct unw_frame_info, fpsr_loc)/8, offsetof(struct unw_frame_info, b1_loc)/8, offsetof(struct unw_frame_info, b2_loc)/8, offsetof(struct unw_frame_info, b3_loc)/8, offsetof(struct unw_frame_info, b4_loc)/8, offsetof(struct unw_frame_info, b5_loc)/8, offsetof(struct unw_frame_info, f2_loc)/8, offsetof(struct unw_frame_info, f3_loc)/8, offsetof(struct unw_frame_info, f4_loc)/8, offsetof(struct unw_frame_info, f5_loc)/8, offsetof(struct unw_frame_info, fr_loc[16 - 16])/8, offsetof(struct unw_frame_info, fr_loc[17 - 16])/8, offsetof(struct unw_frame_info, fr_loc[18 - 16])/8, offsetof(struct unw_frame_info, fr_loc[19 - 16])/8, offsetof(struct unw_frame_info, fr_loc[20 - 16])/8, offsetof(struct unw_frame_info, fr_loc[21 - 16])/8, offsetof(struct unw_frame_info, fr_loc[22 - 16])/8, offsetof(struct unw_frame_info, fr_loc[23 - 16])/8, offsetof(struct unw_frame_info, fr_loc[24 - 16])/8, offsetof(struct unw_frame_info, fr_loc[25 - 16])/8, offsetof(struct unw_frame_info, fr_loc[26 - 16])/8, offsetof(struct unw_frame_info, fr_loc[27 - 16])/8, offsetof(struct unw_frame_info, fr_loc[28 - 16])/8, offsetof(struct unw_frame_info, fr_loc[29 - 16])/8, offsetof(struct unw_frame_info, fr_loc[30 - 16])/8, offsetof(struct unw_frame_info, 
fr_loc[31 - 16])/8, }, .pt_regs_offsets = { [0] = -1, offsetof(struct pt_regs, r1), offsetof(struct pt_regs, r2), offsetof(struct pt_regs, r3), [4] = -1, [5] = -1, [6] = -1, [7] = -1, offsetof(struct pt_regs, r8), offsetof(struct pt_regs, r9), offsetof(struct pt_regs, r10), offsetof(struct pt_regs, r11), offsetof(struct pt_regs, r12), offsetof(struct pt_regs, r13), offsetof(struct pt_regs, r14), offsetof(struct pt_regs, r15), offsetof(struct pt_regs, r16), offsetof(struct pt_regs, r17), offsetof(struct pt_regs, r18), offsetof(struct pt_regs, r19), offsetof(struct pt_regs, r20), offsetof(struct pt_regs, r21), offsetof(struct pt_regs, r22), offsetof(struct pt_regs, r23), offsetof(struct pt_regs, r24), offsetof(struct pt_regs, r25), offsetof(struct pt_regs, r26), offsetof(struct pt_regs, r27), offsetof(struct pt_regs, r28), offsetof(struct pt_regs, r29), offsetof(struct pt_regs, r30), offsetof(struct pt_regs, r31), }, .hash = { [0 ... UNW_HASH_SIZE - 1] = -1 }, #ifdef UNW_DEBUG .preg_name = { "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp", "r4", "r5", "r6", "r7", "ar.unat", "pr", "ar.lc", "ar.fpsr", "b1", "b2", "b3", "b4", "b5", "f2", "f3", "f4", "f5", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31" } #endif }; static inline int read_only (void *addr) { return (unsigned long) ((char *) addr - (char *) &unw.r0) < sizeof(unw.r0); } /* * Returns offset of rREG in struct pt_regs. */ static inline unsigned long pt_regs_off (unsigned long reg) { short off = -1; if (reg < ARRAY_SIZE(unw.pt_regs_offsets)) off = unw.pt_regs_offsets[reg]; if (off < 0) { UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __func__, reg); off = 0; } return (unsigned long) off; } static inline struct pt_regs * get_scratch_regs (struct unw_frame_info *info) { if (!info->pt) { /* This should not happen with valid unwind info. 
*/ UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __func__); if (info->flags & UNW_FLAG_INTERRUPT_FRAME) info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1); else info->pt = info->sp - 16; } UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __func__, info->sp, info->pt); return (struct pt_regs *) info->pt; } /* Unwind accessors. */ int unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write) { unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat; struct unw_ireg *ireg; struct pt_regs *pt; if ((unsigned) regnum - 1 >= 127) { if (regnum == 0 && !write) { *val = 0; /* read r0 always returns 0 */ *nat = 0; return 0; } UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n", __func__, regnum); return -1; } if (regnum < 32) { if (regnum >= 4 && regnum <= 7) { /* access a preserved register */ ireg = &info->r4 + (regnum - 4); addr = ireg->loc; if (addr) { nat_addr = addr + ireg->nat.off; switch (ireg->nat.type) { case UNW_NAT_VAL: /* simulate getf.sig/setf.sig */ if (write) { if (*nat) { /* write NaTVal and be done with it */ addr[0] = 0; addr[1] = 0x1fffe; return 0; } addr[1] = 0x1003e; } else { if (addr[0] == 0 && addr[1] == 0x1ffe) { /* return NaT and be done with it */ *val = 0; *nat = 1; return 0; } } /* fall through */ case UNW_NAT_NONE: dummy_nat = 0; nat_addr = &dummy_nat; break; case UNW_NAT_MEMSTK: nat_mask = (1UL << ((long) addr & 0x1f8)/8); break; case UNW_NAT_REGSTK: nat_addr = ia64_rse_rnat_addr(addr); if ((unsigned long) addr < info->regstk.limit || (unsigned long) addr >= info->regstk.top) { UNW_DPRINT(0, "unwind.%s: %p outside of regstk " "[0x%lx-0x%lx)\n", __func__, (void *) addr, info->regstk.limit, info->regstk.top); return -1; } if ((unsigned long) nat_addr >= info->regstk.top) nat_addr = &info->sw->ar_rnat; nat_mask = (1UL << ia64_rse_slot_num(addr)); break; } } else { addr = &info->sw->r4 + (regnum - 4); nat_addr = &info->sw->ar_unat; nat_mask = (1UL << ((long) addr & 
0x1f8)/8); } } else { /* access a scratch register */ pt = get_scratch_regs(info); addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum)); if (info->pri_unat_loc) nat_addr = info->pri_unat_loc; else nat_addr = &info->sw->caller_unat; nat_mask = (1UL << ((long) addr & 0x1f8)/8); } } else { /* access a stacked register */ addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32); nat_addr = ia64_rse_rnat_addr(addr); if ((unsigned long) addr < info->regstk.limit || (unsigned long) addr >= info->regstk.top) { UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside " "of rbs\n", __func__); return -1; } if ((unsigned long) nat_addr >= info->regstk.top) nat_addr = &info->sw->ar_rnat; nat_mask = (1UL << ia64_rse_slot_num(addr)); } if (write) { if (read_only(addr)) { UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", __func__); } else { *addr = *val; if (*nat) *nat_addr |= nat_mask; else *nat_addr &= ~nat_mask; } } else { if ((*nat_addr & nat_mask) == 0) { *val = *addr; *nat = 0; } else { *val = 0; /* if register is a NaT, *addr may contain kernel data! 
*/ *nat = 1; } } return 0; } EXPORT_SYMBOL(unw_access_gr); int unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write) { unsigned long *addr; struct pt_regs *pt; switch (regnum) { /* scratch: */ case 0: pt = get_scratch_regs(info); addr = &pt->b0; break; case 6: pt = get_scratch_regs(info); addr = &pt->b6; break; case 7: pt = get_scratch_regs(info); addr = &pt->b7; break; /* preserved: */ case 1: case 2: case 3: case 4: case 5: addr = *(&info->b1_loc + (regnum - 1)); if (!addr) addr = &info->sw->b1 + (regnum - 1); break; default: UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n", __func__, regnum); return -1; } if (write) if (read_only(addr)) { UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", __func__); } else *addr = *val; else *val = *addr; return 0; } EXPORT_SYMBOL(unw_access_br); int unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write) { struct ia64_fpreg *addr = NULL; struct pt_regs *pt; if ((unsigned) (regnum - 2) >= 126) { UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n", __func__, regnum); return -1; } if (regnum <= 5) { addr = *(&info->f2_loc + (regnum - 2)); if (!addr) addr = &info->sw->f2 + (regnum - 2); } else if (regnum <= 15) { if (regnum <= 11) { pt = get_scratch_regs(info); addr = &pt->f6 + (regnum - 6); } else addr = &info->sw->f12 + (regnum - 12); } else if (regnum <= 31) { addr = info->fr_loc[regnum - 16]; if (!addr) addr = &info->sw->f16 + (regnum - 16); } else { struct task_struct *t = info->task; if (write) ia64_sync_fph(t); else ia64_flush_fph(t); addr = t->thread.fph + (regnum - 32); } if (write) if (read_only(addr)) { UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", __func__); } else *addr = *val; else *val = *addr; return 0; } EXPORT_SYMBOL(unw_access_fr); int unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write) { unsigned long *addr; struct pt_regs *pt; switch 
(regnum) { case UNW_AR_BSP: addr = info->bsp_loc; if (!addr) addr = &info->sw->ar_bspstore; break; case UNW_AR_BSPSTORE: addr = info->bspstore_loc; if (!addr) addr = &info->sw->ar_bspstore; break; case UNW_AR_PFS: addr = info->pfs_loc; if (!addr) addr = &info->sw->ar_pfs; break; case UNW_AR_RNAT: addr = info->rnat_loc; if (!addr) addr = &info->sw->ar_rnat; break; case UNW_AR_UNAT: addr = info->unat_loc; if (!addr) addr = &info->sw->caller_unat; break; case UNW_AR_LC: addr = info->lc_loc; if (!addr) addr = &info->sw->ar_lc; break; case UNW_AR_EC: if (!info->cfm_loc) return -1; if (write) *info->cfm_loc = (*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52); else *val = (*info->cfm_loc >> 52) & 0x3f; return 0; case UNW_AR_FPSR: addr = info->fpsr_loc; if (!addr) addr = &info->sw->ar_fpsr; break; case UNW_AR_RSC: pt = get_scratch_regs(info); addr = &pt->ar_rsc; break; case UNW_AR_CCV: pt = get_scratch_regs(info); addr = &pt->ar_ccv; break; case UNW_AR_CSD: pt = get_scratch_regs(info); addr = &pt->ar_csd; break; case UNW_AR_SSD: pt = get_scratch_regs(info); addr = &pt->ar_ssd; break; default: UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n", __func__, regnum); return -1; } if (write) { if (read_only(addr)) { UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", __func__); } else *addr = *val; } else *val = *addr; return 0; } EXPORT_SYMBOL(unw_access_ar); int unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write) { unsigned long *addr; addr = info->pr_loc; if (!addr) addr = &info->sw->pr; if (write) { if (read_only(addr)) { UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", __func__); } else *addr = *val; } else *val = *addr; return 0; } EXPORT_SYMBOL(unw_access_pr); /* Routines to manipulate the state stack. 
 */

/*
 * Save a copy of the current register state on SR's state stack so that a
 * later pop() (or COPY_STATE) can restore it.  On allocation failure the
 * push is dropped with a diagnostic — the unwinder degrades, it must not die.
 */
static inline void
push (struct unw_state_record *sr)
{
	struct unw_reg_state *rs;

	rs = alloc_reg_state();
	if (!rs) {
		printk(KERN_ERR "unwind: cannot stack reg state!\n");
		return;
	}
	memcpy(rs, &sr->curr, sizeof(*rs));
	sr->curr.next = rs;
}

/* Pop the topmost saved register state back into SR->curr and free it. */
static void
pop (struct unw_state_record *sr)
{
	struct unw_reg_state *rs = sr->curr.next;

	if (!rs) {
		printk(KERN_ERR "unwind: stack underflow!\n");
		return;
	}
	memcpy(&sr->curr, rs, sizeof(*rs));
	free_reg_state(rs);
}

/* Make a copy of the state stack.  Non-recursive to avoid stack overflows. */
static struct unw_reg_state *
dup_state_stack (struct unw_reg_state *rs)
{
	struct unw_reg_state *copy, *prev = NULL, *first = NULL;

	while (rs) {
		copy = alloc_reg_state();
		if (!copy) {
			printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
			return NULL;
		}
		memcpy(copy, rs, sizeof(*copy));
		if (first)
			prev->next = copy;
		else
			first = copy;
		rs = rs->next;
		prev = copy;
	}
	return first;
}

/* Free all stacked register states (but not RS itself). */
static void
free_state_stack (struct unw_reg_state *rs)
{
	struct unw_reg_state *p, *next;

	for (p = rs->next; p != NULL; p = next) {
		next = p->next;
		free_reg_state(p);
	}
	rs->next = NULL;
}

/* Unwind decoder routines */

/*
 * Translate an unwind-descriptor ABREG register encoding into our internal
 * unw_register_index.  MEMORY selects which primary-UNaT register the
 * ambiguous encoding 0x62 maps to.  A bad encoding is reported and mapped
 * to UNW_REG_LC so the caller still receives a valid index.
 */
static enum unw_register_index __attribute_const__
decode_abreg (unsigned char abreg, int memory)
{
	switch (abreg) {
	      case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
	      case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
	      case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
	      case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
	      case 0x60: return UNW_REG_PR;
	      case 0x61: return UNW_REG_PSP;
	      case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
	      case 0x63: return UNW_REG_RP;
	      case 0x64: return UNW_REG_BSP;
	      case 0x65: return UNW_REG_BSPSTORE;
	      case 0x66: return UNW_REG_RNAT;
	      case 0x67: return UNW_REG_UNAT;
	      case 0x68: return UNW_REG_FPSR;
	      case 0x69: return UNW_REG_PFS;
	      case 0x6a: return UNW_REG_LC;
	      default:
		break;
	}
	UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __func__, abreg);
	return UNW_REG_LC;
}

/*
 * Record where/when REG gets saved.  An already-recorded save time is
 * preserved (only the first save in a region counts).
 */
static void
set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
{
	reg->val = val;
	reg->where = where;
	if (reg->when == UNW_WHEN_NEVER)
		reg->when = when;
}

/*
 * Walk [LO,HI] from the top down and assign each register still marked
 * SPILL_HOME a psp-relative slot of REGSIZE bytes, growing the spill
 * area downwards from *OFFP.
 */
static void
alloc_spill_area (unsigned long *offp, unsigned long regsize,
		  struct unw_reg_info *lo, struct unw_reg_info *hi)
{
	struct unw_reg_info *reg;

	for (reg = hi; reg >= lo; --reg) {
		if (reg->where == UNW_WHERE_SPILL_HOME) {
			reg->where = UNW_WHERE_PSPREL;
			*offp -= regsize;
			reg->val = *offp;
		}
	}
}

/*
 * Mark the next SPILL_HOME register in [*REGP,LIM] as saved at time T and
 * advance *REGP past it.  An imask bit with no matching spill is reported.
 */
static inline void
spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
{
	struct unw_reg_info *reg;

	for (reg = *regp; reg <= lim; ++reg) {
		if (reg->where == UNW_WHERE_SPILL_HOME) {
			reg->when = t;
			*regp = reg + 1;
			return;
		}
	}
	UNW_DPRINT(0, "unwind.%s: excess spill!\n", __func__);
}

/*
 * Resolve everything that was left implicit by the prologue's descriptors:
 * GR save locations, per-slot spill times from the imask, and the layout
 * of the memory-stack spill area.
 */
static inline void
finish_prologue (struct unw_state_record *sr)
{
	struct unw_reg_info *reg;
	unsigned long off;
	int i;

	/*
	 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
	 * for Using Unwind Descriptors", rule 3):
	 */
	for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
		reg = sr->curr.reg + unw.save_order[i];
		if (reg->where == UNW_WHERE_GR_SAVE) {
			reg->where = UNW_WHERE_GR;
			reg->val = sr->gr_save_loc++;
		}
	}

	/*
	 * Next, compute when the fp, general, and branch registers get
	 * saved.  This must come before alloc_spill_area() because
	 * we need to know which registers are spilled to their home
	 * locations.
	 */
	if (sr->imask) {
		unsigned char kind, mask = 0, *cp = sr->imask;
		int t;
		/* imask 2-bit codes index these register classes (1=FR, 2=GR, 3=BR): */
		static const unsigned char limit[3] = {
			UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
		};
		struct unw_reg_info *(regs[3]);

		regs[0] = sr->curr.reg + UNW_REG_F2;
		regs[1] = sr->curr.reg + UNW_REG_R4;
		regs[2] = sr->curr.reg + UNW_REG_B1;

		/* four 2-bit entries per imask byte, most-significant pair first: */
		for (t = 0; t < sr->region_len; ++t) {
			if ((t & 3) == 0)
				mask = *cp++;
			kind = (mask >> 2*(3-(t & 3))) & 3;
			if (kind > 0)
				spill_next_when(&regs[kind - 1],
						sr->curr.reg + limit[kind - 1],
						sr->region_start + t);
		}
	}
	/*
	 * Next, lay out the memory stack spill area:
	 */
	if (sr->any_spills) {
		off = sr->spill_offset;
		alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
		alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
		alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
	}
}

/*
 * Region header descriptors.
 */

/*
 * Start a new prologue (BODY=0) or body (BODY=1) region of RLEN slots.
 * For a prologue, MASK says which of rp/ar.pfs/psp/pr were saved to
 * consecutive general registers starting at GRSAVE.  Decoding stops
 * (sr->done) once the region containing the unwind target is consumed.
 */
static void
desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
	       struct unw_state_record *sr)
{
	int i, region_start;

	if (!(sr->in_body || sr->first_region))
		finish_prologue(sr);
	sr->first_region = 0;

	/* check if we're done: */
	if (sr->when_target < sr->region_start + sr->region_len) {
		sr->done = 1;
		return;
	}

	region_start = sr->region_start + sr->region_len;

	for (i = 0; i < sr->epilogue_count; ++i)
		pop(sr);
	sr->epilogue_count = 0;
	sr->epilogue_start = UNW_WHEN_NEVER;

	sr->region_start = region_start;
	sr->region_len = rlen;
	sr->in_body = body;

	if (!body) {
		push(sr);

		/* mask bits (msb first) select rp, ar.pfs, psp, pr in save_order: */
		for (i = 0; i < 4; ++i) {
			if (mask & 0x8)
				set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
					sr->region_start + sr->region_len - 1, grsave++);
			mask <<= 1;
		}
		sr->gr_save_loc = grsave;
		sr->any_spills = 0;
		sr->imask = NULL;
		sr->spill_offset = 0x10;	/* default to psp+16 */
	}
}

/*
 * Prologue descriptors.
 */

/* P10 (unwabi) descriptor: abi 3 / context 'i' marks an interrupt frame. */
static inline void
desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
{
	if (abi == 3 && context == 'i') {
		sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
		UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __func__);
	}
	else
		UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
			   __func__, abi, context);
}

/* br_gr: branch regs b1..b5 selected by BRMASK saved to GRs starting at GR. */
static inline void
desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
{
	int i;

	for (i = 0; i < 5; ++i) {
		if (brmask & 1)
			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
				sr->region_start + sr->region_len - 1, gr++);
		brmask >>= 1;
	}
}

/* br_mem: branch regs selected by BRMASK spilled to their memory home slots. */
static inline void
desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
{
	int i;

	for (i = 0; i < 5; ++i) {
		if (brmask & 1) {
			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
				sr->region_start + sr->region_len - 1, 0);
			sr->any_spills = 1;
		}
		brmask >>= 1;
	}
}

/*
 * frgr_mem: r4-r7 per GRMASK and f2-f5/f16-f31 per FRMASK spilled to their
 * memory home slots (the 20 frmask bits cover f2-f5 then f16-f31).
 */
static inline void
desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
{
	int i;

	for (i = 0; i < 4; ++i) {
		if ((grmask & 1) != 0) {
			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
				sr->region_start + sr->region_len - 1, 0);
			sr->any_spills = 1;
		}
		grmask >>= 1;
	}
	for (i = 0; i < 20; ++i) {
		if ((frmask & 1) != 0) {
			/* bits 0-3 map to f2-f5, bits 4-19 map to f16-f31: */
			int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
			set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
				sr->region_start + sr->region_len - 1, 0);
			sr->any_spills = 1;
		}
		frmask >>= 1;
	}
}

/* fr_mem: floating-point regs f2-f5 selected by FRMASK spilled to memory. */
static inline void
desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
{
	int i;

	for (i = 0; i < 4; ++i) {
		if ((frmask & 1) != 0) {
			set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
				sr->region_start + sr->region_len - 1, 0);
			sr->any_spills = 1;
		}
		frmask >>= 1;
	}
}

/* gr_gr: r4-r7 selected by GRMASK saved to general regs starting at GR. */
static inline void
desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
{
	int i;

	for (i = 0; i < 4; ++i) {
		if ((grmask & 1) != 0)
			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
				sr->region_start + sr->region_len - 1, gr++);
		grmask >>= 1;
	}
}

/* gr_mem: r4-r7 selected by GRMASK spilled to their memory home slots. */
static inline void
desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
{
	int i;

	for (i = 0; i < 4; ++i) {
		if ((grmask & 1) != 0) {
			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
				sr->region_start + sr->region_len - 1, 0);
			sr->any_spills = 1;
		}
		grmask >>= 1;
	}
}

/* mem_stack_f: fixed-size frame of 16*SIZE bytes, allocated at time T. */
static inline void
desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
{
	set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
		sr->region_start + min_t(int, t, sr->region_len - 1), 16*size);
}

/* mem_stack_v: variable-size frame; psp established at time T. */
static inline void
desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
{
	sr->curr.reg[UNW_REG_PSP].when =
		sr->region_start + min_t(int, t, sr->region_len - 1);
}

/* reg_gr: preserved register REG saved to general register DST. */
static inline void
desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
{
	set_reg(sr->curr.reg + reg, UNW_WHERE_GR,
		sr->region_start + sr->region_len - 1, dst);
}

/* reg_psprel: REG saved at psp-relative offset PSPOFF (word-encoded). */
static inline void
desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
{
	set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL,
		sr->region_start + sr->region_len - 1, 0x10 - 4*pspoff);
}

/* reg_sprel: REG saved at sp-relative offset SPOFF (word-encoded). */
static inline void
desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
{
	set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL,
		sr->region_start + sr->region_len - 1, 4*spoff);
}
static inline void desc_rp_br (unsigned char dst, struct unw_state_record *sr) { sr->return_link_reg = dst; } static inline void desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr) { struct unw_reg_info *reg = sr->curr.reg + regnum; if (reg->where == UNW_WHERE_NONE) reg->where = UNW_WHERE_GR_SAVE; reg->when = sr->region_start + min_t(int, t, sr->region_len - 1); } static inline void desc_spill_base (unw_word pspoff, struct unw_state_record *sr) { sr->spill_offset = 0x10 - 4*pspoff; } static inline unsigned char * desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr) { sr->imask = imaskp; return imaskp + (2*sr->region_len + 7)/8; } /* * Body descriptors. */ static inline void desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr) { sr->epilogue_start = sr->region_start + sr->region_len - 1 - t; sr->epilogue_count = ecount + 1; } static inline void desc_copy_state (unw_word label, struct unw_state_record *sr) { struct unw_labeled_state *ls; for (ls = sr->labeled_states; ls; ls = ls->next) { if (ls->label == label) { free_state_stack(&sr->curr); memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr)); sr->curr.next = dup_state_stack(ls->saved_state.next); return; } } printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label); } static inline void desc_label_state (unw_word label, struct unw_state_record *sr) { struct unw_labeled_state *ls; ls = alloc_labeled_state(); if (!ls) { printk(KERN_ERR "unwind.desc_label_state(): out of memory\n"); return; } ls->label = label; memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state)); ls->saved_state.next = dup_state_stack(sr->curr.next); /* insert into list of labeled states: */ ls->next = sr->labeled_states; sr->labeled_states = ls; } /* * General descriptors. 
 */

/*
 * Check whether a predicated descriptor applies: it must take effect
 * before the unwind target and its qualifying predicate QP must be set.
 * Predicates consulted this way are added to sr->pr_mask so the cached
 * script is only reused when they match.
 */
static inline int
desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
{
	if (sr->when_target <= sr->region_start + min_t(int, t, sr->region_len - 1))
		return 0;
	if (qp > 0) {
		if ((sr->pr_val & (1UL << qp)) == 0)
			return 0;
		sr->pr_mask |= (1UL << qp);
	}
	return 1;
}

/* restore_p: register ABREG is restored to its original value at time T. */
static inline void
desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
{
	struct unw_reg_info *r;

	if (!desc_is_active(qp, t, sr))
		return;

	r = sr->curr.reg + decode_abreg(abreg, 0);
	r->where = UNW_WHERE_NONE;
	r->when = UNW_WHEN_NEVER;
	r->val = 0;
}

/*
 * spill_reg_p: ABREG spilled at time T to a branch register (X set),
 * floating-point register (ytreg bit 7 set), or general register.
 */
static inline void
desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
		  unsigned char ytreg, struct unw_state_record *sr)
{
	enum unw_where where = UNW_WHERE_GR;
	struct unw_reg_info *r;

	if (!desc_is_active(qp, t, sr))
		return;

	if (x)
		where = UNW_WHERE_BR;
	else if (ytreg & 0x80)
		where = UNW_WHERE_FR;

	r = sr->curr.reg + decode_abreg(abreg, 0);
	r->where = where;
	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
	r->val = (ytreg & 0x7f);
}

/* spill_psprel_p: ABREG spilled to psp-relative offset PSPOFF at time T. */
static inline void
desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
		     struct unw_state_record *sr)
{
	struct unw_reg_info *r;

	if (!desc_is_active(qp, t, sr))
		return;

	r = sr->curr.reg + decode_abreg(abreg, 1);
	r->where = UNW_WHERE_PSPREL;
	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
	r->val = 0x10 - 4*pspoff;
}

/* spill_sprel_p: ABREG spilled to sp-relative offset SPOFF at time T. */
static inline void
desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
		    struct unw_state_record *sr)
{
	struct unw_reg_info *r;

	if (!desc_is_active(qp, t, sr))
		return;

	r = sr->curr.reg + decode_abreg(abreg, 1);
	r->where = UNW_WHERE_SPREL;
	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
	r->val = 4*spoff;
}

#define UNW_DEC_BAD_CODE(code)			printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
						       code);

/*
 * These UNW_DEC_* hooks bind the generic descriptor decoder (included as
 * unwind_decoder.c below) to the desc_* handlers above.
 */

/*
 * region headers:
 */
#define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg)	desc_prologue(0,r,m,gr,arg)
#define UNW_DEC_PROLOGUE(fmt,b,r,arg)		desc_prologue(b,r,0,32,arg)
/*
 * prologue descriptors:
 */
#define UNW_DEC_ABI(fmt,a,c,arg)		desc_abi(a,c,arg)
#define UNW_DEC_BR_GR(fmt,b,g,arg)		desc_br_gr(b,g,arg)
#define UNW_DEC_BR_MEM(fmt,b,arg)		desc_br_mem(b,arg)
#define UNW_DEC_FRGR_MEM(fmt,g,f,arg)		desc_frgr_mem(g,f,arg)
#define UNW_DEC_FR_MEM(fmt,f,arg)		desc_fr_mem(f,arg)
#define UNW_DEC_GR_GR(fmt,m,g,arg)		desc_gr_gr(m,g,arg)
#define UNW_DEC_GR_MEM(fmt,m,arg)		desc_gr_mem(m,arg)
#define UNW_DEC_MEM_STACK_F(fmt,t,s,arg)	desc_mem_stack_f(t,s,arg)
#define UNW_DEC_MEM_STACK_V(fmt,t,arg)		desc_mem_stack_v(t,arg)
#define UNW_DEC_REG_GR(fmt,r,d,arg)		desc_reg_gr(r,d,arg)
#define UNW_DEC_REG_PSPREL(fmt,r,o,arg)		desc_reg_psprel(r,o,arg)
#define UNW_DEC_REG_SPREL(fmt,r,o,arg)		desc_reg_sprel(r,o,arg)
#define UNW_DEC_REG_WHEN(fmt,r,t,arg)		desc_reg_when(r,t,arg)
#define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg)	desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
#define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg)	desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
#define UNW_DEC_PRIUNAT_GR(fmt,r,arg)		desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
#define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg)	desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
#define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg)	desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
#define UNW_DEC_RP_BR(fmt,d,arg)		desc_rp_br(d,arg)
#define UNW_DEC_SPILL_BASE(fmt,o,arg)		desc_spill_base(o,arg)
#define UNW_DEC_SPILL_MASK(fmt,m,arg)		(m = desc_spill_mask(m,arg))
/*
 * body descriptors:
 */
#define UNW_DEC_EPILOGUE(fmt,t,c,arg)		desc_epilogue(t,c,arg)
#define UNW_DEC_COPY_STATE(fmt,l,arg)		desc_copy_state(l,arg)
#define UNW_DEC_LABEL_STATE(fmt,l,arg)		desc_label_state(l,arg)
/*
 * general unwind descriptors:
 */
#define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg)	desc_spill_reg_p(p,t,a,x,y,arg)
#define UNW_DEC_SPILL_REG(f,t,a,x,y,arg)	desc_spill_reg_p(0,t,a,x,y,arg)
#define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg)	desc_spill_psprel_p(p,t,a,o,arg)
#define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg)	desc_spill_psprel_p(0,t,a,o,arg)
#define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg)	desc_spill_sprel_p(p,t,a,o,arg)
#define UNW_DEC_SPILL_SPREL(f,t,a,o,arg)	desc_spill_sprel_p(0,t,a,o,arg)
#define UNW_DEC_RESTORE_P(f,p,t,a,arg)		desc_restore_p(p,t,a,arg)
#define UNW_DEC_RESTORE(f,t,a,arg)		desc_restore_p(0,t,a,arg)

#include "unwind_decoder.c"


/* Unwind scripts. */

/* Hash an instruction pointer into the script hash table (Fibonacci hashing). */
static inline unw_hash_index_t
hash (unsigned long ip)
{
	/* magic number = ((sqrt(5)-1)/2)*2^64 */
	static const unsigned long hashmagic = 0x9e3779b97f4a7c16UL;

	return (ip >> 4) * hashmagic >> (64 - UNW_LOG_HASH_SIZE);
}

/*
 * Check whether SCRIPT matches (IP,PR).  On a hit the script's read lock
 * is RETAINED so the caller can safely run the script; on a miss it is
 * released.
 */
static inline long
cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
{
	read_lock(&script->lock);
	if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
		/* keep the read lock... */
		return 1;
	read_unlock(&script->lock);
	return 0;
}

/*
 * Look up a cached unwind script for INFO's (ip,pr): try the hint slot
 * first, then walk the hash collision chain.  Returns the script with its
 * read lock held, or NULL if not cached.
 */
static inline struct unw_script *
script_lookup (struct unw_frame_info *info)
{
	struct unw_script *script = unw.cache + info->hint;
	unsigned short index;
	unsigned long ip, pr;

	if (UNW_DEBUG_ON(0))
		return NULL;	/* Always regenerate scripts in debug mode */

	STAT(++unw.stat.cache.lookups);

	ip = info->ip;
	pr = info->pr;

	if (cache_match(script, ip, pr)) {
		STAT(++unw.stat.cache.hinted_hits);
		return script;
	}

	index = unw.hash[hash(ip)];
	if (index >= UNW_CACHE_SIZE)
		return NULL;

	script = unw.cache + index;
	while (1) {
		if (cache_match(script, ip, pr)) {
			/* update hint; no locking required as single-word writes are atomic */
			STAT(++unw.stat.cache.normal_hits);
			unw.cache[info->prev_script].hint = script - unw.cache;
			return script;
		}
		if (script->coll_chain >= UNW_HASH_SIZE)
			return NULL;
		script = unw.cache + script->coll_chain;
		STAT(++unw.stat.cache.collision_chain_traversals);
	}
}

/*
 * On returning, a write lock for the SCRIPT is still being held.
 */
static inline struct unw_script *
script_new (unsigned long ip)
{
	struct unw_script *script, *prev, *tmp;
	unw_hash_index_t index;
	unsigned short head;

	STAT(++unw.stat.script.news);

	/*
	 * Can't (easily) use cmpxchg() here because of ABA problem
	 * that is intrinsic in cmpxchg()...
	 */
	head = unw.lru_head;
	script = unw.cache + head;
	unw.lru_head = script->lru_chain;

	/*
	 * We'd deadlock here if we interrupted a thread that is holding a read lock on
	 * script->lock.  Thus, if the write_trylock() fails, we simply bail out.  The
	 * alternative would be to disable interrupts whenever we hold a read-lock, but
	 * that seems silly.
	 */
	if (!write_trylock(&script->lock))
		return NULL;

	/* re-insert script at the tail of the LRU chain: */
	unw.cache[unw.lru_tail].lru_chain = head;
	unw.lru_tail = head;

	/* remove the old script from the hash table (if it's there): */
	if (script->ip) {
		index = hash(script->ip);
		tmp = unw.cache + unw.hash[index];
		prev = NULL;
		while (1) {
			if (tmp == script) {
				if (prev)
					prev->coll_chain = tmp->coll_chain;
				else
					unw.hash[index] = tmp->coll_chain;
				break;
			} else
				prev = tmp;
			if (tmp->coll_chain >= UNW_CACHE_SIZE)
				/* old script wasn't in the hash-table */
				break;
			tmp = unw.cache + tmp->coll_chain;
		}
	}

	/* enter new script in the hash table */
	index = hash(ip);
	script->coll_chain = unw.hash[index];
	unw.hash[index] = script - unw.cache;

	script->ip = ip;	/* set new IP while we're holding the locks */

	STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);

	script->flags = 0;
	script->hint = 0;
	script->count = 0;
	return script;
}

/* Copy the predicate mask/values into SCRIPT once it has been fully built. */
static void
script_finalize (struct unw_script *script, struct unw_state_record *sr)
{
	script->pr_mask = sr->pr_mask;
	script->pr_val = sr->pr_val;
	/*
	 * We could down-grade our write-lock on script->lock here but
	 * the rwlock API doesn't offer atomic lock downgrading, so
	 * we'll just keep the write-lock and release it later when
	 * we're done using the script.
	 */
}

/* Append INSN to SCRIPT; a full script drops the insn with a diagnostic. */
static inline void
script_emit (struct unw_script *script, struct unw_insn insn)
{
	if (script->count >= UNW_MAX_SCRIPT_LEN) {
		UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
			   __func__, UNW_MAX_SCRIPT_LEN);
		return;
	}
	script->insn[script->count++] = insn;
}

/*
 * Emit the script instruction that recovers the NaT bit for preserved
 * register I, based on where that register was saved.
 */
static inline void
emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
{
	struct unw_reg_info *r = sr->curr.reg + i;
	enum unw_insn_opcode opc;
	struct unw_insn insn;
	unsigned long val = 0;

	switch (r->where) {
	      case UNW_WHERE_GR:
		if (r->val >= 32) {
			/* register got spilled to a stacked register */
			opc = UNW_INSN_SETNAT_TYPE;
			val = UNW_NAT_REGSTK;
		} else
			/* register got spilled to a scratch register */
			opc = UNW_INSN_SETNAT_MEMSTK;
		break;

	      case UNW_WHERE_FR:
		opc = UNW_INSN_SETNAT_TYPE;
		val = UNW_NAT_VAL;
		break;

	      case UNW_WHERE_BR:
		opc = UNW_INSN_SETNAT_TYPE;
		val = UNW_NAT_NONE;
		break;

	      case UNW_WHERE_PSPREL:
	      case UNW_WHERE_SPREL:
		opc = UNW_INSN_SETNAT_MEMSTK;
		break;

	      default:
		UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
			   __func__, r->where);
		return;
	}
	insn.opc = opc;
	insn.dst = unw.preg_index[i];
	insn.val = val;
	script_emit(script, insn);
}

/*
 * Translate the save location of preserved register I (from the state
 * record) into one or more script instructions, including NaT recovery
 * for r4-r7 and the extra load that turns psp's save location into its
 * value.
 */
static void
compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
{
	struct unw_reg_info *r = sr->curr.reg + i;
	enum unw_insn_opcode opc;
	unsigned long val, rval;
	struct unw_insn insn;
	long need_nat_info;

	if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
		return;

	opc = UNW_INSN_MOVE;
	val = rval = r->val;
	need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);

	switch (r->where) {
	      case UNW_WHERE_GR:
		if (rval >= 32) {
			opc = UNW_INSN_MOVE_STACKED;
			val = rval - 32;
		} else if (rval >= 4 && rval <= 7) {
			if (need_nat_info) {
				opc = UNW_INSN_MOVE2;
				need_nat_info = 0;
			}
			val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
		} else if (rval == 0) {
			opc = UNW_INSN_MOVE_CONST;
			val = 0;
		} else {
			/* register got spilled to a scratch register */
			opc = UNW_INSN_MOVE_SCRATCH;
			val = pt_regs_off(rval);
		}
		break;

	      case UNW_WHERE_FR:
		if (rval <= 5)
			val = unw.preg_index[UNW_REG_F2 + (rval - 2)];
		else if (rval >= 16 && rval <= 31)
			val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
		else {
			opc = UNW_INSN_MOVE_SCRATCH;
			if (rval <= 11)
				val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
			else
				UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
					   __func__, rval);
		}
		break;

	      case UNW_WHERE_BR:
		if (rval >= 1 && rval <= 5)
			val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
		else {
			opc = UNW_INSN_MOVE_SCRATCH;
			if (rval == 0)
				val = offsetof(struct pt_regs, b0);
			else if (rval == 6)
				val = offsetof(struct pt_regs, b6);
			else
				val = offsetof(struct pt_regs, b7);
		}
		break;

	      case UNW_WHERE_SPREL:
		opc = UNW_INSN_ADD_SP;
		break;

	      case UNW_WHERE_PSPREL:
		opc = UNW_INSN_ADD_PSP;
		break;

	      default:
		UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n",
			   __func__, i, r->where);
		break;
	}
	insn.opc = opc;
	insn.dst = unw.preg_index[i];
	insn.val = val;
	script_emit(script, insn);
	if (need_nat_info)
		emit_nat_info(sr, i, script);

	if (i == UNW_REG_PSP) {
		/*
		 * info->psp must contain the _value_ of the previous
		 * sp, not it's save location.  We get this by
		 * dereferencing the value we just stored in
		 * info->psp:
		 */
		insn.opc = UNW_INSN_LOAD;
		insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
		script_emit(script, insn);
	}
}

/*
 * Binary-search TABLE for the unwind entry covering REL_IP.  Returns NULL
 * when no entry covers it.  NOTE(review): assumes table->length > 0 (the
 * final range check would dereference e == NULL otherwise) — callers
 * appear to guarantee non-empty tables; confirm against table creation.
 */
static inline const struct unw_table_entry *
lookup (struct unw_table *table, unsigned long rel_ip)
{
	const struct unw_table_entry *e = NULL;
	unsigned long lo, hi, mid;

	/* do a binary search for right entry: */
	for (lo = 0, hi = table->length; lo < hi; ) {
		mid = (lo + hi) / 2;
		e = &table->array[mid];
		if (rel_ip < e->start_offset)
			hi = mid;
		else if (rel_ip >= e->end_offset)
			lo = mid + 1;
		else
			break;
	}
	if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
		return NULL;
	return e;
}

/*
 * Build an unwind script that unwinds from state OLD_STATE to the
 * entrypoint of the function that called OLD_STATE.
 */
static inline struct unw_script *
build_script (struct unw_frame_info *info)
{
	const struct unw_table_entry *e = NULL;
	struct unw_script *script = NULL;
	struct unw_labeled_state *ls, *next;
	unsigned long ip = info->ip;
	struct unw_state_record sr;
	struct unw_table *table, *prev;
	struct unw_reg_info *r;
	struct unw_insn insn;
	u8 *dp, *desc_end;
	u64 hdr;
	int i;
	STAT(unsigned long start, parse_start;)

	STAT(++unw.stat.script.builds; start = ia64_get_itc());

	/* build state record */
	memset(&sr, 0, sizeof(sr));
	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
		r->when = UNW_WHEN_NEVER;
	sr.pr_val = info->pr;

	UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __func__, ip);
	script = script_new(ip);
	if (!script) {
		UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __func__);
		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
		return NULL;
	}
	unw.cache[info->prev_script].hint = script - unw.cache;

	/* search the kernels and the modules' unwind tables for IP: */

	STAT(parse_start = ia64_get_itc());

	prev = NULL;
	for (table = unw.tables; table; table = table->next) {
		if (ip >= table->start && ip < table->end) {
			/*
			 * Leave the kernel unwind table at the very front,
			 * lest moving it breaks some assumption elsewhere.
			 * Otherwise, move the matching table to the second
			 * position in the list so that traversals can benefit
			 * from commonality in backtrace paths.
			 */
			if (prev && prev != unw.tables) {
				/* unw is safe - we're already spinlocked */
				prev->next = table->next;
				table->next = unw.tables->next;
				unw.tables->next = table;
			}
			e = lookup(table, ip - table->segment_base);
			break;
		}
		prev = table;
	}
	if (!e) {
		/* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
		UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
			   __func__, ip, unw.cache[info->prev_script].ip);
		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
		sr.curr.reg[UNW_REG_RP].when = -1;
		sr.curr.reg[UNW_REG_RP].val = 0;
		compile_reg(&sr, UNW_REG_RP, script);
		script_finalize(script, &sr);
		STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
		return script;
	}

	/* three instruction slots per 16-byte bundle, plus the slot number: */
	sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
			  + (ip & 0xfUL));
	hdr = *(u64 *) (table->segment_base + e->info_offset);
	dp = (u8 *) (table->segment_base + e->info_offset + 8);
	desc_end = dp + 8*UNW_LENGTH(hdr);

	/* run the descriptor decoder until the target slot is covered: */
	while (!sr.done && dp < desc_end)
		dp = unw_decode(dp, sr.in_body, &sr);

	if (sr.when_target > sr.epilogue_start) {
		/*
		 * sp has been restored and all values on the memory stack below
		 * psp also have been restored.
		 */
		sr.curr.reg[UNW_REG_PSP].val = 0;
		sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
		sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
		for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
			if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
			    || r->where == UNW_WHERE_SPREL) {
				r->val = 0;
				r->where = UNW_WHERE_NONE;
				r->when = UNW_WHEN_NEVER;
			}
	}

	script->flags = sr.flags;

	/*
	 * If RP did't get saved, generate entry for the return link
	 * register.
	 */
	if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
		sr.curr.reg[UNW_REG_RP].when = -1;
		sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
		UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
			   __func__, ip, sr.curr.reg[UNW_REG_RP].where,
			   sr.curr.reg[UNW_REG_RP].val);
	}

#ifdef UNW_DEBUG
	UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
		   __func__, table->segment_base + e->start_offset, sr.when_target);
	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
		if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
			UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]);
			switch (r->where) {
			      case UNW_WHERE_GR:     UNW_DPRINT(1, "r%lu", r->val); break;
			      case UNW_WHERE_FR:     UNW_DPRINT(1, "f%lu", r->val); break;
			      case UNW_WHERE_BR:     UNW_DPRINT(1, "b%lu", r->val); break;
			      case UNW_WHERE_SPREL:  UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
			      case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
			      case UNW_WHERE_NONE:
				UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
				break;
			      default:
				UNW_DPRINT(1, "BADWHERE(%d)", r->where);
				break;
			}
			UNW_DPRINT(1, "\t\t%d\n", r->when);
		}
	}
#endif

	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);

	/* translate state record into unwinder instructions: */

	/*
	 * First, set psp if we're dealing with a fixed-size frame;
	 * subsequent instructions may depend on this value.
	 */
	if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
	    && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
	    && sr.curr.reg[UNW_REG_PSP].val != 0) {
		/* new psp is sp plus frame size */
		insn.opc = UNW_INSN_ADD;
		insn.dst = offsetof(struct unw_frame_info, psp)/8;
		insn.val = sr.curr.reg[UNW_REG_PSP].val;	/* frame size */
		script_emit(script, insn);
	}

	/* determine where the primary UNaT is: */

	if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
		i = UNW_REG_PRI_UNAT_MEM;
	else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
		i = UNW_REG_PRI_UNAT_GR;
	else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
		i = UNW_REG_PRI_UNAT_MEM;
	else
		i = UNW_REG_PRI_UNAT_GR;

	compile_reg(&sr, i, script);

	for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
		compile_reg(&sr, i, script);

	/* free labeled register states & stack: */

	STAT(parse_start = ia64_get_itc());
	for (ls = sr.labeled_states; ls; ls = next) {
		next = ls->next;
		free_state_stack(&ls->saved_state);
		free_labeled_state(ls);
	}
	free_state_stack(&sr.curr);
	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);

	script_finalize(script, &sr);
	STAT(unw.stat.script.build_time += ia64_get_itc() - start);
	return script;
}

/*
 * Apply the unwinding actions represented by OPS and update SR to
 * reflect the state that existed upon entry to the function that this
 * unwinder represents.
 */
static inline void
run_script (struct unw_script *script, struct unw_frame_info *state)
{
	struct unw_insn *ip, *limit, next_insn;
	unsigned long opc, dst, val, off;
	/* the script addresses STATE's fields as an array of 8-byte words: */
	unsigned long *s = (unsigned long *) state;
	STAT(unsigned long start;)

	STAT(++unw.stat.script.runs; start = ia64_get_itc());
	state->flags = script->flags;
	ip = script->insn;
	limit = script->insn + script->count;
	next_insn = *ip;

	while (ip++ < limit) {
		opc = next_insn.opc;
		dst = next_insn.dst;
		val = next_insn.val;
		next_insn = *ip;

	  redo:
		switch (opc) {
		      case UNW_INSN_ADD:
			s[dst] += val;
			break;

		      case UNW_INSN_MOVE2:
			if (!s[val])
				goto lazy_init;
			s[dst+1] = s[val+1];
			s[dst] = s[val];
			break;

		      case UNW_INSN_MOVE:
			if (!s[val])
				goto lazy_init;
			s[dst] = s[val];
			break;

		      case UNW_INSN_MOVE_SCRATCH:
			if (state->pt) {
				s[dst] = (unsigned long) get_scratch_regs(state) + val;
			} else {
				s[dst] = 0;
				UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
					   __func__, dst, val);
			}
			break;

		      case UNW_INSN_MOVE_CONST:
			if (val == 0)
				s[dst] = (unsigned long) &unw.r0;
			else {
				s[dst] = 0;
				UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
					   __func__, val);
			}
			break;

		      case UNW_INSN_MOVE_STACKED:
			s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
								    val);
			break;

		      case UNW_INSN_ADD_PSP:
			s[dst] = state->psp + val;
			break;

		      case UNW_INSN_ADD_SP:
			s[dst] = state->sp + val;
			break;

		      case UNW_INSN_SETNAT_MEMSTK:
			if (!state->pri_unat_loc)
				state->pri_unat_loc = &state->sw->caller_unat;
			/* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
			s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst])
				   | UNW_NAT_MEMSTK;
			break;

		      case UNW_INSN_SETNAT_TYPE:
			s[dst+1] = val;
			break;

		      case UNW_INSN_LOAD:
#ifdef UNW_DEBUG
			if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
			    || s[val] < TASK_SIZE) {
				UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
					   __func__, s[val]);
				break;
			}
#endif
			s[dst] = *(unsigned long *) s[val];
			break;
		}
	}
	STAT(unw.stat.script.run_time += ia64_get_itc() - start);
	return;

	/*
	 * A zero source word means the corresponding switch_stack pointer has
	 * not been initialized yet; do it on demand, then redo the insn.
	 */
  lazy_init:
	off = unw.sw_off[val];
	s[val] = (unsigned long) state->sw + off;
	if (off >= offsetof(struct switch_stack, r4)
	    && off <= offsetof(struct switch_stack, r7))
		/*
		 * We're initializing a general register: init NaT info, too.  Note that
		 * the offset is a multiple of 8 which gives us the 3 bits needed for
		 * the type field.
		 */
		s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
	goto redo;
}

/*
 * Find the save locations for INFO's current frame: reuse a cached
 * script when one matches (ip,pr), otherwise build and cache a new one
 * under unw.lock, then execute it.  Returns 0 on success, -1 on a bad
 * IP or script-build failure.
 */
static int
find_save_locs (struct unw_frame_info *info)
{
	int have_write_lock = 0;
	struct unw_script *scr;
	unsigned long flags = 0;

	if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf))
	    || info->ip < TASK_SIZE) {
		/* don't let obviously bad addresses pollute the cache */
		/* FIXME: should really be level 0 but it occurs too often. KAO */
		UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __func__, info->ip);
		info->rp_loc = NULL;
		return -1;
	}

	scr = script_lookup(info);	/* on success the script's read lock is held */
	if (!scr) {
		spin_lock_irqsave(&unw.lock, flags);
		scr = build_script(info);	/* on success the write lock is held */
		if (!scr) {
			spin_unlock_irqrestore(&unw.lock, flags);
			UNW_DPRINT(0,
				   "unwind.%s: failed to locate/build unwind script for ip %lx\n",
				   __func__, info->ip);
			return -1;
		}
		have_write_lock = 1;
	}
	info->hint = scr->hint;
	info->prev_script = scr - unw.cache;

	run_script(scr, info);

	if (have_write_lock) {
		write_unlock(&scr->lock);
		spin_unlock_irqrestore(&unw.lock, flags);
	} else
		read_unlock(&scr->lock);
	return 0;
}

/* Check that pointer P lies within INFO's register backing store or memory stack. */
static int
unw_valid(const struct unw_frame_info *info, unsigned long* p)
{
	unsigned long loc = (unsigned long)p;
	return (loc >= info->regstk.limit && loc < info->regstk.top) ||
	       (loc >= info->memstk.top && loc < info->memstk.limit);
}

/*
 * Unwind INFO by one frame: restore ip, cfm, bsp, sp, and the predicates
 * from the current frame's save locations, then recompute the save
 * locations for the new (older) frame.  Returns 0 on success, -1 when
 * the frame chain ends, reaches user space, or looks corrupt.
 */
int
unw_unwind (struct unw_frame_info *info)
{
	unsigned long prev_ip, prev_sp, prev_bsp;
	unsigned long ip, pr, num_regs;
	STAT(unsigned long start, flags;)
	int retval;

	STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());

	prev_ip = info->ip;
	prev_sp = info->sp;
	prev_bsp = info->bsp;

	/* validate the return IP pointer */
	if (!unw_valid(info, info->rp_loc)) {
		/* FIXME: should really be level 0 but it occurs too often. KAO */
		UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
			   __func__, info->ip);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
		return -1;
	}
	/* restore the ip */
	ip = info->ip = *info->rp_loc;
	if (ip < GATE_ADDR) {
		UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __func__, ip);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
		return -1;
	}

	/* validate the previous stack frame pointer */
	if (!unw_valid(info, info->pfs_loc)) {
		UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __func__);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
		return -1;
	}
	/* restore the cfm: */
	info->cfm_loc = info->pfs_loc;

	/* restore the bsp: */
	pr = info->pr;
	num_regs = 0;
	if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
		info->pt = info->sp + 16;
		if ((pr & (1UL << PRED_NON_SYSCALL)) != 0)
			num_regs = *info->cfm_loc & 0x7f;	/* size of frame */
		info->pfs_loc =
			(unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
		UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __func__, info->pt);
	} else
		num_regs = (*info->cfm_loc >> 7) & 0x7f;	/* size of locals */
	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
	if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
		UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
			   __func__, info->bsp, info->regstk.limit, info->regstk.top);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
		return -1;
	}

	/* restore the sp: */
	info->sp = info->psp;
	if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
		UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
			   __func__, info->sp, info->memstk.top, info->memstk.limit);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
		return -1;
	}

	if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
		UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
			   __func__, ip);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
		return -1;
	}

	/* as we unwind, the saved ar.unat becomes the primary unat: */
	info->pri_unat_loc = info->unat_loc;

	/* finally, restore the predicates: */
	unw_get_pr(info, &info->pr);

	retval = find_save_locs(info);
	STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
	return retval;
}
EXPORT_SYMBOL(unw_unwind);

/*
 * Unwind until the frame that entered the kernel from user level (an
 * interrupt frame with the user-stack predicate set) is reached.
 * Returns 0 on success, -1 when unwinding fails first.
 */
int
unw_unwind_to_user (struct unw_frame_info *info)
{
	unsigned long ip, sp, pr = info->pr;

	do {
		unw_get_sp(info, &sp);
		if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
				   __func__);
			break;
		}
		if (unw_is_intr_frame(info) &&
		    (pr & (1UL << PRED_USER_STACK)))
			return 0;
		if (unw_get_pr (info, &pr) < 0) {
			unw_get_rp(info, &ip);
			UNW_DPRINT(0, "unwind.%s: failed to read "
				   "predicate register (ip=0x%lx)\n",
				   __func__, ip);
			return -1;
		}
	} while (unw_unwind(info) >= 0);
	unw_get_ip(info, &ip);
	UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
		   __func__, ip);
	return -1;
}
EXPORT_SYMBOL(unw_unwind_to_user);

/*
 * Initialize INFO to start unwinding task T at memory-stack top STKTOP,
 * with preserved registers lazily materialized from switch_stack SW.
 */
static void
init_frame_info (struct unw_frame_info *info, struct task_struct *t,
		 struct switch_stack *sw, unsigned long stktop)
{
	unsigned long rbslimit, rbstop, stklimit;
	STAT(unsigned long start, flags;)

	STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());

	/*
	 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
	 * don't want to do that because it would be slow as each preserved register would
	 * have to be processed.  Instead, what we do here is zero out the frame info and
	 * start the unwind process at the function that created the switch_stack frame.
	 * When a preserved value in switch_stack needs to be accessed, run_script() will
	 * initialize the appropriate pointer on demand.
*/ memset(info, 0, sizeof(*info)); rbslimit = (unsigned long) t + IA64_RBS_OFFSET; stklimit = (unsigned long) t + IA64_STK_OFFSET; rbstop = sw->ar_bspstore; if (rbstop > stklimit || rbstop < rbslimit) rbstop = rbslimit; if (stktop <= rbstop) stktop = rbstop; if (stktop > stklimit) stktop = stklimit; info->regstk.limit = rbslimit; info->regstk.top = rbstop; info->memstk.limit = stklimit; info->memstk.top = stktop; info->task = t; info->sw = sw; info->sp = info->psp = stktop; info->pr = sw->pr; UNW_DPRINT(3, "unwind.%s:\n" " task 0x%lx\n" " rbs = [0x%lx-0x%lx)\n" " stk = [0x%lx-0x%lx)\n" " pr 0x%lx\n" " sw 0x%lx\n" " sp 0x%lx\n", __func__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit, info->pr, (unsigned long) info->sw, info->sp); STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags)); } void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw) { unsigned long sol; init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16); info->cfm_loc = &sw->ar_pfs; sol = (*info->cfm_loc >> 7) & 0x7f; info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol); info->ip = sw->b0; UNW_DPRINT(3, "unwind.%s:\n" " bsp 0x%lx\n" " sol 0x%lx\n" " ip 0x%lx\n", __func__, info->bsp, sol, info->ip); find_save_locs(info); } EXPORT_SYMBOL(unw_init_frame_info); void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t) { struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16); UNW_DPRINT(1, "unwind.%s\n", __func__); unw_init_frame_info(info, t, sw); } EXPORT_SYMBOL(unw_init_from_blocked_task); static void init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base, unsigned long gp, const void *table_start, const void *table_end) { const struct unw_table_entry *start = table_start, *end = table_end; table->name = name; table->segment_base = segment_base; table->gp = gp; table->start = segment_base + 
start[0].start_offset; table->end = segment_base + end[-1].end_offset; table->array = start; table->length = end - start; } void * unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp, const void *table_start, const void *table_end) { const struct unw_table_entry *start = table_start, *end = table_end; struct unw_table *table; unsigned long flags; if (end - start <= 0) { UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n", __func__); return NULL; } table = kmalloc(sizeof(*table), GFP_USER); if (!table) return NULL; init_unwind_table(table, name, segment_base, gp, table_start, table_end); spin_lock_irqsave(&unw.lock, flags); { /* keep kernel unwind table at the front (it's searched most commonly): */ table->next = unw.tables->next; unw.tables->next = table; } spin_unlock_irqrestore(&unw.lock, flags); return table; } void unw_remove_unwind_table (void *handle) { struct unw_table *table, *prev; struct unw_script *tmp; unsigned long flags; long index; if (!handle) { UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n", __func__); return; } table = handle; if (table == &unw.kernel_table) { UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a " "no-can-do!\n", __func__); return; } spin_lock_irqsave(&unw.lock, flags); { /* first, delete the table: */ for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next) if (prev->next == table) break; if (!prev) { UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n", __func__, (void *) table); spin_unlock_irqrestore(&unw.lock, flags); return; } prev->next = table->next; } spin_unlock_irqrestore(&unw.lock, flags); /* next, remove hash table entries for this table */ for (index = 0; index < UNW_HASH_SIZE; ++index) { tmp = unw.cache + unw.hash[index]; if (unw.hash[index] >= UNW_CACHE_SIZE || tmp->ip < table->start || tmp->ip >= table->end) continue; write_lock(&tmp->lock); { if (tmp->ip >= table->start && tmp->ip < 
table->end) { unw.hash[index] = tmp->coll_chain; tmp->ip = 0; } } write_unlock(&tmp->lock); } kfree(table); } static int __init create_gate_table (void) { const struct unw_table_entry *entry, *start, *end; unsigned long *lp, segbase = GATE_ADDR; size_t info_size, size; char *info; Elf64_Phdr *punw = NULL, *phdr = (Elf64_Phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); int i; for (i = 0; i < GATE_EHDR->e_phnum; ++i, ++phdr) if (phdr->p_type == PT_IA_64_UNWIND) { punw = phdr; break; } if (!punw) { printk("%s: failed to find gate DSO's unwind table!\n", __func__); return 0; } start = (const struct unw_table_entry *) punw->p_vaddr; end = (struct unw_table_entry *) ((char *) start + punw->p_memsz); size = 0; unw_add_unwind_table("linux-gate.so", segbase, 0, start, end); for (entry = start; entry < end; ++entry) size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset)); size += 8; /* reserve space for "end of table" marker */ unw.gate_table = kmalloc(size, GFP_KERNEL); if (!unw.gate_table) { unw.gate_table_size = 0; printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __func__); return 0; } unw.gate_table_size = size; lp = unw.gate_table; info = (char *) unw.gate_table + size; for (entry = start; entry < end; ++entry, lp += 3) { info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset)); info -= info_size; memcpy(info, (char *) segbase + entry->info_offset, info_size); lp[0] = segbase + entry->start_offset; /* start */ lp[1] = segbase + entry->end_offset; /* end */ lp[2] = info - (char *) unw.gate_table; /* info */ } *lp = 0; /* end-of-table marker */ return 0; } __initcall(create_gate_table); void __init unw_init (void) { extern char __gp[]; extern void unw_hash_index_t_is_too_narrow (void); long i, off; if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE) unw_hash_index_t_is_too_narrow(); unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(CALLER_UNAT); unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE); 
unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS); unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0); unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(CALLER_UNAT); unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR); unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC); unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR); for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8) unw.sw_off[unw.preg_index[i]] = off; for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8) unw.sw_off[unw.preg_index[i]] = off; for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16) unw.sw_off[unw.preg_index[i]] = off; for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16) unw.sw_off[unw.preg_index[i]] = off; for (i = 0; i < UNW_CACHE_SIZE; ++i) { if (i > 0) unw.cache[i].lru_chain = (i - 1); unw.cache[i].coll_chain = -1; rwlock_init(&unw.cache[i].lock); } unw.lru_head = UNW_CACHE_SIZE - 1; unw.lru_tail = 0; init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) __gp, __start_unwind, __end_unwind); } /* * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED * * This system call has been deprecated. The new and improved way to get * at the kernel's unwind info is via the gate DSO. The address of the * ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR. * * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED * * This system call copies the unwind data into the buffer pointed to by BUF and returns * the size of the unwind data. If BUF_SIZE is smaller than the size of the unwind data * or if BUF is NULL, nothing is copied, but the system call still returns the size of the * unwind data. * * The first portion of the unwind data contains an unwind table and rest contains the * associated unwind info (in no particular order). 
The unwind table consists of a table * of entries of the form: * * u64 start; (64-bit address of start of function) * u64 end; (64-bit address of start of function) * u64 info; (BUF-relative offset to unwind info) * * The end of the unwind table is indicated by an entry with a START address of zero. * * Please see the IA-64 Software Conventions and Runtime Architecture manual for details * on the format of the unwind info. * * ERRORS * EFAULT BUF points outside your accessible address space. */ asmlinkage long sys_getunwind (void __user *buf, size_t buf_size) { if (buf && buf_size >= unw.gate_table_size) if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0) return -EFAULT; return unw.gate_table_size; }
gpl-2.0
talnoah/Leaping_Lemur-AOSP
drivers/spi/spidev.c
3402
19723
/* * Simple synchronous userspace interface to SPI devices * * Copyright (C) 2006 SWAPP * Andrea Paterniani <a.paterniani@swapp-eng.it> * Copyright (C) 2007 David Brownell (simplification, cleanup) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/module.h> #include <linux/ioctl.h> #include <linux/fs.h> #include <linux/device.h> #include <linux/err.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/compat.h> #include <linux/spi/spi.h> #include <linux/spi/spidev.h> #include <asm/uaccess.h> /* * This supports access to SPI devices using normal userspace I/O calls. * Note that while traditional UNIX/POSIX I/O semantics are half duplex, * and often mask message boundaries, full SPI support requires full duplex * transfers. There are several kinds of internal message boundaries to * handle chipselect management and other protocol options. * * SPI has a character major number assigned. We allocate minor numbers * dynamically using a bitmask. You must use hotplug tools, such as udev * (or mdev with busybox) to create and destroy the /dev/spidevB.C device * nodes, since there is no fixed association of minor numbers with any * particular SPI bus or device. */ #define SPIDEV_MAJOR 153 /* assigned */ #define N_SPI_MINORS 32 /* ... 
up to 256 */ static DECLARE_BITMAP(minors, N_SPI_MINORS); /* Bit masks for spi_device.mode management. Note that incorrect * settings for some settings can cause *lots* of trouble for other * devices on a shared bus: * * - CS_HIGH ... this device will be active when it shouldn't be * - 3WIRE ... when active, it won't behave as it should * - NO_CS ... there will be no explicit message boundaries; this * is completely incompatible with the shared bus model * - READY ... transfers may proceed when they shouldn't. * * REVISIT should changing those flags be privileged? */ #define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \ | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \ | SPI_NO_CS | SPI_READY) struct spidev_data { dev_t devt; spinlock_t spi_lock; struct spi_device *spi; struct list_head device_entry; /* buffer is NULL unless this device is open (users > 0) */ struct mutex buf_lock; unsigned users; u8 *buffer; u8 *bufferrx; }; static LIST_HEAD(device_list); static DEFINE_MUTEX(device_list_lock); static unsigned bufsiz = 4096; module_param(bufsiz, uint, S_IRUGO); MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message"); /* * This can be used for testing the controller, given the busnum and the * cs required to use. If those parameters are used, spidev is * dynamically added as device on the busnum, and messages can be sent * via this interface. 
*/ static int busnum = -1; module_param(busnum, int, S_IRUGO); MODULE_PARM_DESC(busnum, "bus num of the controller"); static int chipselect = -1; module_param(chipselect, int, S_IRUGO); MODULE_PARM_DESC(chipselect, "chip select of the desired device"); static int maxspeed = 10000000; module_param(maxspeed, int, S_IRUGO); MODULE_PARM_DESC(maxspeed, "max_speed of the desired device"); static int spimode = SPI_MODE_3; module_param(spimode, int, S_IRUGO); MODULE_PARM_DESC(spimode, "mode of the desired device"); static struct spi_device *spi; /*-------------------------------------------------------------------------*/ /* * We can't use the standard synchronous wrappers for file I/O; we * need to protect against async removal of the underlying spi_device. */ static void spidev_complete(void *arg) { complete(arg); } static ssize_t spidev_sync(struct spidev_data *spidev, struct spi_message *message) { DECLARE_COMPLETION_ONSTACK(done); int status; message->complete = spidev_complete; message->context = &done; spin_lock_irq(&spidev->spi_lock); if (spidev->spi == NULL) status = -ESHUTDOWN; else status = spi_async(spidev->spi, message); spin_unlock_irq(&spidev->spi_lock); if (status == 0) { wait_for_completion(&done); status = message->status; if (status == 0) status = message->actual_length; } return status; } static inline ssize_t spidev_sync_write(struct spidev_data *spidev, size_t len) { struct spi_transfer t = { .tx_buf = spidev->buffer, .len = len, }; struct spi_message m; spi_message_init(&m); spi_message_add_tail(&t, &m); return spidev_sync(spidev, &m); } static inline ssize_t spidev_sync_read(struct spidev_data *spidev, size_t len) { struct spi_transfer t = { .rx_buf = spidev->buffer, .len = len, }; struct spi_message m; spi_message_init(&m); spi_message_add_tail(&t, &m); return spidev_sync(spidev, &m); } /*-------------------------------------------------------------------------*/ /* Read-only message with current device setup */ static ssize_t spidev_read(struct 
file *filp, char __user *buf, size_t count, loff_t *f_pos) { struct spidev_data *spidev; ssize_t status = 0; /* chipselect only toggles at start or end of operation */ if (count > bufsiz) return -EMSGSIZE; spidev = filp->private_data; mutex_lock(&spidev->buf_lock); status = spidev_sync_read(spidev, count); if (status > 0) { unsigned long missing; missing = copy_to_user(buf, spidev->buffer, status); if (missing == status) status = -EFAULT; else status = status - missing; } mutex_unlock(&spidev->buf_lock); return status; } /* Write-only message with current device setup */ static ssize_t spidev_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos) { struct spidev_data *spidev; ssize_t status = 0; unsigned long missing; /* chipselect only toggles at start or end of operation */ if (count > bufsiz) return -EMSGSIZE; spidev = filp->private_data; mutex_lock(&spidev->buf_lock); missing = copy_from_user(spidev->buffer, buf, count); if (missing == 0) { status = spidev_sync_write(spidev, count); } else status = -EFAULT; mutex_unlock(&spidev->buf_lock); return status; } static int spidev_message(struct spidev_data *spidev, struct spi_ioc_transfer *u_xfers, unsigned n_xfers) { struct spi_message msg; struct spi_transfer *k_xfers; struct spi_transfer *k_tmp; struct spi_ioc_transfer *u_tmp; unsigned n, total; u8 *buf, *bufrx; int status = -EFAULT; spi_message_init(&msg); k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL); if (k_xfers == NULL) return -ENOMEM; /* Construct spi_message, copying any tx data to bounce buffer. * We walk the array of user-provided transfers, using each one * to initialize a kernel version of the same transfer. 
*/ buf = spidev->buffer; bufrx = spidev->bufferrx; total = 0; for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; n; n--, k_tmp++, u_tmp++) { k_tmp->len = u_tmp->len; total += k_tmp->len; if (total > bufsiz) { status = -EMSGSIZE; goto done; } if (u_tmp->rx_buf) { k_tmp->rx_buf = bufrx; if (!access_ok(VERIFY_WRITE, (u8 __user *) (uintptr_t) u_tmp->rx_buf, u_tmp->len)) goto done; } if (u_tmp->tx_buf) { k_tmp->tx_buf = buf; if (copy_from_user(buf, (const u8 __user *) (uintptr_t) u_tmp->tx_buf, u_tmp->len)) goto done; } buf += k_tmp->len; bufrx += k_tmp->len; k_tmp->cs_change = !!u_tmp->cs_change; k_tmp->bits_per_word = u_tmp->bits_per_word; k_tmp->delay_usecs = u_tmp->delay_usecs; k_tmp->speed_hz = u_tmp->speed_hz; #ifdef VERBOSE dev_dbg(&spidev->spi->dev, " xfer len %zd %s%s%s%dbits %u usec %uHz\n", u_tmp->len, u_tmp->rx_buf ? "rx " : "", u_tmp->tx_buf ? "tx " : "", u_tmp->cs_change ? "cs " : "", u_tmp->bits_per_word ? : spidev->spi->bits_per_word, u_tmp->delay_usecs, u_tmp->speed_hz ? : spidev->spi->max_speed_hz); #endif spi_message_add_tail(k_tmp, &msg); } status = spidev_sync(spidev, &msg); if (status < 0) goto done; /* copy any rx data out of bounce buffer */ buf = spidev->bufferrx; for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) { if (u_tmp->rx_buf) { if (__copy_to_user((u8 __user *) (uintptr_t) u_tmp->rx_buf, buf, u_tmp->len)) { status = -EFAULT; goto done; } } buf += u_tmp->len; } status = total; done: kfree(k_xfers); return status; } static long spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int err = 0; int retval = 0; struct spidev_data *spidev; struct spi_device *spi; u32 tmp; unsigned n_ioc; struct spi_ioc_transfer *ioc; /* Check type and command number */ if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC) return -ENOTTY; /* Check access direction once here; don't repeat below. * IOC_DIR is from the user perspective, while access_ok is * from the kernel perspective; so they look reversed. 
*/ if (_IOC_DIR(cmd) & _IOC_READ) err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd)); if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE) err = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd)); if (err) return -EFAULT; /* guard against device removal before, or while, * we issue this ioctl. */ spidev = filp->private_data; spin_lock_irq(&spidev->spi_lock); spi = spi_dev_get(spidev->spi); spin_unlock_irq(&spidev->spi_lock); if (spi == NULL) return -ESHUTDOWN; /* use the buffer lock here for triple duty: * - prevent I/O (from us) so calling spi_setup() is safe; * - prevent concurrent SPI_IOC_WR_* from morphing * data fields while SPI_IOC_RD_* reads them; * - SPI_IOC_MESSAGE needs the buffer locked "normally". */ mutex_lock(&spidev->buf_lock); switch (cmd) { /* read requests */ case SPI_IOC_RD_MODE: retval = __put_user(spi->mode & SPI_MODE_MASK, (__u8 __user *)arg); break; case SPI_IOC_RD_LSB_FIRST: retval = __put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0, (__u8 __user *)arg); break; case SPI_IOC_RD_BITS_PER_WORD: retval = __put_user(spi->bits_per_word, (__u8 __user *)arg); break; case SPI_IOC_RD_MAX_SPEED_HZ: retval = __put_user(spi->max_speed_hz, (__u32 __user *)arg); break; /* write requests */ case SPI_IOC_WR_MODE: retval = __get_user(tmp, (u8 __user *)arg); if (retval == 0) { u8 save = spi->mode; if (tmp & ~SPI_MODE_MASK) { retval = -EINVAL; break; } tmp |= spi->mode & ~SPI_MODE_MASK; spi->mode = (u8)tmp; retval = spi_setup(spi); if (retval < 0) spi->mode = save; else dev_dbg(&spi->dev, "spi mode %02x\n", tmp); } break; case SPI_IOC_WR_LSB_FIRST: retval = __get_user(tmp, (__u8 __user *)arg); if (retval == 0) { u8 save = spi->mode; if (tmp) spi->mode |= SPI_LSB_FIRST; else spi->mode &= ~SPI_LSB_FIRST; retval = spi_setup(spi); if (retval < 0) spi->mode = save; else dev_dbg(&spi->dev, "%csb first\n", tmp ? 
'l' : 'm'); } break; case SPI_IOC_WR_BITS_PER_WORD: retval = __get_user(tmp, (__u8 __user *)arg); if (retval == 0) { u8 save = spi->bits_per_word; spi->bits_per_word = tmp; retval = spi_setup(spi); if (retval < 0) spi->bits_per_word = save; else dev_dbg(&spi->dev, "%d bits per word\n", tmp); } break; case SPI_IOC_WR_MAX_SPEED_HZ: retval = __get_user(tmp, (__u32 __user *)arg); if (retval == 0) { u32 save = spi->max_speed_hz; spi->max_speed_hz = tmp; retval = spi_setup(spi); if (retval < 0) spi->max_speed_hz = save; else dev_dbg(&spi->dev, "%d Hz (max)\n", tmp); } break; default: /* segmented and/or full-duplex I/O request */ if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0)) || _IOC_DIR(cmd) != _IOC_WRITE) { retval = -ENOTTY; break; } tmp = _IOC_SIZE(cmd); if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) { retval = -EINVAL; break; } n_ioc = tmp / sizeof(struct spi_ioc_transfer); if (n_ioc == 0) break; /* copy into scratch area */ ioc = kmalloc(tmp, GFP_KERNEL); if (!ioc) { retval = -ENOMEM; break; } if (__copy_from_user(ioc, (void __user *)arg, tmp)) { kfree(ioc); retval = -EFAULT; break; } /* translate to spi_message, execute */ retval = spidev_message(spidev, ioc, n_ioc); kfree(ioc); break; } mutex_unlock(&spidev->buf_lock); spi_dev_put(spi); return retval; } #ifdef CONFIG_COMPAT static long spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); } #else #define spidev_compat_ioctl NULL #endif /* CONFIG_COMPAT */ static int spidev_open(struct inode *inode, struct file *filp) { struct spidev_data *spidev; int status = -ENXIO; mutex_lock(&device_list_lock); list_for_each_entry(spidev, &device_list, device_entry) { if (spidev->devt == inode->i_rdev) { status = 0; break; } } if (status == 0) { if (!spidev->buffer) { spidev->buffer = kmalloc(bufsiz, GFP_KERNEL); if (!spidev->buffer) { dev_dbg(&spidev->spi->dev, "open/ENOMEM\n"); status = -ENOMEM; } } if (!spidev->bufferrx) { 
spidev->bufferrx = kmalloc(bufsiz, GFP_KERNEL); if (!spidev->bufferrx) { dev_dbg(&spidev->spi->dev, "open/ENOMEM\n"); kfree(spidev->buffer); spidev->buffer = NULL; status = -ENOMEM; } } if (status == 0) { spidev->users++; filp->private_data = spidev; nonseekable_open(inode, filp); } } else pr_debug("spidev: nothing for minor %d\n", iminor(inode)); mutex_unlock(&device_list_lock); return status; } static int spidev_release(struct inode *inode, struct file *filp) { struct spidev_data *spidev; int status = 0; mutex_lock(&device_list_lock); spidev = filp->private_data; filp->private_data = NULL; /* last close? */ spidev->users--; if (!spidev->users) { int dofree; kfree(spidev->buffer); spidev->buffer = NULL; kfree(spidev->bufferrx); spidev->bufferrx = NULL; /* ... after we unbound from the underlying device? */ spin_lock_irq(&spidev->spi_lock); dofree = (spidev->spi == NULL); spin_unlock_irq(&spidev->spi_lock); if (dofree) kfree(spidev); } mutex_unlock(&device_list_lock); return status; } static const struct file_operations spidev_fops = { .owner = THIS_MODULE, /* REVISIT switch to aio primitives, so that userspace * gets more complete API coverage. It'll simplify things * too, except for the locking. */ .write = spidev_write, .read = spidev_read, .unlocked_ioctl = spidev_ioctl, .compat_ioctl = spidev_compat_ioctl, .open = spidev_open, .release = spidev_release, .llseek = no_llseek, }; /*-------------------------------------------------------------------------*/ /* The main reason to have this class is to make mdev/udev create the * /dev/spidevB.C character device nodes exposing our userspace API. * It also simplifies memory management. 
*/ static struct class *spidev_class; /*-------------------------------------------------------------------------*/ static int __devinit spidev_probe(struct spi_device *spi) { struct spidev_data *spidev; int status; unsigned long minor; /* Allocate driver data */ spidev = kzalloc(sizeof(*spidev), GFP_KERNEL); if (!spidev) return -ENOMEM; /* Initialize the driver data */ spidev->spi = spi; spin_lock_init(&spidev->spi_lock); mutex_init(&spidev->buf_lock); INIT_LIST_HEAD(&spidev->device_entry); /* If we can allocate a minor number, hook up this device. * Reusing minors is fine so long as udev or mdev is working. */ mutex_lock(&device_list_lock); minor = find_first_zero_bit(minors, N_SPI_MINORS); if (minor < N_SPI_MINORS) { struct device *dev; spidev->devt = MKDEV(SPIDEV_MAJOR, minor); dev = device_create(spidev_class, &spi->dev, spidev->devt, spidev, "spidev%d.%d", spi->master->bus_num, spi->chip_select); status = IS_ERR(dev) ? PTR_ERR(dev) : 0; } else { dev_dbg(&spi->dev, "no minor number available!\n"); status = -ENODEV; } if (status == 0) { set_bit(minor, minors); list_add(&spidev->device_entry, &device_list); } mutex_unlock(&device_list_lock); if (status == 0) spi_set_drvdata(spi, spidev); else kfree(spidev); return status; } static int __devexit spidev_remove(struct spi_device *spi) { struct spidev_data *spidev = spi_get_drvdata(spi); /* make sure ops on existing fds can abort cleanly */ spin_lock_irq(&spidev->spi_lock); spidev->spi = NULL; spi_set_drvdata(spi, NULL); spin_unlock_irq(&spidev->spi_lock); /* prevent new opens */ mutex_lock(&device_list_lock); list_del(&spidev->device_entry); device_destroy(spidev_class, spidev->devt); clear_bit(MINOR(spidev->devt), minors); if (spidev->users == 0) kfree(spidev); mutex_unlock(&device_list_lock); return 0; } static struct spi_driver spidev_spi_driver = { .driver = { .name = "spidev", .owner = THIS_MODULE, }, .probe = spidev_probe, .remove = __devexit_p(spidev_remove), /* NOTE: suspend/resume methods are not necessary 
here. * We don't do anything except pass the requests to/from * the underlying controller. The refrigerator handles * most issues; the controller driver handles the rest. */ }; /*-------------------------------------------------------------------------*/ static int __init spidev_init(void) { int status; /* Claim our 256 reserved device numbers. Then register a class * that will key udev/mdev to add/remove /dev nodes. Last, register * the driver which manages those device numbers. */ BUILD_BUG_ON(N_SPI_MINORS > 256); status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops); if (status < 0) return status; spidev_class = class_create(THIS_MODULE, "spidev"); if (IS_ERR(spidev_class)) { status = PTR_ERR(spidev_class); goto error_class; } status = spi_register_driver(&spidev_spi_driver); if (status < 0) goto error_register; if (busnum != -1 && chipselect != -1) { struct spi_board_info chip = { .modalias = "spidev", .mode = spimode, .bus_num = busnum, .chip_select = chipselect, .max_speed_hz = maxspeed, }; struct spi_master *master; master = spi_busnum_to_master(busnum); if (!master) { status = -ENODEV; goto error_busnum; } /* We create a virtual device that will sit on the bus */ spi = spi_new_device(master, &chip); if (!spi) { status = -EBUSY; goto error_mem; } dev_dbg(&spi->dev, "busnum=%d cs=%d bufsiz=%d maxspeed=%d", busnum, chipselect, bufsiz, maxspeed); } return 0; error_mem: error_busnum: spi_unregister_driver(&spidev_spi_driver); error_register: class_destroy(spidev_class); error_class: unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name); return status; } module_init(spidev_init); static void __exit spidev_exit(void) { if (spi) { spi_unregister_device(spi); spi = NULL; } spi_unregister_driver(&spidev_spi_driver); class_destroy(spidev_class); unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name); } module_exit(spidev_exit); MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>"); MODULE_DESCRIPTION("User mode SPI device interface"); 
MODULE_LICENSE("GPL"); MODULE_ALIAS("spi:spidev");
gpl-2.0
Colonel-Corn/kernel_htc_msm8974
drivers/leds/leds-da903x.c
4938
4361
/* * LEDs driver for Dialog Semiconductor DA9030/DA9034 * * Copyright (C) 2008 Compulab, Ltd. * Mike Rapoport <mike@compulab.co.il> * * Copyright (C) 2006-2008 Marvell International Ltd. * Eric Miao <eric.miao@marvell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/leds.h> #include <linux/workqueue.h> #include <linux/mfd/da903x.h> #include <linux/slab.h> #define DA9030_LED1_CONTROL 0x20 #define DA9030_LED2_CONTROL 0x21 #define DA9030_LED3_CONTROL 0x22 #define DA9030_LED4_CONTROL 0x23 #define DA9030_LEDPC_CONTROL 0x24 #define DA9030_MISC_CONTROL_A 0x26 /* Vibrator Control */ #define DA9034_LED1_CONTROL 0x35 #define DA9034_LED2_CONTROL 0x36 #define DA9034_VIBRA 0x40 struct da903x_led { struct led_classdev cdev; struct work_struct work; struct device *master; enum led_brightness new_brightness; int id; int flags; }; #define DA9030_LED_OFFSET(id) ((id) - DA9030_ID_LED_1) #define DA9034_LED_OFFSET(id) ((id) - DA9034_ID_LED_1) static void da903x_led_work(struct work_struct *work) { struct da903x_led *led = container_of(work, struct da903x_led, work); uint8_t val; int offset; switch (led->id) { case DA9030_ID_LED_1: case DA9030_ID_LED_2: case DA9030_ID_LED_3: case DA9030_ID_LED_4: case DA9030_ID_LED_PC: offset = DA9030_LED_OFFSET(led->id); val = led->flags & ~0x87; val |= (led->new_brightness) ? 0x80 : 0; /* EN bit */ val |= (0x7 - (led->new_brightness >> 5)) & 0x7; /* PWM<2:0> */ da903x_write(led->master, DA9030_LED1_CONTROL + offset, val); break; case DA9030_ID_VIBRA: val = led->flags & ~0x80; val |= (led->new_brightness) ? 
0x80 : 0; /* EN bit */ da903x_write(led->master, DA9030_MISC_CONTROL_A, val); break; case DA9034_ID_LED_1: case DA9034_ID_LED_2: offset = DA9034_LED_OFFSET(led->id); val = (led->new_brightness * 0x5f / LED_FULL) & 0x7f; val |= (led->flags & DA9034_LED_RAMP) ? 0x80 : 0; da903x_write(led->master, DA9034_LED1_CONTROL + offset, val); break; case DA9034_ID_VIBRA: val = led->new_brightness & 0xfe; da903x_write(led->master, DA9034_VIBRA, val); break; } } static void da903x_led_set(struct led_classdev *led_cdev, enum led_brightness value) { struct da903x_led *led; led = container_of(led_cdev, struct da903x_led, cdev); led->new_brightness = value; schedule_work(&led->work); } static int __devinit da903x_led_probe(struct platform_device *pdev) { struct led_info *pdata = pdev->dev.platform_data; struct da903x_led *led; int id, ret; if (pdata == NULL) return 0; id = pdev->id; if (!((id >= DA9030_ID_LED_1 && id <= DA9030_ID_VIBRA) || (id >= DA9034_ID_LED_1 && id <= DA9034_ID_VIBRA))) { dev_err(&pdev->dev, "invalid LED ID (%d) specified\n", id); return -EINVAL; } led = kzalloc(sizeof(struct da903x_led), GFP_KERNEL); if (led == NULL) { dev_err(&pdev->dev, "failed to alloc memory for LED%d\n", id); return -ENOMEM; } led->cdev.name = pdata->name; led->cdev.default_trigger = pdata->default_trigger; led->cdev.brightness_set = da903x_led_set; led->cdev.brightness = LED_OFF; led->id = id; led->flags = pdata->flags; led->master = pdev->dev.parent; led->new_brightness = LED_OFF; INIT_WORK(&led->work, da903x_led_work); ret = led_classdev_register(led->master, &led->cdev); if (ret) { dev_err(&pdev->dev, "failed to register LED %d\n", id); goto err; } platform_set_drvdata(pdev, led); return 0; err: kfree(led); return ret; } static int __devexit da903x_led_remove(struct platform_device *pdev) { struct da903x_led *led = platform_get_drvdata(pdev); led_classdev_unregister(&led->cdev); kfree(led); return 0; } static struct platform_driver da903x_led_driver = { .driver = { .name = "da903x-led", 
.owner = THIS_MODULE, }, .probe = da903x_led_probe, .remove = __devexit_p(da903x_led_remove), }; module_platform_driver(da903x_led_driver); MODULE_DESCRIPTION("LEDs driver for Dialog Semiconductor DA9030/DA9034"); MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>" "Mike Rapoport <mike@compulab.co.il>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:da903x-led");
gpl-2.0
somcom3x/android_kernel_motorola_msm8226
drivers/media/video/vpx3220.c
7242
15330
/* * vpx3220a, vpx3216b & vpx3214c video decoder driver version 0.0.1 * * Copyright (C) 2001 Laurent Pinchart <lpinchart@freegates.be> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> #include <media/v4l2-ctrls.h> MODULE_DESCRIPTION("vpx3220a/vpx3216b/vpx3214c video decoder driver"); MODULE_AUTHOR("Laurent Pinchart"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-1)"); #define VPX_TIMEOUT_COUNT 10 /* ----------------------------------------------------------------------- */ struct vpx3220 { struct v4l2_subdev sd; struct v4l2_ctrl_handler hdl; unsigned char reg[255]; v4l2_std_id norm; int ident; int input; int enable; }; static inline struct vpx3220 *to_vpx3220(struct v4l2_subdev *sd) { return container_of(sd, struct vpx3220, sd); } static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl) { return &container_of(ctrl->handler, struct vpx3220, hdl)->sd; } static char *inputs[] = { "internal", "composite", "svideo" }; /* ----------------------------------------------------------------------- */ static inline 
int vpx3220_write(struct v4l2_subdev *sd, u8 reg, u8 value) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct vpx3220 *decoder = i2c_get_clientdata(client); decoder->reg[reg] = value; return i2c_smbus_write_byte_data(client, reg, value); } static inline int vpx3220_read(struct v4l2_subdev *sd, u8 reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); return i2c_smbus_read_byte_data(client, reg); } static int vpx3220_fp_status(struct v4l2_subdev *sd) { unsigned char status; unsigned int i; for (i = 0; i < VPX_TIMEOUT_COUNT; i++) { status = vpx3220_read(sd, 0x29); if (!(status & 4)) return 0; udelay(10); if (need_resched()) cond_resched(); } return -1; } static int vpx3220_fp_write(struct v4l2_subdev *sd, u8 fpaddr, u16 data) { struct i2c_client *client = v4l2_get_subdevdata(sd); /* Write the 16-bit address to the FPWR register */ if (i2c_smbus_write_word_data(client, 0x27, swab16(fpaddr)) == -1) { v4l2_dbg(1, debug, sd, "%s: failed\n", __func__); return -1; } if (vpx3220_fp_status(sd) < 0) return -1; /* Write the 16-bit data to the FPDAT register */ if (i2c_smbus_write_word_data(client, 0x28, swab16(data)) == -1) { v4l2_dbg(1, debug, sd, "%s: failed\n", __func__); return -1; } return 0; } static u16 vpx3220_fp_read(struct v4l2_subdev *sd, u16 fpaddr) { struct i2c_client *client = v4l2_get_subdevdata(sd); s16 data; /* Write the 16-bit address to the FPRD register */ if (i2c_smbus_write_word_data(client, 0x26, swab16(fpaddr)) == -1) { v4l2_dbg(1, debug, sd, "%s: failed\n", __func__); return -1; } if (vpx3220_fp_status(sd) < 0) return -1; /* Read the 16-bit data from the FPDAT register */ data = i2c_smbus_read_word_data(client, 0x28); if (data == -1) { v4l2_dbg(1, debug, sd, "%s: failed\n", __func__); return -1; } return swab16(data); } static int vpx3220_write_block(struct v4l2_subdev *sd, const u8 *data, unsigned int len) { u8 reg; int ret = -1; while (len >= 2) { reg = *data++; ret = vpx3220_write(sd, reg, *data++); if (ret < 0) break; len -= 2; } 
return ret; } static int vpx3220_write_fp_block(struct v4l2_subdev *sd, const u16 *data, unsigned int len) { u8 reg; int ret = 0; while (len > 1) { reg = *data++; ret |= vpx3220_fp_write(sd, reg, *data++); len -= 2; } return ret; } /* ---------------------------------------------------------------------- */ static const unsigned short init_ntsc[] = { 0x1c, 0x00, /* NTSC tint angle */ 0x88, 17, /* Window 1 vertical */ 0x89, 240, /* Vertical lines in */ 0x8a, 240, /* Vertical lines out */ 0x8b, 000, /* Horizontal begin */ 0x8c, 640, /* Horizontal length */ 0x8d, 640, /* Number of pixels */ 0x8f, 0xc00, /* Disable window 2 */ 0xf0, 0x73, /* 13.5 MHz transport, Forced * mode, latch windows */ 0xf2, 0x13, /* NTSC M, composite input */ 0xe7, 0x1e1, /* Enable vertical standard * locking @ 240 lines */ }; static const unsigned short init_pal[] = { 0x88, 23, /* Window 1 vertical begin */ 0x89, 288, /* Vertical lines in (16 lines * skipped by the VFE) */ 0x8a, 288, /* Vertical lines out (16 lines * skipped by the VFE) */ 0x8b, 16, /* Horizontal begin */ 0x8c, 768, /* Horizontal length */ 0x8d, 784, /* Number of pixels * Must be >= Horizontal begin + Horizontal length */ 0x8f, 0xc00, /* Disable window 2 */ 0xf0, 0x77, /* 13.5 MHz transport, Forced * mode, latch windows */ 0xf2, 0x3d1, /* PAL B,G,H,I, composite input */ 0xe7, 0x241, /* PAL/SECAM set to 288 lines */ }; static const unsigned short init_secam[] = { 0x88, 23, /* Window 1 vertical begin */ 0x89, 288, /* Vertical lines in (16 lines * skipped by the VFE) */ 0x8a, 288, /* Vertical lines out (16 lines * skipped by the VFE) */ 0x8b, 16, /* Horizontal begin */ 0x8c, 768, /* Horizontal length */ 0x8d, 784, /* Number of pixels * Must be >= Horizontal begin + Horizontal length */ 0x8f, 0xc00, /* Disable window 2 */ 0xf0, 0x77, /* 13.5 MHz transport, Forced * mode, latch windows */ 0xf2, 0x3d5, /* SECAM, composite input */ 0xe7, 0x241, /* PAL/SECAM set to 288 lines */ }; static const unsigned char init_common[] = { 0xf2, 
0x00, /* Disable all outputs */ 0x33, 0x0d, /* Luma : VIN2, Chroma : CIN * (clamp off) */ 0xd8, 0xa8, /* HREF/VREF active high, VREF * pulse = 2, Odd/Even flag */ 0x20, 0x03, /* IF compensation 0dB/oct */ 0xe0, 0xff, /* Open up all comparators */ 0xe1, 0x00, 0xe2, 0x7f, 0xe3, 0x80, 0xe4, 0x7f, 0xe5, 0x80, 0xe6, 0x00, /* Brightness set to 0 */ 0xe7, 0xe0, /* Contrast to 1.0, noise shaping * 10 to 8 2-bit error diffusion */ 0xe8, 0xf8, /* YUV422, CbCr binary offset, * ... (p.32) */ 0xea, 0x18, /* LLC2 connected, output FIFO * reset with VACTintern */ 0xf0, 0x8a, /* Half full level to 10, bus * shuffler [7:0, 23:16, 15:8] */ 0xf1, 0x18, /* Single clock, sync mode, no * FE delay, no HLEN counter */ 0xf8, 0x12, /* Port A, PIXCLK, HF# & FE# * strength to 2 */ 0xf9, 0x24, /* Port B, HREF, VREF, PREF & * ALPHA strength to 4 */ }; static const unsigned short init_fp[] = { 0x59, 0, 0xa0, 2070, /* ACC reference */ 0xa3, 0, 0xa4, 0, 0xa8, 30, 0xb2, 768, 0xbe, 27, 0x58, 0, 0x26, 0, 0x4b, 0x298, /* PLL gain */ }; static int vpx3220_init(struct v4l2_subdev *sd, u32 val) { struct vpx3220 *decoder = to_vpx3220(sd); vpx3220_write_block(sd, init_common, sizeof(init_common)); vpx3220_write_fp_block(sd, init_fp, sizeof(init_fp) >> 1); if (decoder->norm & V4L2_STD_NTSC) vpx3220_write_fp_block(sd, init_ntsc, sizeof(init_ntsc) >> 1); else if (decoder->norm & V4L2_STD_PAL) vpx3220_write_fp_block(sd, init_pal, sizeof(init_pal) >> 1); else if (decoder->norm & V4L2_STD_SECAM) vpx3220_write_fp_block(sd, init_secam, sizeof(init_secam) >> 1); else vpx3220_write_fp_block(sd, init_pal, sizeof(init_pal) >> 1); return 0; } static int vpx3220_status(struct v4l2_subdev *sd, u32 *pstatus, v4l2_std_id *pstd) { int res = V4L2_IN_ST_NO_SIGNAL, status; v4l2_std_id std = 0; status = vpx3220_fp_read(sd, 0x0f3); v4l2_dbg(1, debug, sd, "status: 0x%04x\n", status); if (status < 0) return status; if ((status & 0x20) == 0) { res = 0; switch (status & 0x18) { case 0x00: case 0x10: case 0x14: case 0x18: std = 
V4L2_STD_PAL; break; case 0x08: std = V4L2_STD_SECAM; break; case 0x04: case 0x0c: case 0x1c: std = V4L2_STD_NTSC; break; } } if (pstd) *pstd = std; if (pstatus) *pstatus = res; return 0; } static int vpx3220_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) { v4l2_dbg(1, debug, sd, "querystd\n"); return vpx3220_status(sd, NULL, std); } static int vpx3220_g_input_status(struct v4l2_subdev *sd, u32 *status) { v4l2_dbg(1, debug, sd, "g_input_status\n"); return vpx3220_status(sd, status, NULL); } static int vpx3220_s_std(struct v4l2_subdev *sd, v4l2_std_id std) { struct vpx3220 *decoder = to_vpx3220(sd); int temp_input; /* Here we back up the input selection because it gets overwritten when we fill the registers with the chosen video norm */ temp_input = vpx3220_fp_read(sd, 0xf2); v4l2_dbg(1, debug, sd, "s_std %llx\n", (unsigned long long)std); if (std & V4L2_STD_NTSC) { vpx3220_write_fp_block(sd, init_ntsc, sizeof(init_ntsc) >> 1); v4l2_dbg(1, debug, sd, "norm switched to NTSC\n"); } else if (std & V4L2_STD_PAL) { vpx3220_write_fp_block(sd, init_pal, sizeof(init_pal) >> 1); v4l2_dbg(1, debug, sd, "norm switched to PAL\n"); } else if (std & V4L2_STD_SECAM) { vpx3220_write_fp_block(sd, init_secam, sizeof(init_secam) >> 1); v4l2_dbg(1, debug, sd, "norm switched to SECAM\n"); } else { return -EINVAL; } decoder->norm = std; /* And here we set the backed up video input again */ vpx3220_fp_write(sd, 0xf2, temp_input | 0x0010); udelay(10); return 0; } static int vpx3220_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { int data; /* RJ: input = 0: ST8 (PCTV) input input = 1: COMPOSITE input input = 2: SVHS input */ const int input_vals[3][2] = { {0x0c, 0}, {0x0d, 0}, {0x0e, 1} }; if (input > 2) return -EINVAL; v4l2_dbg(1, debug, sd, "input switched to %s\n", inputs[input]); vpx3220_write(sd, 0x33, input_vals[input][0]); data = vpx3220_fp_read(sd, 0xf2) & ~(0x0020); if (data < 0) return data; /* 0x0010 is required to latch the setting */ 
vpx3220_fp_write(sd, 0xf2, data | (input_vals[input][1] << 5) | 0x0010); udelay(10); return 0; } static int vpx3220_s_stream(struct v4l2_subdev *sd, int enable) { v4l2_dbg(1, debug, sd, "s_stream %s\n", enable ? "on" : "off"); vpx3220_write(sd, 0xf2, (enable ? 0x1b : 0x00)); return 0; } static int vpx3220_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: vpx3220_write(sd, 0xe6, ctrl->val); return 0; case V4L2_CID_CONTRAST: /* Bit 7 and 8 is for noise shaping */ vpx3220_write(sd, 0xe7, ctrl->val + 192); return 0; case V4L2_CID_SATURATION: vpx3220_fp_write(sd, 0xa0, ctrl->val); return 0; case V4L2_CID_HUE: vpx3220_fp_write(sd, 0x1c, ctrl->val); return 0; } return -EINVAL; } static int vpx3220_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct vpx3220 *decoder = to_vpx3220(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, decoder->ident, 0); } /* ----------------------------------------------------------------------- */ static const struct v4l2_ctrl_ops vpx3220_ctrl_ops = { .s_ctrl = vpx3220_s_ctrl, }; static const struct v4l2_subdev_core_ops vpx3220_core_ops = { .g_chip_ident = vpx3220_g_chip_ident, .init = vpx3220_init, .g_ext_ctrls = v4l2_subdev_g_ext_ctrls, .try_ext_ctrls = v4l2_subdev_try_ext_ctrls, .s_ext_ctrls = v4l2_subdev_s_ext_ctrls, .g_ctrl = v4l2_subdev_g_ctrl, .s_ctrl = v4l2_subdev_s_ctrl, .queryctrl = v4l2_subdev_queryctrl, .querymenu = v4l2_subdev_querymenu, .s_std = vpx3220_s_std, }; static const struct v4l2_subdev_video_ops vpx3220_video_ops = { .s_routing = vpx3220_s_routing, .s_stream = vpx3220_s_stream, .querystd = vpx3220_querystd, .g_input_status = vpx3220_g_input_status, }; static const struct v4l2_subdev_ops vpx3220_ops = { .core = &vpx3220_core_ops, .video = &vpx3220_video_ops, }; /* ----------------------------------------------------------------------- * Client management code */ static int 
vpx3220_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct vpx3220 *decoder; struct v4l2_subdev *sd; const char *name = NULL; u8 ver; u16 pn; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) return -ENODEV; decoder = kzalloc(sizeof(struct vpx3220), GFP_KERNEL); if (decoder == NULL) return -ENOMEM; sd = &decoder->sd; v4l2_i2c_subdev_init(sd, client, &vpx3220_ops); decoder->norm = V4L2_STD_PAL; decoder->input = 0; decoder->enable = 1; v4l2_ctrl_handler_init(&decoder->hdl, 4); v4l2_ctrl_new_std(&decoder->hdl, &vpx3220_ctrl_ops, V4L2_CID_BRIGHTNESS, -128, 127, 1, 0); v4l2_ctrl_new_std(&decoder->hdl, &vpx3220_ctrl_ops, V4L2_CID_CONTRAST, 0, 63, 1, 32); v4l2_ctrl_new_std(&decoder->hdl, &vpx3220_ctrl_ops, V4L2_CID_SATURATION, 0, 4095, 1, 2048); v4l2_ctrl_new_std(&decoder->hdl, &vpx3220_ctrl_ops, V4L2_CID_HUE, -512, 511, 1, 0); sd->ctrl_handler = &decoder->hdl; if (decoder->hdl.error) { int err = decoder->hdl.error; v4l2_ctrl_handler_free(&decoder->hdl); kfree(decoder); return err; } v4l2_ctrl_handler_setup(&decoder->hdl); ver = i2c_smbus_read_byte_data(client, 0x00); pn = (i2c_smbus_read_byte_data(client, 0x02) << 8) + i2c_smbus_read_byte_data(client, 0x01); decoder->ident = V4L2_IDENT_VPX3220A; if (ver == 0xec) { switch (pn) { case 0x4680: name = "vpx3220a"; break; case 0x4260: name = "vpx3216b"; decoder->ident = V4L2_IDENT_VPX3216B; break; case 0x4280: name = "vpx3214c"; decoder->ident = V4L2_IDENT_VPX3214C; break; } } if (name) v4l2_info(sd, "%s found @ 0x%x (%s)\n", name, client->addr << 1, client->adapter->name); else v4l2_info(sd, "chip (%02x:%04x) found @ 0x%x (%s)\n", ver, pn, client->addr << 1, client->adapter->name); vpx3220_write_block(sd, init_common, sizeof(init_common)); vpx3220_write_fp_block(sd, init_fp, sizeof(init_fp) >> 1); /* Default to PAL */ vpx3220_write_fp_block(sd, init_pal, sizeof(init_pal) >> 1); return 0; } static 
int vpx3220_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct vpx3220 *decoder = to_vpx3220(sd); v4l2_device_unregister_subdev(sd); v4l2_ctrl_handler_free(&decoder->hdl); kfree(decoder); return 0; } static const struct i2c_device_id vpx3220_id[] = { { "vpx3220a", 0 }, { "vpx3216b", 0 }, { "vpx3214c", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, vpx3220_id); static struct i2c_driver vpx3220_driver = { .driver = { .owner = THIS_MODULE, .name = "vpx3220", }, .probe = vpx3220_probe, .remove = vpx3220_remove, .id_table = vpx3220_id, }; module_i2c_driver(vpx3220_driver);
gpl-2.0
davidmueller13/flo_kernel
drivers/media/video/bt856.c
7242
7118
/* * bt856 - BT856A Digital Video Encoder (Rockwell Part) * * Copyright (C) 1999 Mike Bernson <mike@mlb.org> * Copyright (C) 1998 Dave Perks <dperks@ibm.net> * * Modifications for LML33/DC10plus unified driver * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx> * * This code was modify/ported from the saa7111 driver written * by Dave Perks. * * Changes by Ronald Bultje <rbultje@ronald.bitfreak.net> * - moved over to linux>=2.4.x i2c protocol (9/9/2002) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/ioctl.h> #include <asm/uaccess.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> MODULE_DESCRIPTION("Brooktree-856A video encoder driver"); MODULE_AUTHOR("Mike Bernson & Dave Perks"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-1)"); /* ----------------------------------------------------------------------- */ #define BT856_REG_OFFSET 0xDA #define BT856_NR_REG 6 struct bt856 { struct v4l2_subdev sd; unsigned char reg[BT856_NR_REG]; v4l2_std_id norm; }; static inline struct bt856 *to_bt856(struct v4l2_subdev *sd) { return container_of(sd, struct bt856, sd); } /* ----------------------------------------------------------------------- */ static inline int bt856_write(struct bt856 *encoder, u8 reg, u8 value) { struct i2c_client *client = v4l2_get_subdevdata(&encoder->sd); encoder->reg[reg - BT856_REG_OFFSET] = value; return i2c_smbus_write_byte_data(client, reg, value); } static inline int bt856_setbit(struct bt856 *encoder, u8 reg, u8 bit, u8 value) { return bt856_write(encoder, reg, (encoder->reg[reg - BT856_REG_OFFSET] & ~(1 << bit)) | (value ? (1 << bit) : 0)); } static void bt856_dump(struct bt856 *encoder) { int i; v4l2_info(&encoder->sd, "register dump:\n"); for (i = 0; i < BT856_NR_REG; i += 2) printk(KERN_CONT " %02x", encoder->reg[i]); printk(KERN_CONT "\n"); } /* ----------------------------------------------------------------------- */ static int bt856_init(struct v4l2_subdev *sd, u32 arg) { struct bt856 *encoder = to_bt856(sd); /* This is just for testing!!! 
*/ v4l2_dbg(1, debug, sd, "init\n"); bt856_write(encoder, 0xdc, 0x18); bt856_write(encoder, 0xda, 0); bt856_write(encoder, 0xde, 0); bt856_setbit(encoder, 0xdc, 3, 1); /*bt856_setbit(encoder, 0xdc, 6, 0);*/ bt856_setbit(encoder, 0xdc, 4, 1); if (encoder->norm & V4L2_STD_NTSC) bt856_setbit(encoder, 0xdc, 2, 0); else bt856_setbit(encoder, 0xdc, 2, 1); bt856_setbit(encoder, 0xdc, 1, 1); bt856_setbit(encoder, 0xde, 4, 0); bt856_setbit(encoder, 0xde, 3, 1); if (debug != 0) bt856_dump(encoder); return 0; } static int bt856_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std) { struct bt856 *encoder = to_bt856(sd); v4l2_dbg(1, debug, sd, "set norm %llx\n", (unsigned long long)std); if (std & V4L2_STD_NTSC) { bt856_setbit(encoder, 0xdc, 2, 0); } else if (std & V4L2_STD_PAL) { bt856_setbit(encoder, 0xdc, 2, 1); bt856_setbit(encoder, 0xda, 0, 0); /*bt856_setbit(encoder, 0xda, 0, 1);*/ } else { return -EINVAL; } encoder->norm = std; if (debug != 0) bt856_dump(encoder); return 0; } static int bt856_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct bt856 *encoder = to_bt856(sd); v4l2_dbg(1, debug, sd, "set input %d\n", input); /* We only have video bus. 
* input= 0: input is from bt819 * input= 1: input is from ZR36060 */ switch (input) { case 0: bt856_setbit(encoder, 0xde, 4, 0); bt856_setbit(encoder, 0xde, 3, 1); bt856_setbit(encoder, 0xdc, 3, 1); bt856_setbit(encoder, 0xdc, 6, 0); break; case 1: bt856_setbit(encoder, 0xde, 4, 0); bt856_setbit(encoder, 0xde, 3, 1); bt856_setbit(encoder, 0xdc, 3, 1); bt856_setbit(encoder, 0xdc, 6, 1); break; case 2: /* Color bar */ bt856_setbit(encoder, 0xdc, 3, 0); bt856_setbit(encoder, 0xde, 4, 1); break; default: return -EINVAL; } if (debug != 0) bt856_dump(encoder); return 0; } static int bt856_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_BT856, 0); } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops bt856_core_ops = { .g_chip_ident = bt856_g_chip_ident, .init = bt856_init, }; static const struct v4l2_subdev_video_ops bt856_video_ops = { .s_std_output = bt856_s_std_output, .s_routing = bt856_s_routing, }; static const struct v4l2_subdev_ops bt856_ops = { .core = &bt856_core_ops, .video = &bt856_video_ops, }; /* ----------------------------------------------------------------------- */ static int bt856_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct bt856 *encoder; struct v4l2_subdev *sd; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); encoder = kzalloc(sizeof(struct bt856), GFP_KERNEL); if (encoder == NULL) return -ENOMEM; sd = &encoder->sd; v4l2_i2c_subdev_init(sd, client, &bt856_ops); encoder->norm = V4L2_STD_NTSC; bt856_write(encoder, 0xdc, 0x18); bt856_write(encoder, 0xda, 0); bt856_write(encoder, 0xde, 0); bt856_setbit(encoder, 0xdc, 3, 1); 
/*bt856_setbit(encoder, 0xdc, 6, 0);*/ bt856_setbit(encoder, 0xdc, 4, 1); if (encoder->norm & V4L2_STD_NTSC) bt856_setbit(encoder, 0xdc, 2, 0); else bt856_setbit(encoder, 0xdc, 2, 1); bt856_setbit(encoder, 0xdc, 1, 1); bt856_setbit(encoder, 0xde, 4, 0); bt856_setbit(encoder, 0xde, 3, 1); if (debug != 0) bt856_dump(encoder); return 0; } static int bt856_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); kfree(to_bt856(sd)); return 0; } static const struct i2c_device_id bt856_id[] = { { "bt856", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, bt856_id); static struct i2c_driver bt856_driver = { .driver = { .owner = THIS_MODULE, .name = "bt856", }, .probe = bt856_probe, .remove = bt856_remove, .id_table = bt856_id, }; module_i2c_driver(bt856_driver);
gpl-2.0
jgcaap/NewKernel
net/wireless/reg.c
75
65429
/* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> * Copyright 2008-2011 Luis R. Rodriguez <mcgrof@qca.qualcomm.com> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * DOC: Wireless regulatory infrastructure * * The usual implementation is for a driver to read a device EEPROM to * determine which regulatory domain it should be operating under, then * looking up the allowable channels in a driver-local table and finally * registering those channels in the wiphy structure. * * Another set of compliance enforcement is for drivers to use their * own compliance limits which can be stored on the EEPROM. The host * driver or firmware may ensure these are used. * * In addition to all this we provide an extra layer of regulatory * conformance. For drivers which do not have any regulatory * information CRDA provides the complete regulatory solution. * For others it provides a community effort on further restrictions * to enhance compliance. * * Note: When number of rules --> infinity we will not be able to * index on alpha2 any more, instead we'll probably have to * rely on some SHA1 checksum of the regdomain for example. 
* */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/random.h> #include <linux/ctype.h> #include <linux/nl80211.h> #include <linux/platform_device.h> #include <linux/moduleparam.h> #include <net/cfg80211.h> #include "core.h" #include "reg.h" #include "regdb.h" #include "nl80211.h" #ifdef CONFIG_CFG80211_REG_DEBUG #define REG_DBG_PRINT(format, args...) \ printk(KERN_DEBUG pr_fmt(format), ##args) #else #define REG_DBG_PRINT(args...) #endif static struct regulatory_request core_request_world = { .initiator = NL80211_REGDOM_SET_BY_CORE, .alpha2[0] = '0', .alpha2[1] = '0', .intersect = false, .processed = true, .country_ie_env = ENVIRON_ANY, }; /* Receipt of information from last regulatory request */ static struct regulatory_request *last_request = &core_request_world; /* To trigger userspace events */ static struct platform_device *reg_pdev; static struct device_type reg_device_type = { .uevent = reg_device_uevent, }; /* * Central wireless core regulatory domains, we only need two, * the current one and a world regulatory domain in case we have no * information to give us an alpha2 */ const struct ieee80211_regdomain *cfg80211_regdomain; /* * Protects static reg.c components: * - cfg80211_world_regdom * - cfg80211_regdom * - last_request */ static DEFINE_MUTEX(reg_mutex); static inline void assert_reg_lock(void) { lockdep_assert_held(&reg_mutex); } /* Used to queue up regulatory hints */ static LIST_HEAD(reg_requests_list); static spinlock_t reg_requests_lock; /* Used to queue up beacon hints for review */ static LIST_HEAD(reg_pending_beacons); static spinlock_t reg_pending_beacons_lock; /* Used to keep track of processed beacon hints */ static LIST_HEAD(reg_beacon_list); struct reg_beacon { struct list_head list; struct ieee80211_channel chan; }; static void reg_todo(struct work_struct *work); static DECLARE_WORK(reg_work, reg_todo); static void 
reg_timeout_work(struct work_struct *work); static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work); /* We keep a static world regulatory domain in case of the absence of CRDA */ static const struct ieee80211_regdomain world_regdom = { .n_reg_rules = 5, .alpha2 = "00", .reg_rules = { /* IEEE 802.11b/g, channels 1..11 */ REG_RULE(2412-10, 2462+10, 40, 6, 20, 0), /* IEEE 802.11b/g, channels 12..13. */ REG_RULE(2467-10, 2472+10, 40, 6, 20, NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS), /* IEEE 802.11 channel 14 - Only JP enables * this and for 802.11b only */ REG_RULE(2484-10, 2484+10, 20, 6, 20, NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS | NL80211_RRF_NO_OFDM), /* IEEE 802.11a, channel 36..48 */ REG_RULE(5180-10, 5240+10, 40, 6, 20, NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS), /* NB: 5260 MHz - 5700 MHz requies DFS */ /* IEEE 802.11a, channel 149..165 */ REG_RULE(5745-10, 5825+10, 40, 6, 20, NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS), } }; static const struct ieee80211_regdomain *cfg80211_world_regdom = &world_regdom; static char *ieee80211_regdom = "00"; static char user_alpha2[2]; module_param(ieee80211_regdom, charp, 0444); MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); static void reset_regdomains(bool full_reset) { /* avoid freeing static information or freeing something twice */ if (cfg80211_regdomain == cfg80211_world_regdom) cfg80211_regdomain = NULL; if (cfg80211_world_regdom == &world_regdom) cfg80211_world_regdom = NULL; if (cfg80211_regdomain == &world_regdom) cfg80211_regdomain = NULL; kfree(cfg80211_regdomain); kfree(cfg80211_world_regdom); cfg80211_world_regdom = &world_regdom; cfg80211_regdomain = NULL; if (!full_reset) return; if (last_request != &core_request_world) kfree(last_request); last_request = &core_request_world; } /* * Dynamic world regulatory domain requested by the wireless * core upon initialization */ static void update_world_regdomain(const struct ieee80211_regdomain *rd) { 
BUG_ON(!last_request); reset_regdomains(false); cfg80211_world_regdom = rd; cfg80211_regdomain = rd; } bool is_world_regdom(const char *alpha2) { if (!alpha2) return false; if (alpha2[0] == '0' && alpha2[1] == '0') return true; return false; } static bool is_alpha2_set(const char *alpha2) { if (!alpha2) return false; if (alpha2[0] != 0 && alpha2[1] != 0) return true; return false; } static bool is_unknown_alpha2(const char *alpha2) { if (!alpha2) return false; /* * Special case where regulatory domain was built by driver * but a specific alpha2 cannot be determined */ if (alpha2[0] == '9' && alpha2[1] == '9') return true; return false; } static bool is_intersected_alpha2(const char *alpha2) { if (!alpha2) return false; /* * Special case where regulatory domain is the * result of an intersection between two regulatory domain * structures */ if (alpha2[0] == '9' && alpha2[1] == '8') return true; return false; } static bool is_an_alpha2(const char *alpha2) { if (!alpha2) return false; if (isalpha(alpha2[0]) && isalpha(alpha2[1])) return true; return false; } static bool alpha2_equal(const char *alpha2_x, const char *alpha2_y) { if (!alpha2_x || !alpha2_y) return false; if (alpha2_x[0] == alpha2_y[0] && alpha2_x[1] == alpha2_y[1]) return true; return false; } static bool regdom_changes(const char *alpha2) { assert_cfg80211_lock(); if (!cfg80211_regdomain) return true; if (alpha2_equal(cfg80211_regdomain->alpha2, alpha2)) return false; return true; } /* * The NL80211_REGDOM_SET_BY_USER regdom alpha2 is cached, this lets * you know if a valid regulatory hint with NL80211_REGDOM_SET_BY_USER * has ever been issued. 
*/ static bool is_user_regdom_saved(void) { if (user_alpha2[0] == '9' && user_alpha2[1] == '7') return false; /* This would indicate a mistake on the design */ if (WARN((!is_world_regdom(user_alpha2) && !is_an_alpha2(user_alpha2)), "Unexpected user alpha2: %c%c\n", user_alpha2[0], user_alpha2[1])) return false; return true; } static bool is_cfg80211_regdom_intersected(void) { return is_intersected_alpha2(cfg80211_regdomain->alpha2); } static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd, const struct ieee80211_regdomain *src_regd) { struct ieee80211_regdomain *regd; int size_of_regd = 0; unsigned int i; size_of_regd = sizeof(struct ieee80211_regdomain) + ((src_regd->n_reg_rules + 1) * sizeof(struct ieee80211_reg_rule)); regd = kzalloc(size_of_regd, GFP_KERNEL); if (!regd) return -ENOMEM; memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain)); for (i = 0; i < src_regd->n_reg_rules; i++) memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i], sizeof(struct ieee80211_reg_rule)); *dst_regd = regd; return 0; } #ifdef CONFIG_CFG80211_INTERNAL_REGDB struct reg_regdb_search_request { char alpha2[2]; struct list_head list; }; static LIST_HEAD(reg_regdb_search_list); static DEFINE_MUTEX(reg_regdb_search_mutex); static void reg_regdb_search(struct work_struct *work) { struct reg_regdb_search_request *request; const struct ieee80211_regdomain *curdom, *regdom; int i, r; bool set_reg = false; mutex_lock(&cfg80211_mutex); mutex_lock(&reg_regdb_search_mutex); while (!list_empty(&reg_regdb_search_list)) { request = list_first_entry(&reg_regdb_search_list, struct reg_regdb_search_request, list); list_del(&request->list); for (i=0; i<reg_regdb_size; i++) { curdom = reg_regdb[i]; if (!memcmp(request->alpha2, curdom->alpha2, 2)) { r = reg_copy_regd(&regdom, curdom); if (r) break; set_reg = true; break; } } kfree(request); } mutex_unlock(&reg_regdb_search_mutex); if (set_reg) set_regdom(regdom); mutex_unlock(&cfg80211_mutex); } static DECLARE_WORK(reg_regdb_work, 
reg_regdb_search); static void reg_regdb_query(const char *alpha2) { struct reg_regdb_search_request *request; if (!alpha2) return; request = kzalloc(sizeof(struct reg_regdb_search_request), GFP_KERNEL); if (!request) return; memcpy(request->alpha2, alpha2, 2); mutex_lock(&reg_regdb_search_mutex); list_add_tail(&request->list, &reg_regdb_search_list); mutex_unlock(&reg_regdb_search_mutex); schedule_work(&reg_regdb_work); } /* Feel free to add any other sanity checks here */ static void reg_regdb_size_check(void) { /* We should ideally BUILD_BUG_ON() but then random builds would fail */ WARN_ONCE(!reg_regdb_size, "db.txt is empty, you should update it..."); } #else static inline void reg_regdb_size_check(void) {} static inline void reg_regdb_query(const char *alpha2) {} #endif /* CONFIG_CFG80211_INTERNAL_REGDB */ /* * This lets us keep regulatory code which is updated on a regulatory * basis in userspace. Country information is filled in by * reg_device_uevent */ static int call_crda(const char *alpha2) { if (!is_world_regdom((char *) alpha2)) pr_info("Calling CRDA for country: %c%c\n", alpha2[0], alpha2[1]); else pr_info("Calling CRDA to update world regulatory domain\n"); /* query internal regulatory database (if it exists) */ reg_regdb_query(alpha2); return kobject_uevent(&reg_pdev->dev.kobj, KOBJ_CHANGE); } /* Used by nl80211 before kmalloc'ing our regulatory domain */ bool reg_is_valid_request(const char *alpha2) { assert_cfg80211_lock(); if (!last_request) return false; return alpha2_equal(last_request->alpha2, alpha2); } /* Sanity check on a regulatory rule */ static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule) { const struct ieee80211_freq_range *freq_range = &rule->freq_range; u32 freq_diff; if (freq_range->start_freq_khz <= 0 || freq_range->end_freq_khz <= 0) return false; if (freq_range->start_freq_khz > freq_range->end_freq_khz) return false; freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz; if (freq_range->end_freq_khz 
<= freq_range->start_freq_khz || freq_range->max_bandwidth_khz > freq_diff) return false; return true; } static bool is_valid_rd(const struct ieee80211_regdomain *rd) { const struct ieee80211_reg_rule *reg_rule = NULL; unsigned int i; if (!rd->n_reg_rules) return false; if (WARN_ON(rd->n_reg_rules > NL80211_MAX_SUPP_REG_RULES)) return false; for (i = 0; i < rd->n_reg_rules; i++) { reg_rule = &rd->reg_rules[i]; if (!is_valid_reg_rule(reg_rule)) return false; } return true; } static bool reg_does_bw_fit(const struct ieee80211_freq_range *freq_range, u32 center_freq_khz, u32 bw_khz) { u32 start_freq_khz, end_freq_khz; start_freq_khz = center_freq_khz - (bw_khz/2); end_freq_khz = center_freq_khz + (bw_khz/2); if (start_freq_khz >= freq_range->start_freq_khz && end_freq_khz <= freq_range->end_freq_khz) return true; return false; } /** * freq_in_rule_band - tells us if a frequency is in a frequency band * @freq_range: frequency rule we want to query * @freq_khz: frequency we are inquiring about * * This lets us know if a specific frequency rule is or is not relevant to * a specific frequency's band. Bands are device specific and artificial * definitions (the "2.4 GHz band" and the "5 GHz band"), however it is * safe for now to assume that a frequency rule should not be part of a * frequency's band if the start freq or end freq are off by more than 2 GHz. * This resolution can be lowered and should be considered as we add * regulatory rule support for other "bands". 
**/ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range, u32 freq_khz) { #define ONE_GHZ_IN_KHZ 1000000 if (abs(freq_khz - freq_range->start_freq_khz) <= (2 * ONE_GHZ_IN_KHZ)) return true; if (abs(freq_khz - freq_range->end_freq_khz) <= (2 * ONE_GHZ_IN_KHZ)) return true; return false; #undef ONE_GHZ_IN_KHZ } /* * Helper for regdom_intersect(), this does the real * mathematical intersection fun */ static int reg_rules_intersect( const struct ieee80211_reg_rule *rule1, const struct ieee80211_reg_rule *rule2, struct ieee80211_reg_rule *intersected_rule) { const struct ieee80211_freq_range *freq_range1, *freq_range2; struct ieee80211_freq_range *freq_range; const struct ieee80211_power_rule *power_rule1, *power_rule2; struct ieee80211_power_rule *power_rule; u32 freq_diff; freq_range1 = &rule1->freq_range; freq_range2 = &rule2->freq_range; freq_range = &intersected_rule->freq_range; power_rule1 = &rule1->power_rule; power_rule2 = &rule2->power_rule; power_rule = &intersected_rule->power_rule; freq_range->start_freq_khz = max(freq_range1->start_freq_khz, freq_range2->start_freq_khz); freq_range->end_freq_khz = min(freq_range1->end_freq_khz, freq_range2->end_freq_khz); freq_range->max_bandwidth_khz = min(freq_range1->max_bandwidth_khz, freq_range2->max_bandwidth_khz); freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz; if (freq_range->max_bandwidth_khz > freq_diff) freq_range->max_bandwidth_khz = freq_diff; power_rule->max_eirp = min(power_rule1->max_eirp, power_rule2->max_eirp); power_rule->max_antenna_gain = min(power_rule1->max_antenna_gain, power_rule2->max_antenna_gain); intersected_rule->flags = (rule1->flags | rule2->flags); if (!is_valid_reg_rule(intersected_rule)) return -EINVAL; return 0; } /** * regdom_intersect - do the intersection between two regulatory domains * @rd1: first regulatory domain * @rd2: second regulatory domain * * Use this function to get the intersection between two regulatory domains. 
 * Once completed we will mark the alpha2 for the rd as intersected, "98",
 * as no one single alpha2 can represent this regulatory domain.
 *
 * Returns a pointer to the regulatory domain structure which will hold the
 * resulting intersection of rules between rd1 and rd2. We will
 * kzalloc() this structure for you.  Returns NULL on allocation failure
 * or when the two domains have no valid rules in common.
 */
static struct ieee80211_regdomain *regdom_intersect(
	const struct ieee80211_regdomain *rd1,
	const struct ieee80211_regdomain *rd2)
{
	int r, size_of_regd;
	unsigned int x, y;
	unsigned int num_rules = 0, rule_idx = 0;
	const struct ieee80211_reg_rule *rule1, *rule2;
	struct ieee80211_reg_rule *intersected_rule;
	struct ieee80211_regdomain *rd;
	/* This is just a dummy holder to help us count */
	struct ieee80211_reg_rule irule;

	/* Uses the stack temporarily for counter arithmetic */
	intersected_rule = &irule;

	memset(intersected_rule, 0, sizeof(struct ieee80211_reg_rule));

	if (!rd1 || !rd2)
		return NULL;

	/*
	 * First we get a count of the rules we'll need, then we actually
	 * build them. This is so we can malloc() and free() a
	 * regdomain once. The reason we use reg_rules_intersect() here
	 * is it will return -EINVAL if the rule computed makes no sense.
	 * All rules that do check out OK are valid.
	 */
	for (x = 0; x < rd1->n_reg_rules; x++) {
		rule1 = &rd1->reg_rules[x];
		for (y = 0; y < rd2->n_reg_rules; y++) {
			rule2 = &rd2->reg_rules[y];
			if (!reg_rules_intersect(rule1, rule2,
					intersected_rule))
				num_rules++;
			memset(intersected_rule, 0,
					sizeof(struct ieee80211_reg_rule));
		}
	}

	if (!num_rules)
		return NULL;

	size_of_regd = sizeof(struct ieee80211_regdomain) +
		((num_rules + 1) * sizeof(struct ieee80211_reg_rule));

	rd = kzalloc(size_of_regd, GFP_KERNEL);
	if (!rd)
		return NULL;

	/* Second pass: fill the freshly allocated regdomain in place */
	for (x = 0; x < rd1->n_reg_rules; x++) {
		rule1 = &rd1->reg_rules[x];
		for (y = 0; y < rd2->n_reg_rules; y++) {
			rule2 = &rd2->reg_rules[y];
			/*
			 * This time around instead of using the stack lets
			 * write to the target rule directly saving ourselves
			 * a memcpy()
			 */
			intersected_rule = &rd->reg_rules[rule_idx];
			r = reg_rules_intersect(rule1, rule2,
				intersected_rule);
			/*
			 * No need to memset the intersected rule here as
			 * we're not using the stack anymore
			 */
			if (r)
				continue;
			rule_idx++;
		}
	}

	/* Both passes must agree, otherwise something changed under us */
	if (rule_idx != num_rules) {
		kfree(rd);
		return NULL;
	}

	rd->n_reg_rules = num_rules;
	rd->alpha2[0] = '9';
	rd->alpha2[1] = '8';

	return rd;
}

/*
 * XXX: add support for the rest of enum nl80211_reg_rule_flags, we may
 * want to just have the channel structure use these
 */
static u32 map_regdom_flags(u32 rd_flags)
{
	u32 channel_flags = 0;
	if (rd_flags & NL80211_RRF_PASSIVE_SCAN)
		channel_flags |= IEEE80211_CHAN_PASSIVE_SCAN;
	if (rd_flags & NL80211_RRF_NO_IBSS)
		channel_flags |= IEEE80211_CHAN_NO_IBSS;
	if (rd_flags & NL80211_RRF_DFS)
		channel_flags |= IEEE80211_CHAN_RADAR;
	return channel_flags;
}

/*
 * Look up the regulatory rule that applies to a channel of width
 * @desired_bw_khz centered on @center_freq in @custom_regd (or, when
 * that is NULL, in the current regulatory domain / the driver's own
 * domain).  Returns 0 and sets *@reg_rule on success, -ERANGE when no
 * rule at all covers center_freq's band, -EINVAL otherwise.
 */
static int freq_reg_info_regd(struct wiphy *wiphy,
			      u32 center_freq,
			      u32 desired_bw_khz,
			      const struct ieee80211_reg_rule **reg_rule,
			      const struct ieee80211_regdomain *custom_regd)
{
	int i;
	bool band_rule_found = false;
	const struct ieee80211_regdomain *regd;
	bool bw_fits = false;

	if (!desired_bw_khz)
		desired_bw_khz = MHZ_TO_KHZ(20);

	regd = custom_regd ? custom_regd : cfg80211_regdomain;

	/*
	 * Follow the driver's regulatory domain, if present, unless a country
	 * IE has been processed or a user wants to help compliance further
	 */
	if (!custom_regd &&
	    last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
	    last_request->initiator != NL80211_REGDOM_SET_BY_USER &&
	    wiphy->regd)
		regd = wiphy->regd;

	if (!regd)
		return -EINVAL;

	for (i = 0; i < regd->n_reg_rules; i++) {
		const struct ieee80211_reg_rule *rr;
		const struct ieee80211_freq_range *fr = NULL;

		rr = &regd->reg_rules[i];
		fr = &rr->freq_range;

		/*
		 * We only need to know if one frequency rule was
		 * in center_freq's band, that's enough, so lets
		 * not overwrite it once found
		 */
		if (!band_rule_found)
			band_rule_found = freq_in_rule_band(fr, center_freq);

		bw_fits = reg_does_bw_fit(fr, center_freq, desired_bw_khz);

		if (band_rule_found && bw_fits) {
			*reg_rule = rr;
			return 0;
		}
	}

	if (!band_rule_found)
		return -ERANGE;

	return -EINVAL;
}

/* Public wrapper: same lookup against the current regulatory domain */
int freq_reg_info(struct wiphy *wiphy,
		  u32 center_freq,
		  u32 desired_bw_khz,
		  const struct ieee80211_reg_rule **reg_rule)
{
	assert_cfg80211_lock();
	return freq_reg_info_regd(wiphy,
				  center_freq,
				  desired_bw_khz,
				  reg_rule,
				  NULL);
}
EXPORT_SYMBOL(freq_reg_info);

#ifdef CONFIG_CFG80211_REG_DEBUG
/* Human-readable name for a regulatory request initiator (debug only) */
static const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
{
	switch (initiator) {
	case NL80211_REGDOM_SET_BY_CORE:
		return "Set by core";
	case NL80211_REGDOM_SET_BY_USER:
		return "Set by user";
	case NL80211_REGDOM_SET_BY_DRIVER:
		return "Set by driver";
	case NL80211_REGDOM_SET_BY_COUNTRY_IE:
		return "Set by country IE";
	default:
		WARN_ON(1);
		return "Set by bug";
	}
}

/* Dump the rule being applied to @chan to the debug log (debug only) */
static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
				    u32 desired_bw_khz,
				    const struct ieee80211_reg_rule *reg_rule)
{
	const struct ieee80211_power_rule *power_rule;
	const struct ieee80211_freq_range *freq_range;
	char max_antenna_gain[32];

	power_rule = &reg_rule->power_rule;
	freq_range = &reg_rule->freq_range;

	/* a gain of 0 means "no restriction", print it as N/A */
	if (!power_rule->max_antenna_gain)
		snprintf(max_antenna_gain, 32, "N/A");
	else
		snprintf(max_antenna_gain, 32, "%d", power_rule->max_antenna_gain);

	REG_DBG_PRINT("Updating information on frequency %d MHz "
		      "for a %d MHz width channel with regulatory rule:\n",
		      chan->center_freq,
		      KHZ_TO_MHZ(desired_bw_khz));

	REG_DBG_PRINT("%d KHz - %d KHz @ %d KHz), (%s mBi, %d mBm)\n",
		      freq_range->start_freq_khz,
		      freq_range->end_freq_khz,
		      freq_range->max_bandwidth_khz,
		      max_antenna_gain,
		      power_rule->max_eirp);
}
#else
static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
				    u32 desired_bw_khz,
				    const struct ieee80211_reg_rule *reg_rule)
{
	return;
}
#endif

/*
 * Note that right now we assume the desired channel bandwidth
 * is always 20 MHz for each individual channel (HT40 uses 20 MHz
 * per channel, the primary and the extension channel). To support
 * smaller custom bandwidths such as 5 MHz or 10 MHz we'll need a
 * new ieee80211_channel.target_bw and re run the regulatory check
 * on the wiphy with the target_bw specified. Then we can simply use
 * that below for the desired_bw_khz below.
 */
/*
 * Apply the current regulatory domain to a single channel: look up the
 * matching rule and update the channel's flags and power limits, or
 * disable the channel when no rule fits.
 */
static void handle_channel(struct wiphy *wiphy,
			   enum nl80211_reg_initiator initiator,
			   enum ieee80211_band band,
			   unsigned int chan_idx)
{
	int r;
	u32 flags, bw_flags = 0;
	u32 desired_bw_khz = MHZ_TO_KHZ(20);
	const struct ieee80211_reg_rule *reg_rule = NULL;
	const struct ieee80211_power_rule *power_rule = NULL;
	const struct ieee80211_freq_range *freq_range = NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	struct wiphy *request_wiphy = NULL;

	assert_cfg80211_lock();

	request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);

	sband = wiphy->bands[band];
	BUG_ON(chan_idx >= sband->n_channels);
	chan = &sband->channels[chan_idx];

	flags = chan->orig_flags;

	r = freq_reg_info(wiphy,
			  MHZ_TO_KHZ(chan->center_freq),
			  desired_bw_khz,
			  &reg_rule);

	if (r) {
		/*
		 * We will disable all channels that do not match our
		 * received regulatory rule unless the hint is coming
		 * from a Country IE and the Country IE had no information
		 * about a band. The IEEE 802.11 spec allows for an AP
		 * to send only a subset of the regulatory rules allowed,
		 * so an AP in the US that only supports 2.4 GHz may only send
		 * a country IE with information for the 2.4 GHz band
		 * while 5 GHz is still supported.
		 */
		if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
		    r == -ERANGE)
			return;

		if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
		    request_wiphy && request_wiphy == wiphy &&
		    request_wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
			/*
			 * "For good": orig_flags is also updated so the
			 * channel stays disabled across restores.
			 */
			REG_DBG_PRINT("Disabling freq %d MHz for good\n",
				      chan->center_freq);
			chan->orig_flags |= IEEE80211_CHAN_DISABLED;
			chan->flags = chan->orig_flags;
		} else {
			REG_DBG_PRINT("Disabling freq %d MHz\n",
				      chan->center_freq);
			chan->flags |= IEEE80211_CHAN_DISABLED;
		}
		return;
	}

	chan_reg_rule_print_dbg(chan, desired_bw_khz, reg_rule);

	power_rule = &reg_rule->power_rule;
	freq_range = &reg_rule->freq_range;

	if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
		bw_flags = IEEE80211_CHAN_NO_HT40;

	if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
	    request_wiphy && request_wiphy == wiphy &&
	    request_wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
		/*
		 * This guarantees the driver's requested regulatory domain
		 * will always be used as a base for further regulatory
		 * settings
		 */
		chan->flags = chan->orig_flags =
			map_regdom_flags(reg_rule->flags) | bw_flags;
		chan->max_antenna_gain = chan->orig_mag =
			(int) MBI_TO_DBI(power_rule->max_antenna_gain);
		chan->max_power = chan->orig_mpwr =
			(int) MBM_TO_DBM(power_rule->max_eirp);
		return;
	}

	chan->beacon_found = false;
	chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
	chan->max_antenna_gain = min(chan->orig_mag,
		(int) MBI_TO_DBI(power_rule->max_antenna_gain));
	chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp);
	if (chan->orig_mpwr) {
		/*
		 * Devices that have their own custom regulatory domain
		 * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the
		 * passed country IE power settings.
		 */
		if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
		    wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
		    wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY)
			chan->max_power = chan->max_reg_power;
		else
			chan->max_power = min(chan->orig_mpwr,
					      chan->max_reg_power);
	} else
		chan->max_power = chan->max_reg_power;
}

/* Apply the current regulatory domain to every channel in @band */
static void handle_band(struct wiphy *wiphy,
			enum ieee80211_band band,
			enum nl80211_reg_initiator initiator)
{
	unsigned int i;
	struct ieee80211_supported_band *sband;

	BUG_ON(!wiphy->bands[band]);
	sband = wiphy->bands[band];

	for (i = 0; i < sband->n_channels; i++)
		handle_channel(wiphy, initiator, band, i);
}

/*
 * Decide whether a regulatory update should be skipped for this wiphy:
 * no request pending, custom-regulatory devices ignoring CORE hints, or
 * strict-regulatory devices that have not yet set their own domain.
 */
static bool ignore_reg_update(struct wiphy *wiphy,
			      enum nl80211_reg_initiator initiator)
{
	if (!last_request) {
		REG_DBG_PRINT("Ignoring regulatory request %s since "
			      "last_request is not set\n",
			      reg_initiator_name(initiator));
		return true;
	}

	if (initiator == NL80211_REGDOM_SET_BY_CORE &&
	    wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) {
		REG_DBG_PRINT("Ignoring regulatory request %s "
			      "since the driver uses its own custom "
			      "regulatory domain\n",
			      reg_initiator_name(initiator));
		return true;
	}

	/*
	 * wiphy->regd will be set once the device has its own
	 * desired regulatory domain set
	 */
	if (wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY && !wiphy->regd &&
	    initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
	    !is_world_regdom(last_request->alpha2)) {
		REG_DBG_PRINT("Ignoring regulatory request %s "
			      "since the driver requires its own regulatory "
			      "domain to be set first\n",
			      reg_initiator_name(initiator));
		return true;
	}

	return false;
}

/*
 * Apply a beacon hint to one channel: once a beacon has been seen on
 * the hinted frequency, lift the passive-scan and no-IBSS restrictions
 * and notify userspace if anything changed.
 */
static void handle_reg_beacon(struct wiphy *wiphy,
			      unsigned int chan_idx,
			      struct reg_beacon *reg_beacon)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	bool channel_changed = false;
	struct ieee80211_channel chan_before;

	assert_cfg80211_lock();

	sband = wiphy->bands[reg_beacon->chan.band];
	chan = &sband->channels[chan_idx];

	if (likely(chan->center_freq != reg_beacon->chan.center_freq))
		return;

	if (chan->beacon_found)
		return;

	chan->beacon_found = true;

	if (wiphy->flags & WIPHY_FLAG_DISABLE_BEACON_HINTS)
		return;

	/* snapshot for the change notification below */
	chan_before.center_freq = chan->center_freq;
	chan_before.flags = chan->flags;

	if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) {
		chan->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
		channel_changed = true;
	}

	if (chan->flags & IEEE80211_CHAN_NO_IBSS) {
		chan->flags &= ~IEEE80211_CHAN_NO_IBSS;
		channel_changed = true;
	}

	if (channel_changed)
		nl80211_send_beacon_hint_event(wiphy, &chan_before, chan);
}

/*
 * Called when a scan on a wiphy finds a beacon on
 * new channel
 */
static void wiphy_update_new_beacon(struct wiphy *wiphy,
				    struct reg_beacon *reg_beacon)
{
	unsigned int i;
	struct ieee80211_supported_band *sband;

	assert_cfg80211_lock();

	if (!wiphy->bands[reg_beacon->chan.band])
		return;

	sband = wiphy->bands[reg_beacon->chan.band];

	for (i = 0; i < sband->n_channels; i++)
		handle_reg_beacon(wiphy, i, reg_beacon);
}

/*
 * Called upon reg changes or a new wiphy is added
 */
static void wiphy_update_beacon_reg(struct wiphy *wiphy)
{
	unsigned int i;
	struct ieee80211_supported_band *sband;
	struct reg_beacon *reg_beacon;

	assert_cfg80211_lock();

	if (list_empty(&reg_beacon_list))
		return;

	list_for_each_entry(reg_beacon, &reg_beacon_list, list) {
		if (!wiphy->bands[reg_beacon->chan.band])
			continue;
		sband = wiphy->bands[reg_beacon->chan.band];
		for (i = 0; i < sband->n_channels; i++)
			handle_reg_beacon(wiphy, i, reg_beacon);
	}
}

/* True when the wiphy is effectively operating under the world regdomain */
static bool reg_is_world_roaming(struct wiphy *wiphy)
{
	if (is_world_regdom(cfg80211_regdomain->alpha2) ||
	    (wiphy->regd && is_world_regdom(wiphy->regd->alpha2)))
		return true;
	if (last_request &&
	    last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
	    wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY)
		return true;
	return false;
}

/* Reap the advantages of previously found beacons */
static void reg_process_beacons(struct wiphy *wiphy)
{
	/*
	 * Means we are just firing up cfg80211, so no beacons would
	 * have been processed yet.
	 */
	if (!last_request)
		return;
	if (!reg_is_world_roaming(wiphy))
		return;
	wiphy_update_beacon_reg(wiphy);
}

/* NULL, disabled, or fully HT40-restricted channels cannot host HT40 */
static bool is_ht40_not_allowed(struct ieee80211_channel *chan)
{
	if (!chan)
		return true;
	if (chan->flags & IEEE80211_CHAN_DISABLED)
		return true;
	/* This would happen when regulatory rules disallow HT40 completely */
	if (IEEE80211_CHAN_NO_HT40 == (chan->flags & (IEEE80211_CHAN_NO_HT40)))
		return true;
	return false;
}

/*
 * Recompute the HT40+/HT40- flags for one channel by checking whether
 * its +20 MHz / -20 MHz extension channels exist and are usable.
 */
static void reg_process_ht_flags_channel(struct wiphy *wiphy,
					 enum ieee80211_band band,
					 unsigned int chan_idx)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *channel;
	struct ieee80211_channel *channel_before = NULL, *channel_after = NULL;
	unsigned int i;

	assert_cfg80211_lock();

	sband = wiphy->bands[band];
	BUG_ON(chan_idx >= sband->n_channels);
	channel = &sband->channels[chan_idx];

	if (is_ht40_not_allowed(channel)) {
		channel->flags |= IEEE80211_CHAN_NO_HT40;
		return;
	}

	/*
	 * We need to ensure the extension channels exist to
	 * be able to use HT40- or HT40+, this finds them (or not)
	 */
	for (i = 0; i < sband->n_channels; i++) {
		struct ieee80211_channel *c = &sband->channels[i];
		if (c->center_freq == (channel->center_freq - 20))
			channel_before = c;
		if (c->center_freq == (channel->center_freq + 20))
			channel_after = c;
	}

	/*
	 * Please note that this assumes target bandwidth is 20 MHz,
	 * if that ever changes we also need to change the below logic
	 * to include that as well.
	 */
	if (is_ht40_not_allowed(channel_before))
		channel->flags |= IEEE80211_CHAN_NO_HT40MINUS;
	else
		channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;

	if (is_ht40_not_allowed(channel_after))
		channel->flags |= IEEE80211_CHAN_NO_HT40PLUS;
	else
		channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
}

/* Recompute HT40 flags for every channel in @band */
static void reg_process_ht_flags_band(struct wiphy *wiphy,
				      enum ieee80211_band band)
{
	unsigned int i;
	struct ieee80211_supported_band *sband;

	BUG_ON(!wiphy->bands[band]);
	sband = wiphy->bands[band];

	for (i = 0; i < sband->n_channels; i++)
		reg_process_ht_flags_channel(wiphy, band, i);
}

/* Recompute HT40 flags for every supported band of @wiphy */
static void reg_process_ht_flags(struct wiphy *wiphy)
{
	enum ieee80211_band band;

	if (!wiphy)
		return;

	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		if (wiphy->bands[band])
			reg_process_ht_flags_band(wiphy, band);
	}
}

/*
 * Apply the current regulatory domain to all of one wiphy's channels,
 * process beacon hints and HT40 flags, then notify the driver.
 * Caller must hold reg_mutex (asserted below).
 */
static void wiphy_update_regulatory(struct wiphy *wiphy,
				    enum nl80211_reg_initiator initiator)
{
	enum ieee80211_band band;

	assert_reg_lock();

	if (ignore_reg_update(wiphy, initiator))
		return;

	last_request->dfs_region = cfg80211_regdomain->dfs_region;

	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		if (wiphy->bands[band])
			handle_band(wiphy, band, initiator);
	}

	reg_process_beacons(wiphy);
	reg_process_ht_flags(wiphy);
	if (wiphy->reg_notifier)
		wiphy->reg_notifier(wiphy, last_request);
}

/*
 * Locked wrapper around wiphy_update_regulatory(); prefers the pending
 * request's initiator over @setby when a request exists.
 */
void regulatory_update(struct wiphy *wiphy,
		       enum nl80211_reg_initiator setby)
{
	mutex_lock(&reg_mutex);
	if (last_request)
		wiphy_update_regulatory(wiphy, last_request->initiator);
	else
		wiphy_update_regulatory(wiphy, setby);
	mutex_unlock(&reg_mutex);
}

/* Push a regulatory update out to every registered wiphy */
static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
{
	struct cfg80211_registered_device *rdev;
	struct wiphy *wiphy;

	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
		wiphy = &rdev->wiphy;
		wiphy_update_regulatory(wiphy, initiator);
		/*
		 * Regulatory updates set by CORE are ignored for custom
		 * regulatory cards. Let us notify the changes to the driver,
		 * as some drivers used this to restore its orig_* reg domain.
		 */
		if (initiator == NL80211_REGDOM_SET_BY_CORE &&
		    wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
		    wiphy->reg_notifier)
			wiphy->reg_notifier(wiphy, last_request);
	}
}

/*
 * Like handle_channel() but applies a driver-supplied custom regulatory
 * domain instead of the global one; disables channels with no fitting rule.
 */
static void handle_channel_custom(struct wiphy *wiphy,
				  enum ieee80211_band band,
				  unsigned int chan_idx,
				  const struct ieee80211_regdomain *regd)
{
	int r;
	u32 desired_bw_khz = MHZ_TO_KHZ(20);
	u32 bw_flags = 0;
	const struct ieee80211_reg_rule *reg_rule = NULL;
	const struct ieee80211_power_rule *power_rule = NULL;
	const struct ieee80211_freq_range *freq_range = NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;

	assert_reg_lock();

	sband = wiphy->bands[band];
	BUG_ON(chan_idx >= sband->n_channels);
	chan = &sband->channels[chan_idx];

	r = freq_reg_info_regd(wiphy,
			       MHZ_TO_KHZ(chan->center_freq),
			       desired_bw_khz,
			       &reg_rule,
			       regd);

	if (r) {
		REG_DBG_PRINT("Disabling freq %d MHz as custom "
			      "regd has no rule that fits a %d MHz "
			      "wide channel\n",
			      chan->center_freq,
			      KHZ_TO_MHZ(desired_bw_khz));
		chan->orig_flags |= IEEE80211_CHAN_DISABLED;
		chan->flags = chan->orig_flags;
		return;
	}

	chan_reg_rule_print_dbg(chan, desired_bw_khz, reg_rule);

	power_rule = &reg_rule->power_rule;
	freq_range = &reg_rule->freq_range;

	if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
		bw_flags = IEEE80211_CHAN_NO_HT40;

	chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
	chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain);
	chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
}

/* Apply a custom regulatory domain to every channel in @band */
static void handle_band_custom(struct wiphy *wiphy, enum ieee80211_band band,
			       const struct ieee80211_regdomain *regd)
{
	unsigned int i;
	struct ieee80211_supported_band *sband;

	BUG_ON(!wiphy->bands[band]);
	sband = wiphy->bands[band];

	for (i = 0; i < sband->n_channels; i++)
		handle_channel_custom(wiphy, band, i, regd);
}

/* Used by drivers prior to wiphy registration */
void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
				   const struct ieee80211_regdomain *regd)
{
	enum ieee80211_band band;
	unsigned int bands_set = 0;

	mutex_lock(&reg_mutex);
	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		if (!wiphy->bands[band])
			continue;
		handle_band_custom(wiphy, band, regd);
		bands_set++;
	}
	mutex_unlock(&reg_mutex);

	/*
	 * no point in calling this if it won't have any effect
	 * on your device's supported bands.
	 */
	WARN_ON(!bands_set);
}
EXPORT_SYMBOL(wiphy_apply_custom_regulatory);

/*
 * Return value which can be used by ignore_request() to indicate
 * it has been determined we should intersect two regulatory domains
 */
#define REG_INTERSECT	1

/* This has the logic which determines when a new request
 * should be ignored. */
static int ignore_request(struct wiphy *wiphy,
			  struct regulatory_request *pending_request)
{
	struct wiphy *last_wiphy = NULL;

	assert_cfg80211_lock();

	/* All initial requests are respected */
	if (!last_request)
		return 0;

	switch (pending_request->initiator) {
	case NL80211_REGDOM_SET_BY_CORE:
		return 0;
	case NL80211_REGDOM_SET_BY_COUNTRY_IE:

		if (wiphy->country_ie_pref & NL80211_COUNTRY_IE_IGNORE_CORE)
			return -EALREADY;

		last_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);

		if (unlikely(!is_an_alpha2(pending_request->alpha2)))
			return -EINVAL;
		if (last_request->initiator ==
		    NL80211_REGDOM_SET_BY_COUNTRY_IE) {
			if (last_wiphy != wiphy) {
				/*
				 * Two cards with two APs claiming different
				 * Country IE alpha2s. We could
				 * intersect them, but that seems unlikely
				 * to be correct. Reject second one for now.
				 */
				if (regdom_changes(pending_request->alpha2))
					return -EOPNOTSUPP;
				return -EALREADY;
			}
			/*
			 * Two consecutive Country IE hints on the same wiphy.
			 * This should be picked up early by the driver/stack
			 */
			if (WARN_ON(regdom_changes(pending_request->alpha2)))
				return 0;
			return -EALREADY;
		}
		return 0;
	case NL80211_REGDOM_SET_BY_DRIVER:
		if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) {
			if (regdom_changes(pending_request->alpha2))
				return 0;
			return -EALREADY;
		}

		/*
		 * This would happen if you unplug and plug your card
		 * back in or if you add a new device for which the previously
		 * loaded card also agrees on the regulatory domain.
		 */
		if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
		    !regdom_changes(pending_request->alpha2))
			return -EALREADY;

		return REG_INTERSECT;
	case NL80211_REGDOM_SET_BY_USER:
		if (last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)
			return REG_INTERSECT;
		/*
		 * If the user knows better the user should set the regdom
		 * to their country before the IE is picked up
		 */
		if (last_request->initiator == NL80211_REGDOM_SET_BY_USER &&
		    last_request->intersect)
			return -EOPNOTSUPP;
		/*
		 * Process user requests only after previous user/driver/core
		 * requests have been processed
		 */
		if ((last_request->initiator == NL80211_REGDOM_SET_BY_CORE ||
		     last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
		     last_request->initiator == NL80211_REGDOM_SET_BY_USER)) {
			if (last_request->intersect) {
				if (!is_cfg80211_regdom_intersected())
					return -EAGAIN;
			} else if (regdom_changes(last_request->alpha2)) {
				return -EAGAIN;
			}
		}

		if (!regdom_changes(pending_request->alpha2))
			return -EALREADY;

		return 0;
	}

	return -EINVAL;
}

/*
 * Mark the current request processed and, if more hints queued up while
 * it was in flight, reschedule the worker to handle them.
 */
static void reg_set_request_processed(void)
{
	bool need_more_processing = false;

	last_request->processed = true;

	spin_lock(&reg_requests_lock);
	if (!list_empty(&reg_requests_list))
		need_more_processing = true;
	spin_unlock(&reg_requests_lock);

	/* only user hints are armed with a timeout, see reg_process_hint() */
	if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
		cancel_delayed_work(&reg_timeout);

	if (need_more_processing)
		schedule_work(&reg_work);
}

/**
 * __regulatory_hint - hint to the wireless core a regulatory domain
 * @wiphy: if the hint comes from
country information from an AP, this * is required to be set to the wiphy that received the information * @pending_request: the regulatory request currently being processed * * The Wireless subsystem can use this function to hint to the wireless core * what it believes should be the current regulatory domain. * * Returns zero if all went fine, %-EALREADY if a regulatory domain had * already been set or other standard error codes. * * Caller must hold &cfg80211_mutex and &reg_mutex */ static int __regulatory_hint(struct wiphy *wiphy, struct regulatory_request *pending_request) { bool intersect = false; int r = 0; assert_cfg80211_lock(); r = ignore_request(wiphy, pending_request); if (r == REG_INTERSECT) { if (pending_request->initiator == NL80211_REGDOM_SET_BY_DRIVER) { r = reg_copy_regd(&wiphy->regd, cfg80211_regdomain); if (r) { kfree(pending_request); return r; } } intersect = true; } else if (r) { /* * If the regulatory domain being requested by the * driver has already been set just copy it to the * wiphy */ if (r == -EALREADY && pending_request->initiator == NL80211_REGDOM_SET_BY_DRIVER) { r = reg_copy_regd(&wiphy->regd, cfg80211_regdomain); if (r) { kfree(pending_request); return r; } r = -EALREADY; goto new_request; } kfree(pending_request); return r; } new_request: if (last_request != &core_request_world) kfree(last_request); last_request = pending_request; last_request->intersect = intersect; pending_request = NULL; if (last_request->initiator == NL80211_REGDOM_SET_BY_USER) { user_alpha2[0] = last_request->alpha2[0]; user_alpha2[1] = last_request->alpha2[1]; } /* When r == REG_INTERSECT we do need to call CRDA */ if (r < 0) { /* * Since CRDA will not be called in this case as we already * have applied the requested regulatory domain before we just * inform userspace we have processed the request */ if (r == -EALREADY) { nl80211_send_reg_change_event(last_request); reg_set_request_processed(); } return r; } return call_crda(last_request->alpha2); } /* This 
processes *all* regulatory hints */ static void reg_process_hint(struct regulatory_request *reg_request, enum nl80211_reg_initiator reg_initiator) { int r = 0; struct wiphy *wiphy = NULL; BUG_ON(!reg_request->alpha2); if (wiphy_idx_valid(reg_request->wiphy_idx)) wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx); if ((reg_initiator == NL80211_REGDOM_SET_BY_DRIVER || reg_initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) && !wiphy) { kfree(reg_request); return; } r = __regulatory_hint(wiphy, reg_request); /* This is required so that the orig_* parameters are saved */ if (r == -EALREADY && wiphy && wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) { wiphy_update_regulatory(wiphy, reg_initiator); return; } /* * We only time out user hints, given that they should be the only * source of bogus requests. */ if (r != -EALREADY && reg_initiator == NL80211_REGDOM_SET_BY_USER) schedule_delayed_work(&reg_timeout, msecs_to_jiffies(3142)); } /* * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_* * Regulatory hints come on a first come first serve basis and we * must process each one atomically. 
 */
/*
 * Pop and process exactly one queued regulatory hint.  If the previous
 * hint has not finished (CRDA has not answered yet) we bail out; the
 * worker is rescheduled from reg_set_request_processed().
 * Lock order: cfg80211_mutex, then reg_mutex, then reg_requests_lock.
 */
static void reg_process_pending_hints(void)
{
	struct regulatory_request *reg_request;

	mutex_lock(&cfg80211_mutex);
	mutex_lock(&reg_mutex);

	/* When last_request->processed becomes true this will be rescheduled */
	if (last_request && !last_request->processed) {
		REG_DBG_PRINT("Pending regulatory request, waiting "
			      "for it to be processed...\n");
		goto out;
	}

	spin_lock(&reg_requests_lock);

	if (list_empty(&reg_requests_list)) {
		spin_unlock(&reg_requests_lock);
		goto out;
	}

	reg_request = list_first_entry(&reg_requests_list,
				       struct regulatory_request,
				       list);
	list_del_init(&reg_request->list);

	spin_unlock(&reg_requests_lock);

	reg_process_hint(reg_request, reg_request->initiator);

out:
	mutex_unlock(&reg_mutex);
	mutex_unlock(&cfg80211_mutex);
}

/* Processes beacon hints -- this has nothing to do with country IEs */
static void reg_process_pending_beacon_hints(void)
{
	struct cfg80211_registered_device *rdev;
	struct reg_beacon *pending_beacon, *tmp;

	/*
	 * No need to hold the reg_mutex here as we just touch wiphys
	 * and do not read or access regulatory variables.
	 */
	mutex_lock(&cfg80211_mutex);

	/* This goes through the _pending_ beacon list */
	spin_lock_bh(&reg_pending_beacons_lock);

	if (list_empty(&reg_pending_beacons)) {
		spin_unlock_bh(&reg_pending_beacons_lock);
		goto out;
	}

	list_for_each_entry_safe(pending_beacon, tmp,
				 &reg_pending_beacons, list) {

		list_del_init(&pending_beacon->list);

		/* Applies the beacon hint to current wiphys */
		list_for_each_entry(rdev, &cfg80211_rdev_list, list)
			wiphy_update_new_beacon(&rdev->wiphy, pending_beacon);

		/* Remembers the beacon hint for new wiphys or reg changes */
		list_add_tail(&pending_beacon->list, &reg_beacon_list);
	}

	spin_unlock_bh(&reg_pending_beacons_lock);
out:
	mutex_unlock(&cfg80211_mutex);
}

/* Workqueue entry point: drain one regulatory hint and all beacon hints */
static void reg_todo(struct work_struct *work)
{
	reg_process_pending_hints();
	reg_process_pending_beacon_hints();
}

/*
 * Normalize the alpha2 to upper case, append the request to the pending
 * list and kick the worker.  Takes ownership of @request.
 */
static void queue_regulatory_request(struct regulatory_request *request)
{
	if (isalpha(request->alpha2[0]))
		request->alpha2[0] = toupper(request->alpha2[0]);
	if (isalpha(request->alpha2[1]))
		request->alpha2[1] = toupper(request->alpha2[1]);

	spin_lock(&reg_requests_lock);
	list_add_tail(&request->list, &reg_requests_list);
	spin_unlock(&reg_requests_lock);

	schedule_work(&reg_work);
}

/*
 * Core regulatory hint -- happens during cfg80211_init()
 * and when we restore regulatory settings.
*/ static int regulatory_hint_core(const char *alpha2) { struct regulatory_request *request; request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM; request->alpha2[0] = alpha2[0]; request->alpha2[1] = alpha2[1]; request->initiator = NL80211_REGDOM_SET_BY_CORE; queue_regulatory_request(request); return 0; } /* User hints */ int regulatory_hint_user(const char *alpha2) { struct regulatory_request *request; BUG_ON(!alpha2); request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM; request->wiphy_idx = WIPHY_IDX_STALE; request->alpha2[0] = alpha2[0]; request->alpha2[1] = alpha2[1]; request->initiator = NL80211_REGDOM_SET_BY_USER; queue_regulatory_request(request); return 0; } EXPORT_SYMBOL(regulatory_hint_user); /* Driver hints */ int regulatory_hint(struct wiphy *wiphy, const char *alpha2) { struct regulatory_request *request; BUG_ON(!alpha2); BUG_ON(!wiphy); request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM; request->wiphy_idx = get_wiphy_idx(wiphy); /* Must have registered wiphy first */ BUG_ON(!wiphy_idx_valid(request->wiphy_idx)); request->alpha2[0] = alpha2[0]; request->alpha2[1] = alpha2[1]; request->initiator = NL80211_REGDOM_SET_BY_DRIVER; queue_regulatory_request(request); return 0; } EXPORT_SYMBOL(regulatory_hint); /* * We hold wdev_lock() here so we cannot hold cfg80211_mutex() and * therefore cannot iterate over the rdev list here. 
 */
/*
 * Handle a country IE received from an AP at association time: validate
 * it, extract the alpha2 and indoor/outdoor environment, and queue a
 * COUNTRY_IE regulatory request.  Invalid or redundant IEs are dropped
 * silently (best-effort hint).
 */
void regulatory_hint_11d(struct wiphy *wiphy,
			 enum ieee80211_band band,
			 u8 *country_ie,
			 u8 country_ie_len)
{
	char alpha2[2];
	enum environment_cap env = ENVIRON_ANY;
	struct regulatory_request *request;

	mutex_lock(&reg_mutex);

	if (unlikely(!last_request))
		goto out;

	/* IE len must be evenly divisible by 2 */
	if (country_ie_len & 0x01)
		goto out;

	if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN)
		goto out;

	alpha2[0] = country_ie[0];
	alpha2[1] = country_ie[1];

	/* third octet of the country string: Indoor/Outdoor qualifier */
	if (country_ie[2] == 'I')
		env = ENVIRON_INDOOR;
	else if (country_ie[2] == 'O')
		env = ENVIRON_OUTDOOR;

	/*
	 * We will run this only upon a successful connection on cfg80211.
	 * We leave conflict resolution to the workqueue, where we can hold
	 * cfg80211_mutex.
	 */
	if (likely(last_request->initiator ==
	    NL80211_REGDOM_SET_BY_COUNTRY_IE &&
	    wiphy_idx_valid(last_request->wiphy_idx)))
		goto out;

	request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
	if (!request)
		goto out;

	request->wiphy_idx = get_wiphy_idx(wiphy);
	request->alpha2[0] = alpha2[0];
	request->alpha2[1] = alpha2[1];
	request->initiator = NL80211_REGDOM_SET_BY_COUNTRY_IE;
	request->country_ie_env = env;

	/* drop reg_mutex before queueing, see lock ordering note above */
	mutex_unlock(&reg_mutex);

	queue_regulatory_request(request);

	return;

out:
	mutex_unlock(&reg_mutex);
}

/*
 * Pick the alpha2 to re-hint after a restore: "97" means "nothing to
 * restore"; otherwise prefer the saved user setting (unless we are asked
 * to reset it) and fall back to the ieee80211_regdom module parameter.
 */
static void restore_alpha2(char *alpha2, bool reset_user)
{
	/* indicates there is no alpha2 to consider for restoration */
	alpha2[0] = '9';
	alpha2[1] = '7';

	/* The user setting has precedence over the module parameter */
	if (is_user_regdom_saved()) {
		/* Unless we're asked to ignore it and reset it */
		if (reset_user) {
			REG_DBG_PRINT("Restoring regulatory settings "
			       "including user preference\n");
			user_alpha2[0] = '9';
			user_alpha2[1] = '7';

			/*
			 * If we're ignoring user settings, we still need to
			 * check the module parameter to ensure we put things
			 * back as they were for a full restore.
			 */
			if (!is_world_regdom(ieee80211_regdom)) {
				REG_DBG_PRINT("Keeping preference on "
				       "module parameter ieee80211_regdom: %c%c\n",
				       ieee80211_regdom[0],
				       ieee80211_regdom[1]);
				alpha2[0] = ieee80211_regdom[0];
				alpha2[1] = ieee80211_regdom[1];
			}
		} else {
			REG_DBG_PRINT("Restoring regulatory settings "
			       "while preserving user preference for: %c%c\n",
			       user_alpha2[0],
			       user_alpha2[1]);
			alpha2[0] = user_alpha2[0];
			alpha2[1] = user_alpha2[1];
		}
	} else if (!is_world_regdom(ieee80211_regdom)) {
		REG_DBG_PRINT("Keeping preference on "
		       "module parameter ieee80211_regdom: %c%c\n",
		       ieee80211_regdom[0],
		       ieee80211_regdom[1]);
		alpha2[0] = ieee80211_regdom[0];
		alpha2[1] = ieee80211_regdom[1];
	} else
		REG_DBG_PRINT("Restoring regulatory settings\n");
}

/* Reset every channel of a custom-regulatory wiphy to its orig_* values */
static void restore_custom_reg_settings(struct wiphy *wiphy)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	struct ieee80211_channel *chan;
	int i;

	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		sband = wiphy->bands[band];
		if (!sband)
			continue;
		for (i = 0; i < sband->n_channels; i++) {
			chan = &sband->channels[i];
			chan->flags = chan->orig_flags;
			chan->max_antenna_gain = chan->orig_mag;
			chan->max_power = chan->orig_mpwr;
		}
	}
}

/*
 * Restoring regulatory settings involves ignoring any
 * possibly stale country IE information and user regulatory
 * settings if so desired, this includes any beacon hints
 * learned as we could have traveled outside to another country
 * after disconnection. To restore regulatory settings we do
 * exactly what we did at bootup:
 *
 *   - send a core regulatory hint
 *   - send a user regulatory hint if applicable
 *
 * Device drivers that send a regulatory hint for a specific country
 * keep their own regulatory domain on wiphy->regd so that does
 * not need to be remembered.
*/
static void restore_regulatory_settings(bool reset_user)
{
	char alpha2[2];
	char world_alpha2[2];
	struct reg_beacon *reg_beacon, *btmp;
	struct regulatory_request *reg_request, *tmp;
	LIST_HEAD(tmp_reg_req_list);
	struct cfg80211_registered_device *rdev;

	/* Lock order: cfg80211_mutex before reg_mutex, as elsewhere. */
	mutex_lock(&cfg80211_mutex);
	mutex_lock(&reg_mutex);

	reset_regdomains(true);
	restore_alpha2(alpha2, reset_user);

	/*
	 * If there's any pending requests we simply
	 * stash them to a temporary pending queue and
	 * add then after we've restored regulatory
	 * settings.
	 */
	spin_lock(&reg_requests_lock);
	if (!list_empty(&reg_requests_list)) {
		/* Only user requests survive the reset. */
		list_for_each_entry_safe(reg_request, tmp,
					 &reg_requests_list, list) {
			if (reg_request->initiator !=
			    NL80211_REGDOM_SET_BY_USER)
				continue;
			list_del(&reg_request->list);
			list_add_tail(&reg_request->list, &tmp_reg_req_list);
		}
	}
	spin_unlock(&reg_requests_lock);

	/* Clear beacon hints */
	spin_lock_bh(&reg_pending_beacons_lock);
	if (!list_empty(&reg_pending_beacons)) {
		list_for_each_entry_safe(reg_beacon, btmp,
					 &reg_pending_beacons, list) {
			list_del(&reg_beacon->list);
			kfree(reg_beacon);
		}
	}
	spin_unlock_bh(&reg_pending_beacons_lock);

	if (!list_empty(&reg_beacon_list)) {
		list_for_each_entry_safe(reg_beacon, btmp,
					 &reg_beacon_list, list) {
			list_del(&reg_beacon->list);
			kfree(reg_beacon);
		}
	}

	/* First restore to the basic regulatory settings */
	cfg80211_regdomain = cfg80211_world_regdom;
	world_alpha2[0] = cfg80211_regdomain->alpha2[0];
	world_alpha2[1] = cfg80211_regdomain->alpha2[1];

	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
		if (rdev->wiphy.flags & WIPHY_FLAG_CUSTOM_REGULATORY)
			restore_custom_reg_settings(&rdev->wiphy);
	}

	mutex_unlock(&reg_mutex);
	mutex_unlock(&cfg80211_mutex);

	regulatory_hint_core(world_alpha2);

	/*
	 * This restores the ieee80211_regdom module parameter
	 * preference or the last user requested regulatory
	 * settings, user regulatory settings takes precedence.
	 */
	/*
	 * NOTE(review): the guard tests alpha2 (set by restore_alpha2())
	 * but the hint passes user_alpha2; when reset_user was requested
	 * with a non-world module parameter these differ ('97' vs the
	 * module value) — verify against upstream before relying on it.
	 */
	if (is_an_alpha2(alpha2))
		regulatory_hint_user(user_alpha2);

	if (list_empty(&tmp_reg_req_list))
		return;

	mutex_lock(&cfg80211_mutex);
	mutex_lock(&reg_mutex);

	/* Re-queue the user requests stashed above. */
	spin_lock(&reg_requests_lock);
	list_for_each_entry_safe(reg_request, tmp, &tmp_reg_req_list, list) {
		REG_DBG_PRINT("Adding request for country %c%c back "
			      "into the queue\n",
			      reg_request->alpha2[0],
			      reg_request->alpha2[1]);
		list_del(&reg_request->list);
		list_add_tail(&reg_request->list, &reg_requests_list);
	}
	spin_unlock(&reg_requests_lock);

	mutex_unlock(&reg_mutex);
	mutex_unlock(&cfg80211_mutex);

	REG_DBG_PRINT("Kicking the queue\n");

	schedule_work(&reg_work);
}

/* Called when the last device disconnects: discard stale hints. */
void regulatory_hint_disconnect(void)
{
	REG_DBG_PRINT("All devices are disconnected, going to "
		      "restore regulatory settings\n");
	restore_regulatory_settings(false);
}

/* True for the 2.4 GHz channels that are not world-roaming safe. */
static bool freq_is_chan_12_13_14(u16 freq)
{
	if (freq == ieee80211_channel_to_frequency(12, IEEE80211_BAND_2GHZ) ||
	    freq == ieee80211_channel_to_frequency(13, IEEE80211_BAND_2GHZ) ||
	    freq == ieee80211_channel_to_frequency(14, IEEE80211_BAND_2GHZ))
		return true;
	return false;
}

/*
 * Record that a beacon was seen on a channel we would otherwise not
 * enable; queued for the regulatory workqueue to process.
 */
int regulatory_hint_found_beacon(struct wiphy *wiphy,
				 struct ieee80211_channel *beacon_chan,
				 gfp_t gfp)
{
	struct reg_beacon *reg_beacon;

	/* Nothing to do if already found, radar, or an always-on channel. */
	if (likely((beacon_chan->beacon_found ||
	    (beacon_chan->flags & IEEE80211_CHAN_RADAR) ||
	    (beacon_chan->band == IEEE80211_BAND_2GHZ &&
	     !freq_is_chan_12_13_14(beacon_chan->center_freq)))))
		return 0;

	reg_beacon = kzalloc(sizeof(struct reg_beacon), gfp);
	if (!reg_beacon)
		return -ENOMEM;

	REG_DBG_PRINT("Found new beacon on "
		      "frequency: %d MHz (Ch %d) on %s\n",
		      beacon_chan->center_freq,
		      ieee80211_frequency_to_channel(beacon_chan->center_freq),
		      wiphy_name(wiphy));

	memcpy(&reg_beacon->chan, beacon_chan,
	       sizeof(struct ieee80211_channel));

	/*
	 * Since we can be called from BH or non-BH context
	 * we must use spin_lock_bh()
	 */
	spin_lock_bh(&reg_pending_beacons_lock);
	list_add_tail(&reg_beacon->list, &reg_pending_beacons);
	spin_unlock_bh(&reg_pending_beacons_lock);
schedule_work(&reg_work); return 0; } static void print_rd_rules(const struct ieee80211_regdomain *rd) { unsigned int i; const struct ieee80211_reg_rule *reg_rule = NULL; const struct ieee80211_freq_range *freq_range = NULL; const struct ieee80211_power_rule *power_rule = NULL; pr_info(" (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp)\n"); for (i = 0; i < rd->n_reg_rules; i++) { reg_rule = &rd->reg_rules[i]; freq_range = &reg_rule->freq_range; power_rule = &reg_rule->power_rule; /* * There may not be documentation for max antenna gain * in certain regions */ if (power_rule->max_antenna_gain) pr_info(" (%d KHz - %d KHz @ %d KHz), (%d mBi, %d mBm)\n", freq_range->start_freq_khz, freq_range->end_freq_khz, freq_range->max_bandwidth_khz, power_rule->max_antenna_gain, power_rule->max_eirp); else pr_info(" (%d KHz - %d KHz @ %d KHz), (N/A, %d mBm)\n", freq_range->start_freq_khz, freq_range->end_freq_khz, freq_range->max_bandwidth_khz, power_rule->max_eirp); } } bool reg_supported_dfs_region(u8 dfs_region) { switch (dfs_region) { case NL80211_DFS_UNSET: case NL80211_DFS_FCC: case NL80211_DFS_ETSI: case NL80211_DFS_JP: return true; default: REG_DBG_PRINT("Ignoring uknown DFS master region: %d\n", dfs_region); return false; } } static void print_dfs_region(u8 dfs_region) { if (!dfs_region) return; switch (dfs_region) { case NL80211_DFS_FCC: pr_info(" DFS Master region FCC"); break; case NL80211_DFS_ETSI: pr_info(" DFS Master region ETSI"); break; case NL80211_DFS_JP: pr_info(" DFS Master region JP"); break; default: pr_info(" DFS Master region Uknown"); break; } } static void print_regdomain(const struct ieee80211_regdomain *rd) { if (is_intersected_alpha2(rd->alpha2)) { if (last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) { struct cfg80211_registered_device *rdev; rdev = cfg80211_rdev_by_wiphy_idx( last_request->wiphy_idx); if (rdev) { pr_info("Current regulatory domain updated by AP to: %c%c\n", rdev->country_ie_alpha2[0], 
					rdev->country_ie_alpha2[1]);
			} else
				pr_info("Current regulatory domain intersected:\n");
		} else
			pr_info("Current regulatory domain intersected:\n");
	} else if (is_world_regdom(rd->alpha2))
		pr_info("World regulatory domain updated:\n");
	else {
		if (is_unknown_alpha2(rd->alpha2))
			pr_info("Regulatory domain changed to driver built-in settings (unknown country)\n");
		else
			pr_info("Regulatory domain changed to country: %c%c\n",
				rd->alpha2[0], rd->alpha2[1]);
	}
	print_dfs_region(rd->dfs_region);
	print_rd_rules(rd);
}

/* Log alpha2 and rules of a candidate regdomain (e.g. on rejection). */
static void print_regdomain_info(const struct ieee80211_regdomain *rd)
{
	pr_info("Regulatory domain: %c%c\n", rd->alpha2[0], rd->alpha2[1]);
	print_rd_rules(rd);
}

/* Takes ownership of rd only if it doesn't fail */
static int __set_regdom(const struct ieee80211_regdomain *rd)
{
	const struct ieee80211_regdomain *intersected_rd = NULL;
	struct cfg80211_registered_device *rdev = NULL;
	struct wiphy *request_wiphy;
	/* Some basic sanity checks first */

	if (is_world_regdom(rd->alpha2)) {
		if (WARN_ON(!reg_is_valid_request(rd->alpha2)))
			return -EINVAL;
		update_world_regdomain(rd);
		return 0;
	}

	if (!is_alpha2_set(rd->alpha2) && !is_an_alpha2(rd->alpha2) &&
			!is_unknown_alpha2(rd->alpha2))
		return -EINVAL;

	if (!last_request)
		return -EINVAL;

	/*
	 * Lets only bother proceeding on the same alpha2 if the current
	 * rd is non static (it means CRDA was present and was used last)
	 * and the pending request came in from a country IE
	 */
	if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
		/*
		 * If someone else asked us to change the rd lets only bother
		 * checking if the alpha2 changes if CRDA was already called
		 */
		if (!regdom_changes(rd->alpha2))
			return -EALREADY;
	}

	/*
	 * Now lets set the regulatory domain, update all driver channels
	 * and finally inform them of what we have done, in case they want
	 * to review or adjust their own settings based on their own
	 * internal EEPROM data
	 */

	if (WARN_ON(!reg_is_valid_request(rd->alpha2)))
		return -EINVAL;

	if (!is_valid_rd(rd)) {
		pr_err("Invalid regulatory domain detected:\n");
		print_regdomain_info(rd);
		return -EINVAL;
	}

	request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
	if (!request_wiphy &&
	    (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
	     last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)) {
		/* Requesting wiphy went away; retry via the timeout path. */
		schedule_delayed_work(&reg_timeout, 0);
		return -ENODEV;
	}

	if (!last_request->intersect) {
		int r;

		if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) {
			reset_regdomains(false);
			cfg80211_regdomain = rd;
			return 0;
		}

		/*
		 * For a driver hint, lets copy the regulatory domain the
		 * driver wanted to the wiphy to deal with conflicts
		 */

		/*
		 * Userspace could have sent two replies with only
		 * one kernel request.
		 */
		if (request_wiphy->regd)
			return -EALREADY;

		r = reg_copy_regd(&request_wiphy->regd, rd);
		if (r)
			return r;

		reset_regdomains(false);
		cfg80211_regdomain = rd;
		return 0;
	}

	/* Intersection requires a bit more work */

	if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
		intersected_rd = regdom_intersect(rd, cfg80211_regdomain);
		if (!intersected_rd)
			return -EINVAL;

		/*
		 * We can trash what CRDA provided now.
		 * However if a driver requested this specific regulatory
		 * domain we keep it for its private use
		 */
		if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER) {
			const struct ieee80211_regdomain *tmp;

			tmp = request_wiphy->regd;
			request_wiphy->regd = rd;
			kfree(tmp);
		} else {
			kfree(rd);
		}

		rd = NULL;

		reset_regdomains(false);
		cfg80211_regdomain = intersected_rd;

		return 0;
	}

	/*
	 * NOTE(review): on the COUNTRY_IE intersect path intersected_rd is
	 * still NULL here, so this always returns -EINVAL and the rdev
	 * assignment below appears unreachable — looks like leftover dead
	 * code from removed country-IE regdomain handling; confirm upstream.
	 */
	if (!intersected_rd)
		return -EINVAL;

	rdev = wiphy_to_dev(request_wiphy);

	rdev->country_ie_alpha2[0] = rd->alpha2[0];
	rdev->country_ie_alpha2[1] = rd->alpha2[1];
	rdev->env = last_request->country_ie_env;

	BUG_ON(intersected_rd == rd);

	kfree(rd);
	rd = NULL;

	reset_regdomains(false);
	cfg80211_regdomain = intersected_rd;

	return 0;
}

/*
 * Use this call to set the current regulatory domain. Conflicts with
 * multiple drivers can be ironed out later.
 Caller must've already
 * kmalloc'd the rd structure. Caller must hold cfg80211_mutex
 */
int set_regdom(const struct ieee80211_regdomain *rd)
{
	int r;

	assert_cfg80211_lock();

	mutex_lock(&reg_mutex);

	/* Note that this doesn't update the wiphys, this is done below */
	r = __set_regdom(rd);
	if (r) {
		/* __set_regdom() did not take ownership; free rd here. */
		if (r == -EALREADY)
			reg_set_request_processed();
		kfree(rd);
		mutex_unlock(&reg_mutex);
		return r;
	}

	/* This would make this whole thing pointless */
	if (!last_request->intersect)
		BUG_ON(rd != cfg80211_regdomain);

	/* update all wiphys now with the new established regulatory domain */
	update_all_wiphy_regulatory(last_request->initiator);

	print_regdomain(cfg80211_regdomain);

	nl80211_send_reg_change_event(last_request);

	reg_set_request_processed();

	mutex_unlock(&reg_mutex);

	return r;
}

#ifdef CONFIG_HOTPLUG
/* Add COUNTRY= to the uevent environment for unprocessed requests. */
int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	if (last_request && !last_request->processed) {
		if (add_uevent_var(env, "COUNTRY=%c%c",
				   last_request->alpha2[0],
				   last_request->alpha2[1]))
			return -ENOMEM;
	}

	return 0;
}
#else
int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	return -ENODEV;
}
#endif /* CONFIG_HOTPLUG */

/* Caller must hold cfg80211_mutex */
void reg_device_remove(struct wiphy *wiphy)
{
	struct wiphy *request_wiphy = NULL;

	assert_cfg80211_lock();

	mutex_lock(&reg_mutex);

	kfree(wiphy->regd);

	if (last_request)
		request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);

	if (!request_wiphy || request_wiphy != wiphy)
		goto out;

	/* The last request came from this wiphy: mark it stale. */
	last_request->wiphy_idx = WIPHY_IDX_STALE;
	last_request->country_ie_env = ENVIRON_ANY;
out:
	mutex_unlock(&reg_mutex);
}

/* CRDA never answered: fall back to a full regulatory restore. */
static void reg_timeout_work(struct work_struct *work)
{
	REG_DBG_PRINT("Timeout while waiting for CRDA to reply, "
		      "restoring regulatory settings\n");
	restore_regulatory_settings(true);
}

int __init regulatory_init(void)
{
	int err = 0;

	reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0);
	if (IS_ERR(reg_pdev))
		return PTR_ERR(reg_pdev);

	reg_pdev->dev.type = &reg_device_type;

	spin_lock_init(&reg_requests_lock);
	spin_lock_init(&reg_pending_beacons_lock);

	reg_regdb_size_check();

	cfg80211_regdomain = cfg80211_world_regdom;

	user_alpha2[0] = '9';
	user_alpha2[1] = '7';

	/* We always try to get an update for the static regdomain */
	err = regulatory_hint_core(cfg80211_regdomain->alpha2);
	if (err) {
		if (err == -ENOMEM)
			return err;
		/*
		 * N.B. kobject_uevent_env() can fail mainly for when we're out
		 * of memory which is handled and propagated appropriately above
		 * but it can also fail during a netlink_broadcast() or during
		 * early boot for call_usermodehelper(). For now treat these
		 * errors as non-fatal.
		 */
		pr_err("kobject_uevent_env() was unable to call CRDA during init\n");
#ifdef CONFIG_CFG80211_REG_DEBUG
		/* We want to find out exactly why when debugging */
		WARN_ON(err);
#endif
	}

	/*
	 * Finally, if the user set the module parameter treat it
	 * as a user hint.
	 */
	if (!is_world_regdom(ieee80211_regdom))
		regulatory_hint_user(ieee80211_regdom);

	return 0;
}

void /* __init_or_exit */ regulatory_exit(void)
{
	struct regulatory_request *reg_request, *tmp;
	struct reg_beacon *reg_beacon, *btmp;

	/* Stop asynchronous work before tearing state down. */
	cancel_work_sync(&reg_work);
	cancel_delayed_work_sync(&reg_timeout);

	mutex_lock(&cfg80211_mutex);
	mutex_lock(&reg_mutex);

	reset_regdomains(true);

	dev_set_uevent_suppress(&reg_pdev->dev, true);

	platform_device_unregister(reg_pdev);

	spin_lock_bh(&reg_pending_beacons_lock);
	if (!list_empty(&reg_pending_beacons)) {
		list_for_each_entry_safe(reg_beacon, btmp,
					 &reg_pending_beacons, list) {
			list_del(&reg_beacon->list);
			kfree(reg_beacon);
		}
	}
	spin_unlock_bh(&reg_pending_beacons_lock);

	if (!list_empty(&reg_beacon_list)) {
		list_for_each_entry_safe(reg_beacon, btmp,
					 &reg_beacon_list, list) {
			list_del(&reg_beacon->list);
			kfree(reg_beacon);
		}
	}

	spin_lock(&reg_requests_lock);
	if (!list_empty(&reg_requests_list)) {
		list_for_each_entry_safe(reg_request, tmp,
					 &reg_requests_list, list) {
			list_del(&reg_request->list);
			kfree(reg_request);
		}
	}
	spin_unlock(&reg_requests_lock);

	mutex_unlock(&reg_mutex);
	mutex_unlock(&cfg80211_mutex);
}
gpl-2.0
pexip/os-valgrind
none/tests/process_vm_readv_writev.c
75
1985
#define _GNU_SOURCE 1 #include <config.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <unistd.h> #include <sys/types.h> #include <sys/uio.h> static int status = EXIT_SUCCESS; #ifdef HAVE_PROCESS_VM_READV static void test_process_vm_readv() { char lbuf[] = "123456"; char rbuf[] = "ABCDEF"; struct iovec lvec[2]; struct iovec rvec[2]; lvec[0].iov_base = lbuf + 1; lvec[0].iov_len = 1; lvec[1].iov_base = lbuf + 3; lvec[1].iov_len = 2; rvec[0].iov_base = rbuf + 1; rvec[0].iov_len = 2; rvec[1].iov_base = rbuf + 4; rvec[1].iov_len = 1; if (process_vm_readv(getpid(), lvec, 2, rvec, 2, 0 ) < 0 ) { perror("process_vm_readv"); status = EXIT_FAILURE; } if (strcmp(lbuf, "1B3CE6") != 0) { fprintf(stderr, "Expected: \"1B3CE6\"; Got: \"%s\"\n", lbuf); status = EXIT_FAILURE; } } #endif /* defined( HAVE_PROCESS_VM_READV ) */ #ifdef HAVE_PROCESS_VM_WRITEV static void test_process_vm_writev() { char lbuf[] = "123456"; char rbuf[] = "ABCDEF"; struct iovec lvec[2]; struct iovec rvec[2]; lvec[0].iov_base = lbuf + 1; lvec[0].iov_len = 1; lvec[1].iov_base = lbuf + 3; lvec[1].iov_len = 2; rvec[0].iov_base = rbuf + 1; rvec[0].iov_len = 2; rvec[1].iov_base = rbuf + 4; rvec[1].iov_len = 1; if (process_vm_writev(getpid(), lvec, 2, rvec, 2, 0 ) < 0 ) { perror("process_vm_writev"); status = EXIT_FAILURE; } if (strcmp(rbuf, "A24D5F") != 0) { fprintf(stderr, "Expected: \"A24D5F\"; Got: \"%s\"\n", rbuf); status = EXIT_FAILURE; } } #endif /* defined( HAVE_PROCESS_VM_WRITEV ) */ int main(int argc, char *argv[]) { #ifdef HAVE_PROCESS_VM_READV test_process_vm_readv(); #endif #ifdef HAVE_PROCESS_VM_WRITEV test_process_vm_writev(); #endif return status; }
gpl-2.0
LCameron/linux-xlnx
mm/mmap.c
75
91981
/* * mm/mmap.c * * Written by obz. * * Address space accounting code <alan@lxorguk.ukuu.org.uk> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/backing-dev.h> #include <linux/mm.h> #include <linux/vmacache.h> #include <linux/shm.h> #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/syscalls.h> #include <linux/capability.h> #include <linux/init.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/personality.h> #include <linux/security.h> #include <linux/hugetlb.h> #include <linux/profile.h> #include <linux/export.h> #include <linux/mount.h> #include <linux/mempolicy.h> #include <linux/rmap.h> #include <linux/mmu_notifier.h> #include <linux/mmdebug.h> #include <linux/perf_event.h> #include <linux/audit.h> #include <linux/khugepaged.h> #include <linux/uprobes.h> #include <linux/rbtree_augmented.h> #include <linux/sched/sysctl.h> #include <linux/notifier.h> #include <linux/memory.h> #include <linux/printk.h> #include <asm/uaccess.h> #include <asm/cacheflush.h> #include <asm/tlb.h> #include <asm/mmu_context.h> #include "internal.h" #ifndef arch_mmap_check #define arch_mmap_check(addr, len, flags) (0) #endif #ifndef arch_rebalance_pgtables #define arch_rebalance_pgtables(addr, len) (addr) #endif static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long start, unsigned long end); /* description of effects of mapping type and prot in current implementation. * this is due to the limited x86 page protection hardware. 
 The expected
 * behavior is in parens:
 *
 * map_type	prot
 *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
 * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

/* Map VM_READ/WRITE/EXEC/SHARED flag bits to hardware page protections. */
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
			pgprot_val(arch_vm_get_page_prot(vm_flags)));
}
EXPORT_SYMBOL(vm_get_page_prot);

/* Recompute page protection from vm_flags while preserving other bits. */
static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
void vma_set_page_prot(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;

	vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
	if (vma_wants_writenotify(vma)) {
		/* Drop VM_SHARED so writes fault and can be tracked. */
		vm_flags &= ~VM_SHARED;
		vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot,
						     vm_flags);
	}
}

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;  /* heuristic overcommit */
int sysctl_overcommit_ratio __read_mostly = 50;	/* default is 50% */
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
/*
 * Make sure vm_committed_as in one cacheline and not cacheline shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/*
 * Requires inode->i_mapping->i_mmap_rwsem
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
		struct file *file, struct address_space *mapping)
{
	if (vma->vm_flags & VM_DENYWRITE)
		atomic_inc(&file_inode(file)->i_writecount);
	if (vma->vm_flags & VM_SHARED)
		mapping_unmap_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	if (file) {
		struct address_space *mapping = file->f_mapping;
		i_mmap_lock_write(mapping);
		__remove_shared_vm_struct(vma, file, mapping);
		i_mmap_unlock_write(mapping);
	}
}

/*
 * Close a vm structure and free it, returning the next.
 */
static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *next = vma->vm_next;

	might_sleep();
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	kmem_cache_free(vm_area_cachep, vma);
	return next;
}

static unsigned long do_brk(unsigned long addr, unsigned long len);

/* brk(2): grow or shrink the heap segment of the calling process. */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long retval;
	unsigned long newbrk, oldbrk;
	struct mm_struct *mm = current->mm;
	unsigned long min_brk;
	bool populate;

	down_write(&mm->mmap_sem);

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the test
	 * of oldbrk with newbrk then it can escape the test and let the data
	 * segment grow beyond its set limit in the case where the limit is
	 * not page aligned -Ram Gupta
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
			      mm->end_data, mm->start_data))
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk)
		goto set_brk;

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
			goto set_brk;
		goto out;
	}

	/* Check against existing mmap mappings. */
	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
		goto out;

set_brk:
	mm->brk = brk;
	/* Populate pages immediately if mlockall(MCL_FUTURE) is in force. */
	populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
	up_write(&mm->mmap_sem);
	if (populate)
		mm_populate(oldbrk, newbrk - oldbrk);
	return brk;

out:
	retval = mm->brk;
	up_write(&mm->mmap_sem);
	return retval;
}

/*
 * Largest gap (space before a vma) anywhere in this vma's subtree,
 * including the gap immediately before the vma itself.
 */
static long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
	unsigned long max, subtree_gap;
	max = vma->vm_start;
	if (vma->vm_prev)
		max -= vma->vm_prev->vm_end;
	if (vma->vm_rb.rb_left) {
		subtree_gap = rb_entry(vma->vm_rb.rb_left,
				struct vm_area_struct, vm_rb)->rb_subtree_gap;
		if (subtree_gap > max)
			max = subtree_gap;
	}
	if (vma->vm_rb.rb_right) {
		subtree_gap = rb_entry(vma->vm_rb.rb_right,
				struct vm_area_struct, vm_rb)->rb_subtree_gap;
		if (subtree_gap > max)
			max = subtree_gap;
	}
	return max;
}

#ifdef CONFIG_DEBUG_VM_RB
/* Walk the rbtree checking ordering and gap invariants; returns count or -1. */
static int browse_rb(struct rb_root *root)
{
	int i = 0, j, bug = 0;
	struct rb_node *nd, *pn = NULL;
	unsigned long prev = 0, pend = 0;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		if (vma->vm_start < prev) {
			pr_emerg("vm_start %lx < prev %lx\n",
				  vma->vm_start, prev);
			bug = 1;
		}
		if (vma->vm_start < pend) {
			pr_emerg("vm_start %lx < pend %lx\n",
				  vma->vm_start, pend);
			bug = 1;
		}
		if (vma->vm_start > vma->vm_end) {
			pr_emerg("vm_start %lx > vm_end %lx\n",
				  vma->vm_start, vma->vm_end);
			bug = 1;
		}
		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
			pr_emerg("free gap %lx, correct %lx\n",
			       vma->rb_subtree_gap,
			       vma_compute_subtree_gap(vma));
			bug = 1;
		}
		i++;
		pn = nd;
		prev = vma->vm_start;
		pend = vma->vm_end;
	}
	j = 0;
	for (nd = pn; nd; nd = rb_prev(nd))
		j++;
	if (i != j) {
		pr_emerg("backwards %d, forwards %d\n", j, i);
		bug = 1;
	}
	return bug ?
 -1 : i;
}

/* Check gap values across the tree; "ignore" may be mid-update. */
static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
{
	struct rb_node *nd;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		VM_BUG_ON_VMA(vma != ignore &&
			vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
			vma);
	}
}

/* Full consistency check of an mm's vma list, rbtree and anon_vma trees. */
static void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	unsigned long highest_address = 0;
	struct vm_area_struct *vma = mm->mmap;

	while (vma) {
		struct anon_vma_chain *avc;

		vma_lock_anon_vma(vma);
		list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
			anon_vma_interval_tree_verify(avc);
		vma_unlock_anon_vma(vma);
		highest_address = vma->vm_end;
		vma = vma->vm_next;
		i++;
	}
	if (i != mm->map_count) {
		pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
		bug = 1;
	}
	if (highest_address != mm->highest_vm_end) {
		pr_emerg("mm->highest_vm_end %lx, found %lx\n",
			  mm->highest_vm_end, highest_address);
		bug = 1;
	}
	i = browse_rb(&mm->mm_rb);
	if (i != mm->map_count) {
		if (i != -1)
			pr_emerg("map_count %d rb %d\n", mm->map_count, i);
		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}
#else
#define validate_mm_rb(root, ignore) do { } while (0)
#define validate_mm(mm) do { } while (0)
#endif

RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
		     unsigned long, rb_subtree_gap, vma_compute_subtree_gap)

/*
 * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
 * vma->vm_prev->vm_end values changed, without modifying the vma's position
 * in the rbtree.
 */
static void vma_gap_update(struct vm_area_struct *vma)
{
	/*
	 * As it turns out, RB_DECLARE_CALLBACKS() already created a callback
	 * function that does exactly what we want.
	 */
	vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
}

static inline void vma_rb_insert(struct vm_area_struct *vma,
				 struct rb_root *root)
{
	/* All rb_subtree_gap values must be consistent prior to insertion */
	validate_mm_rb(root, NULL);

	rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}

static void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
{
	/*
	 * All rb_subtree_gap values must be consistent prior to erase,
	 * with the possible exception of the vma being erased.
	 */
	validate_mm_rb(root, vma);

	/*
	 * Note rb_erase_augmented is a fairly large inline function,
	 * so make sure we instantiate it only once with our desired
	 * augmented rbtree callbacks.
	 */
	rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}

/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_sem and by
 * the root anon_vma's mutex.
 */
static inline void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

static inline void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}

/*
 * Walk the rbtree to find the insertion point for a new vma covering
 * [addr, end).  On success, *pprev / *rb_link / *rb_parent describe
 * where the vma should be linked; returns -ENOMEM if an existing vma
 * overlaps the requested range.
 */
static int find_vma_links(struct mm_struct *mm, unsigned long addr,
		unsigned long end, struct vm_area_struct **pprev,
		struct rb_node ***rb_link, struct rb_node **rb_parent)
{
	struct rb_node **__rb_link, *__rb_parent, *rb_prev;

	__rb_link = &mm->mm_rb.rb_node;
	rb_prev = __rb_parent = NULL;

	while (*__rb_link) {
		struct vm_area_struct *vma_tmp;

		__rb_parent = *__rb_link;
		vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);

		if (vma_tmp->vm_end > addr) {
			/* Fail if an existing vma overlaps the area */
			if (vma_tmp->vm_start < end)
				return -ENOMEM;
			__rb_link = &__rb_parent->rb_left;
		} else {
			rb_prev = __rb_parent;
			__rb_link = &__rb_parent->rb_right;
		}
	}

	*pprev = NULL;
	if (rb_prev)
		*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
	*rb_link = __rb_link;
	*rb_parent = __rb_parent;
	return 0;
}

/*
 * Count the number of pages in [addr, end) that are already covered
 * by existing vmas.
 */
static unsigned long count_vma_pages_range(struct mm_struct *mm,
		unsigned long addr, unsigned long end)
{
	unsigned long nr_pages = 0;
	struct vm_area_struct *vma;

	/* Find first overlapping mapping */
	vma = find_vma_intersection(mm, addr, end);
	if (!vma)
		return 0;

	nr_pages = (min(end, vma->vm_end) -
		max(addr, vma->vm_start)) >> PAGE_SHIFT;

	/* Iterate over the rest of the overlaps */
	for (vma = vma->vm_next; vma; vma = vma->vm_next) {
		unsigned long overlap_len;

		if (vma->vm_start > end)
			break;

		overlap_len = min(end, vma->vm_end) - vma->vm_start;
		nr_pages += overlap_len >> PAGE_SHIFT;
	}

	return nr_pages;
}

/*
 * Link @vma into mm's rbtree at the position found by find_vma_links(),
 * and bring the cached rb_subtree_gap values back into a consistent state.
 */
void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
		struct rb_node **rb_link, struct rb_node *rb_parent)
{
	/* Update tracking information for the gap following the new vma. */
	if (vma->vm_next)
		vma_gap_update(vma->vm_next);
	else
		mm->highest_vm_end = vma->vm_end;

	/*
	 * vma->vm_prev wasn't known when we followed the rbtree to find the
	 * correct insertion point for that vma. As a result, we could not
	 * update the vma vm_rb parents rb_subtree_gap values on the way down.
	 * So, we first insert the vma with a zero rb_subtree_gap value
	 * (to be consistent with what we did on the way down), and then
	 * immediately update the gap to the correct value. Finally we
	 * rebalance the rbtree after all augmented values have been set.
	 */
	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
	vma->rb_subtree_gap = 0;
	vma_gap_update(vma);
	vma_rb_insert(vma, &mm->mm_rb);
}

/*
 * If the vma is file-backed, insert it into the file's i_mmap interval
 * tree and maintain the writecount / writable accounting on the file.
 */
static void __vma_link_file(struct vm_area_struct *vma)
{
	struct file *file;

	file = vma->vm_file;
	if (file) {
		struct address_space *mapping = file->f_mapping;

		if (vma->vm_flags & VM_DENYWRITE)
			atomic_dec(&file_inode(file)->i_writecount);
		if (vma->vm_flags & VM_SHARED)
			atomic_inc(&mapping->i_mmap_writable);

		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}
}

/* Link @vma into both the mm's vma list and the rbtree. */
static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
	struct vm_area_struct *prev, struct rb_node **rb_link,
	struct rb_node *rb_parent)
{
	__vma_link_list(mm, vma, prev, rb_parent);
	__vma_link_rb(mm, vma, rb_link, rb_parent);
}

/*
 * Fully link @vma: list, rbtree and (if file-backed) the i_mmap tree,
 * taking the i_mmap lock as needed, and bump mm->map_count.
 */
static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
			struct vm_area_struct *prev, struct rb_node **rb_link,
			struct rb_node *rb_parent)
{
	struct address_space *mapping = NULL;

	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;
		i_mmap_lock_write(mapping);
	}

	__vma_link(mm, vma, prev, rb_link, rb_parent);
	__vma_link_file(vma);

	if (mapping)
		i_mmap_unlock_write(mapping);

	mm->map_count++;
	validate_mm(mm);
}

/*
 * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
 * mm's list and rbtree.
It has already been inserted
 * into the interval tree.
 */
static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev;
	struct rb_node **rb_link, *rb_parent;

	/* The range was carved out of an existing vma, so it cannot clash. */
	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
			   &prev, &rb_link, &rb_parent))
		BUG();
	__vma_link(mm, vma, prev, rb_link, rb_parent);
	mm->map_count++;
}

/*
 * Unlink @vma from the mm's list and rbtree; @prev is its predecessor.
 * The per-cpu vma cache is invalidated since it may hold @vma.
 */
static inline void
__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	struct vm_area_struct *next;

	vma_rb_erase(vma, &mm->mm_rb);
	prev->vm_next = next = vma->vm_next;
	if (next)
		next->vm_prev = prev;

	/* Kill the cache */
	vmacache_invalidate(mm);
}

/*
 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
 * is already present in an i_mmap tree without adjusting the tree.
 * The following helper function should be used when such adjustments
 * are necessary. The "insert" vma (if any) is to be inserted
 * before we drop the necessary locks.
 */
int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *next = vma->vm_next;
	struct vm_area_struct *importer = NULL;
	struct address_space *mapping = NULL;
	struct rb_root *root = NULL;
	struct anon_vma *anon_vma = NULL;
	struct file *file = vma->vm_file;
	bool start_changed = false, end_changed = false;
	long adjust_next = 0;		/* page delta applied to next->vm_start */
	int remove_next = 0;		/* how many following vmas to delete (0..2) */

	if (next && !insert) {
		struct vm_area_struct *exporter = NULL;

		if (end >= next->vm_end) {
			/*
			 * vma expands, overlapping all the next, and
			 * perhaps the one after too (mprotect case 6).
			 */
again:			remove_next = 1 + (end > next->vm_end);
			end = next->vm_end;
			exporter = next;
			importer = vma;
		} else if (end > next->vm_start) {
			/*
			 * vma expands, overlapping part of the next:
			 * mprotect case 5 shifting the boundary up.
			 */
			adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
			exporter = next;
			importer = vma;
		} else if (end < vma->vm_end) {
			/*
			 * vma shrinks, and !insert tells it's not
			 * split_vma inserting another: so it must be
			 * mprotect case 4 shifting the boundary down.
			 */
			adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
			exporter = vma;
			importer = next;
		}

		/*
		 * Easily overlooked: when mprotect shifts the boundary,
		 * make sure the expanding vma has anon_vma set if the
		 * shrinking vma had, to cover any anon pages imported.
		 */
		if (exporter && exporter->anon_vma && !importer->anon_vma) {
			int error;

			importer->anon_vma = exporter->anon_vma;
			error = anon_vma_clone(importer, exporter);
			if (error)
				return error;
		}
	}

	if (file) {
		mapping = file->f_mapping;
		root = &mapping->i_mmap;
		uprobe_munmap(vma, vma->vm_start, vma->vm_end);

		if (adjust_next)
			uprobe_munmap(next, next->vm_start, next->vm_end);

		i_mmap_lock_write(mapping);
		if (insert) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(insert);
		}
	}

	vma_adjust_trans_huge(vma, start, end, adjust_next);

	anon_vma = vma->anon_vma;
	if (!anon_vma && adjust_next)
		anon_vma = next->anon_vma;
	if (anon_vma) {
		VM_BUG_ON_VMA(adjust_next && next->anon_vma &&
			  anon_vma != next->anon_vma, next);
		anon_vma_lock_write(anon_vma);
		anon_vma_interval_tree_pre_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_pre_update_vma(next);
	}

	if (root) {
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, root);
		if (adjust_next)
			vma_interval_tree_remove(next, root);
	}

	/* The actual field updates, done while off both interval trees. */
	if (start != vma->vm_start) {
		vma->vm_start = start;
		start_changed = true;
	}
	if (end != vma->vm_end) {
		vma->vm_end = end;
		end_changed = true;
	}
	vma->vm_pgoff = pgoff;
	if (adjust_next) {
		next->vm_start += adjust_next << PAGE_SHIFT;
		next->vm_pgoff += adjust_next;
	}

	if (root) {
		if (adjust_next)
			vma_interval_tree_insert(next, root);
		vma_interval_tree_insert(vma, root);
		flush_dcache_mmap_unlock(mapping);
	}

	if (remove_next) {
		/*
		 * vma_merge has merged next into vma, and needs
		 * us to remove next before dropping the locks.
		 */
		__vma_unlink(mm, next, vma);
		if (file)
			__remove_shared_vm_struct(next, file, mapping);
	} else if (insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		__insert_vm_struct(mm, insert);
	} else {
		if (start_changed)
			vma_gap_update(vma);
		if (end_changed) {
			if (!next)
				mm->highest_vm_end = end;
			else if (!adjust_next)
				vma_gap_update(next);
		}
	}

	if (anon_vma) {
		anon_vma_interval_tree_post_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_post_update_vma(next);
		anon_vma_unlock_write(anon_vma);
	}
	if (mapping)
		i_mmap_unlock_write(mapping);

	if (root) {
		uprobe_mmap(vma);

		if (adjust_next)
			uprobe_mmap(next);
	}

	if (remove_next) {
		/* Tear down the vma(s) swallowed by the merge. */
		if (file) {
			uprobe_munmap(next, next->vm_start, next->vm_end);
			fput(file);
		}
		if (next->anon_vma)
			anon_vma_merge(vma, next);
		mm->map_count--;
		mpol_put(vma_policy(next));
		kmem_cache_free(vm_area_cachep, next);
		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we must remove another next too. It would clutter
		 * up the code too much to do both in one go.
		 */
		next = vma->vm_next;
		if (remove_next == 2)
			goto again;
		else if (next)
			vma_gap_update(next);
		else
			mm->highest_vm_end = end;
	}
	if (insert && file)
		uprobe_mmap(insert);

	validate_mm(mm);

	return 0;
}

/*
 * If the vma has a ->close operation then the driver probably needs to release
 * per-vma resources, so we don't attempt to merge those.
 */
static inline int is_mergeable_vma(struct vm_area_struct *vma,
				struct file *file, unsigned long vm_flags)
{
	/*
	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
	 * match the flags but dirty bit -- the caller should mark
	 * merged VMA as dirty. If dirty bit won't be excluded from
	 * comparison, we increase pressure on the memory system forcing
	 * the kernel to generate new VMAs when old one could be
	 * extended instead.
	 */
	if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
		return 0;
	if (vma->vm_file != file)
		return 0;
	if (vma->vm_ops && vma->vm_ops->close)
		return 0;
	return 1;
}

/*
 * Two anon_vmas are mergeable if they are the same, or if at least one
 * side has no anon_vma yet (and the vma, if given, has not been through
 * a fork - see the list_is_singular() note below).
 */
static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
					struct anon_vma *anon_vma2,
					struct vm_area_struct *vma)
{
	/*
	 * The list_is_singular() test is to avoid merging VMA cloned from
	 * parents. This can improve scalability caused by anon_vma lock.
	 */
	if ((!anon_vma1 || !anon_vma2) && (!vma ||
		list_is_singular(&vma->anon_vma_chain)))
		return 1;
	return anon_vma1 == anon_vma2;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
 * wrap, nor mmaps which cover the final page at index -1UL.
 */
static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
	if (is_mergeable_vma(vma, file, vm_flags) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		if (vma->vm_pgoff == vm_pgoff)
			return 1;
	}
	return 0;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 */
static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
	if (is_mergeable_vma(vma, file, vm_flags) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		pgoff_t vm_pglen;
		vm_pglen = vma_pages(vma);
		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
			return 1;
	}
	return 0;
}

/*
 * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
 * whether that can be merged with its predecessor or its successor.
 * Or both (it neatly fills a hole).
 *
 * In most cases - when called for mmap, brk or mremap - [addr,end) is
 * certain not to be mapped by the time vma_merge is called; but when
 * called for mprotect, it is certain to be already mapped (either at
 * an offset within prev, or at the start of next), and the flags of
 * this area are about to be changed to vm_flags - and the no-change
 * case has already been eliminated.
 *
 * The following mprotect cases have to be considered, where AAAA is
 * the area passed down from mprotect_fixup, never extending beyond one
 * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
 *
 *     AAAA             AAAA                AAAA          AAAA
 *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
 *    cannot merge    might become    might become    might become
 *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
 *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
 *    mremap move:                                    PPPPNNNNNNNN 8
 *        AAAA
 *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
 *    might become    case 1 below    case 2 below    case 3 below
 *
 * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
 * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
 */
struct vm_area_struct *vma_merge(struct mm_struct *mm,
			struct vm_area_struct *prev, unsigned long addr,
			unsigned long end, unsigned long vm_flags,
			struct anon_vma *anon_vma, struct file *file,
			pgoff_t pgoff, struct mempolicy *policy)
{
	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
	struct vm_area_struct *area, *next;
	int err;

	/*
	 * We later require that vma->vm_flags == vm_flags,
	 * so this tests vma->vm_flags & VM_SPECIAL, too.
	 */
	if (vm_flags & VM_SPECIAL)
		return NULL;

	if (prev)
		next = prev->vm_next;
	else
		next = mm->mmap;
	area = next;
	if (next && next->vm_end == end)		/* cases 6, 7, 8 */
		next = next->vm_next;

	/*
	 * Can it merge with the predecessor?
	 */
	if (prev && prev->vm_end == addr &&
			mpol_equal(vma_policy(prev), policy) &&
			can_vma_merge_after(prev, vm_flags,
						anon_vma, file, pgoff)) {
		/*
		 * OK, it can.  Can we now merge in the successor as well?
		 */
		if (next && end == next->vm_start &&
				mpol_equal(policy, vma_policy(next)) &&
				can_vma_merge_before(next, vm_flags,
					anon_vma, file, pgoff+pglen) &&
				is_mergeable_anon_vma(prev->anon_vma,
						      next->anon_vma, NULL)) {
							/* cases 1, 6 */
			err = vma_adjust(prev, prev->vm_start,
				next->vm_end, prev->vm_pgoff, NULL);
		} else					/* cases 2, 5, 7 */
			err = vma_adjust(prev, prev->vm_start,
				end, prev->vm_pgoff, NULL);
		if (err)
			return NULL;
		khugepaged_enter_vma_merge(prev, vm_flags);
		return prev;
	}

	/*
	 * Can this new request be merged in front of next?
	 */
	if (next && end == next->vm_start &&
			mpol_equal(policy, vma_policy(next)) &&
			can_vma_merge_before(next, vm_flags,
					anon_vma, file, pgoff+pglen)) {
		if (prev && addr < prev->vm_end)	/* case 4 */
			err = vma_adjust(prev, prev->vm_start,
				addr, prev->vm_pgoff, NULL);
		else					/* cases 3, 8 */
			err = vma_adjust(area, addr, next->vm_end,
				next->vm_pgoff - pglen, NULL);
		if (err)
			return NULL;
		khugepaged_enter_vma_merge(area, vm_flags);
		return area;
	}

	return NULL;
}

/*
 * Rough compatibility check to quickly see if it's even worth looking
 * at sharing an anon_vma.
 *
 * They need to have the same vm_file, and the flags can only differ
 * in things that mprotect may change.
 *
 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
 * we can merge the two vma's. For example, we refuse to merge a vma if
 * there is a vm_ops->close() function, because that indicates that the
 * driver is doing some kind of reference counting. But that doesn't
 * really matter for the anon_vma sharing case.
 */
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
	return a->vm_end == b->vm_start &&
		mpol_equal(vma_policy(a), vma_policy(b)) &&
		a->vm_file == b->vm_file &&
		!((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) &&
		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}

/*
 * Do some basic sanity checking to see if we can re-use the anon_vma
 * from 'old'.
The 'a'/'b' vma's are in VM order - one of them will be
 * the same as 'old', the other will be the new one that is trying
 * to share the anon_vma.
 *
 * NOTE! This runs with mm_sem held for reading, so it is possible that
 * the anon_vma of 'old' is concurrently in the process of being set up
 * by another page fault trying to merge _that_. But that's ok: if it
 * is being set up, that automatically means that it will be a singleton
 * acceptable for merging, so we can do all of this optimistically. But
 * we do that ACCESS_ONCE() to make sure that we never re-load the pointer.
 *
 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
 * is to return an anon_vma that is "complex" due to having gone through
 * a fork).
 *
 * We also make sure that the two vma's are compatible (adjacent,
 * and with the same memory policies). That's all stable, even with just
 * a read lock on the mm_sem.
 */
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
					  struct vm_area_struct *a,
					  struct vm_area_struct *b)
{
	if (anon_vma_compatible(a, b)) {
		/* Read old->anon_vma exactly once - see the NOTE above. */
		struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);

		if (anon_vma && list_is_singular(&old->anon_vma_chain))
			return anon_vma;
	}
	return NULL;
}

/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma.  It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
*/ struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) { struct anon_vma *anon_vma; struct vm_area_struct *near; near = vma->vm_next; if (!near) goto try_prev; anon_vma = reusable_anon_vma(near, vma, near); if (anon_vma) return anon_vma; try_prev: near = vma->vm_prev; if (!near) goto none; anon_vma = reusable_anon_vma(near, near, vma); if (anon_vma) return anon_vma; none: /* * There's no absolute need to look only at touching neighbours: * we could search further afield for "compatible" anon_vmas. * But it would probably just be a waste of time searching, * or lead to too many vmas hanging off the same anon_vma. * We're trying to allow mprotect remerging later on, * not trying to minimize memory used for anon_vmas. */ return NULL; } #ifdef CONFIG_PROC_FS void vm_stat_account(struct mm_struct *mm, unsigned long flags, struct file *file, long pages) { const unsigned long stack_flags = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN); mm->total_vm += pages; if (file) { mm->shared_vm += pages; if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC) mm->exec_vm += pages; } else if (flags & stack_flags) mm->stack_vm += pages; } #endif /* CONFIG_PROC_FS */ /* * If a hint addr is less than mmap_min_addr change hint to be as * low as possible but still greater than mmap_min_addr */ static inline unsigned long round_hint_to_min(unsigned long hint) { hint &= PAGE_MASK; if (((void *)hint != NULL) && (hint < mmap_min_addr)) return PAGE_ALIGN(mmap_min_addr); return hint; } static inline int mlock_future_check(struct mm_struct *mm, unsigned long flags, unsigned long len) { unsigned long locked, lock_limit; /* mlock MCL_FUTURE? */ if (flags & VM_LOCKED) { locked = len >> PAGE_SHIFT; locked += mm->locked_vm; lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) return -EAGAIN; } return 0; } /* * The caller must hold down_write(&current->mm->mmap_sem). 
*/ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff, unsigned long *populate) { struct mm_struct *mm = current->mm; vm_flags_t vm_flags; *populate = 0; /* * Does the application expect PROT_READ to imply PROT_EXEC? * * (the exception is when the underlying filesystem is noexec * mounted, in which case we dont add PROT_EXEC.) */ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC))) prot |= PROT_EXEC; if (!len) return -EINVAL; if (!(flags & MAP_FIXED)) addr = round_hint_to_min(addr); /* Careful about overflows.. */ len = PAGE_ALIGN(len); if (!len) return -ENOMEM; /* offset overflow? */ if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) return -EOVERFLOW; /* Too many mappings? */ if (mm->map_count > sysctl_max_map_count) return -ENOMEM; /* Obtain the address to map to. we verify (or select) it and ensure * that it represents a valid section of the address space. */ addr = get_unmapped_area(file, addr, len, pgoff, flags); if (addr & ~PAGE_MASK) return addr; /* Do simple checking here so the lower-level routines won't have * to. we assume access permissions have been handled by the open * of the memory object, so we don't do any here. */ vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; if (flags & MAP_LOCKED) if (!can_do_mlock()) return -EPERM; if (mlock_future_check(mm, vm_flags, len)) return -EAGAIN; if (file) { struct inode *inode = file_inode(file); switch (flags & MAP_TYPE) { case MAP_SHARED: if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE)) return -EACCES; /* * Make sure we don't allow writing to an append-only * file.. */ if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) return -EACCES; /* * Make sure there are no mandatory locks on the file. 
*/ if (locks_verify_locked(file)) return -EAGAIN; vm_flags |= VM_SHARED | VM_MAYSHARE; if (!(file->f_mode & FMODE_WRITE)) vm_flags &= ~(VM_MAYWRITE | VM_SHARED); /* fall through */ case MAP_PRIVATE: if (!(file->f_mode & FMODE_READ)) return -EACCES; if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) { if (vm_flags & VM_EXEC) return -EPERM; vm_flags &= ~VM_MAYEXEC; } if (!file->f_op->mmap) return -ENODEV; if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) return -EINVAL; break; default: return -EINVAL; } } else { switch (flags & MAP_TYPE) { case MAP_SHARED: if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) return -EINVAL; /* * Ignore pgoff. */ pgoff = 0; vm_flags |= VM_SHARED | VM_MAYSHARE; break; case MAP_PRIVATE: /* * Set pgoff according to addr for anon_vma. */ pgoff = addr >> PAGE_SHIFT; break; default: return -EINVAL; } } /* * Set 'VM_NORESERVE' if we should not account for the * memory use of this mapping. */ if (flags & MAP_NORESERVE) { /* We honor MAP_NORESERVE if allowed to overcommit */ if (sysctl_overcommit_memory != OVERCOMMIT_NEVER) vm_flags |= VM_NORESERVE; /* hugetlb applies strict overcommit unless MAP_NORESERVE */ if (file && is_file_hugepages(file)) vm_flags |= VM_NORESERVE; } addr = mmap_region(file, addr, len, vm_flags, pgoff); if (!IS_ERR_VALUE(addr) && ((vm_flags & VM_LOCKED) || (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) *populate = len; return addr; } SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, pgoff) { struct file *file = NULL; unsigned long retval = -EBADF; if (!(flags & MAP_ANONYMOUS)) { audit_mmap_fd(fd, flags); file = fget(fd); if (!file) goto out; if (is_file_hugepages(file)) len = ALIGN(len, huge_page_size(hstate_file(file))); retval = -EINVAL; if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file))) goto out_fput; } else if (flags & MAP_HUGETLB) { struct user_struct *user = NULL; struct hstate *hs; hs = hstate_sizelog((flags >> 
MAP_HUGE_SHIFT) & SHM_HUGE_MASK); if (!hs) return -EINVAL; len = ALIGN(len, huge_page_size(hs)); /* * VM_NORESERVE is used because the reservations will be * taken when vm_ops->mmap() is called * A dummy user value is used because we are not locking * memory so no accounting is necessary */ file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE, &user, HUGETLB_ANONHUGE_INODE, (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); if (IS_ERR(file)) return PTR_ERR(file); } flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); out_fput: if (file) fput(file); out: return retval; } #ifdef __ARCH_WANT_SYS_OLD_MMAP struct mmap_arg_struct { unsigned long addr; unsigned long len; unsigned long prot; unsigned long flags; unsigned long fd; unsigned long offset; }; SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) { struct mmap_arg_struct a; if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; if (a.offset & ~PAGE_MASK) return -EINVAL; return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); } #endif /* __ARCH_WANT_SYS_OLD_MMAP */ /* * Some shared mappigns will want the pages marked read-only * to track write events. If so, we'll downgrade vm_page_prot * to the private version (using protection_map[] without the * VM_SHARED bit). */ int vma_wants_writenotify(struct vm_area_struct *vma) { vm_flags_t vm_flags = vma->vm_flags; /* If it was private or non-writable, the write bit is already clear */ if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) return 0; /* The backer wishes to know when pages are first written to? */ if (vma->vm_ops && vma->vm_ops->page_mkwrite) return 1; /* The open routine did something to the protections that pgprot_modify * won't preserve? */ if (pgprot_val(vma->vm_page_prot) != pgprot_val(vm_pgprot_modify(vma->vm_page_prot, vm_flags))) return 0; /* Do we need to track softdirty? 
*/ if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY)) return 1; /* Specialty mapping? */ if (vm_flags & VM_PFNMAP) return 0; /* Can the mapping track the dirty pages? */ return vma->vm_file && vma->vm_file->f_mapping && mapping_cap_account_dirty(vma->vm_file->f_mapping); } /* * We account for memory if it's a private writeable mapping, * not hugepages and VM_NORESERVE wasn't set. */ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags) { /* * hugetlb has its own accounting separate from the core VM * VM_HUGETLB may not be set yet so we cannot check for that flag. */ if (file && is_file_hugepages(file)) return 0; return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE; } unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; int error; struct rb_node **rb_link, *rb_parent; unsigned long charged = 0; /* Check against address space limit. */ if (!may_expand_vm(mm, len >> PAGE_SHIFT)) { unsigned long nr_pages; /* * MAP_FIXED may remove pages of mappings that intersects with * requested mapping. Account for the pages it would unmap. */ if (!(vm_flags & MAP_FIXED)) return -ENOMEM; nr_pages = count_vma_pages_range(mm, addr, addr + len); if (!may_expand_vm(mm, (len >> PAGE_SHIFT) - nr_pages)) return -ENOMEM; } /* Clear old maps */ error = -ENOMEM; munmap_back: if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) { if (do_munmap(mm, addr, len)) return -ENOMEM; goto munmap_back; } /* * Private writable mapping: check memory availability */ if (accountable_mapping(file, vm_flags)) { charged = len >> PAGE_SHIFT; if (security_vm_enough_memory_mm(mm, charged)) return -ENOMEM; vm_flags |= VM_ACCOUNT; } /* * Can we just expand an old mapping? 
*/ vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL); if (vma) goto out; /* * Determine the object being mapped and call the appropriate * specific mapper. the address has already been validated, but * not unmapped, but the maps are removed from the list. */ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (!vma) { error = -ENOMEM; goto unacct_error; } vma->vm_mm = mm; vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_flags = vm_flags; vma->vm_page_prot = vm_get_page_prot(vm_flags); vma->vm_pgoff = pgoff; INIT_LIST_HEAD(&vma->anon_vma_chain); if (file) { if (vm_flags & VM_DENYWRITE) { error = deny_write_access(file); if (error) goto free_vma; } if (vm_flags & VM_SHARED) { error = mapping_map_writable(file->f_mapping); if (error) goto allow_write_and_free_vma; } /* ->mmap() can change vma->vm_file, but must guarantee that * vma_link() below can deny write-access if VM_DENYWRITE is set * and map writably if VM_SHARED is set. This usually means the * new file must not have been exposed to user-space, yet. */ vma->vm_file = get_file(file); error = file->f_op->mmap(file, vma); if (error) goto unmap_and_free_vma; /* Can addr have changed?? * * Answer: Yes, several device drivers can do it in their * f_op->mmap method. 
-DaveM * Bug: If addr is changed, prev, rb_link, rb_parent should * be updated for vma_link() */ WARN_ON_ONCE(addr != vma->vm_start); addr = vma->vm_start; vm_flags = vma->vm_flags; } else if (vm_flags & VM_SHARED) { error = shmem_zero_setup(vma); if (error) goto free_vma; } vma_link(mm, vma, prev, rb_link, rb_parent); /* Once vma denies write, undo our temporary denial count */ if (file) { if (vm_flags & VM_SHARED) mapping_unmap_writable(file->f_mapping); if (vm_flags & VM_DENYWRITE) allow_write_access(file); } file = vma->vm_file; out: perf_event_mmap(vma); vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); if (vm_flags & VM_LOCKED) { if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))) mm->locked_vm += (len >> PAGE_SHIFT); else vma->vm_flags &= ~VM_LOCKED; } if (file) uprobe_mmap(vma); /* * New (or expanded) vma always get soft dirty status. * Otherwise user-space soft-dirty page tracker won't * be able to distinguish situation when vma area unmapped, * then new mapped in-place (which must be aimed as * a completely new data area). */ vma->vm_flags |= VM_SOFTDIRTY; vma_set_page_prot(vma); return addr; unmap_and_free_vma: vma->vm_file = NULL; fput(file); /* Undo any partial mapping done by a device driver. */ unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); charged = 0; if (vm_flags & VM_SHARED) mapping_unmap_writable(file->f_mapping); allow_write_and_free_vma: if (vm_flags & VM_DENYWRITE) allow_write_access(file); free_vma: kmem_cache_free(vm_area_cachep, vma); unacct_error: if (charged) vm_unacct_memory(charged); return error; } unsigned long unmapped_area(struct vm_unmapped_area_info *info) { /* * We implement the search by looking for an rbtree node that * immediately follows a suitable gap. 
That is, * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length; * - gap_end = vma->vm_start >= info->low_limit + length; * - gap_end - gap_start >= length */ struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long length, low_limit, high_limit, gap_start, gap_end; /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask; if (length < info->length) return -ENOMEM; /* Adjust search limits by the desired length */ if (info->high_limit < length) return -ENOMEM; high_limit = info->high_limit - length; if (info->low_limit > high_limit) return -ENOMEM; low_limit = info->low_limit + length; /* Check if rbtree root looks promising */ if (RB_EMPTY_ROOT(&mm->mm_rb)) goto check_highest; vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); if (vma->rb_subtree_gap < length) goto check_highest; while (true) { /* Visit left subtree if it looks promising */ gap_end = vma->vm_start; if (gap_end >= low_limit && vma->vm_rb.rb_left) { struct vm_area_struct *left = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb); if (left->rb_subtree_gap >= length) { vma = left; continue; } } gap_start = vma->vm_prev ? 
vma->vm_prev->vm_end : 0;

check_current:
		/* Check if current node has a suitable gap */
		if (gap_start > high_limit)
			return -ENOMEM;
		if (gap_end >= low_limit && gap_end - gap_start >= length)
			goto found;

		/* Visit right subtree if it looks promising */
		if (vma->vm_rb.rb_right) {
			struct vm_area_struct *right =
				rb_entry(vma->vm_rb.rb_right,
					 struct vm_area_struct, vm_rb);
			if (right->rb_subtree_gap >= length) {
				vma = right;
				continue;
			}
		}

		/* Go back up the rbtree to find next candidate node */
		while (true) {
			struct rb_node *prev = &vma->vm_rb;
			if (!rb_parent(prev))
				goto check_highest;
			vma = rb_entry(rb_parent(prev),
				       struct vm_area_struct, vm_rb);
			if (prev == vma->vm_rb.rb_left) {
				gap_start = vma->vm_prev->vm_end;
				gap_end = vma->vm_start;
				goto check_current;
			}
		}
	}

check_highest:
	/* Check highest gap, which does not precede any rbtree node */
	gap_start = mm->highest_vm_end;
	gap_end = ULONG_MAX;  /* Only for VM_BUG_ON below */
	if (gap_start > high_limit)
		return -ENOMEM;

found:
	/* We found a suitable gap. Clip it with the original low_limit. */
	if (gap_start < info->low_limit)
		gap_start = info->low_limit;

	/* Adjust gap address to the desired alignment */
	gap_start += (info->align_offset - gap_start) & info->align_mask;

	VM_BUG_ON(gap_start + info->length > info->high_limit);
	VM_BUG_ON(gap_start + info->length > gap_end);
	return gap_start;
}

/*
 * Top-down counterpart of unmapped_area(): find the highest gap of at
 * least info->length bytes below info->high_limit, walking the VMA
 * rbtree using the cached rb_subtree_gap values.  Returns the aligned
 * gap end address, or -ENOMEM if no gap fits.
 */
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long length, low_limit, high_limit, gap_start, gap_end;

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask;
	if (length < info->length)
		return -ENOMEM;

	/*
	 * Adjust search limits by the desired length.
	 * See implementation comment at top of unmapped_area().
	 */
	gap_end = info->high_limit;
	if (gap_end < length)
		return -ENOMEM;
	high_limit = gap_end - length;

	if (info->low_limit > high_limit)
		return -ENOMEM;
	low_limit = info->low_limit + length;

	/* Check highest gap, which does not precede any rbtree node */
	gap_start = mm->highest_vm_end;
	if (gap_start <= high_limit)
		goto found_highest;

	/* Check if rbtree root looks promising */
	if (RB_EMPTY_ROOT(&mm->mm_rb))
		return -ENOMEM;
	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
	if (vma->rb_subtree_gap < length)
		return -ENOMEM;

	while (true) {
		/* Visit right subtree if it looks promising */
		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
			struct vm_area_struct *right =
				rb_entry(vma->vm_rb.rb_right,
					 struct vm_area_struct, vm_rb);
			if (right->rb_subtree_gap >= length) {
				vma = right;
				continue;
			}
		}

check_current:
		/* Check if current node has a suitable gap */
		gap_end = vma->vm_start;
		if (gap_end < low_limit)
			return -ENOMEM;
		if (gap_start <= high_limit && gap_end - gap_start >= length)
			goto found;

		/* Visit left subtree if it looks promising */
		if (vma->vm_rb.rb_left) {
			struct vm_area_struct *left =
				rb_entry(vma->vm_rb.rb_left,
					 struct vm_area_struct, vm_rb);
			if (left->rb_subtree_gap >= length) {
				vma = left;
				continue;
			}
		}

		/* Go back up the rbtree to find next candidate node */
		while (true) {
			struct rb_node *prev = &vma->vm_rb;
			if (!rb_parent(prev))
				return -ENOMEM;
			vma = rb_entry(rb_parent(prev),
				       struct vm_area_struct, vm_rb);
			if (prev == vma->vm_rb.rb_right) {
				gap_start = vma->vm_prev ?
					vma->vm_prev->vm_end : 0;
				goto check_current;
			}
		}
	}

found:
	/* We found a suitable gap. Clip it with the original high_limit. */
	if (gap_end > info->high_limit)
		gap_end = info->high_limit;

found_highest:
	/* Compute highest gap address at the desired alignment */
	gap_end -= info->length;
	gap_end -= (gap_end - info->align_offset) & info->align_mask;

	VM_BUG_ON(gap_end < info->low_limit);
	VM_BUG_ON(gap_end < gap_start);
	return gap_end;
}

/* Get an address range which is currently unmapped.
 * For shmat() with addr=0.
 *
 * Ugly calling convention alert:
 * Return value with the low bits set means error value,
 * ie
 *	if (ret & ~PAGE_MASK)
 *		error = ret;
 *
 * This function "knows" that -ENOMEM has the bits set.
 */
#ifndef HAVE_ARCH_UNMAPPED_AREA
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* A hinted address is honoured only if the range is free */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = 0;
	return vm_unmapped_area(&info);
}
#endif

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 */
#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	info.align_mask = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
#endif

/*
 * Dispatch to the mm's (or the file's) get_unmapped_area handler and
 * sanity-check the result before handing it back to the caller.
 */
unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	unsigned long error = arch_mmap_check(addr, len, flags);
	if (error)
		return error;

	/* Careful about overflows.. */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* File-backed mappings may override the mm's default allocator */
	get_area = current->mm->get_unmapped_area;
	if (file && file->f_op->get_unmapped_area)
		get_area = file->f_op->get_unmapped_area;
	addr = get_area(file, addr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (addr > TASK_SIZE - len)
		return -ENOMEM;
	if (addr & ~PAGE_MASK)
		return -EINVAL;

	addr = arch_rebalance_pgtables(addr, len);
	error = security_mmap_addr(addr);
	return error ? error : addr;
}

EXPORT_SYMBOL(get_unmapped_area);

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct rb_node *rb_node;
	struct vm_area_struct *vma;

	/* Check the cache first. */
	vma = vmacache_find(mm, addr);
	if (likely(vma))
		return vma;

	/* Cache miss: walk the rbtree keyed by vm_end */
	rb_node = mm->mm_rb.rb_node;
	vma = NULL;

	while (rb_node) {
		struct vm_area_struct *tmp;

		tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);

		if (tmp->vm_end > addr) {
			vma = tmp;
			if (tmp->vm_start <= addr)
				break;
			rb_node = rb_node->rb_left;
		} else
			rb_node = rb_node->rb_right;
	}

	if (vma)
		vmacache_update(addr, vma);
	return vma;
}

EXPORT_SYMBOL(find_vma);

/*
 * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
 */
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
			struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;

	vma = find_vma(mm, addr);
	if (vma) {
		*pprev = vma->vm_prev;
	} else {
		/* No vma above addr: the previous vma is the rightmost one */
		struct rb_node *rb_node = mm->mm_rb.rb_node;
		*pprev = NULL;
		while (rb_node) {
			*pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
			rb_node = rb_node->rb_right;
		}
	}
	return vma;
}

/*
 * Verify that the stack growth is acceptable and
 * update accounting. This is shared with both the
 * grow-up and grow-down cases.
 */
static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
{
	struct mm_struct *mm = vma->vm_mm;
	struct rlimit *rlim = current->signal->rlim;
	unsigned long new_start, actual_size;

	/* address space limit tests */
	if (!may_expand_vm(mm, grow))
		return -ENOMEM;

	/*
	 * Stack limit test.
	 * NOTE(review): subtracting one page here looks like the old
	 * single-guard-page heuristic that predates the stack_guard_gap
	 * rework (CVE-2017-1000364) — confirm against the tree's guard
	 * page handling in mm/memory.c before changing.
	 */
	actual_size = size;
	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
		actual_size -= PAGE_SIZE;
	if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
		return -ENOMEM;

	/* mlock limit tests */
	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked;
		unsigned long limit;
		locked = mm->locked_vm + grow;
		limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
		limit >>= PAGE_SHIFT;
		if (locked > limit && !capable(CAP_IPC_LOCK))
			return -ENOMEM;
	}

	/* Check to ensure the stack will not grow into a hugetlb-only region */
	new_start = (vma->vm_flags & VM_GROWSUP) ?
vma->vm_start : vma->vm_end - size;
	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
		return -EFAULT;

	/*
	 * Overcommit..  This must be the final test, as it will
	 * update security statistics.
	 */
	if (security_vm_enough_memory_mm(mm, grow))
		return -ENOMEM;

	/* Ok, everything looks good - let it rip */
	if (vma->vm_flags & VM_LOCKED)
		mm->locked_vm += grow;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
	return 0;
}

#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
/*
 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
 * vma is the last one with address > vma->vm_end.  Have to extend vma.
 */
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
	int error;

	if (!(vma->vm_flags & VM_GROWSUP))
		return -EFAULT;

	/*
	 * We must make sure the anon_vma is allocated
	 * so that the anon_vma locking is not a noop.
	 */
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;
	vma_lock_anon_vma(vma);

	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_sem in read mode.  We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 * Also guard against wrapping around to address 0.
	 */
	if (address < PAGE_ALIGN(address+4))
		address = PAGE_ALIGN(address+4);
	else {
		vma_unlock_anon_vma(vma);
		return -ENOMEM;
	}
	error = 0;

	/* Somebody else might have raced and expanded it already */
	if (address > vma->vm_end) {
		unsigned long size, grow;

		size = address - vma->vm_start;
		grow = (address - vma->vm_end) >> PAGE_SHIFT;

		error = -ENOMEM;
		/* vm_pgoff + size must not overflow pgoff_t arithmetic */
		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
			error = acct_stack_growth(vma, size, grow);
			if (!error) {
				/*
				 * vma_gap_update() doesn't support concurrent
				 * updates, but we only hold a shared mmap_sem
				 * lock here, so we need to protect against
				 * concurrent vma expansions.
				 * vma_lock_anon_vma() doesn't help here, as
				 * we don't guarantee that all growable vmas
				 * in a mm share the same root anon vma.
				 * So, we reuse mm->page_table_lock to guard
				 * against concurrent vma expansions.
				 */
				spin_lock(&vma->vm_mm->page_table_lock);
				anon_vma_interval_tree_pre_update_vma(vma);
				vma->vm_end = address;
				anon_vma_interval_tree_post_update_vma(vma);
				if (vma->vm_next)
					vma_gap_update(vma->vm_next);
				else
					vma->vm_mm->highest_vm_end = address;
				spin_unlock(&vma->vm_mm->page_table_lock);

				perf_event_mmap(vma);
			}
		}
	}
	vma_unlock_anon_vma(vma);
	khugepaged_enter_vma_merge(vma, vma->vm_flags);
	validate_mm(vma->vm_mm);
	return error;
}
#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */

/*
 * vma is the first one with address < vma->vm_start.  Have to extend vma.
 */
int expand_downwards(struct vm_area_struct *vma,
				   unsigned long address)
{
	int error;

	/*
	 * We must make sure the anon_vma is allocated
	 * so that the anon_vma locking is not a noop.
	 */
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;

	address &= PAGE_MASK;
	error = security_mmap_addr(address);
	if (error)
		return error;

	vma_lock_anon_vma(vma);

	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_sem in read mode.  We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 */

	/* Somebody else might have raced and expanded it already */
	if (address < vma->vm_start) {
		unsigned long size, grow;

		size = vma->vm_end - address;
		grow = (vma->vm_start - address) >> PAGE_SHIFT;

		error = -ENOMEM;
		/* growing down must not push vm_pgoff below zero */
		if (grow <= vma->vm_pgoff) {
			error = acct_stack_growth(vma, size, grow);
			if (!error) {
				/*
				 * vma_gap_update() doesn't support concurrent
				 * updates, but we only hold a shared mmap_sem
				 * lock here, so we need to protect against
				 * concurrent vma expansions.
				 * vma_lock_anon_vma() doesn't help here, as
				 * we don't guarantee that all growable vmas
				 * in a mm share the same root anon vma.
				 * So, we reuse mm->page_table_lock to guard
				 * against concurrent vma expansions.
				 */
				spin_lock(&vma->vm_mm->page_table_lock);
				anon_vma_interval_tree_pre_update_vma(vma);
				vma->vm_start = address;
				vma->vm_pgoff -= grow;
				anon_vma_interval_tree_post_update_vma(vma);
				vma_gap_update(vma);
				spin_unlock(&vma->vm_mm->page_table_lock);

				perf_event_mmap(vma);
			}
		}
	}
	vma_unlock_anon_vma(vma);
	khugepaged_enter_vma_merge(vma, vma->vm_flags);
	validate_mm(vma->vm_mm);
	return error;
}

/*
 * Note how expand_stack() refuses to expand the stack all the way to
 * abut the next virtual mapping, *unless* that mapping itself is also
 * a stack mapping. We want to leave room for a guard page, after all
 * (the guard page itself is not added here, that is done by the
 * actual page faulting logic)
 *
 * This matches the behavior of the guard page logic (see mm/memory.c:
 * check_stack_guard_page()), which only allows the guard page to be
 * removed under these circumstances.
 */
#ifdef CONFIG_STACK_GROWSUP
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	struct vm_area_struct *next;

	address &= PAGE_MASK;
	next = vma->vm_next;
	if (next && next->vm_start == address + PAGE_SIZE) {
		if (!(next->vm_flags & VM_GROWSUP))
			return -ENOMEM;
	}
	return expand_upwards(vma, address);
}

struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma, *prev;

	addr &= PAGE_MASK;
	vma = find_vma_prev(mm, addr, &prev);
	if (vma && (vma->vm_start <= addr))
		return vma;
	if (!prev || expand_stack(prev, addr))
		return NULL;
	/* best-effort: mlock population result intentionally ignored */
	if (prev->vm_flags & VM_LOCKED)
		__mlock_vma_pages_range(prev, addr, prev->vm_end, NULL);
	return prev;
}
#else
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	struct vm_area_struct *prev;

	address &= PAGE_MASK;
	prev = vma->vm_prev;
	if (prev && prev->vm_end == address) {
		if (!(prev->vm_flags & VM_GROWSDOWN))
			return -ENOMEM;
	}
	return expand_downwards(vma, address);
}

struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long start;

	addr &=
PAGE_MASK;
	vma = find_vma(mm, addr);
	if (!vma)
		return NULL;
	if (vma->vm_start <= addr)
		return vma;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		return NULL;
	start = vma->vm_start;
	if (expand_stack(vma, addr))
		return NULL;
	/* best-effort: mlock population result intentionally ignored */
	if (vma->vm_flags & VM_LOCKED)
		__mlock_vma_pages_range(vma, addr, start, NULL);
	return vma;
}
#endif

EXPORT_SYMBOL_GPL(find_extend_vma);

/*
 * Ok - we have the memory areas we should free on the vma list,
 * so release them, and do the vma updates.
 *
 * Called with the mm semaphore held.
 */
static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	unsigned long nr_accounted = 0;

	/* Update high watermark before we lower total_vm */
	update_hiwater_vm(mm);
	do {
		long nrpages = vma_pages(vma);

		if (vma->vm_flags & VM_ACCOUNT)
			nr_accounted += nrpages;
		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
		vma = remove_vma(vma);
	} while (vma);
	vm_unacct_memory(nr_accounted);
	validate_mm(mm);
}

/*
 * Get rid of page table information in the indicated region.
 *
 * Called with the mm semaphore held.
 */
static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end)
{
	struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
	struct mmu_gather tlb;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, vma, start, end);
	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
				 next ? next->vm_start : USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, start, end);
}

/*
 * Create a list of vma's touched by the unmap, removing them from the mm's
 * vma list as we go..
 */
static void
detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
	struct vm_area_struct *prev, unsigned long end)
{
	struct vm_area_struct **insertion_point;
	struct vm_area_struct *tail_vma = NULL;

	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
	vma->vm_prev = NULL;
	do {
		vma_rb_erase(vma, &mm->mm_rb);
		mm->map_count--;
		tail_vma = vma;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);
	*insertion_point = vma;
	if (vma) {
		vma->vm_prev = prev;
		vma_gap_update(vma);
	} else
		mm->highest_vm_end = prev ? prev->vm_end : 0;
	tail_vma->vm_next = NULL;

	/* Kill the cache */
	vmacache_invalidate(mm);
}

/*
 * __split_vma() bypasses sysctl_max_map_count checking.  We use this on the
 * munmap path where it doesn't make sense to fail.
 *
 * new_below != 0: the newly allocated vma covers [vm_start, addr);
 * otherwise it covers [addr, vm_end).
 */
static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	int err = -ENOMEM;

	/* hugetlb vmas may only be split on huge-page boundaries */
	if (is_vm_hugetlb_page(vma) && (addr &
					~(huge_page_mask(hstate_vma(vma)))))
		return -EINVAL;

	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new)
		goto out_err;

	/* most fields are the same, copy all, and then fixup */
	*new = *vma;

	INIT_LIST_HEAD(&new->anon_vma_chain);

	if (new_below)
		new->vm_end = addr;
	else {
		new->vm_start = addr;
		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
	}

	err = vma_dup_policy(vma, new);
	if (err)
		goto out_free_vma;

	err = anon_vma_clone(new, vma);
	if (err)
		goto out_free_mpol;

	if (new->vm_file)
		get_file(new->vm_file);

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	if (new_below)
		err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
			((addr - new->vm_start) >> PAGE_SHIFT), new);
	else
		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);

	/* Success. */
	if (!err)
		return 0;

	/* Clean everything up if vma_adjust failed. */
	if (new->vm_ops && new->vm_ops->close)
		new->vm_ops->close(new);
	if (new->vm_file)
		fput(new->vm_file);
	unlink_anon_vmas(new);
 out_free_mpol:
	mpol_put(vma_policy(new));
 out_free_vma:
	kmem_cache_free(vm_area_cachep, new);
 out_err:
	return err;
}

/*
 * Split a vma into two pieces at address 'addr', a new vma is allocated
 * either for the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	return __split_vma(mm, vma, addr, new_below);
}

/* Munmap is split into 2 main parts -- this part which finds
 * what needs doing, and the areas themselves, which do the
 * work.  This now handles partial unmappings.
 * Jeremy Fitzhardinge <jeremy@goop.org>
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	unsigned long end;
	struct vm_area_struct *vma, *prev, *last;

	if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
		return -EINVAL;

	len = PAGE_ALIGN(len);
	if (len == 0)
		return -EINVAL;

	/* Find the first overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma)
		return 0;
	prev = vma->vm_prev;
	/* we have  start < vma->vm_end  */

	/* if it doesn't overlap, we have nothing.. */
	end = start + len;
	if (vma->vm_start >= end)
		return 0;

	/*
	 * If we need to split any vma, do it now to save pain later.
	 *
	 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
	 * unmapped vm_area_struct will remain in use: so lower split_vma
	 * places tmp vma above, and higher split_vma places tmp vma below.
	 */
	if (start > vma->vm_start) {
		int error;

		/*
		 * Make sure that map_count on return from munmap() will
		 * not exceed its limit; but let map_count go just above
		 * its limit temporarily, to help free resources as expected.
		 */
		if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
			return -ENOMEM;

		error = __split_vma(mm, vma, start, 0);
		if (error)
			return error;
		prev = vma;
	}

	/* Does it split the last one? */
	last = find_vma(mm, end);
	if (last && end > last->vm_start) {
		int error = __split_vma(mm, last, end, 1);
		if (error)
			return error;
	}
	vma = prev ?
prev->vm_next : mm->mmap;

	/*
	 * unlock any mlock()ed ranges before detaching vmas
	 */
	if (mm->locked_vm) {
		struct vm_area_struct *tmp = vma;
		while (tmp && tmp->vm_start < end) {
			if (tmp->vm_flags & VM_LOCKED) {
				mm->locked_vm -= vma_pages(tmp);
				munlock_vma_pages_all(tmp);
			}
			tmp = tmp->vm_next;
		}
	}

	/*
	 * Remove the vma's, and unmap the actual pages
	 */
	detach_vmas_to_be_unmapped(mm, vma, prev, end);
	unmap_region(mm, vma, prev, start, end);

	arch_unmap(mm, vma, start, end);

	/* Fix up all other VM information */
	remove_vma_list(mm, vma);

	return 0;
}

/* Locked wrapper around do_munmap() for in-kernel callers. */
int vm_munmap(unsigned long start, size_t len)
{
	int ret;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, start, len);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	profile_munmap(addr);
	return vm_munmap(addr, len);
}

/*
 * Emulation of deprecated remap_file_pages() syscall.
 */
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long populate = 0;
	unsigned long ret = -EINVAL;
	struct file *file;

	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. "
			"See Documentation/vm/remap_file_pages.txt.\n",
			current->comm, current->pid);

	if (prot)
		return ret;
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	if (start + size <= start)
		return ret;

	/* Does pgoff wrap? */
	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
		return ret;

	down_write(&mm->mmap_sem);
	vma = find_vma(mm, start);

	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	if (start < vma->vm_start || start + size > vma->vm_end)
		goto out;

	/* nothing to do if the offsets already line up linearly */
	if (pgoff == linear_page_index(vma, start)) {
		ret = 0;
		goto out;
	}

	/* rebuild the protection bits from the existing vma flags */
	prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
	prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
	prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;

	flags &= MAP_NONBLOCK;
	flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
	if (vma->vm_flags & VM_LOCKED) {
		flags |= MAP_LOCKED;
		/* drop PG_Mlocked flag for over-mapped range */
		munlock_vma_pages_range(vma, start, start + size);
	}

	/* extra file reference keeps the file alive across do_mmap_pgoff */
	file = get_file(vma->vm_file);
	ret = do_mmap_pgoff(vma->vm_file, start, size,
			prot, flags, pgoff, &populate);
	fput(file);
out:
	up_write(&mm->mmap_sem);
	/* ret holds the mapped address here; reset to 0 on success below */
	if (populate)
		mm_populate(ret, populate);
	if (!IS_ERR_VALUE(ret))
		ret = 0;
	return ret;
}

/* Debug aid: WARN if the caller does not hold mmap_sem for writing. */
static inline void verify_mm_writelocked(struct mm_struct *mm)
{
#ifdef CONFIG_DEBUG_VM
	if (unlikely(down_read_trylock(&mm->mmap_sem))) {
		WARN_ON(1);
		up_read(&mm->mmap_sem);
	}
#endif
}

/*
 *  this is really a simplified "do_mmap".  it only handles
 *  anonymous maps.  eventually we may be able to do some
 *  brk-specific accounting here.
 */
static unsigned long do_brk(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	unsigned long flags;
	struct rb_node **rb_link, *rb_parent;
	pgoff_t pgoff = addr >> PAGE_SHIFT;
	int error;

	len = PAGE_ALIGN(len);
	if (!len)
		return addr;

	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;

	error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
	if (error & ~PAGE_MASK)
		return error;

	error = mlock_future_check(mm, mm->def_flags, len);
	if (error)
		return error;

	/*
	 * mm->mmap_sem is required to protect against another thread
	 * changing the mappings in case we sleep.
	 */
	verify_mm_writelocked(mm);

	/*
	 * Clear old maps.  this also does some error checking for us
	 */
 munmap_back:
	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
		if (do_munmap(mm, addr, len))
			return -ENOMEM;
		goto munmap_back;
	}

	/* Check against address space limits *after* clearing old maps... */
	if (!may_expand_vm(mm, len >> PAGE_SHIFT))
		return -ENOMEM;

	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
		return -ENOMEM;

	/* Can we just expand an old private anonymous mapping? */
	vma = vma_merge(mm, prev, addr, addr + len, flags,
					NULL, NULL, pgoff, NULL);
	if (vma)
		goto out;

	/*
	 * create a vma struct for an anonymous mapping
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma) {
		vm_unacct_memory(len >> PAGE_SHIFT);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_mm = mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_pgoff = pgoff;
	vma->vm_flags = flags;
	vma->vm_page_prot = vm_get_page_prot(flags);
	vma_link(mm, vma, prev, rb_link, rb_parent);
out:
	perf_event_mmap(vma);
	mm->total_vm += len >> PAGE_SHIFT;
	if (flags & VM_LOCKED)
		mm->locked_vm += (len >> PAGE_SHIFT);
	vma->vm_flags |= VM_SOFTDIRTY;
	return addr;
}

/* Locked wrapper around do_brk(); populates if def_flags has VM_LOCKED. */
unsigned long vm_brk(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	unsigned long ret;
	bool populate;

	down_write(&mm->mmap_sem);
	ret = do_brk(addr, len);
	populate = ((mm->def_flags & VM_LOCKED) != 0);
	up_write(&mm->mmap_sem);
	if (populate)
		mm_populate(addr, len);
	return ret;
}
EXPORT_SYMBOL(vm_brk);

/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	unsigned long nr_accounted = 0;

	/* mm's last user has gone, and its about to be pulled down */
	mmu_notifier_release(mm);

	if (mm->locked_vm) {
		vma = mm->mmap;
		while (vma) {
			if (vma->vm_flags & VM_LOCKED)
				munlock_vma_pages_all(vma);
			vma = vma->vm_next;
		}
	}

	arch_exit_mmap(mm);

	vma = mm->mmap;
	if (!vma)	/* Can happen if dup_mmap() received an OOM */
		return;

	lru_add_drain();
	flush_cache_mm(mm);
	tlb_gather_mmu(&tlb, mm, 0, -1);
	/* update_hiwater_rss(mm) here? but nobody should be looking */
	/* Use -1 here to ensure all VMAs in the mm are unmapped */
	unmap_vmas(&tlb, vma, 0, -1);

	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, 0, -1);

	/*
	 * Walk the list again, actually closing and freeing it,
	 * with preemption enabled, without holding any MM locks.
	 */
	while (vma) {
		if (vma->vm_flags & VM_ACCOUNT)
			nr_accounted += vma_pages(vma);
		vma = remove_vma(vma);
	}
	vm_unacct_memory(nr_accounted);
}

/* Insert vm structure into process list sorted by address
 * and into the inode's i_mmap tree.  If vm_file is non-NULL
 * then i_mmap_rwsem is taken here.
 */
int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev;
	struct rb_node **rb_link, *rb_parent;

	/*
	 * The vm_pgoff of a purely anonymous vma should be irrelevant
	 * until its first write fault, when page's anon_vma and index
	 * are set.  But now set the vm_pgoff it will almost certainly
	 * end up with (unless mremap moves it elsewhere before that
	 * first wfault), so /proc/pid/maps tells a consistent story.
	 *
	 * By setting it to reflect the virtual start address of the
	 * vma, merges and splits can happen in a seamless way, just
	 * using the existing file pgoff checks and manipulations.
	 * Similarly in do_mmap_pgoff and in do_brk.
	 */
	if (!vma->vm_file) {
		BUG_ON(vma->anon_vma);
		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
	}
	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
			   &prev, &rb_link, &rb_parent))
		return -ENOMEM;
	if ((vma->vm_flags & VM_ACCOUNT) &&
	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
		return -ENOMEM;

	vma_link(mm, vma, prev, rb_link, rb_parent);
	return 0;
}

/*
 * Copy the vma structure to a new location in the same mm,
 * prior to moving page table entries, to effect an mremap move.
 */
struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks)
{
	struct vm_area_struct *vma = *vmap;
	unsigned long vma_start = vma->vm_start;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma, *prev;
	struct rb_node **rb_link, *rb_parent;
	bool faulted_in_anon_vma = true;

	/*
	 * If anonymous vma has not yet been faulted, update new pgoff
	 * to match new location, to increase its chance of merging.
	 */
	if (unlikely(!vma->vm_file && !vma->anon_vma)) {
		pgoff = addr >> PAGE_SHIFT;
		faulted_in_anon_vma = false;
	}

	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
		return NULL;	/* should never get here */
	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (new_vma) {
		/*
		 * Source vma may have been merged into new_vma
		 */
		if (unlikely(vma_start >= new_vma->vm_start &&
			     vma_start < new_vma->vm_end)) {
			/*
			 * The only way we can get a vma_merge with
			 * self during an mremap is if the vma hasn't
			 * been faulted in yet and we were allowed to
			 * reset the dst vma->vm_pgoff to the
			 * destination address of the mremap to allow
			 * the merge to happen. mremap must change the
			 * vm_pgoff linearity between src and dst vmas
			 * (in turn preventing a vma_merge) to be
			 * safe. It is only safe to keep the vm_pgoff
			 * linear if there are no pages mapped yet.
			 */
			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
			*vmap = vma = new_vma;
		}
		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
	} else {
		/* No merge possible: duplicate the vma by hand */
		new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (new_vma) {
			*new_vma = *vma;
			new_vma->vm_start = addr;
			new_vma->vm_end = addr + len;
			new_vma->vm_pgoff = pgoff;
			if (vma_dup_policy(vma, new_vma))
				goto out_free_vma;
			INIT_LIST_HEAD(&new_vma->anon_vma_chain);
			if (anon_vma_clone(new_vma, vma))
				goto out_free_mempol;
			if (new_vma->vm_file)
				get_file(new_vma->vm_file);
			if (new_vma->vm_ops && new_vma->vm_ops->open)
				new_vma->vm_ops->open(new_vma);
			vma_link(mm, new_vma, prev, rb_link, rb_parent);
			*need_rmap_locks = false;
		}
	}
	return new_vma;

 out_free_mempol:
	mpol_put(vma_policy(new_vma));
 out_free_vma:
	kmem_cache_free(vm_area_cachep, new_vma);
	return NULL;
}

/*
 * Return true if the calling process may expand its vm space by the passed
 * number of pages
 */
int may_expand_vm(struct mm_struct *mm, unsigned long npages)
{
	unsigned long cur = mm->total_vm;	/* pages */
	unsigned long lim;

	lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;

	if (cur + npages > lim)
		return 0;
	return 1;
}

static int special_mapping_fault(struct vm_area_struct *vma,
				 struct vm_fault *vmf);

/*
 * Having a close hook prevents vma merging regardless of flags.
 */
static void special_mapping_close(struct vm_area_struct *vma)
{
}

static const char *special_mapping_name(struct vm_area_struct *vma)
{
	return ((struct vm_special_mapping *)vma->vm_private_data)->name;
}

static const struct vm_operations_struct special_mapping_vmops = {
	.close = special_mapping_close,
	.fault = special_mapping_fault,
	.name = special_mapping_name,
};

static const struct vm_operations_struct legacy_special_mapping_vmops = {
	.close = special_mapping_close,
	.fault = special_mapping_fault,
};

static int special_mapping_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	pgoff_t pgoff;
	struct page **pages;

	/*
	 * special mappings have no vm_file, and in that case, the mm
	 * uses vm_pgoff internally. So we have to subtract it from here.
	 * We are allowed to do this because we are the mm; do not copy
	 * this code into drivers!
	 */
	pgoff = vmf->pgoff - vma->vm_pgoff;

	/*
	 * Legacy mappings store the page array directly in
	 * vm_private_data; new-style ones wrap it in vm_special_mapping.
	 */
	if (vma->vm_ops == &legacy_special_mapping_vmops)
		pages = vma->vm_private_data;
	else
		pages = ((struct vm_special_mapping *)vma->vm_private_data)->
			pages;

	/* walk the NULL-terminated page array to the faulting index */
	for (; pgoff && *pages; ++pages)
		pgoff--;

	if (*pages) {
		struct page *page = *pages;
		get_page(page);
		vmf->page = page;
		return 0;
	}

	return VM_FAULT_SIGBUS;
}

static struct vm_area_struct *__install_special_mapping(
	struct mm_struct *mm,
	unsigned long addr, unsigned long len,
	unsigned long vm_flags, const struct vm_operations_struct *ops,
	void *priv)
{
	int ret;
	struct vm_area_struct *vma;

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (unlikely(vma == NULL))
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_mm = mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;

	vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	vma->vm_ops = ops;
	vma->vm_private_data = priv;

	ret = insert_vm_struct(mm, vma);
	if (ret)
		goto out;

	mm->total_vm += len >> PAGE_SHIFT;

	perf_event_mmap(vma);

	return vma;

out:
	kmem_cache_free(vm_area_cachep, vma);
	return ERR_PTR(ret);
}

/*
 * Called with mm->mmap_sem held for writing.
 * Insert a new vma covering the given region, with the given flags.
 * Its pages are supplied by the given array of struct page *.
 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
 * The region past the last page supplied will always produce SIGBUS.
 * The array pointer and the pages it points to are assumed to stay alive
 * for as long as this mapping might exist.
*/ struct vm_area_struct *_install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, const struct vm_special_mapping *spec) { return __install_special_mapping(mm, addr, len, vm_flags, &special_mapping_vmops, (void *)spec); } int install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, struct page **pages) { struct vm_area_struct *vma = __install_special_mapping( mm, addr, len, vm_flags, &legacy_special_mapping_vmops, (void *)pages); return PTR_ERR_OR_ZERO(vma); } static DEFINE_MUTEX(mm_all_locks_mutex); static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) { if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) { /* * The LSB of head.next can't change from under us * because we hold the mm_all_locks_mutex. */ down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem); /* * We can safely modify head.next after taking the * anon_vma->root->rwsem. If some other vma in this mm shares * the same anon_vma we won't take it again. * * No need of atomic instructions here, head.next * can't change from under us thanks to the * anon_vma->root->rwsem. */ if (__test_and_set_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) BUG(); } } static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) { if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { /* * AS_MM_ALL_LOCKS can't change from under us because * we hold the mm_all_locks_mutex. * * Operations on ->flags have to be atomic because * even if AS_MM_ALL_LOCKS is stable thanks to the * mm_all_locks_mutex, there may be other cpus * changing other bitflags in parallel to us. */ if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) BUG(); down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem); } } /* * This operation locks against the VM for all pte/vma/mm related * operations that could ever happen on a certain mm. 
This includes * vmtruncate, try_to_unmap, and all page faults. * * The caller must take the mmap_sem in write mode before calling * mm_take_all_locks(). The caller isn't allowed to release the * mmap_sem until mm_drop_all_locks() returns. * * mmap_sem in write mode is required in order to block all operations * that could modify pagetables and free pages without need of * altering the vma layout. It's also needed in write mode to avoid new * anon_vmas to be associated with existing vmas. * * A single task can't take more than one mm_take_all_locks() in a row * or it would deadlock. * * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in * mapping->flags avoid to take the same lock twice, if more than one * vma in this mm is backed by the same anon_vma or address_space. * * We can take all the locks in random order because the VM code * taking i_mmap_rwsem or anon_vma->rwsem outside the mmap_sem never * takes more than one of them in a row. Secondly we're protected * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex. * * mm_take_all_locks() and mm_drop_all_locks are expensive operations * that may have to take thousand of locks. * * mm_take_all_locks() can fail if it's interrupted by signals. 
 */
int mm_take_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;

	/* Caller must already hold mmap_sem exclusively (trylock must fail). */
	BUG_ON(down_read_trylock(&mm->mmap_sem));

	mutex_lock(&mm_all_locks_mutex);

	/* First pass: lock every distinct file mapping's i_mmap_rwsem. */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping)
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	/* Second pass: lock every distinct root anon_vma. */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_lock_anon_vma(mm, avc->anon_vma);
	}

	return 0;

out_unlock:
	/* Interrupted by a signal: undo whatever was locked so far. */
	mm_drop_all_locks(mm);
	return -EINTR;
}

/*
 * Release anon_vma->root->rwsem taken by vm_lock_anon_vma(), clearing
 * the LSB marker first so other users never observe our bitflag.
 */
static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
{
	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change to 0 from under
		 * us because we hold the mm_all_locks_mutex.
		 *
		 * We must however clear the bitflag before unlocking
		 * the vma so the users using the anon_vma->rb_root will
		 * never see our bitflag.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us until we release the
		 * anon_vma->root->rwsem.
		 */
		if (!__test_and_clear_bit(0, (unsigned long *)
					  &anon_vma->root->rb_root.rb_node))
			BUG();
		anon_vma_unlock_write(anon_vma);
	}
}

/*
 * Release mapping->i_mmap_rwsem taken by vm_lock_mapping() and clear
 * the AS_MM_ALL_LOCKS flag.
 */
static void vm_unlock_mapping(struct address_space *mapping)
{
	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change to 0 from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		i_mmap_unlock_write(mapping);
		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
					&mapping->flags))
			BUG();
	}
}

/*
 * The mmap_sem cannot be released by the caller until
 * mm_drop_all_locks() returns.
 */
void mm_drop_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;

	/* Caller must still hold mmap_sem and the all-locks mutex. */
	BUG_ON(down_read_trylock(&mm->mmap_sem));
	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_unlock_anon_vma(avc->anon_vma);
		if (vma->vm_file && vma->vm_file->f_mapping)
			vm_unlock_mapping(vma->vm_file->f_mapping);
	}

	mutex_unlock(&mm_all_locks_mutex);
}

/*
 * initialise the VMA slab
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
}

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
 * mode.
 *
 * The default value is min(3% of free memory, 128MB)
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */
static int init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	/* free_kbytes / 32 is ~3%; the cap 1UL << 17 kB is 128MB. */
	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
	return 0;
}
subsys_initcall(init_user_reserve);

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
 * only reserve 3% of free pages by default.
 */
static int init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	/* free_kbytes / 32 is ~3%; the cap 1UL << 13 kB is 8MB. */
	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
	return 0;
}
subsys_initcall(init_admin_reserve);

/*
 * Reinitialise user and admin reserves if memory is added or removed.
 *
 * The default user reserve max is 128MB, and the default max for the
 * admin reserve is 8MB.
These are usually, but not always, enough to * enable recovery from a memory hogging process using login/sshd, a shell, * and tools like top. It may make sense to increase or even disable the * reserve depending on the existence of swap or variations in the recovery * tools. So, the admin may have changed them. * * If memory is added and the reserves have been eliminated or increased above * the default max, then we'll trust the admin. * * If memory is removed and there isn't enough free memory, then we * need to reset the reserves. * * Otherwise keep the reserve set by the admin. */ static int reserve_mem_notifier(struct notifier_block *nb, unsigned long action, void *data) { unsigned long tmp, free_kbytes; switch (action) { case MEM_ONLINE: /* Default max is 128MB. Leave alone if modified by operator. */ tmp = sysctl_user_reserve_kbytes; if (0 < tmp && tmp < (1UL << 17)) init_user_reserve(); /* Default max is 8MB. Leave alone if modified by operator. */ tmp = sysctl_admin_reserve_kbytes; if (0 < tmp && tmp < (1UL << 13)) init_admin_reserve(); break; case MEM_OFFLINE: free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); if (sysctl_user_reserve_kbytes > free_kbytes) { init_user_reserve(); pr_info("vm.user_reserve_kbytes reset to %lu\n", sysctl_user_reserve_kbytes); } if (sysctl_admin_reserve_kbytes > free_kbytes) { init_admin_reserve(); pr_info("vm.admin_reserve_kbytes reset to %lu\n", sysctl_admin_reserve_kbytes); } break; default: break; } return NOTIFY_OK; } static struct notifier_block reserve_mem_nb = { .notifier_call = reserve_mem_notifier, }; static int __meminit init_reserve_notifier(void) { if (register_hotmemory_notifier(&reserve_mem_nb)) pr_err("Failed registering memory add/remove notifier for admin reserve\n"); return 0; } subsys_initcall(init_reserve_notifier);
gpl-2.0
garwynn/android_kernel_lge_msm8996
drivers/staging/vt6655/iwctl.c
331
51004
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * File: iwctl.c * * Purpose: wireless ext & ioctl functions * * Author: Lyndon Chen * * Date: July 5, 2006 * * Functions: * * Revision History: * */ #include "device.h" #include "ioctl.h" #include "iocmd.h" #include "iwctl.h" #include "mac.h" #include "card.h" #include "hostap.h" #include "power.h" #include "rf.h" #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT #include "iowpa.h" #include "wpactl.h" #endif #include <net/iw_handler.h> extern unsigned short TxRate_iwconfig;//2008-5-8 <add> by chester /*--------------------- Static Definitions -------------------------*/ //2008-0409-07, <Add> by Einsn Liu #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT #define SUPPORTED_WIRELESS_EXT 18 #else #define SUPPORTED_WIRELESS_EXT 17 #endif static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484, 4915, 4920, 4925, 4935, 4940, 4945, 4960, 4980, 5035, 5040, 5045, 5055, 5060, 5080, 5170, 5180, 5190, 5200, 5210, 5220, 5230, 5240, 5260, 5280, 5300, 5320, 5500, 5520, 5540, 5560, 5580, 5600, 5620, 5640, 5660, 5680, 5700, 5745, 5765, 5785, 5805, 5825 }; /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static 
Variables --------------------------*/ /*--------------------- Static Functions --------------------------*/ /*--------------------- Export Variables --------------------------*/ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev) { struct vnt_private *pDevice = netdev_priv(dev); long ldBm; pDevice->wstats.status = pDevice->op_mode; #ifdef Calcu_LinkQual if (pDevice->scStatistic.LinkQuality > 100) pDevice->scStatistic.LinkQuality = 100; pDevice->wstats.qual.qual = (unsigned char)pDevice->scStatistic.LinkQuality; #else pDevice->wstats.qual.qual = pDevice->byCurrSQ; #endif RFvRSSITodBm(pDevice, (unsigned char)(pDevice->uCurrRSSI), &ldBm); pDevice->wstats.qual.level = ldBm; pDevice->wstats.qual.noise = 0; pDevice->wstats.qual.updated = 1; pDevice->wstats.discard.nwid = 0; pDevice->wstats.discard.code = 0; pDevice->wstats.discard.fragment = 0; pDevice->wstats.discard.retries = (unsigned long)pDevice->scStatistic.dwTsrErr; pDevice->wstats.discard.misc = 0; pDevice->wstats.miss.beacon = 0; return &pDevice->wstats; } /*------------------------------------------------------------------*/ static int iwctl_commit(struct net_device *dev, struct iw_request_info *info, void *wrq, char *extra) { pr_debug(" SIOCSIWCOMMIT\n"); return 0; } /* * Wireless Handler : get protocol name */ int iwctl_giwname(struct net_device *dev, struct iw_request_info *info, char *wrq, char *extra) { strcpy(wrq, "802.11-a/b/g"); return 0; } /* * Wireless Handler : set scan */ static int iwctl_siwscan(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); struct iw_scan_req *req = (struct iw_scan_req *)extra; unsigned char abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1]; PWLAN_IE_SSID pItemSSID = NULL; pr_debug(" SIOCSIWSCAN\n"); if (pDevice->byReAssocCount > 0) { //reject scan when re-associating! 
//send scan event to wpa_Supplicant union iwreq_data wrqu; PRINT_K("wireless_send_event--->SIOCGIWSCAN(scan done)\n"); memset(&wrqu, 0, sizeof(wrqu)); wireless_send_event(pDevice->dev, SIOCGIWSCAN, &wrqu, NULL); return 0; } spin_lock_irq(&pDevice->lock); BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass); //mike add: active scan OR passive scan OR desire_ssid scan if (wrq->length == sizeof(struct iw_scan_req)) { if (wrq->flags & IW_SCAN_THIS_ESSID) { //desire_ssid scan memset(abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1); pItemSSID = (PWLAN_IE_SSID)abyScanSSID; pItemSSID->byElementID = WLAN_EID_SSID; memcpy(pItemSSID->abySSID, req->essid, (int)req->essid_len); if (pItemSSID->abySSID[req->essid_len - 1] == '\0') { if (req->essid_len > 0) pItemSSID->len = req->essid_len - 1; } else pItemSSID->len = req->essid_len; pMgmt->eScanType = WMAC_SCAN_PASSIVE; PRINT_K("SIOCSIWSCAN:[desired_ssid=%s,len=%d]\n", ((PWLAN_IE_SSID)abyScanSSID)->abySSID, ((PWLAN_IE_SSID)abyScanSSID)->len); bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, abyScanSSID); spin_unlock_irq(&pDevice->lock); return 0; } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) { //passive scan pMgmt->eScanType = WMAC_SCAN_PASSIVE; } } else { //active scan pMgmt->eScanType = WMAC_SCAN_ACTIVE; } pMgmt->eScanType = WMAC_SCAN_PASSIVE; bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL); spin_unlock_irq(&pDevice->lock); return 0; } /* * Wireless Handler : get scan results */ static int iwctl_giwscan(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { int ii, jj, kk; struct vnt_private *pDevice = netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); PKnownBSS pBSS; PWLAN_IE_SSID pItemSSID; PWLAN_IE_SUPP_RATES pSuppRates, pExtSuppRates; char *current_ev = extra; char *end_buf = extra + IW_SCAN_MAX_DATA; char *current_val = NULL; struct iw_event iwe; long ldBm; char buf[MAX_WPA_IE_LEN * 2 + 30]; pr_debug(" SIOCGIWSCAN\n"); if (pMgmt->eScanState == 
WMAC_IS_SCANNING) { // In scanning.. return -EAGAIN; } pBSS = &(pMgmt->sBSSList[0]); for (ii = 0, jj = 0; jj < MAX_BSS_NUM; jj++) { if (current_ev >= end_buf) break; pBSS = &(pMgmt->sBSSList[jj]); if (pBSS->bActive) { //ADD mac address memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWAP; iwe.u.ap_addr.sa_family = ARPHRD_ETHER; memcpy(iwe.u.ap_addr.sa_data, pBSS->abyBSSID, WLAN_BSSID_LEN); current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_ADDR_LEN); //ADD ssid memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWESSID; pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID; iwe.u.data.length = pItemSSID->len; iwe.u.data.flags = 1; current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, pItemSSID->abySSID); //ADD mode memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWMODE; if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo)) iwe.u.mode = IW_MODE_INFRA; else iwe.u.mode = IW_MODE_ADHOC; iwe.len = IW_EV_UINT_LEN; current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_UINT_LEN); //ADD frequency pSuppRates = (PWLAN_IE_SUPP_RATES)pBSS->abySuppRates; pExtSuppRates = (PWLAN_IE_SUPP_RATES)pBSS->abyExtSuppRates; memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWFREQ; iwe.u.freq.m = pBSS->uChannel; iwe.u.freq.e = 0; iwe.u.freq.i = 0; current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_FREQ_LEN); //2008-0409-04, <Add> by Einsn Liu { int f = (int)pBSS->uChannel - 1; if (f < 0)f = 0; iwe.u.freq.m = frequency_list[f] * 100000; iwe.u.freq.e = 1; } current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_FREQ_LEN); //ADD quality memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVQUAL; RFvRSSITodBm(pDevice, (unsigned char)(pBSS->uRSSI), &ldBm); iwe.u.qual.level = ldBm; iwe.u.qual.noise = 0; //2008-0409-01, <Add> by Einsn Liu if (-ldBm < 50) iwe.u.qual.qual = 100; else if (-ldBm > 90) iwe.u.qual.qual = 0; else iwe.u.qual.qual = (40 - (-ldBm - 50)) * 100 / 40; iwe.u.qual.updated = 7; current_ev = iwe_stream_add_event(info, current_ev, end_buf, 
&iwe, IW_EV_QUAL_LEN); memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWENCODE; iwe.u.data.length = 0; if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo)) iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; else iwe.u.data.flags = IW_ENCODE_DISABLED; current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, pItemSSID->abySSID); memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWRATE; iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; current_val = current_ev + IW_EV_LCP_LEN; for (kk = 0; kk < 12; kk++) { if (pSuppRates->abyRates[kk] == 0) break; // Bit rate given in 500 kb/s units (+ 0x80) iwe.u.bitrate.value = ((pSuppRates->abyRates[kk] & 0x7f) * 500000); current_val = iwe_stream_add_value(info, current_ev, current_val, end_buf, &iwe, IW_EV_PARAM_LEN); } for (kk = 0; kk < 8; kk++) { if (pExtSuppRates->abyRates[kk] == 0) break; // Bit rate given in 500 kb/s units (+ 0x80) iwe.u.bitrate.value = ((pExtSuppRates->abyRates[kk] & 0x7f) * 500000); current_val = iwe_stream_add_value(info, current_ev, current_val, end_buf, &iwe, IW_EV_PARAM_LEN); } if ((current_val - current_ev) > IW_EV_LCP_LEN) current_ev = current_val; memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVCUSTOM; sprintf(buf, "bcn_int=%d", pBSS->wBeaconInterval); iwe.u.data.length = strlen(buf); current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, buf); if ((pBSS->wWPALen > 0) && (pBSS->wWPALen <= MAX_WPA_IE_LEN)) { memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVGENIE; iwe.u.data.length = pBSS->wWPALen; current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, pBSS->byWPAIE); } if ((pBSS->wRSNLen > 0) && (pBSS->wRSNLen <= MAX_WPA_IE_LEN)) { memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVGENIE; iwe.u.data.length = pBSS->wRSNLen; current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, pBSS->byRSNIE); } } }// for wrq->length = current_ev - extra; return 0; } /* * Wireless Handler : set frequency or channel */ int iwctl_siwfreq(struct net_device *dev, struct iw_request_info *info, struct 
iw_freq *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); int rc = 0; pr_debug(" SIOCSIWFREQ\n"); // If setting by frequency, convert to a channel if ((wrq->e == 1) && (wrq->m >= (int) 2.412e8) && (wrq->m <= (int) 2.487e8)) { int f = wrq->m / 100000; int c = 0; while ((c < 14) && (f != frequency_list[c])) c++; wrq->e = 0; wrq->m = c + 1; } // Setting by channel number if ((wrq->m > 14) || (wrq->e > 0)) rc = -EOPNOTSUPP; else { int channel = wrq->m; if ((channel < 1) || (channel > 14)) { pr_debug("%s: New channel value of %d is invalid!\n", dev->name, wrq->m); rc = -EINVAL; } else { // Yes ! We can set it !!! pr_debug(" Set to channel = %d\n", channel); pDevice->uChannel = channel; //2007-0207-04,<Add> by EinsnLiu //Make change effect at once pDevice->bCommit = true; } } return rc; } /* * Wireless Handler : get frequency or channel */ int iwctl_giwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); pr_debug(" SIOCGIWFREQ\n"); #ifdef WEXT_USECHANNELS wrq->m = (int)pMgmt->uCurrChannel; wrq->e = 0; #else { int f = (int)pMgmt->uCurrChannel - 1; if (f < 0) f = 0; wrq->m = frequency_list[f] * 100000; wrq->e = 1; } #endif return 0; } /* * Wireless Handler : set operation mode */ int iwctl_siwmode(struct net_device *dev, struct iw_request_info *info, __u32 *wmode, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); int rc = 0; pr_debug(" SIOCSIWMODE\n"); if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP && pDevice->bEnableHostapd) { pr_debug("Can't set operation mode, hostapd is running\n"); return rc; } switch (*wmode) { case IW_MODE_ADHOC: if (pMgmt->eConfigMode != WMAC_CONFIG_IBSS_STA) { pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA; if (pDevice->flags & DEVICE_FLAGS_OPENED) pDevice->bCommit = true; } pr_debug("set mode to ad-hoc\n"); break; case IW_MODE_AUTO: case IW_MODE_INFRA: 
if (pMgmt->eConfigMode != WMAC_CONFIG_ESS_STA) { pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA; if (pDevice->flags & DEVICE_FLAGS_OPENED) pDevice->bCommit = true; } pr_debug("set mode to infrastructure\n"); break; case IW_MODE_MASTER: pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA; rc = -EOPNOTSUPP; break; if (pMgmt->eConfigMode != WMAC_CONFIG_AP) { pMgmt->eConfigMode = WMAC_CONFIG_AP; if (pDevice->flags & DEVICE_FLAGS_OPENED) pDevice->bCommit = true; } pr_debug("set mode to Access Point\n"); break; case IW_MODE_REPEAT: pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA; rc = -EOPNOTSUPP; break; default: rc = -EINVAL; } return rc; } /* * Wireless Handler : get operation mode */ int iwctl_giwmode(struct net_device *dev, struct iw_request_info *info, __u32 *wmode, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); pr_debug(" SIOCGIWMODE\n"); // If not managed, assume it's ad-hoc switch (pMgmt->eConfigMode) { case WMAC_CONFIG_ESS_STA: *wmode = IW_MODE_INFRA; break; case WMAC_CONFIG_IBSS_STA: *wmode = IW_MODE_ADHOC; break; case WMAC_CONFIG_AUTO: *wmode = IW_MODE_INFRA; break; case WMAC_CONFIG_AP: *wmode = IW_MODE_MASTER; break; default: *wmode = IW_MODE_ADHOC; } return 0; } /* * Wireless Handler : get capability range */ int iwctl_giwrange(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { struct iw_range *range = (struct iw_range *)extra; int i, k; unsigned char abySupportedRates[13] = {0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90}; pr_debug(" SIOCGIWRANGE\n"); if (wrq->pointer) { wrq->length = sizeof(struct iw_range); memset(range, 0, sizeof(struct iw_range)); range->min_nwid = 0x0000; range->max_nwid = 0x0000; range->num_channels = 14; // Should be based on cap_rid.country to give only // what the current card support k = 0; for (i = 0; i < 14; i++) { range->freq[k].i = i + 1; // List index range->freq[k].m = frequency_list[i] * 100000; range->freq[k++].e = 1; 
// Values in table in MHz -> * 10^5 * 10 } range->num_frequency = k; // Hum... Should put the right values there #ifdef Calcu_LinkQual range->max_qual.qual = 100; #else range->max_qual.qual = 255; #endif range->max_qual.level = 0; range->max_qual.noise = 0; range->sensitivity = 255; for (i = 0; i < 13; i++) { range->bitrate[i] = abySupportedRates[i] * 500000; if (range->bitrate[i] == 0) break; } range->num_bitrates = i; // Set an indication of the max TCP throughput // in bit/s that we can expect using this interface. // May be use for QoS stuff... Jean II if (i > 2) range->throughput = 5 * 1000 * 1000; else range->throughput = 1.5 * 1000 * 1000; range->min_rts = 0; range->max_rts = 2312; range->min_frag = 256; range->max_frag = 2312; // the encoding capabilities range->num_encoding_sizes = 3; // 64(40) bits WEP range->encoding_size[0] = 5; // 128(104) bits WEP range->encoding_size[1] = 13; // 256 bits for WPA-PSK range->encoding_size[2] = 32; // 4 keys are allowed range->max_encoding_tokens = 4; range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; range->min_pmp = 0; range->max_pmp = 1000000;// 1 secs range->min_pmt = 0; range->max_pmt = 1000000;// 1 secs range->pmp_flags = IW_POWER_PERIOD; range->pmt_flags = IW_POWER_TIMEOUT; range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R; // Transmit Power - values are in mW range->txpower[0] = 100; range->num_txpower = 1; range->txpower_capa = IW_TXPOW_MWATT; range->we_version_source = SUPPORTED_WIRELESS_EXT; range->we_version_compiled = WIRELESS_EXT; range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME; range->retry_flags = IW_RETRY_LIMIT; range->r_time_flags = IW_RETRY_LIFETIME; range->min_retry = 1; range->max_retry = 65535; range->min_r_time = 1024; range->max_r_time = 65535 * 1024; // Experimental measurements - boundary 11/5.5 Mb/s // Note : with or without the (local->rssi), results // are somewhat different. 
- Jean II range->avg_qual.qual = 6; range->avg_qual.level = 176; // -80 dBm range->avg_qual.noise = 0; } return 0; } /* * Wireless Handler : set ap mac address */ int iwctl_siwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); int rc = 0; unsigned char ZeroBSSID[WLAN_BSSID_LEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; pr_debug(" SIOCSIWAP\n"); if (pMgmt->eScanState == WMAC_IS_SCANNING) { // In scanning.. pr_debug("SIOCSIWAP(??)-->In scanning..\n"); } if (wrq->sa_family != ARPHRD_ETHER) rc = -EINVAL; else { memcpy(pMgmt->abyDesireBSSID, wrq->sa_data, 6); //2008-0409-05, <Add> by Einsn Liu if ((pDevice->bLinkPass == true) && (memcmp(pMgmt->abyDesireBSSID, pMgmt->abyCurrBSSID, 6) == 0)) { return rc; } //mike :add if ((is_broadcast_ether_addr(pMgmt->abyDesireBSSID)) || (memcmp(pMgmt->abyDesireBSSID, ZeroBSSID, 6) == 0)) { PRINT_K("SIOCSIWAP:invalid desired BSSID return!\n"); return rc; } //mike add: if desired AP is hidden ssid(there are two same BSSID in list), // then ignore,because you don't known which one to be connect with?? { unsigned int ii, uSameBssidNum = 0; for (ii = 0; ii < MAX_BSS_NUM; ii++) { if (pMgmt->sBSSList[ii].bActive && ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID, pMgmt->abyDesireBSSID)) { uSameBssidNum++; } } if (uSameBssidNum >= 2) { //hit: desired AP is in hidden ssid mode!!! 
PRINT_K("SIOCSIWAP:ignore for desired AP in hidden mode\n"); return rc; } } if (pDevice->flags & DEVICE_FLAGS_OPENED) pDevice->bCommit = true; } return rc; } /* * Wireless Handler : get ap mac address */ int iwctl_giwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); pr_debug(" SIOCGIWAP\n"); memcpy(wrq->sa_data, pMgmt->abyCurrBSSID, 6); //2008-0410,<Modify> by Einsn Liu if ((pDevice->bLinkPass == false) && (pMgmt->eCurrMode != WMAC_MODE_ESS_AP)) memset(wrq->sa_data, 0, 6); if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) memcpy(wrq->sa_data, pMgmt->abyCurrBSSID, 6); wrq->sa_family = ARPHRD_ETHER; return 0; } /* * Wireless Handler : get ap list */ int iwctl_giwaplist(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { int ii, jj, rc = 0; struct sockaddr *sock = NULL; struct sockaddr *s = NULL; struct iw_quality *qual = NULL; struct iw_quality *q = NULL; PKnownBSS pBSS = NULL; struct vnt_private *pDevice = netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); pr_debug(" SIOCGIWAPLIST\n"); if (!capable(CAP_NET_ADMIN)) { rc = -EPERM; goto exit; } if (!wrq->pointer) goto exit; sock = kmalloc_array(IW_MAX_AP, sizeof(struct sockaddr), GFP_KERNEL); if (!sock) { rc = -ENOMEM; goto exit; } qual = kmalloc_array(IW_MAX_AP, sizeof(struct iw_quality), GFP_KERNEL); if (!qual) { rc = -ENOMEM; goto exit; } for (ii = 0, jj = 0; ii < MAX_BSS_NUM; ii++) { pBSS = &(pMgmt->sBSSList[ii]); if (!pBSS->bActive) continue; if (jj >= IW_MAX_AP) break; s = &sock[jj]; q = &qual[jj]; memcpy(s->sa_data, pBSS->abyBSSID, 6); s->sa_family = ARPHRD_ETHER; q->level = pBSS->uRSSI; q->qual = 0; q->noise = 0; q->updated = 2; jj++; } wrq->flags = 1; /* Should be define'd */ wrq->length = jj; memcpy(extra, sock, sizeof(struct sockaddr) * jj); memcpy(extra + sizeof(struct sockaddr) * jj, qual, sizeof(struct iw_quality) * jj); exit: 
kfree(sock); kfree(qual); return rc; } /* * Wireless Handler : set essid */ int iwctl_siwessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); PWLAN_IE_SSID pItemSSID; //2008-0409-05, <Add> by Einsn Liu unsigned char len; pr_debug(" SIOCSIWESSID\n"); pDevice->fWPA_Authened = false; if (pMgmt->eScanState == WMAC_IS_SCANNING) { // In scanning.. pr_debug("SIOCSIWESSID(??)-->In scanning..\n"); } // Check if we asked for `any' if (wrq->flags == 0) { // Just send an empty SSID list memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1); memset(pMgmt->abyDesireBSSID, 0xFF, 6); PRINT_K("set essid to 'any'\n"); #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT return 0; #endif } else { // Set the SSID memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1); pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID; pItemSSID->byElementID = WLAN_EID_SSID; memcpy(pItemSSID->abySSID, extra, wrq->length); if (pItemSSID->abySSID[wrq->length - 1] == '\0') { if (wrq->length > 0) pItemSSID->len = wrq->length - 1; } else pItemSSID->len = wrq->length; pr_debug("set essid to %s\n", pItemSSID->abySSID); //2008-0409-05, <Add> by Einsn Liu len = (pItemSSID->len > ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len) ? pItemSSID->len : ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len; if ((pDevice->bLinkPass == true) && (memcmp(pItemSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->abySSID, len) == 0)) return 0; //mike:need clear desiredBSSID if (pItemSSID->len == 0) { memset(pMgmt->abyDesireBSSID, 0xFF, 6); return 0; } #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT //Wext wil order another command of siwap to link with desired AP, //so here need not associate?? 
if (pDevice->bWPASuppWextEnabled == true) { /*******search if in hidden ssid mode ****/ { PKnownBSS pCurr = NULL; unsigned char abyTmpDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1]; unsigned int ii, uSameBssidNum = 0; memcpy(abyTmpDesireSSID, pMgmt->abyDesireSSID, sizeof(abyTmpDesireSSID)); pCurr = BSSpSearchBSSList(pDevice, NULL, abyTmpDesireSSID, pMgmt->eConfigPHYMode ); if (pCurr == NULL) { PRINT_K("SIOCSIWESSID:hidden ssid site survey before associate.......\n"); vResetCommandTimer((void *)pDevice); pMgmt->eScanType = WMAC_SCAN_ACTIVE; bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID); bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID); } else { //mike:to find out if that desired SSID is a hidden-ssid AP , // by means of judging if there are two same BSSID exist in list ? for (ii = 0; ii < MAX_BSS_NUM; ii++) { if (pMgmt->sBSSList[ii].bActive && ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID, pCurr->abyBSSID)) { uSameBssidNum++; } } if (uSameBssidNum >= 2) { //hit: desired AP is in hidden ssid mode!!! pr_debug("SIOCSIWESSID:hidden ssid directly associate.......\n"); vResetCommandTimer((void *)pDevice); pMgmt->eScanType = WMAC_SCAN_PASSIVE; //this scan type,you'll submit scan result! bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID); bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID); } } } return 0; } #endif pr_debug("set essid = %s\n", pItemSSID->abySSID); } if (pDevice->flags & DEVICE_FLAGS_OPENED) pDevice->bCommit = true; return 0; } /* * Wireless Handler : get essid */ int iwctl_giwessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); PWLAN_IE_SSID pItemSSID; pr_debug(" SIOCGIWESSID\n"); // Note : if wrq->u.data.flags != 0, we should // get the relevant SSID from the SSID list... 
// Get the current SSID pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID; memcpy(extra, pItemSSID->abySSID , pItemSSID->len); extra[pItemSSID->len] = '\0'; wrq->length = pItemSSID->len + 1; //2008-0409-03, <Add> by Einsn Liu wrq->length = pItemSSID->len; wrq->flags = 1; // active return 0; } /* * Wireless Handler : set data rate */ int iwctl_siwrate(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); int rc = 0; u8 brate = 0; int i; unsigned char abySupportedRates[13] = {0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90}; pr_debug(" SIOCSIWRATE\n"); if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) { rc = -EINVAL; return rc; } // First : get a valid bit rate value // Which type of value if ((wrq->value < 13) && (wrq->value >= 0)) { // Setting by rate index // Find value in the magic rate table brate = wrq->value; } else { // Setting by frequency value u8 normvalue = (u8) (wrq->value/500000); // Check if rate is valid for (i = 0; i < 13; i++) { if (normvalue == abySupportedRates[i]) { brate = i; break; } } } // -1 designed the max rate (mostly auto mode) if (wrq->value == -1) { // Get the highest available rate for (i = 0; i < 13; i++) { if (abySupportedRates[i] == 0) break; } if (i != 0) brate = i - 1; } // Check that it is valid // brate is index of abySupportedRates[] if (brate > 13) { rc = -EINVAL; return rc; } // Now, check if we want a fixed or auto value if (wrq->fixed != 0) { // Fixed mode // One rate, fixed pr_debug("Rate Fix\n"); pDevice->bFixRate = true; if ((pDevice->byBBType == BB_TYPE_11B) && (brate > 3)) { pDevice->uConnectionRate = 3; } else { pDevice->uConnectionRate = brate; pr_debug("Fixed to Rate %d\n", pDevice->uConnectionRate); } } else { pDevice->bFixRate = false; pDevice->uConnectionRate = 13; pr_debug("auto rate:connection_rate is 13\n"); } return rc; } /* * Wireless Handler : get data rate */ int iwctl_giwrate(struct net_device *dev, 
struct iw_request_info *info, struct iw_param *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); //2007-0118-05,<Mark> by EinsnLiu //Mark the unnecessary sentences. // PSMgmtObject pMgmt = &(pDevice->sMgmtObj); pr_debug(" SIOCGIWRATE\n"); { unsigned char abySupportedRates[13] = {0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90}; int brate = 0; //2008-5-8 <modify> by chester if (pDevice->bLinkPass) { if (pDevice->bFixRate == true) { if (pDevice->uConnectionRate < 13) { brate = abySupportedRates[pDevice->uConnectionRate]; } else { if (pDevice->byBBType == BB_TYPE_11B) brate = 0x16; if (pDevice->byBBType == BB_TYPE_11G) brate = 0x6C; if (pDevice->byBBType == BB_TYPE_11A) brate = 0x6C; } } else { brate = abySupportedRates[TxRate_iwconfig]; } } else brate = 0; wrq->value = brate * 500000; // If more than one rate, set auto if (pDevice->bFixRate == true) wrq->fixed = true; } return 0; } /* * Wireless Handler : set rts threshold */ int iwctl_siwrts(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); int rc = 0; pr_debug(" SIOCSIWRTS\n"); { int rthr = wrq->value; if (wrq->disabled) rthr = 2312; if ((rthr < 0) || (rthr > 2312)) rc = -EINVAL; else pDevice->wRTSThreshold = rthr; } return 0; } /* * Wireless Handler : get rts */ int iwctl_giwrts(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); pr_debug(" SIOCGIWRTS\n"); wrq->value = pDevice->wRTSThreshold; wrq->disabled = (wrq->value >= 2312); wrq->fixed = 1; return 0; } /* * Wireless Handler : set fragment threshold */ int iwctl_siwfrag(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); int rc = 0; int fthr = wrq->value; pr_debug(" SIOCSIWFRAG\n"); if (wrq->disabled) fthr = 2312; if ((fthr < 256) || (fthr > 2312)) { rc = 
-EINVAL; } else { fthr &= ~0x1; // Get an even value pDevice->wFragmentationThreshold = (u16)fthr; } return rc; } /* * Wireless Handler : get fragment threshold */ int iwctl_giwfrag(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); pr_debug(" SIOCGIWFRAG\n"); wrq->value = pDevice->wFragmentationThreshold; wrq->disabled = (wrq->value >= 2312); wrq->fixed = 1; return 0; } /* * Wireless Handler : set retry threshold */ int iwctl_siwretry(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); int rc = 0; pr_debug(" SIOCSIWRETRY\n"); if (wrq->disabled) { rc = -EINVAL; return rc; } if (wrq->flags & IW_RETRY_LIMIT) { if (wrq->flags & IW_RETRY_MAX) pDevice->byLongRetryLimit = wrq->value; else if (wrq->flags & IW_RETRY_MIN) pDevice->byShortRetryLimit = wrq->value; else { // No modifier : set both pDevice->byShortRetryLimit = wrq->value; pDevice->byLongRetryLimit = wrq->value; } } if (wrq->flags & IW_RETRY_LIFETIME) pDevice->wMaxTransmitMSDULifetime = wrq->value; return rc; } /* * Wireless Handler : get retry threshold */ int iwctl_giwretry(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); pr_debug(" SIOCGIWRETRY\n"); wrq->disabled = 0; // Can't be disabled // Note : by default, display the min retry number if ((wrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) { wrq->flags = IW_RETRY_LIFETIME; wrq->value = (int)pDevice->wMaxTransmitMSDULifetime; //ms } else if ((wrq->flags & IW_RETRY_MAX)) { wrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX; wrq->value = (int)pDevice->byLongRetryLimit; } else { wrq->flags = IW_RETRY_LIMIT; wrq->value = (int)pDevice->byShortRetryLimit; if ((int)pDevice->byShortRetryLimit != (int)pDevice->byLongRetryLimit) wrq->flags |= IW_RETRY_MIN; } return 0; } /* * Wireless Handler : set encode mode */ 
int iwctl_siwencode(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); unsigned long dwKeyIndex = (unsigned long)(wrq->flags & IW_ENCODE_INDEX); int ii, uu, rc = 0; int index = (wrq->flags & IW_ENCODE_INDEX); //2007-0207-07,<Modify> by EinsnLiu //There are some problems when using iwconfig encode/key command to set the WEP key. //I almost rewrite this function. //now it support:(assume the wireless interface's name is eth0) //iwconfig eth0 key [1] 1122334455 open /*set key stirng to index 1,and driver using key index is set to 1*/ //iwconfig eth0 key [3] /*set driver using key index to 3,the key string no change */ //iwconfig eth0 key 1122334455 /*set key string to driver using index*/ //iwconfig eth0 key restricted /*enable share key*/ PSKeyTable pkeytab; pr_debug(" SIOCSIWENCODE\n"); if ((wrq->flags & IW_ENCODE_DISABLED) == 0) { //Not disable encryption if (dwKeyIndex > WLAN_WEP_NKEYS) { rc = -EINVAL; return rc; } if (dwKeyIndex < 1 && ((wrq->flags & IW_ENCODE_NOKEY) == 0)) {//set default key if (pDevice->byKeyIndex < WLAN_WEP_NKEYS) dwKeyIndex = pDevice->byKeyIndex; else dwKeyIndex = 0; } else { dwKeyIndex--; } // Check the size of the key if (wrq->length > WLAN_WEP232_KEYLEN) { rc = -EINVAL; return rc; } if (wrq->length > 0) {//have key if (wrq->length == WLAN_WEP232_KEYLEN) { pr_debug("Set 232 bit wep key\n"); } else if (wrq->length == WLAN_WEP104_KEYLEN) { pr_debug("Set 104 bit wep key\n"); } else if (wrq->length == WLAN_WEP40_KEYLEN) { pr_debug("Set 40 bit wep key, index= %d\n", (int)dwKeyIndex); } else {//no support length rc = -EINVAL; return rc; } memset(pDevice->abyKey, 0, WLAN_WEP232_KEYLEN); memcpy(pDevice->abyKey, extra, wrq->length); pr_debug("abyKey: "); for (ii = 0; ii < wrq->length; ii++) pr_debug("%02x ", pDevice->abyKey[ii]); if (pDevice->flags & DEVICE_FLAGS_OPENED) { spin_lock_irq(&pDevice->lock); 
KeybSetDefaultKey(&(pDevice->sKey), (unsigned long)(dwKeyIndex | (1 << 31)), wrq->length, NULL, pDevice->abyKey, KEY_CTL_WEP, pDevice->PortOffset, pDevice->byLocalID ); spin_unlock_irq(&pDevice->lock); } pDevice->byKeyIndex = (unsigned char)dwKeyIndex; pDevice->uKeyLength = wrq->length; pDevice->bTransmitKey = true; pDevice->bEncryptionEnable = true; pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled; } else if (index > 0) { //when the length is 0 the request only changes the default transmit key index //check the new key if it has a non zero length if (pDevice->bEncryptionEnable == false) { rc = -EINVAL; return rc; } pr_debug("Just set Default key Index:\n"); pkeytab = &(pDevice->sKey.KeyTable[MAX_KEY_TABLE - 1]); if (pkeytab->GroupKey[(unsigned char)dwKeyIndex].uKeyLength == 0) { pr_debug("Default key len is 0\n"); rc = -EINVAL; return rc; } pDevice->byKeyIndex = (unsigned char)dwKeyIndex; pkeytab->dwGTKeyIndex = dwKeyIndex | (1 << 31); pkeytab->GroupKey[(unsigned char)dwKeyIndex].dwKeyIndex = dwKeyIndex | (1 << 31); } } else {//disable the key pr_debug("Disable WEP function\n"); if (pDevice->bEncryptionEnable == false) return 0; pMgmt->bShareKeyAlgorithm = false; pDevice->bEncryptionEnable = false; pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled; if (pDevice->flags & DEVICE_FLAGS_OPENED) { spin_lock_irq(&pDevice->lock); for (uu = 0; uu < MAX_KEY_TABLE; uu++) MACvDisableKeyEntry(pDevice->PortOffset, uu); spin_unlock_irq(&pDevice->lock); } } //End Modify,Einsn if (wrq->flags & IW_ENCODE_RESTRICTED) { pr_debug("Enable WEP & ShareKey System\n"); pMgmt->bShareKeyAlgorithm = true; } if (wrq->flags & IW_ENCODE_OPEN) { pr_debug("Enable WEP & Open System\n"); pMgmt->bShareKeyAlgorithm = false; } return rc; } int iwctl_giwencode(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); char abyKey[WLAN_WEP232_KEYLEN]; unsigned int 
index = (unsigned int)(wrq->flags & IW_ENCODE_INDEX); PSKeyItem pKey = NULL; pr_debug(" SIOCGIWENCODE\n"); if (index > WLAN_WEP_NKEYS) return -EINVAL; if (index < 1) {//get default key if (pDevice->byKeyIndex < WLAN_WEP_NKEYS) index = pDevice->byKeyIndex; else index = 0; } else { index--; } memset(abyKey, 0, WLAN_WEP232_KEYLEN); // Check encryption mode wrq->flags = IW_ENCODE_NOKEY; // Is WEP enabled ??? if (pDevice->bEncryptionEnable) wrq->flags |= IW_ENCODE_ENABLED; else wrq->flags |= IW_ENCODE_DISABLED; if (pMgmt->bShareKeyAlgorithm) wrq->flags |= IW_ENCODE_RESTRICTED; else wrq->flags |= IW_ENCODE_OPEN; wrq->length = 0; if ((index == 0) && (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled || pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled)) {//get wpa pairwise key if (KeybGetKey(&(pDevice->sKey), pMgmt->abyCurrBSSID, 0xffffffff, &pKey)) { wrq->length = pKey->uKeyLength; memcpy(abyKey, pKey->abyKey, pKey->uKeyLength); memcpy(extra, abyKey, WLAN_WEP232_KEYLEN); } } else if (KeybGetKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, (unsigned char)index , &pKey)) { wrq->length = pKey->uKeyLength; memcpy(abyKey, pKey->abyKey, pKey->uKeyLength); memcpy(extra, abyKey, WLAN_WEP232_KEYLEN); } wrq->flags |= index+1; return 0; } /* * Wireless Handler : set power mode */ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); int rc = 0; pr_debug(" SIOCSIWPOWER\n"); if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) { rc = -EINVAL; return rc; } if (wrq->disabled) { pDevice->ePSMode = WMAC_POWER_CAM; PSvDisablePowerSaving(pDevice); return rc; } if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { pDevice->ePSMode = WMAC_POWER_FAST; PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval); } else if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) { pDevice->ePSMode = WMAC_POWER_FAST; PSvEnablePowerSaving((void 
*)pDevice, pMgmt->wListenInterval); } switch (wrq->flags & IW_POWER_MODE) { case IW_POWER_UNICAST_R: pr_debug(" SIOCSIWPOWER: IW_POWER_UNICAST_R\n"); rc = -EINVAL; break; case IW_POWER_ALL_R: pr_debug(" SIOCSIWPOWER: IW_POWER_ALL_R\n"); rc = -EINVAL; case IW_POWER_ON: pr_debug(" SIOCSIWPOWER: IW_POWER_ON\n"); break; default: rc = -EINVAL; } return rc; } /* * Wireless Handler : get power mode */ int iwctl_giwpower(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); int mode = pDevice->ePSMode; pr_debug(" SIOCGIWPOWER\n"); wrq->disabled = (mode == WMAC_POWER_CAM); if (wrq->disabled) return 0; if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { wrq->value = (int)((pMgmt->wListenInterval * pMgmt->wCurrBeaconPeriod) << 10); wrq->flags = IW_POWER_TIMEOUT; } else { wrq->value = (int)((pMgmt->wListenInterval * pMgmt->wCurrBeaconPeriod) << 10); wrq->flags = IW_POWER_PERIOD; } wrq->flags |= IW_POWER_ALL_R; return 0; } /* * Wireless Handler : get Sensitivity */ int iwctl_giwsens(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); long ldBm; pr_debug(" SIOCGIWSENS\n"); if (pDevice->bLinkPass == true) { RFvRSSITodBm(pDevice, (unsigned char)(pDevice->uCurrRSSI), &ldBm); wrq->value = ldBm; } else { wrq->value = 0; } wrq->disabled = (wrq->value == 0); wrq->fixed = 1; return 0; } //2008-0409-07, <Add> by Einsn Liu #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT int iwctl_siwauth(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); int ret = 0; static int wpa_version = 0; //must be static to save the last value,einsn liu static int pairwise = 0; pr_debug(" SIOCSIWAUTH\n"); switch (wrq->flags & IW_AUTH_INDEX) { case IW_AUTH_WPA_VERSION: 
wpa_version = wrq->value; if (wrq->value == IW_AUTH_WPA_VERSION_DISABLED) PRINT_K("iwctl_siwauth:set WPADEV to disable at 1??????\n"); else if (wrq->value == IW_AUTH_WPA_VERSION_WPA) PRINT_K("iwctl_siwauth:set WPADEV to WPA1******\n"); else PRINT_K("iwctl_siwauth:set WPADEV to WPA2******\n"); break; case IW_AUTH_CIPHER_PAIRWISE: pairwise = wrq->value; if (pairwise == IW_AUTH_CIPHER_CCMP) pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled; else if (pairwise == IW_AUTH_CIPHER_TKIP) pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; else if (pairwise == IW_AUTH_CIPHER_WEP40 || pairwise == IW_AUTH_CIPHER_WEP104) pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled; else if (pairwise == IW_AUTH_CIPHER_NONE) ; /* do nothing,einsn liu */ else pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled; break; case IW_AUTH_CIPHER_GROUP: if (wpa_version == IW_AUTH_WPA_VERSION_DISABLED) break; if (pairwise == IW_AUTH_CIPHER_NONE) { if (wrq->value == IW_AUTH_CIPHER_CCMP) pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled; else pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; } break; case IW_AUTH_KEY_MGMT: if (wpa_version == IW_AUTH_WPA_VERSION_WPA2) { if (wrq->value == IW_AUTH_KEY_MGMT_PSK) pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK; else pMgmt->eAuthenMode = WMAC_AUTH_WPA2; } else if (wpa_version == IW_AUTH_WPA_VERSION_WPA) { if (wrq->value == 0) pMgmt->eAuthenMode = WMAC_AUTH_WPANONE; else if (wrq->value == IW_AUTH_KEY_MGMT_PSK) pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK; else pMgmt->eAuthenMode = WMAC_AUTH_WPA; } break; case IW_AUTH_TKIP_COUNTERMEASURES: break; /* FIXME */ case IW_AUTH_DROP_UNENCRYPTED: break; case IW_AUTH_80211_AUTH_ALG: if (wrq->value == IW_AUTH_ALG_OPEN_SYSTEM) pMgmt->bShareKeyAlgorithm = false; else if (wrq->value == IW_AUTH_ALG_SHARED_KEY) pMgmt->bShareKeyAlgorithm = true; break; case IW_AUTH_WPA_ENABLED: break; case IW_AUTH_RX_UNENCRYPTED_EAPOL: break; case IW_AUTH_ROAMING_CONTROL: ret = -EOPNOTSUPP; break; case 
IW_AUTH_PRIVACY_INVOKED: pDevice->bEncryptionEnable = !!wrq->value; if (pDevice->bEncryptionEnable == false) { wpa_version = 0; pairwise = 0; pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled; pMgmt->bShareKeyAlgorithm = false; pMgmt->eAuthenMode = false; } break; default: ret = -EOPNOTSUPP; break; } return ret; } int iwctl_giwauth(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrq, char *extra) { return -EOPNOTSUPP; } int iwctl_siwgenie(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char __user *extra) { struct vnt_private *pDevice = netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); int ret = 0; char length; if (wrq->length) { if (wrq->length < 2) return -EINVAL; ret = get_user(length, extra + 1); if (ret) return ret; if (length + 2 != wrq->length) return -EINVAL; if (wrq->length > MAX_WPA_IE_LEN) { ret = -ENOMEM; goto out; } memset(pMgmt->abyWPAIE, 0, MAX_WPA_IE_LEN); if (copy_from_user(pMgmt->abyWPAIE, extra, wrq->length)) { ret = -EFAULT; goto out; } pMgmt->wWPAIELen = wrq->length; } else { memset(pMgmt->abyWPAIE, 0, MAX_WPA_IE_LEN); pMgmt->wWPAIELen = 0; } out://not completely ...not necessary in wpa_supplicant 0.5.8 return ret; } int iwctl_giwgenie(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char __user *extra) { struct vnt_private *pDevice = netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); int ret = 0; int space = wrq->length; wrq->length = 0; if (pMgmt->wWPAIELen > 0) { wrq->length = pMgmt->wWPAIELen; if (pMgmt->wWPAIELen <= space) { if (copy_to_user(extra, pMgmt->abyWPAIE, pMgmt->wWPAIELen)) ret = -EFAULT; } else { ret = -E2BIG; } } return ret; } int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { struct vnt_private *pDevice = netdev_priv(dev); struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; struct viawget_wpa_param *param = NULL; //original member enum wpa_alg alg_name; 
u8 addr[6]; int key_idx, set_tx = 0; u8 seq[IW_ENCODE_SEQ_MAX_SIZE]; u8 key[64]; size_t seq_len = 0, key_len = 0; u8 key_array[64]; int ret = 0; PRINT_K("SIOCSIWENCODEEXT......\n"); param = kzalloc(sizeof(*param), GFP_KERNEL); if (param == NULL) return -ENOMEM; //recover alg_name switch (ext->alg) { case IW_ENCODE_ALG_NONE: alg_name = WPA_ALG_NONE; break; case IW_ENCODE_ALG_WEP: alg_name = WPA_ALG_WEP; break; case IW_ENCODE_ALG_TKIP: alg_name = WPA_ALG_TKIP; break; case IW_ENCODE_ALG_CCMP: alg_name = WPA_ALG_CCMP; break; default: PRINT_K("Unknown alg = %d\n", ext->alg); ret = -ENOMEM; goto error; } //recover addr memcpy(addr, ext->addr.sa_data, ETH_ALEN); //recover key_idx key_idx = (wrq->flags&IW_ENCODE_INDEX) - 1; //recover set_tx if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) set_tx = 1; //recover seq,seq_len if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) { seq_len = IW_ENCODE_SEQ_MAX_SIZE; memcpy(seq, ext->rx_seq, seq_len); } //recover key,key_len if (ext->key_len) { key_len = ext->key_len; memcpy(key, &ext->key[0], key_len); } memset(key_array, 0, 64); if (key_len > 0) { memcpy(key_array, key, key_len); if (key_len == 32) { // notice ! the oder memcpy(&key_array[16], &key[24], 8); memcpy(&key_array[24], &key[16], 8); } } /**************Translate iw_encode_ext to viawget_wpa_param****************/ memcpy(param->addr, addr, ETH_ALEN); param->u.wpa_key.alg_name = (int)alg_name; param->u.wpa_key.set_tx = set_tx; param->u.wpa_key.key_index = key_idx; param->u.wpa_key.key_len = key_len; param->u.wpa_key.key = (u8 *)key_array; param->u.wpa_key.seq = (u8 *)seq; param->u.wpa_key.seq_len = seq_len; //****set if current action is Network Manager count?? //****this method is so foolish,but there is no other way??? 
if (param->u.wpa_key.alg_name == WPA_ALG_NONE) { if (param->u.wpa_key.key_index == 0) pDevice->bwextcount++; if ((pDevice->bwextcount == 1) && (param->u.wpa_key.key_index == 1)) pDevice->bwextcount++; if ((pDevice->bwextcount == 2) && (param->u.wpa_key.key_index == 2)) pDevice->bwextcount++; if ((pDevice->bwextcount == 3) && (param->u.wpa_key.key_index == 3)) pDevice->bwextcount++; } if (pDevice->bwextcount == 4) { pr_debug("SIOCSIWENCODEEXT:Enable WPA WEXT SUPPORT!!!!!\n"); pDevice->bwextcount = 0; pDevice->bWPASuppWextEnabled = true; } //****** spin_lock_irq(&pDevice->lock); ret = wpa_set_keys(pDevice, param, true); spin_unlock_irq(&pDevice->lock); error: kfree(param); return ret; } int iwctl_giwencodeext(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { return -EOPNOTSUPP; } int iwctl_siwmlme(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char __user *extra) { struct vnt_private *pDevice = netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); struct iw_mlme mime; int ret = 0; ret = copy_from_user(&mime, extra, sizeof(mime)); if (ret) return -EFAULT; if (memcmp(pMgmt->abyCurrBSSID, mime.addr.sa_data, ETH_ALEN)) { ret = -EINVAL; return ret; } switch (mime.cmd) { case IW_MLME_DEAUTH: //this command seems to be not complete,please test it --einsnliu //bScheduleCommand((void *) pDevice, WLAN_CMD_DEAUTH, (unsigned char *)&reason); break; case IW_MLME_DISASSOC: if (pDevice->bLinkPass == true) { pr_debug("iwctl_siwmlme--->send DISASSOCIATE\n"); //clear related flags memset(pMgmt->abyDesireBSSID, 0xFF, 6); KeyvInitTable(&pDevice->sKey, pDevice->PortOffset); bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL); } break; default: ret = -EOPNOTSUPP; } return ret; } #endif /*------------------------------------------------------------------*/ /* * Structures to export the Wireless Handlers */ static const iw_handler iwctl_handler[] = { (iw_handler) iwctl_commit, // SIOCSIWCOMMIT 
(iw_handler) NULL, // SIOCGIWNAME (iw_handler) NULL, // SIOCSIWNWID (iw_handler) NULL, // SIOCGIWNWID (iw_handler) NULL, // SIOCSIWFREQ (iw_handler) NULL, // SIOCGIWFREQ (iw_handler) NULL, // SIOCSIWMODE (iw_handler) NULL, // SIOCGIWMODE (iw_handler) NULL, // SIOCSIWSENS (iw_handler) NULL, // SIOCGIWSENS (iw_handler) NULL, // SIOCSIWRANGE (iw_handler) iwctl_giwrange, // SIOCGIWRANGE (iw_handler) NULL, // SIOCSIWPRIV (iw_handler) NULL, // SIOCGIWPRIV (iw_handler) NULL, // SIOCSIWSTATS (iw_handler) NULL, // SIOCGIWSTATS (iw_handler) NULL, // SIOCSIWSPY (iw_handler) NULL, // SIOCGIWSPY (iw_handler) NULL, // -- hole -- (iw_handler) NULL, // -- hole -- (iw_handler) NULL, // SIOCSIWAP (iw_handler) NULL, // SIOCGIWAP (iw_handler) NULL, // -- hole -- 0x16 (iw_handler) NULL, // SIOCGIWAPLIST (iw_handler) iwctl_siwscan, // SIOCSIWSCAN (iw_handler) iwctl_giwscan, // SIOCGIWSCAN (iw_handler) NULL, // SIOCSIWESSID (iw_handler) NULL, // SIOCGIWESSID (iw_handler) NULL, // SIOCSIWNICKN (iw_handler) NULL, // SIOCGIWNICKN (iw_handler) NULL, // -- hole -- (iw_handler) NULL, // -- hole -- (iw_handler) NULL, // SIOCSIWRATE 0x20 (iw_handler) NULL, // SIOCGIWRATE (iw_handler) NULL, // SIOCSIWRTS (iw_handler) NULL, // SIOCGIWRTS (iw_handler) NULL, // SIOCSIWFRAG (iw_handler) NULL, // SIOCGIWFRAG (iw_handler) NULL, // SIOCSIWTXPOW (iw_handler) NULL, // SIOCGIWTXPOW (iw_handler) NULL, // SIOCSIWRETRY (iw_handler) NULL, // SIOCGIWRETRY (iw_handler) NULL, // SIOCSIWENCODE (iw_handler) NULL, // SIOCGIWENCODE (iw_handler) NULL, // SIOCSIWPOWER (iw_handler) NULL, // SIOCGIWPOWER //2008-0409-07, <Add> by Einsn Liu (iw_handler) NULL, // -- hole -- (iw_handler) NULL, // -- hole -- (iw_handler) NULL, // SIOCSIWGENIE (iw_handler) NULL, // SIOCGIWGENIE (iw_handler) NULL, // SIOCSIWAUTH (iw_handler) NULL, // SIOCGIWAUTH (iw_handler) NULL, // SIOCSIWENCODEEXT (iw_handler) NULL, // SIOCGIWENCODEEXT (iw_handler) NULL, // SIOCSIWPMKSA (iw_handler) NULL, // -- hole -- }; static const iw_handler 
iwctl_private_handler[] = { NULL, // SIOCIWFIRSTPRIV }; struct iw_priv_args iwctl_private_args[] = { { IOCTL_CMD_SET, IW_PRIV_TYPE_CHAR | 1024, 0, "set"}, }; const struct iw_handler_def iwctl_handler_def = { .get_wireless_stats = &iwctl_get_wireless_stats, .num_standard = sizeof(iwctl_handler)/sizeof(iw_handler), .num_private = 0, .num_private_args = 0, .standard = (iw_handler *)iwctl_handler, .private = NULL, .private_args = NULL, };
gpl-2.0
GAXUSXX/G935FGaXusKernel2
drivers/staging/comedi/drivers/addi_apci_3120.c
331
6452
#include <linux/module.h> #include <linux/pci.h> #include "../comedidev.h" #include "comedi_fc.h" #include "amcc_s5933.h" #include "addi-data/addi_common.h" #include "addi-data/hwdrv_apci3120.c" enum apci3120_boardid { BOARD_APCI3120, BOARD_APCI3001, }; static const struct addi_board apci3120_boardtypes[] = { [BOARD_APCI3120] = { .pc_DriverName = "apci3120", .i_NbrAiChannel = 16, .i_NbrAiChannelDiff = 8, .i_AiChannelList = 16, .i_NbrAoChannel = 8, .i_AiMaxdata = 0xffff, .i_AoMaxdata = 0x3fff, .i_NbrDiChannel = 4, .i_NbrDoChannel = 4, .i_DoMaxdata = 0x0f, .interrupt = apci3120_interrupt, }, [BOARD_APCI3001] = { .pc_DriverName = "apci3001", .i_NbrAiChannel = 16, .i_NbrAiChannelDiff = 8, .i_AiChannelList = 16, .i_AiMaxdata = 0xfff, .i_NbrDiChannel = 4, .i_NbrDoChannel = 4, .i_DoMaxdata = 0x0f, .interrupt = apci3120_interrupt, }, }; static irqreturn_t v_ADDI_Interrupt(int irq, void *d) { struct comedi_device *dev = d; const struct addi_board *this_board = dev->board_ptr; this_board->interrupt(irq, d); return IRQ_RETVAL(1); } static int apci3120_auto_attach(struct comedi_device *dev, unsigned long context) { struct pci_dev *pcidev = comedi_to_pci_dev(dev); const struct addi_board *this_board = NULL; struct addi_private *devpriv; struct comedi_subdevice *s; int ret, order, i; if (context < ARRAY_SIZE(apci3120_boardtypes)) this_board = &apci3120_boardtypes[context]; if (!this_board) return -ENODEV; dev->board_ptr = this_board; dev->board_name = this_board->pc_DriverName; devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv)); if (!devpriv) return -ENOMEM; ret = comedi_pci_enable(dev); if (ret) return ret; pci_set_master(pcidev); dev->iobase = pci_resource_start(pcidev, 1); devpriv->iobase = dev->iobase; devpriv->i_IobaseAmcc = pci_resource_start(pcidev, 0); devpriv->i_IobaseAddon = pci_resource_start(pcidev, 2); devpriv->i_IobaseReserved = pci_resource_start(pcidev, 3); if (pcidev->irq > 0) { ret = request_irq(pcidev->irq, v_ADDI_Interrupt, IRQF_SHARED, dev->board_name, 
dev); if (ret == 0) dev->irq = pcidev->irq; } /* Allocate DMA buffers */ for (i = 0; i < 2; i++) { for (order = 2; order >= 0; order--) { devpriv->ul_DmaBufferVirtual[i] = dma_alloc_coherent(dev->hw_dev, PAGE_SIZE << order, &devpriv->ul_DmaBufferHw[i], GFP_KERNEL); if (devpriv->ul_DmaBufferVirtual[i]) break; } if (!devpriv->ul_DmaBufferVirtual[i]) break; devpriv->ui_DmaBufferSize[i] = PAGE_SIZE << order; } if (devpriv->ul_DmaBufferVirtual[0]) devpriv->us_UseDma = 1; if (devpriv->ul_DmaBufferVirtual[1]) devpriv->b_DmaDoubleBuffer = 1; ret = comedi_alloc_subdevices(dev, 5); if (ret) return ret; /* Allocate and Initialise AI Subdevice Structures */ s = &dev->subdevices[0]; dev->read_subdev = s; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF; if (this_board->i_NbrAiChannel) s->n_chan = this_board->i_NbrAiChannel; else s->n_chan = this_board->i_NbrAiChannelDiff; s->maxdata = this_board->i_AiMaxdata; s->len_chanlist = this_board->i_AiChannelList; s->range_table = &range_apci3120_ai; s->insn_config = apci3120_ai_insn_config; s->insn_read = apci3120_ai_insn_read; s->do_cmdtest = apci3120_ai_cmdtest; s->do_cmd = apci3120_ai_cmd; s->cancel = apci3120_cancel; /* Allocate and Initialise AO Subdevice Structures */ s = &dev->subdevices[1]; if (this_board->i_NbrAoChannel) { s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON; s->n_chan = this_board->i_NbrAoChannel; s->maxdata = this_board->i_AoMaxdata; s->len_chanlist = this_board->i_NbrAoChannel; s->range_table = &range_apci3120_ao; s->insn_write = apci3120_ao_insn_write; } else { s->type = COMEDI_SUBD_UNUSED; } /* Allocate and Initialise DI Subdevice Structures */ s = &dev->subdevices[2]; s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON; s->n_chan = this_board->i_NbrDiChannel; s->maxdata = 1; s->len_chanlist = this_board->i_NbrDiChannel; s->range_table = &range_digital; s->insn_bits = apci3120_di_insn_bits; /* 
Allocate and Initialise DO Subdevice Structures */ s = &dev->subdevices[3]; s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_READABLE | SDF_WRITEABLE | SDF_GROUND | SDF_COMMON; s->n_chan = this_board->i_NbrDoChannel; s->maxdata = this_board->i_DoMaxdata; s->len_chanlist = this_board->i_NbrDoChannel; s->range_table = &range_digital; s->insn_bits = apci3120_do_insn_bits; /* Allocate and Initialise Timer Subdevice Structures */ s = &dev->subdevices[4]; s->type = COMEDI_SUBD_TIMER; s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON; s->n_chan = 1; s->maxdata = 0; s->len_chanlist = 1; s->range_table = &range_digital; s->insn_write = apci3120_write_insn_timer; s->insn_read = apci3120_read_insn_timer; s->insn_config = apci3120_config_insn_timer; apci3120_reset(dev); return 0; } static void apci3120_detach(struct comedi_device *dev) { struct addi_private *devpriv = dev->private; if (dev->iobase) apci3120_reset(dev); comedi_pci_detach(dev); if (devpriv) { unsigned int i; for (i = 0; i < 2; i++) { if (devpriv->ul_DmaBufferVirtual[i]) { dma_free_coherent(dev->hw_dev, devpriv->ui_DmaBufferSize[i], devpriv-> ul_DmaBufferVirtual[i], devpriv->ul_DmaBufferHw[i]); } } } } static struct comedi_driver apci3120_driver = { .driver_name = "addi_apci_3120", .module = THIS_MODULE, .auto_attach = apci3120_auto_attach, .detach = apci3120_detach, }; static int apci3120_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { return comedi_pci_auto_config(dev, &apci3120_driver, id->driver_data); } static const struct pci_device_id apci3120_pci_table[] = { { PCI_VDEVICE(AMCC, 0x818d), BOARD_APCI3120 }, { PCI_VDEVICE(AMCC, 0x828d), BOARD_APCI3001 }, { 0 } }; MODULE_DEVICE_TABLE(pci, apci3120_pci_table); static struct pci_driver apci3120_pci_driver = { .name = "addi_apci_3120", .id_table = apci3120_pci_table, .probe = apci3120_pci_probe, .remove = comedi_pci_auto_unconfig, }; module_comedi_pci_driver(apci3120_driver, apci3120_pci_driver); MODULE_AUTHOR("Comedi 
http://www.comedi.org"); MODULE_DESCRIPTION("ADDI-DATA APCI-3120, Analog input board"); MODULE_LICENSE("GPL");
gpl-2.0
resin-io/linux
drivers/input/misc/ad714x-spi.c
843
2992
/* * AD714X CapTouch Programmable Controller driver (SPI bus) * * Copyright 2009-2011 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/input.h> /* BUS_SPI */ #include <linux/module.h> #include <linux/spi/spi.h> #include <linux/pm.h> #include <linux/types.h> #include "ad714x.h" #define AD714x_SPI_CMD_PREFIX 0xE000 /* bits 15:11 */ #define AD714x_SPI_READ BIT(10) #ifdef CONFIG_PM_SLEEP static int ad714x_spi_suspend(struct device *dev) { return ad714x_disable(spi_get_drvdata(to_spi_device(dev))); } static int ad714x_spi_resume(struct device *dev) { return ad714x_enable(spi_get_drvdata(to_spi_device(dev))); } #endif static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume); static int ad714x_spi_read(struct ad714x_chip *chip, unsigned short reg, unsigned short *data, size_t len) { struct spi_device *spi = to_spi_device(chip->dev); struct spi_message message; struct spi_transfer xfer[2]; int i; int error; spi_message_init(&message); memset(xfer, 0, sizeof(xfer)); chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX | AD714x_SPI_READ | reg); xfer[0].tx_buf = &chip->xfer_buf[0]; xfer[0].len = sizeof(chip->xfer_buf[0]); spi_message_add_tail(&xfer[0], &message); xfer[1].rx_buf = &chip->xfer_buf[1]; xfer[1].len = sizeof(chip->xfer_buf[1]) * len; spi_message_add_tail(&xfer[1], &message); error = spi_sync(spi, &message); if (unlikely(error)) { dev_err(chip->dev, "SPI read error: %d\n", error); return error; } for (i = 0; i < len; i++) data[i] = be16_to_cpu(chip->xfer_buf[i + 1]); return 0; } static int ad714x_spi_write(struct ad714x_chip *chip, unsigned short reg, unsigned short data) { struct spi_device *spi = to_spi_device(chip->dev); int error; chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX | reg); chip->xfer_buf[1] = cpu_to_be16(data); error = spi_write(spi, (u8 *)chip->xfer_buf, 2 * sizeof(*chip->xfer_buf)); if (unlikely(error)) { dev_err(chip->dev, "SPI write error: %d\n", error); return error; } return 0; } 
/*
 * Bind the driver to an AD714x on the SPI bus.
 *
 * Configures 8-bit words, then delegates all chip setup to the
 * bus-independent ad714x core, passing in the SPI register accessors.
 * Returns 0 on success or a negative errno from spi_setup()/ad714x_probe().
 */
static int ad714x_spi_probe(struct spi_device *spi)
{
	struct ad714x_chip *chip;
	int err;

	/* The command/data stream is byte-oriented on the wire. */
	spi->bits_per_word = 8;
	err = spi_setup(spi);
	if (err < 0)
		return err;

	chip = ad714x_probe(&spi->dev, BUS_SPI, spi->irq,
			    ad714x_spi_read, ad714x_spi_write);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	spi_set_drvdata(spi, chip);

	return 0;
}

/* Unbind: tear down everything ad714x_probe() created. */
static int ad714x_spi_remove(struct spi_device *spi)
{
	struct ad714x_chip *chip = spi_get_drvdata(spi);

	ad714x_remove(chip);

	return 0;
}

static struct spi_driver ad714x_spi_driver = {
	.driver = {
		.name = "ad714x_captouch",
		.owner = THIS_MODULE,
		.pm = &ad714x_spi_pm,	/* suspend/resume hooks above */
	},
	.probe = ad714x_spi_probe,
	.remove = ad714x_spi_remove,
};

module_spi_driver(ad714x_spi_driver);

MODULE_DESCRIPTION("Analog Devices AD714X Capacitance Touch Sensor SPI Bus Driver");
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_LICENSE("GPL");
gpl-2.0
binkybear/android_kernel_asus_grouper
sound/usb/mixer_quirks.c
1867
18493
/* * USB Audio Driver for ALSA * * Quirks and vendor-specific extensions for mixer interfaces * * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de> * * Many codes borrowed from audio.c by * Alan Cox (alan@lxorguk.ukuu.org.uk) * Thomas Sailer (sailer@ife.ee.ethz.ch) * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/audio.h> #include <sound/core.h> #include <sound/control.h> #include <sound/hwdep.h> #include <sound/info.h> #include "usbaudio.h" #include "mixer.h" #include "mixer_quirks.h" #include "helper.h" extern struct snd_kcontrol_new *snd_usb_feature_unit_ctl; /* * Sound Blaster remote control configuration * * format of remote control data: * Extigy: xx 00 * Audigy 2 NX: 06 80 xx 00 00 00 * Live! 24-bit: 06 80 xx yy 22 83 */ static const struct rc_config { u32 usb_id; u8 offset; u8 length; u8 packet_length; u8 min_packet_length; /* minimum accepted length of the URB result */ u8 mute_mixer_id; u32 mute_code; } rc_configs[] = { { USB_ID(0x041e, 0x3000), 0, 1, 2, 1, 18, 0x0013 }, /* Extigy */ { USB_ID(0x041e, 0x3020), 2, 1, 6, 6, 18, 0x0013 }, /* Audigy 2 NX */ { USB_ID(0x041e, 0x3040), 2, 2, 6, 6, 2, 0x6e91 }, /* Live! 
24-bit */ { USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 */ { USB_ID(0x041e, 0x30df), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ { USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */ }; static void snd_usb_soundblaster_remote_complete(struct urb *urb) { struct usb_mixer_interface *mixer = urb->context; const struct rc_config *rc = mixer->rc_cfg; u32 code; if (urb->status < 0 || urb->actual_length < rc->min_packet_length) return; code = mixer->rc_buffer[rc->offset]; if (rc->length == 2) code |= mixer->rc_buffer[rc->offset + 1] << 8; /* the Mute button actually changes the mixer control */ if (code == rc->mute_code) snd_usb_mixer_notify_id(mixer, rc->mute_mixer_id); mixer->rc_code = code; wmb(); wake_up(&mixer->rc_waitq); } static long snd_usb_sbrc_hwdep_read(struct snd_hwdep *hw, char __user *buf, long count, loff_t *offset) { struct usb_mixer_interface *mixer = hw->private_data; int err; u32 rc_code; if (count != 1 && count != 4) return -EINVAL; err = wait_event_interruptible(mixer->rc_waitq, (rc_code = xchg(&mixer->rc_code, 0)) != 0); if (err == 0) { if (count == 1) err = put_user(rc_code, buf); else err = put_user(rc_code, (u32 __user *)buf); } return err < 0 ? err : count; } static unsigned int snd_usb_sbrc_hwdep_poll(struct snd_hwdep *hw, struct file *file, poll_table *wait) { struct usb_mixer_interface *mixer = hw->private_data; poll_wait(file, &mixer->rc_waitq, wait); return mixer->rc_code ? 
POLLIN | POLLRDNORM : 0; } static int snd_usb_soundblaster_remote_init(struct usb_mixer_interface *mixer) { struct snd_hwdep *hwdep; int err, len, i; for (i = 0; i < ARRAY_SIZE(rc_configs); ++i) if (rc_configs[i].usb_id == mixer->chip->usb_id) break; if (i >= ARRAY_SIZE(rc_configs)) return 0; mixer->rc_cfg = &rc_configs[i]; len = mixer->rc_cfg->packet_length; init_waitqueue_head(&mixer->rc_waitq); err = snd_hwdep_new(mixer->chip->card, "SB remote control", 0, &hwdep); if (err < 0) return err; snprintf(hwdep->name, sizeof(hwdep->name), "%s remote control", mixer->chip->card->shortname); hwdep->iface = SNDRV_HWDEP_IFACE_SB_RC; hwdep->private_data = mixer; hwdep->ops.read = snd_usb_sbrc_hwdep_read; hwdep->ops.poll = snd_usb_sbrc_hwdep_poll; hwdep->exclusive = 1; mixer->rc_urb = usb_alloc_urb(0, GFP_KERNEL); if (!mixer->rc_urb) return -ENOMEM; mixer->rc_setup_packet = kmalloc(sizeof(*mixer->rc_setup_packet), GFP_KERNEL); if (!mixer->rc_setup_packet) { usb_free_urb(mixer->rc_urb); mixer->rc_urb = NULL; return -ENOMEM; } mixer->rc_setup_packet->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; mixer->rc_setup_packet->bRequest = UAC_GET_MEM; mixer->rc_setup_packet->wValue = cpu_to_le16(0); mixer->rc_setup_packet->wIndex = cpu_to_le16(0); mixer->rc_setup_packet->wLength = cpu_to_le16(len); usb_fill_control_urb(mixer->rc_urb, mixer->chip->dev, usb_rcvctrlpipe(mixer->chip->dev, 0), (u8*)mixer->rc_setup_packet, mixer->rc_buffer, len, snd_usb_soundblaster_remote_complete, mixer); return 0; } #define snd_audigy2nx_led_info snd_ctl_boolean_mono_info static int snd_audigy2nx_led_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol); int index = kcontrol->private_value; ucontrol->value.integer.value[0] = mixer->audigy2nx_leds[index]; return 0; } static int snd_audigy2nx_led_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_interface *mixer = 
snd_kcontrol_chip(kcontrol); int index = kcontrol->private_value; int value = ucontrol->value.integer.value[0]; int err, changed; if (value > 1) return -EINVAL; changed = value != mixer->audigy2nx_leds[index]; if (mixer->chip->usb_id == USB_ID(0x041e, 0x3042)) err = snd_usb_ctl_msg(mixer->chip->dev, usb_sndctrlpipe(mixer->chip->dev, 0), 0x24, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, !value, 0, NULL, 0, 100); /* USB X-Fi S51 Pro */ if (mixer->chip->usb_id == USB_ID(0x041e, 0x30df)) err = snd_usb_ctl_msg(mixer->chip->dev, usb_sndctrlpipe(mixer->chip->dev, 0), 0x24, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, !value, 0, NULL, 0, 100); else err = snd_usb_ctl_msg(mixer->chip->dev, usb_sndctrlpipe(mixer->chip->dev, 0), 0x24, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, value, index + 2, NULL, 0, 100); if (err < 0) return err; mixer->audigy2nx_leds[index] = value; return changed; } static struct snd_kcontrol_new snd_audigy2nx_controls[] = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "CMSS LED Switch", .info = snd_audigy2nx_led_info, .get = snd_audigy2nx_led_get, .put = snd_audigy2nx_led_put, .private_value = 0, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Power LED Switch", .info = snd_audigy2nx_led_info, .get = snd_audigy2nx_led_get, .put = snd_audigy2nx_led_put, .private_value = 1, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Dolby Digital LED Switch", .info = snd_audigy2nx_led_info, .get = snd_audigy2nx_led_get, .put = snd_audigy2nx_led_put, .private_value = 2, }, }; static int snd_audigy2nx_controls_create(struct usb_mixer_interface *mixer) { int i, err; for (i = 0; i < ARRAY_SIZE(snd_audigy2nx_controls); ++i) { /* USB X-Fi S51 doesn't have a CMSS LED */ if ((mixer->chip->usb_id == USB_ID(0x041e, 0x3042)) && i == 0) continue; /* USB X-Fi S51 Pro doesn't have one either */ if ((mixer->chip->usb_id == USB_ID(0x041e, 0x30df)) && i == 0) continue; if (i > 1 && /* Live24ext has 2 LEDs only */ (mixer->chip->usb_id == USB_ID(0x041e, 0x3040) 
|| mixer->chip->usb_id == USB_ID(0x041e, 0x3042) || mixer->chip->usb_id == USB_ID(0x041e, 0x30df) || mixer->chip->usb_id == USB_ID(0x041e, 0x3048))) break; err = snd_ctl_add(mixer->chip->card, snd_ctl_new1(&snd_audigy2nx_controls[i], mixer)); if (err < 0) return err; } mixer->audigy2nx_leds[1] = 1; /* Power LED is on by default */ return 0; } static void snd_audigy2nx_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { static const struct sb_jack { int unitid; const char *name; } jacks_audigy2nx[] = { {4, "dig in "}, {7, "line in"}, {19, "spk out"}, {20, "hph out"}, {-1, NULL} }, jacks_live24ext[] = { {4, "line in"}, /* &1=Line, &2=Mic*/ {3, "hph out"}, /* headphones */ {0, "RC "}, /* last command, 6 bytes see rc_config above */ {-1, NULL} }; const struct sb_jack *jacks; struct usb_mixer_interface *mixer = entry->private_data; int i, err; u8 buf[3]; snd_iprintf(buffer, "%s jacks\n\n", mixer->chip->card->shortname); if (mixer->chip->usb_id == USB_ID(0x041e, 0x3020)) jacks = jacks_audigy2nx; else if (mixer->chip->usb_id == USB_ID(0x041e, 0x3040) || mixer->chip->usb_id == USB_ID(0x041e, 0x3048)) jacks = jacks_live24ext; else return; for (i = 0; jacks[i].name; ++i) { snd_iprintf(buffer, "%s: ", jacks[i].name); err = snd_usb_ctl_msg(mixer->chip->dev, usb_rcvctrlpipe(mixer->chip->dev, 0), UAC_GET_MEM, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, jacks[i].unitid << 8, buf, 3, 100); if (err == 3 && (buf[0] == 3 || buf[0] == 6)) snd_iprintf(buffer, "%02x %02x\n", buf[1], buf[2]); else snd_iprintf(buffer, "?\n"); } } static int snd_xonar_u1_switch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = !!(mixer->xonar_u1_status & 0x02); return 0; } static int snd_xonar_u1_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol); u8 
old_status, new_status; int err, changed; old_status = mixer->xonar_u1_status; if (ucontrol->value.integer.value[0]) new_status = old_status | 0x02; else new_status = old_status & ~0x02; changed = new_status != old_status; err = snd_usb_ctl_msg(mixer->chip->dev, usb_sndctrlpipe(mixer->chip->dev, 0), 0x08, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, 50, 0, &new_status, 1, 100); if (err < 0) return err; mixer->xonar_u1_status = new_status; return changed; } static struct snd_kcontrol_new snd_xonar_u1_output_switch = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Digital Playback Switch", .info = snd_ctl_boolean_mono_info, .get = snd_xonar_u1_switch_get, .put = snd_xonar_u1_switch_put, }; static int snd_xonar_u1_controls_create(struct usb_mixer_interface *mixer) { int err; err = snd_ctl_add(mixer->chip->card, snd_ctl_new1(&snd_xonar_u1_output_switch, mixer)); if (err < 0) return err; mixer->xonar_u1_status = 0x05; return 0; } /* Native Instruments device quirks */ #define _MAKE_NI_CONTROL(bRequest,wIndex) ((bRequest) << 16 | (wIndex)) static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol); struct usb_device *dev = mixer->chip->dev; u8 bRequest = (kcontrol->private_value >> 16) & 0xff; u16 wIndex = kcontrol->private_value & 0xffff; u8 tmp; int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0, cpu_to_le16(wIndex), &tmp, sizeof(tmp), 1000); if (ret < 0) { snd_printk(KERN_ERR "unable to issue vendor read request (ret = %d)", ret); return ret; } ucontrol->value.integer.value[0] = tmp; return 0; } static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol); struct usb_device *dev = mixer->chip->dev; u8 bRequest = (kcontrol->private_value >> 16) & 0xff; u16 wIndex = 
kcontrol->private_value & 0xffff; u16 wValue = ucontrol->value.integer.value[0]; int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, cpu_to_le16(wValue), cpu_to_le16(wIndex), NULL, 0, 1000); if (ret < 0) { snd_printk(KERN_ERR "unable to issue vendor write request (ret = %d)", ret); return ret; } return 0; } static struct snd_kcontrol_new snd_nativeinstruments_ta6_mixers[] = { { .name = "Direct Thru Channel A", .private_value = _MAKE_NI_CONTROL(0x01, 0x03), }, { .name = "Direct Thru Channel B", .private_value = _MAKE_NI_CONTROL(0x01, 0x05), }, { .name = "Phono Input Channel A", .private_value = _MAKE_NI_CONTROL(0x02, 0x03), }, { .name = "Phono Input Channel B", .private_value = _MAKE_NI_CONTROL(0x02, 0x05), }, }; static struct snd_kcontrol_new snd_nativeinstruments_ta10_mixers[] = { { .name = "Direct Thru Channel A", .private_value = _MAKE_NI_CONTROL(0x01, 0x03), }, { .name = "Direct Thru Channel B", .private_value = _MAKE_NI_CONTROL(0x01, 0x05), }, { .name = "Direct Thru Channel C", .private_value = _MAKE_NI_CONTROL(0x01, 0x07), }, { .name = "Direct Thru Channel D", .private_value = _MAKE_NI_CONTROL(0x01, 0x09), }, { .name = "Phono Input Channel A", .private_value = _MAKE_NI_CONTROL(0x02, 0x03), }, { .name = "Phono Input Channel B", .private_value = _MAKE_NI_CONTROL(0x02, 0x05), }, { .name = "Phono Input Channel C", .private_value = _MAKE_NI_CONTROL(0x02, 0x07), }, { .name = "Phono Input Channel D", .private_value = _MAKE_NI_CONTROL(0x02, 0x09), }, }; static int snd_nativeinstruments_create_mixer(struct usb_mixer_interface *mixer, const struct snd_kcontrol_new *kc, unsigned int count) { int i, err = 0; struct snd_kcontrol_new template = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .get = snd_nativeinstruments_control_get, .put = snd_nativeinstruments_control_put, .info = snd_ctl_boolean_mono_info, }; for (i = 0; i < count; i++) { struct snd_kcontrol *c; 
template.name = kc[i].name; template.private_value = kc[i].private_value; c = snd_ctl_new1(&template, mixer); err = snd_ctl_add(mixer->chip->card, c); if (err < 0) break; } return err; } /* M-Audio FastTrack Ultra quirks */ /* private_free callback */ static void usb_mixer_elem_free(struct snd_kcontrol *kctl) { kfree(kctl->private_data); kctl->private_data = NULL; } static int snd_maudio_ftu_create_ctl(struct usb_mixer_interface *mixer, int in, int out, const char *name) { struct usb_mixer_elem_info *cval; struct snd_kcontrol *kctl; cval = kzalloc(sizeof(*cval), GFP_KERNEL); if (!cval) return -ENOMEM; cval->id = 5; cval->mixer = mixer; cval->val_type = USB_MIXER_S16; cval->channels = 1; cval->control = out + 1; cval->cmask = 1 << in; kctl = snd_ctl_new1(snd_usb_feature_unit_ctl, cval); if (!kctl) { kfree(cval); return -ENOMEM; } snprintf(kctl->id.name, sizeof(kctl->id.name), name); kctl->private_free = usb_mixer_elem_free; return snd_usb_mixer_add_control(mixer, kctl); } static int snd_maudio_ftu_create_mixer(struct usb_mixer_interface *mixer) { char name[64]; int in, out, err; for (out = 0; out < 8; out++) { for (in = 0; in < 8; in++) { snprintf(name, sizeof(name), "AIn%d - Out%d Capture Volume", in + 1, out + 1); err = snd_maudio_ftu_create_ctl(mixer, in, out, name); if (err < 0) return err; } for (in = 8; in < 16; in++) { snprintf(name, sizeof(name), "DIn%d - Out%d Playback Volume", in - 7, out + 1); err = snd_maudio_ftu_create_ctl(mixer, in, out, name); if (err < 0) return err; } } return 0; } void snd_emuusb_set_samplerate(struct snd_usb_audio *chip, unsigned char samplerate_id) { struct usb_mixer_interface *mixer; struct usb_mixer_elem_info *cval; int unitid = 12; /* SamleRate ExtensionUnit ID */ list_for_each_entry(mixer, &chip->mixer_list, list) { cval = mixer->id_elems[unitid]; if (cval) { snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, cval->control << 8, samplerate_id); snd_usb_mixer_notify_id(mixer, unitid); } break; } } int 
snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer) { int err = 0; struct snd_info_entry *entry; if ((err = snd_usb_soundblaster_remote_init(mixer)) < 0) return err; switch (mixer->chip->usb_id) { case USB_ID(0x041e, 0x3020): case USB_ID(0x041e, 0x3040): case USB_ID(0x041e, 0x3042): case USB_ID(0x041e, 0x30df): case USB_ID(0x041e, 0x3048): err = snd_audigy2nx_controls_create(mixer); if (err < 0) break; if (!snd_card_proc_new(mixer->chip->card, "audigy2nx", &entry)) snd_info_set_text_ops(entry, mixer, snd_audigy2nx_proc_read); break; case USB_ID(0x0763, 0x2080): /* M-Audio Fast Track Ultra */ case USB_ID(0x0763, 0x2081): /* M-Audio Fast Track Ultra 8R */ err = snd_maudio_ftu_create_mixer(mixer); break; case USB_ID(0x0b05, 0x1739): case USB_ID(0x0b05, 0x1743): err = snd_xonar_u1_controls_create(mixer); break; case USB_ID(0x17cc, 0x1011): /* Traktor Audio 6 */ err = snd_nativeinstruments_create_mixer(mixer, snd_nativeinstruments_ta6_mixers, ARRAY_SIZE(snd_nativeinstruments_ta6_mixers)); break; case USB_ID(0x17cc, 0x1021): /* Traktor Audio 10 */ err = snd_nativeinstruments_create_mixer(mixer, snd_nativeinstruments_ta10_mixers, ARRAY_SIZE(snd_nativeinstruments_ta10_mixers)); break; } return err; } void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer, int unitid) { if (!mixer->rc_cfg) return; /* unit ids specific to Extigy/Audigy 2 NX: */ switch (unitid) { case 0: /* remote control */ mixer->rc_urb->dev = mixer->chip->dev; usb_submit_urb(mixer->rc_urb, GFP_ATOMIC); break; case 4: /* digital in jack */ case 7: /* line in jacks */ case 19: /* speaker out jacks */ case 20: /* headphones out jack */ break; /* live24ext: 4 = line-in jack */ case 3: /* hp-out jack (may actuate Mute) */ if (mixer->chip->usb_id == USB_ID(0x041e, 0x3040) || mixer->chip->usb_id == USB_ID(0x041e, 0x3048)) snd_usb_mixer_notify_id(mixer, mixer->rc_cfg->mute_mixer_id); break; default: snd_printd(KERN_DEBUG "memory change in unknown unit %d\n", unitid); break; } }
gpl-2.0
turtlepa/android_kernel_samsung_aries-galaxys4gmtd
drivers/staging/usbip/userspace/libsrc/vhci_driver.c
2379
10555
/* * Copyright (C) 2005-2007 Takahiro Hirofuchi */ #include "usbip.h" static const char vhci_driver_name[] = "vhci_hcd"; struct usbip_vhci_driver *vhci_driver; static struct usbip_imported_device *imported_device_init(struct usbip_imported_device *idev, char *busid) { struct sysfs_device *sudev; sudev = sysfs_open_device("usb", busid); if (!sudev) { err("sysfs_open_device %s", busid); goto err; } read_usb_device(sudev, &idev->udev); sysfs_close_device(sudev); /* add class devices of this imported device */ struct class_device *cdev; dlist_for_each_data(vhci_driver->cdev_list, cdev, struct class_device) { if (!strncmp(cdev->devpath, idev->udev.path, strlen(idev->udev.path))) { struct class_device *new_cdev; /* alloc and copy because dlist is linked from only one list */ new_cdev = calloc(1, sizeof(*new_cdev)); if (!new_cdev) goto err; memcpy(new_cdev, cdev, sizeof(*new_cdev)); dlist_unshift(idev->cdev_list, (void*) new_cdev); } } return idev; err: return NULL; } static int parse_status(char *value) { int ret = 0; char *c; for (int i = 0; i < vhci_driver->nports; i++) bzero(&vhci_driver->idev[i], sizeof(struct usbip_imported_device)); /* skip a header line */ c = strchr(value, '\n') + 1; while (*c != '\0') { int port, status, speed, devid; unsigned long socket; char lbusid[SYSFS_BUS_ID_SIZE]; ret = sscanf(c, "%d %d %d %x %lx %s\n", &port, &status, &speed, &devid, &socket, lbusid); if (ret < 5) { err("scanf %d", ret); BUG(); } dbg("port %d status %d speed %d devid %x", port, status, speed, devid); dbg("socket %lx lbusid %s", socket, lbusid); /* if a device is connected, look at it */ { struct usbip_imported_device *idev = &vhci_driver->idev[port]; idev->port = port; idev->status = status; idev->devid = devid; idev->busnum = (devid >> 16); idev->devnum = (devid & 0x0000ffff); idev->cdev_list = dlist_new(sizeof(struct class_device)); if (!idev->cdev_list) { err("init new device"); return -1; } if (idev->status != VDEV_ST_NULL && idev->status != VDEV_ST_NOTASSIGNED) { 
idev = imported_device_init(idev, lbusid); if (!idev) { err("init new device"); return -1; } } } /* go to the next line */ c = strchr(c, '\n') + 1; } dbg("exit"); return 0; } static int check_usbip_device(struct sysfs_class_device *cdev) { char clspath[SYSFS_PATH_MAX]; /* /sys/class/video4linux/video0/device */ char devpath[SYSFS_PATH_MAX]; /* /sys/devices/platform/vhci_hcd/usb6/6-1:1.1 */ int ret; snprintf(clspath, sizeof(clspath), "%s/device", cdev->path); ret = sysfs_get_link(clspath, devpath, SYSFS_PATH_MAX); if (!ret) { if (!strncmp(devpath, vhci_driver->hc_device->path, strlen(vhci_driver->hc_device->path))) { /* found usbip device */ struct class_device *cdev; cdev = calloc(1, sizeof(*cdev)); if (!cdev) { err("calloc cdev"); return -1; } dlist_unshift(vhci_driver->cdev_list, (void*) cdev); strncpy(cdev->clspath, clspath, sizeof(cdev->clspath)); strncpy(cdev->devpath, devpath, sizeof(cdev->clspath)); dbg(" found %s %s", clspath, devpath); } } return 0; } static int search_class_for_usbip_device(char *cname) { struct sysfs_class *class; struct dlist *cdev_list; struct sysfs_class_device *cdev; int ret = 0; class = sysfs_open_class(cname); if (!class) { err("open class"); return -1; } dbg("class %s", class->name); cdev_list = sysfs_get_class_devices(class); if (!cdev_list) /* nothing */ goto out; dlist_for_each_data(cdev_list, cdev, struct sysfs_class_device) { dbg(" cdev %s", cdev->name); ret = check_usbip_device(cdev); if (ret < 0) goto out; } out: sysfs_close_class(class); return ret; } static int refresh_class_device_list(void) { int ret; struct dlist *cname_list; char *cname; /* search under /sys/class */ cname_list = sysfs_open_directory_list("/sys/class"); if (!cname_list) { err("open class directory"); return -1; } dlist_for_each_data(cname_list, cname, char) { ret = search_class_for_usbip_device(cname); if (ret < 0) { sysfs_close_list(cname_list); return -1; } } sysfs_close_list(cname_list); /* seach under /sys/block */ ret = 
search_class_for_usbip_device(SYSFS_BLOCK_NAME); if (ret < 0) return -1; return 0; } static int refresh_imported_device_list(void) { struct sysfs_attribute *attr_status; attr_status = sysfs_get_device_attr(vhci_driver->hc_device, "status"); if (!attr_status) { err("get attr %s of %s", "status", vhci_driver->hc_device->name); return -1; } dbg("name %s, path %s, len %d, method %d\n", attr_status->name, attr_status->path, attr_status->len, attr_status->method); dbg("%s", attr_status->value); return parse_status(attr_status->value); } static int get_nports(void) { int nports = 0; struct sysfs_attribute *attr_status; attr_status = sysfs_get_device_attr(vhci_driver->hc_device, "status"); if (!attr_status) { err("get attr %s of %s", "status", vhci_driver->hc_device->name); return -1; } dbg("name %s, path %s, len %d, method %d\n", attr_status->name, attr_status->path, attr_status->len, attr_status->method); dbg("%s", attr_status->value); { char *c; /* skip a header line */ c = strchr(attr_status->value, '\n') + 1; while (*c != '\0') { /* go to the next line */ c = strchr(c, '\n') + 1; nports += 1; } } return nports; } static int get_hc_busid(char *sysfs_mntpath, char *hc_busid) { struct sysfs_driver *sdriver; char sdriver_path[SYSFS_PATH_MAX]; struct sysfs_device *hc_dev; struct dlist *hc_devs; int found = 0; snprintf(sdriver_path, SYSFS_PATH_MAX, "%s/%s/platform/%s/%s", sysfs_mntpath, SYSFS_BUS_NAME, SYSFS_DRIVERS_NAME, vhci_driver_name); sdriver = sysfs_open_driver_path(sdriver_path); if (!sdriver) { info("%s is not found", sdriver_path); info("load usbip-core.ko and vhci-hcd.ko !"); return -1; } hc_devs = sysfs_get_driver_devices(sdriver); if (!hc_devs) { err("get hc list"); goto err; } /* assume only one vhci_hcd */ dlist_for_each_data(hc_devs, hc_dev, struct sysfs_device) { strncpy(hc_busid, hc_dev->bus_id, SYSFS_BUS_ID_SIZE); found = 1; } err: sysfs_close_driver(sdriver); if (found) return 0; err("not found usbip hc"); return -1; } /* 
---------------------------------------------------------------------- */ int usbip_vhci_driver_open(void) { int ret; char hc_busid[SYSFS_BUS_ID_SIZE]; vhci_driver = (struct usbip_vhci_driver *) calloc(1, sizeof(*vhci_driver)); if (!vhci_driver) { err("alloc vhci_driver"); return -1; } ret = sysfs_get_mnt_path(vhci_driver->sysfs_mntpath, SYSFS_PATH_MAX); if (ret < 0) { err("sysfs must be mounted"); goto err; } ret = get_hc_busid(vhci_driver->sysfs_mntpath, hc_busid); if (ret < 0) goto err; /* will be freed in usbip_driver_close() */ vhci_driver->hc_device = sysfs_open_device("platform", hc_busid); if (!vhci_driver->hc_device) { err("get sysfs vhci_driver"); goto err; } vhci_driver->nports = get_nports(); info("%d ports available\n", vhci_driver->nports); vhci_driver->cdev_list = dlist_new(sizeof(struct class_device)); if (!vhci_driver->cdev_list) goto err; if (refresh_class_device_list()) goto err; if (refresh_imported_device_list()) goto err; return 0; err: if (vhci_driver->cdev_list) dlist_destroy(vhci_driver->cdev_list); if (vhci_driver->hc_device) sysfs_close_device(vhci_driver->hc_device); if (vhci_driver) free(vhci_driver); vhci_driver = NULL; return -1; } void usbip_vhci_driver_close() { if (!vhci_driver) return; if (vhci_driver->cdev_list) dlist_destroy(vhci_driver->cdev_list); for (int i = 0; i < vhci_driver->nports; i++) { if (vhci_driver->idev[i].cdev_list) dlist_destroy(vhci_driver->idev[i].cdev_list); } if (vhci_driver->hc_device) sysfs_close_device(vhci_driver->hc_device); free(vhci_driver); vhci_driver = NULL; } int usbip_vhci_refresh_device_list(void) { if (vhci_driver->cdev_list) dlist_destroy(vhci_driver->cdev_list); for (int i = 0; i < vhci_driver->nports; i++) { if (vhci_driver->idev[i].cdev_list) dlist_destroy(vhci_driver->idev[i].cdev_list); } vhci_driver->cdev_list = dlist_new(sizeof(struct class_device)); if (!vhci_driver->cdev_list) goto err; if (refresh_class_device_list()) goto err; if (refresh_imported_device_list()) goto err; return 0; 
err: if (vhci_driver->cdev_list) dlist_destroy(vhci_driver->cdev_list); for (int i = 0; i < vhci_driver->nports; i++) { if (vhci_driver->idev[i].cdev_list) dlist_destroy(vhci_driver->idev[i].cdev_list); } err("refresh device list"); return -1; } int usbip_vhci_get_free_port(void) { for (int i = 0; i < vhci_driver->nports; i++) { if (vhci_driver->idev[i].status == VDEV_ST_NULL) return i; } return -1; } int usbip_vhci_attach_device2(uint8_t port, int sockfd, uint32_t devid, uint32_t speed) { struct sysfs_attribute *attr_attach; char buff[200]; /* what size should be ? */ int ret; attr_attach = sysfs_get_device_attr(vhci_driver->hc_device, "attach"); if (!attr_attach) { err("get attach"); return -1; } snprintf(buff, sizeof(buff), "%u %u %u %u", port, sockfd, devid, speed); dbg("writing: %s", buff); ret = sysfs_write_attribute(attr_attach, buff, strlen(buff)); if (ret < 0) { err("write to attach failed"); return -1; } info("port %d attached", port); return 0; } static unsigned long get_devid(uint8_t busnum, uint8_t devnum) { return (busnum << 16) | devnum; } /* will be removed */ int usbip_vhci_attach_device(uint8_t port, int sockfd, uint8_t busnum, uint8_t devnum, uint32_t speed) { int devid = get_devid(busnum, devnum); return usbip_vhci_attach_device2(port, sockfd, devid, speed); } int usbip_vhci_detach_device(uint8_t port) { struct sysfs_attribute *attr_detach; char buff[200]; /* what size should be ? */ int ret; attr_detach = sysfs_get_device_attr(vhci_driver->hc_device, "detach"); if (!attr_detach) { err("get detach"); return -1; } snprintf(buff, sizeof(buff), "%u", port); dbg("writing to detach"); dbg("writing: %s", buff); ret = sysfs_write_attribute(attr_detach, buff, strlen(buff)); if (ret < 0) { err("write to detach failed"); return -1; } info("port %d detached", port); return 0; }
gpl-2.0
Split-Screen/android_kernel_motorola_omap4-common
drivers/staging/usbip/userspace/libsrc/vhci_driver.c
2379
10555
/* * Copyright (C) 2005-2007 Takahiro Hirofuchi */ #include "usbip.h" static const char vhci_driver_name[] = "vhci_hcd"; struct usbip_vhci_driver *vhci_driver; static struct usbip_imported_device *imported_device_init(struct usbip_imported_device *idev, char *busid) { struct sysfs_device *sudev; sudev = sysfs_open_device("usb", busid); if (!sudev) { err("sysfs_open_device %s", busid); goto err; } read_usb_device(sudev, &idev->udev); sysfs_close_device(sudev); /* add class devices of this imported device */ struct class_device *cdev; dlist_for_each_data(vhci_driver->cdev_list, cdev, struct class_device) { if (!strncmp(cdev->devpath, idev->udev.path, strlen(idev->udev.path))) { struct class_device *new_cdev; /* alloc and copy because dlist is linked from only one list */ new_cdev = calloc(1, sizeof(*new_cdev)); if (!new_cdev) goto err; memcpy(new_cdev, cdev, sizeof(*new_cdev)); dlist_unshift(idev->cdev_list, (void*) new_cdev); } } return idev; err: return NULL; } static int parse_status(char *value) { int ret = 0; char *c; for (int i = 0; i < vhci_driver->nports; i++) bzero(&vhci_driver->idev[i], sizeof(struct usbip_imported_device)); /* skip a header line */ c = strchr(value, '\n') + 1; while (*c != '\0') { int port, status, speed, devid; unsigned long socket; char lbusid[SYSFS_BUS_ID_SIZE]; ret = sscanf(c, "%d %d %d %x %lx %s\n", &port, &status, &speed, &devid, &socket, lbusid); if (ret < 5) { err("scanf %d", ret); BUG(); } dbg("port %d status %d speed %d devid %x", port, status, speed, devid); dbg("socket %lx lbusid %s", socket, lbusid); /* if a device is connected, look at it */ { struct usbip_imported_device *idev = &vhci_driver->idev[port]; idev->port = port; idev->status = status; idev->devid = devid; idev->busnum = (devid >> 16); idev->devnum = (devid & 0x0000ffff); idev->cdev_list = dlist_new(sizeof(struct class_device)); if (!idev->cdev_list) { err("init new device"); return -1; } if (idev->status != VDEV_ST_NULL && idev->status != VDEV_ST_NOTASSIGNED) { 
idev = imported_device_init(idev, lbusid); if (!idev) { err("init new device"); return -1; } } } /* go to the next line */ c = strchr(c, '\n') + 1; } dbg("exit"); return 0; } static int check_usbip_device(struct sysfs_class_device *cdev) { char clspath[SYSFS_PATH_MAX]; /* /sys/class/video4linux/video0/device */ char devpath[SYSFS_PATH_MAX]; /* /sys/devices/platform/vhci_hcd/usb6/6-1:1.1 */ int ret; snprintf(clspath, sizeof(clspath), "%s/device", cdev->path); ret = sysfs_get_link(clspath, devpath, SYSFS_PATH_MAX); if (!ret) { if (!strncmp(devpath, vhci_driver->hc_device->path, strlen(vhci_driver->hc_device->path))) { /* found usbip device */ struct class_device *cdev; cdev = calloc(1, sizeof(*cdev)); if (!cdev) { err("calloc cdev"); return -1; } dlist_unshift(vhci_driver->cdev_list, (void*) cdev); strncpy(cdev->clspath, clspath, sizeof(cdev->clspath)); strncpy(cdev->devpath, devpath, sizeof(cdev->clspath)); dbg(" found %s %s", clspath, devpath); } } return 0; } static int search_class_for_usbip_device(char *cname) { struct sysfs_class *class; struct dlist *cdev_list; struct sysfs_class_device *cdev; int ret = 0; class = sysfs_open_class(cname); if (!class) { err("open class"); return -1; } dbg("class %s", class->name); cdev_list = sysfs_get_class_devices(class); if (!cdev_list) /* nothing */ goto out; dlist_for_each_data(cdev_list, cdev, struct sysfs_class_device) { dbg(" cdev %s", cdev->name); ret = check_usbip_device(cdev); if (ret < 0) goto out; } out: sysfs_close_class(class); return ret; } static int refresh_class_device_list(void) { int ret; struct dlist *cname_list; char *cname; /* search under /sys/class */ cname_list = sysfs_open_directory_list("/sys/class"); if (!cname_list) { err("open class directory"); return -1; } dlist_for_each_data(cname_list, cname, char) { ret = search_class_for_usbip_device(cname); if (ret < 0) { sysfs_close_list(cname_list); return -1; } } sysfs_close_list(cname_list); /* seach under /sys/block */ ret = 
search_class_for_usbip_device(SYSFS_BLOCK_NAME); if (ret < 0) return -1; return 0; } static int refresh_imported_device_list(void) { struct sysfs_attribute *attr_status; attr_status = sysfs_get_device_attr(vhci_driver->hc_device, "status"); if (!attr_status) { err("get attr %s of %s", "status", vhci_driver->hc_device->name); return -1; } dbg("name %s, path %s, len %d, method %d\n", attr_status->name, attr_status->path, attr_status->len, attr_status->method); dbg("%s", attr_status->value); return parse_status(attr_status->value); } static int get_nports(void) { int nports = 0; struct sysfs_attribute *attr_status; attr_status = sysfs_get_device_attr(vhci_driver->hc_device, "status"); if (!attr_status) { err("get attr %s of %s", "status", vhci_driver->hc_device->name); return -1; } dbg("name %s, path %s, len %d, method %d\n", attr_status->name, attr_status->path, attr_status->len, attr_status->method); dbg("%s", attr_status->value); { char *c; /* skip a header line */ c = strchr(attr_status->value, '\n') + 1; while (*c != '\0') { /* go to the next line */ c = strchr(c, '\n') + 1; nports += 1; } } return nports; } static int get_hc_busid(char *sysfs_mntpath, char *hc_busid) { struct sysfs_driver *sdriver; char sdriver_path[SYSFS_PATH_MAX]; struct sysfs_device *hc_dev; struct dlist *hc_devs; int found = 0; snprintf(sdriver_path, SYSFS_PATH_MAX, "%s/%s/platform/%s/%s", sysfs_mntpath, SYSFS_BUS_NAME, SYSFS_DRIVERS_NAME, vhci_driver_name); sdriver = sysfs_open_driver_path(sdriver_path); if (!sdriver) { info("%s is not found", sdriver_path); info("load usbip-core.ko and vhci-hcd.ko !"); return -1; } hc_devs = sysfs_get_driver_devices(sdriver); if (!hc_devs) { err("get hc list"); goto err; } /* assume only one vhci_hcd */ dlist_for_each_data(hc_devs, hc_dev, struct sysfs_device) { strncpy(hc_busid, hc_dev->bus_id, SYSFS_BUS_ID_SIZE); found = 1; } err: sysfs_close_driver(sdriver); if (found) return 0; err("not found usbip hc"); return -1; } /* 
---------------------------------------------------------------------- */ int usbip_vhci_driver_open(void) { int ret; char hc_busid[SYSFS_BUS_ID_SIZE]; vhci_driver = (struct usbip_vhci_driver *) calloc(1, sizeof(*vhci_driver)); if (!vhci_driver) { err("alloc vhci_driver"); return -1; } ret = sysfs_get_mnt_path(vhci_driver->sysfs_mntpath, SYSFS_PATH_MAX); if (ret < 0) { err("sysfs must be mounted"); goto err; } ret = get_hc_busid(vhci_driver->sysfs_mntpath, hc_busid); if (ret < 0) goto err; /* will be freed in usbip_driver_close() */ vhci_driver->hc_device = sysfs_open_device("platform", hc_busid); if (!vhci_driver->hc_device) { err("get sysfs vhci_driver"); goto err; } vhci_driver->nports = get_nports(); info("%d ports available\n", vhci_driver->nports); vhci_driver->cdev_list = dlist_new(sizeof(struct class_device)); if (!vhci_driver->cdev_list) goto err; if (refresh_class_device_list()) goto err; if (refresh_imported_device_list()) goto err; return 0; err: if (vhci_driver->cdev_list) dlist_destroy(vhci_driver->cdev_list); if (vhci_driver->hc_device) sysfs_close_device(vhci_driver->hc_device); if (vhci_driver) free(vhci_driver); vhci_driver = NULL; return -1; } void usbip_vhci_driver_close() { if (!vhci_driver) return; if (vhci_driver->cdev_list) dlist_destroy(vhci_driver->cdev_list); for (int i = 0; i < vhci_driver->nports; i++) { if (vhci_driver->idev[i].cdev_list) dlist_destroy(vhci_driver->idev[i].cdev_list); } if (vhci_driver->hc_device) sysfs_close_device(vhci_driver->hc_device); free(vhci_driver); vhci_driver = NULL; } int usbip_vhci_refresh_device_list(void) { if (vhci_driver->cdev_list) dlist_destroy(vhci_driver->cdev_list); for (int i = 0; i < vhci_driver->nports; i++) { if (vhci_driver->idev[i].cdev_list) dlist_destroy(vhci_driver->idev[i].cdev_list); } vhci_driver->cdev_list = dlist_new(sizeof(struct class_device)); if (!vhci_driver->cdev_list) goto err; if (refresh_class_device_list()) goto err; if (refresh_imported_device_list()) goto err; return 0; 
err: if (vhci_driver->cdev_list) dlist_destroy(vhci_driver->cdev_list); for (int i = 0; i < vhci_driver->nports; i++) { if (vhci_driver->idev[i].cdev_list) dlist_destroy(vhci_driver->idev[i].cdev_list); } err("refresh device list"); return -1; } int usbip_vhci_get_free_port(void) { for (int i = 0; i < vhci_driver->nports; i++) { if (vhci_driver->idev[i].status == VDEV_ST_NULL) return i; } return -1; } int usbip_vhci_attach_device2(uint8_t port, int sockfd, uint32_t devid, uint32_t speed) { struct sysfs_attribute *attr_attach; char buff[200]; /* what size should be ? */ int ret; attr_attach = sysfs_get_device_attr(vhci_driver->hc_device, "attach"); if (!attr_attach) { err("get attach"); return -1; } snprintf(buff, sizeof(buff), "%u %u %u %u", port, sockfd, devid, speed); dbg("writing: %s", buff); ret = sysfs_write_attribute(attr_attach, buff, strlen(buff)); if (ret < 0) { err("write to attach failed"); return -1; } info("port %d attached", port); return 0; } static unsigned long get_devid(uint8_t busnum, uint8_t devnum) { return (busnum << 16) | devnum; } /* will be removed */ int usbip_vhci_attach_device(uint8_t port, int sockfd, uint8_t busnum, uint8_t devnum, uint32_t speed) { int devid = get_devid(busnum, devnum); return usbip_vhci_attach_device2(port, sockfd, devid, speed); } int usbip_vhci_detach_device(uint8_t port) { struct sysfs_attribute *attr_detach; char buff[200]; /* what size should be ? */ int ret; attr_detach = sysfs_get_device_attr(vhci_driver->hc_device, "detach"); if (!attr_detach) { err("get detach"); return -1; } snprintf(buff, sizeof(buff), "%u", port); dbg("writing to detach"); dbg("writing: %s", buff); ret = sysfs_write_attribute(attr_detach, buff, strlen(buff)); if (ret < 0) { err("write to detach failed"); return -1; } info("port %d detached", port); return 0; }
gpl-2.0
TeamGlide/android_kernel_htc_msm7x30
drivers/staging/octeon/ethernet-mdio.c
2379
4719
/********************************************************************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2007 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. 
* Contact Cavium Networks for more information **********************************************************************/ #include <linux/kernel.h> #include <linux/ethtool.h> #include <linux/phy.h> #include <net/dst.h> #include <asm/octeon/octeon.h> #include "ethernet-defines.h" #include "octeon-ethernet.h" #include "ethernet-mdio.h" #include "ethernet-util.h" #include "cvmx-helper-board.h" #include "cvmx-smix-defs.h" static void cvm_oct_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy(info->driver, "cavium-ethernet"); strcpy(info->version, OCTEON_ETHERNET_VERSION); strcpy(info->bus_info, "Builtin"); } static int cvm_oct_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct octeon_ethernet *priv = netdev_priv(dev); if (priv->phydev) return phy_ethtool_gset(priv->phydev, cmd); return -EINVAL; } static int cvm_oct_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct octeon_ethernet *priv = netdev_priv(dev); if (!capable(CAP_NET_ADMIN)) return -EPERM; if (priv->phydev) return phy_ethtool_sset(priv->phydev, cmd); return -EINVAL; } static int cvm_oct_nway_reset(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); if (!capable(CAP_NET_ADMIN)) return -EPERM; if (priv->phydev) return phy_start_aneg(priv->phydev); return -EINVAL; } const struct ethtool_ops cvm_oct_ethtool_ops = { .get_drvinfo = cvm_oct_get_drvinfo, .get_settings = cvm_oct_get_settings, .set_settings = cvm_oct_set_settings, .nway_reset = cvm_oct_nway_reset, .get_link = ethtool_op_get_link, }; /** * cvm_oct_ioctl - IOCTL support for PHY control * @dev: Device to change * @rq: the request * @cmd: the command * * Returns Zero on success */ int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct octeon_ethernet *priv = netdev_priv(dev); if (!netif_running(dev)) return -EINVAL; if (!priv->phydev) return -EINVAL; return phy_mii_ioctl(priv->phydev, rq, cmd); } static void cvm_oct_adjust_link(struct net_device *dev) 
{ struct octeon_ethernet *priv = netdev_priv(dev); cvmx_helper_link_info_t link_info; if (priv->last_link != priv->phydev->link) { priv->last_link = priv->phydev->link; link_info.u64 = 0; link_info.s.link_up = priv->last_link ? 1 : 0; link_info.s.full_duplex = priv->phydev->duplex ? 1 : 0; link_info.s.speed = priv->phydev->speed; cvmx_helper_link_set( priv->port, link_info); if (priv->last_link) { netif_carrier_on(dev); if (priv->queue != -1) DEBUGPRINT("%s: %u Mbps %s duplex, " "port %2d, queue %2d\n", dev->name, priv->phydev->speed, priv->phydev->duplex ? "Full" : "Half", priv->port, priv->queue); else DEBUGPRINT("%s: %u Mbps %s duplex, " "port %2d, POW\n", dev->name, priv->phydev->speed, priv->phydev->duplex ? "Full" : "Half", priv->port); } else { netif_carrier_off(dev); DEBUGPRINT("%s: Link down\n", dev->name); } } } /** * cvm_oct_phy_setup_device - setup the PHY * * @dev: Device to setup * * Returns Zero on success, negative on failure */ int cvm_oct_phy_setup_device(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); int phy_addr = cvmx_helper_board_get_mii_address(priv->port); if (phy_addr != -1) { char phy_id[20]; snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", phy_addr); priv->phydev = phy_connect(dev, phy_id, cvm_oct_adjust_link, 0, PHY_INTERFACE_MODE_GMII); if (IS_ERR(priv->phydev)) { priv->phydev = NULL; return -1; } priv->last_link = 0; phy_start_aneg(priv->phydev); } return 0; }
gpl-2.0
koxda/android_kernel_samsung_msm8660-common
drivers/scsi/bfa/bfad.c
2379
40438
/* * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * * Linux driver for Brocade Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ /* * bfad.c Linux driver PCI interface module. */ #include <linux/module.h> #include <linux/kthread.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/pci.h> #include <linux/firmware.h> #include <asm/uaccess.h> #include <asm/fcntl.h> #include "bfad_drv.h" #include "bfad_im.h" #include "bfa_fcs.h" #include "bfa_defs.h" #include "bfa.h" BFA_TRC_FILE(LDRV, BFAD); DEFINE_MUTEX(bfad_mutex); LIST_HEAD(bfad_list); static int bfad_inst; static int num_sgpgs_parm; int supported_fc4s; char *host_name, *os_name, *os_patch; int num_rports, num_ios, num_tms; int num_fcxps, num_ufbufs; int reqq_size, rspq_size, num_sgpgs; int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT; int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH; int bfa_io_max_sge = BFAD_IO_MAX_SGE; int bfa_log_level = 3; /* WARNING log level */ int ioc_auto_recover = BFA_TRUE; int bfa_linkup_delay = -1; int fdmi_enable = BFA_TRUE; int pcie_max_read_reqsz; int bfa_debugfs_enable = 1; int msix_disable_cb = 0, msix_disable_ct = 0; /* Firmware releated */ u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size; u32 *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc; #define BFAD_FW_FILE_CT_FC "ctfw_fc.bin" #define BFAD_FW_FILE_CT_CNA "ctfw_cna.bin" #define BFAD_FW_FILE_CB_FC "cbfw_fc.bin" static u32 *bfad_load_fwimg(struct pci_dev *pdev); static 
void bfad_free_fwimg(void); static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, u32 *bfi_image_size, char *fw_name); static const char *msix_name_ct[] = { "cpe0", "cpe1", "cpe2", "cpe3", "rme0", "rme1", "rme2", "rme3", "ctrl" }; static const char *msix_name_cb[] = { "cpe0", "cpe1", "cpe2", "cpe3", "rme0", "rme1", "rme2", "rme3", "eemc", "elpu0", "elpu1", "epss", "mlpu" }; MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC); MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA); MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC); module_param(os_name, charp, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(os_name, "OS name of the hba host machine"); module_param(os_patch, charp, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine"); module_param(host_name, charp, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(host_name, "Hostname of the hba host machine"); module_param(num_rports, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_rports, "Max number of rports supported per port " "(physical/logical), default=1024"); module_param(num_ios, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000"); module_param(num_tms, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128"); module_param(num_fcxps, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64"); module_param(num_ufbufs, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame " "buffers, default=64"); module_param(reqq_size, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, " "default=256"); module_param(rspq_size, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, " "default=64"); module_param(num_sgpgs, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048"); module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(rport_del_timeout, 
"Rport delete timeout, default=90 secs, " "Range[>0]"); module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]"); module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255"); module_param(bfa_log_level, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, " "Range[Critical:1|Error:2|Warning:3|Info:4]"); module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, " "Range[off:0|on:1]"); module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for " "boot port. Otherwise 10 secs in RHEL4 & 0 for " "[RHEL5, SLES10, ESX40] Range[>0]"); module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts " "for Brocade-415/425/815/825 cards, default=0, " " Range[false:0|true:1]"); module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts " "if possible for Brocade-1010/1020/804/1007/902/1741 " "cards, default=0, Range[false:0|true:1]"); module_param(fdmi_enable, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, " "Range[false:0|true:1]"); module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 " "(use system setting), Range[128|256|512|1024|2048|4096]"); module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1," " Range[false:0|true:1]"); static void bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event); static void bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event); static void bfad_sm_initializing(struct bfad_s *bfad, 
enum bfad_sm_event event); static void bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event); static void bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event); static void bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event); static void bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event); /* * Beginning state for the driver instance, awaiting the pci_probe event */ static void bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event) { bfa_trc(bfad, event); switch (event) { case BFAD_E_CREATE: bfa_sm_set_state(bfad, bfad_sm_created); bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, "%s", "bfad_worker"); if (IS_ERR(bfad->bfad_tsk)) { printk(KERN_INFO "bfad[%d]: Kernel thread " "creation failed!\n", bfad->inst_no); bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED); } bfa_sm_send_event(bfad, BFAD_E_INIT); break; case BFAD_E_STOP: /* Ignore stop; already in uninit */ break; default: bfa_sm_fault(bfad, event); } } /* * Driver Instance is created, awaiting event INIT to initialize the bfad */ static void bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event) { unsigned long flags; bfa_trc(bfad, event); switch (event) { case BFAD_E_INIT: bfa_sm_set_state(bfad, bfad_sm_initializing); init_completion(&bfad->comp); /* Enable Interrupt and wait bfa_init completion */ if (bfad_setup_intr(bfad)) { printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n", bfad->inst_no); bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED); break; } spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_iocfc_init(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); /* Set up interrupt handler for each vectors */ if ((bfad->bfad_flags & BFAD_MSIX_ON) && bfad_install_msix_handler(bfad)) { printk(KERN_WARNING "%s: install_msix failed, bfad%d\n", __func__, bfad->inst_no); } bfad_init_timer(bfad); wait_for_completion(&bfad->comp); if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS); } else { 
printk(KERN_WARNING "bfa %s: bfa init failed\n", bfad->pci_name); bfad->bfad_flags |= BFAD_HAL_INIT_FAIL; bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED); } break; case BFAD_E_KTHREAD_CREATE_FAILED: bfa_sm_set_state(bfad, bfad_sm_uninit); break; default: bfa_sm_fault(bfad, event); } } static void bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event) { int retval; unsigned long flags; bfa_trc(bfad, event); switch (event) { case BFAD_E_INIT_SUCCESS: kthread_stop(bfad->bfad_tsk); spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->bfad_tsk = NULL; spin_unlock_irqrestore(&bfad->bfad_lock, flags); retval = bfad_start_ops(bfad); if (retval != BFA_STATUS_OK) break; bfa_sm_set_state(bfad, bfad_sm_operational); break; case BFAD_E_INTR_INIT_FAILED: bfa_sm_set_state(bfad, bfad_sm_uninit); kthread_stop(bfad->bfad_tsk); spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->bfad_tsk = NULL; spin_unlock_irqrestore(&bfad->bfad_lock, flags); break; case BFAD_E_INIT_FAILED: bfa_sm_set_state(bfad, bfad_sm_failed); break; default: bfa_sm_fault(bfad, event); } } static void bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event) { int retval; bfa_trc(bfad, event); switch (event) { case BFAD_E_INIT_SUCCESS: retval = bfad_start_ops(bfad); if (retval != BFA_STATUS_OK) break; bfa_sm_set_state(bfad, bfad_sm_operational); break; case BFAD_E_STOP: if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE) bfad_uncfg_pport(bfad); if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) { bfad_im_probe_undo(bfad); bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; } bfad_stop(bfad); break; case BFAD_E_EXIT_COMP: bfa_sm_set_state(bfad, bfad_sm_uninit); bfad_remove_intr(bfad); del_timer_sync(&bfad->hal_tmo); break; default: bfa_sm_fault(bfad, event); } } static void bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event) { bfa_trc(bfad, event); switch (event) { case BFAD_E_STOP: bfa_sm_set_state(bfad, bfad_sm_fcs_exit); bfad_fcs_stop(bfad); break; default: bfa_sm_fault(bfad, event); } } static void 
bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event) { bfa_trc(bfad, event); switch (event) { case BFAD_E_FCS_EXIT_COMP: bfa_sm_set_state(bfad, bfad_sm_stopping); bfad_stop(bfad); break; default: bfa_sm_fault(bfad, event); } } static void bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event) { bfa_trc(bfad, event); switch (event) { case BFAD_E_EXIT_COMP: bfa_sm_set_state(bfad, bfad_sm_uninit); bfad_remove_intr(bfad); del_timer_sync(&bfad->hal_tmo); bfad_im_probe_undo(bfad); bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; bfad_uncfg_pport(bfad); break; default: bfa_sm_fault(bfad, event); break; } } /* * BFA callbacks */ void bfad_hcb_comp(void *arg, bfa_status_t status) { struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg; fcomp->status = status; complete(&fcomp->comp); } /* * bfa_init callback */ void bfa_cb_init(void *drv, bfa_status_t init_status) { struct bfad_s *bfad = drv; if (init_status == BFA_STATUS_OK) { bfad->bfad_flags |= BFAD_HAL_INIT_DONE; /* * If BFAD_HAL_INIT_FAIL flag is set: * Wake up the kernel thread to start * the bfad operations after HAL init done */ if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) { bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL; wake_up_process(bfad->bfad_tsk); } } complete(&bfad->comp); } /* * BFA_FCS callbacks */ struct bfad_port_s * bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port, enum bfa_lport_role roles, struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv) { bfa_status_t rc; struct bfad_port_s *port_drv; if (!vp_drv && !vf_drv) { port_drv = &bfad->pport; port_drv->pvb_type = BFAD_PORT_PHYS_BASE; } else if (!vp_drv && vf_drv) { port_drv = &vf_drv->base_port; port_drv->pvb_type = BFAD_PORT_VF_BASE; } else if (vp_drv && !vf_drv) { port_drv = &vp_drv->drv_port; port_drv->pvb_type = BFAD_PORT_PHYS_VPORT; } else { port_drv = &vp_drv->drv_port; port_drv->pvb_type = BFAD_PORT_VF_VPORT; } port_drv->fcs_port = port; port_drv->roles = roles; if (roles & BFA_LPORT_ROLE_FCP_IM) { rc = 
bfad_im_port_new(bfad, port_drv); if (rc != BFA_STATUS_OK) { bfad_im_port_delete(bfad, port_drv); port_drv = NULL; } } return port_drv; } void bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles, struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv) { struct bfad_port_s *port_drv; /* this will be only called from rmmod context */ if (vp_drv && !vp_drv->comp_del) { port_drv = (vp_drv) ? (&(vp_drv)->drv_port) : ((vf_drv) ? (&(vf_drv)->base_port) : (&(bfad)->pport)); bfa_trc(bfad, roles); if (roles & BFA_LPORT_ROLE_FCP_IM) bfad_im_port_delete(bfad, port_drv); } } /* * FCS RPORT alloc callback, after successful PLOGI by FCS */ bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport, struct bfad_rport_s **rport_drv) { bfa_status_t rc = BFA_STATUS_OK; *rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC); if (*rport_drv == NULL) { rc = BFA_STATUS_ENOMEM; goto ext; } *rport = &(*rport_drv)->fcs_rport; ext: return rc; } /* * FCS PBC VPORT Create */ void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport) { struct bfa_lport_cfg_s port_cfg = {0}; struct bfad_vport_s *vport; int rc; vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL); if (!vport) { bfa_trc(bfad, 0); return; } vport->drv_port.bfad = bfad; port_cfg.roles = BFA_LPORT_ROLE_FCP_IM; port_cfg.pwwn = pbc_vport.vp_pwwn; port_cfg.nwwn = pbc_vport.vp_nwwn; port_cfg.preboot_vp = BFA_TRUE; rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0, &port_cfg, vport); if (rc != BFA_STATUS_OK) { bfa_trc(bfad, 0); return; } list_add_tail(&vport->list_entry, &bfad->pbc_vport_list); } void bfad_hal_mem_release(struct bfad_s *bfad) { int i; struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; struct bfa_mem_elem_s *meminfo_elem; for (i = 0; i < BFA_MEM_TYPE_MAX; i++) { meminfo_elem = &hal_meminfo->meminfo[i]; if (meminfo_elem->kva != NULL) { switch (meminfo_elem->mem_type) { case BFA_MEM_TYPE_KVA: vfree(meminfo_elem->kva); break; case 
BFA_MEM_TYPE_DMA: dma_free_coherent(&bfad->pcidev->dev, meminfo_elem->mem_len, meminfo_elem->kva, (dma_addr_t) meminfo_elem->dma); break; default: WARN_ON(1); break; } } } memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s)); } void bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg) { if (num_rports > 0) bfa_cfg->fwcfg.num_rports = num_rports; if (num_ios > 0) bfa_cfg->fwcfg.num_ioim_reqs = num_ios; if (num_tms > 0) bfa_cfg->fwcfg.num_tskim_reqs = num_tms; if (num_fcxps > 0) bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps; if (num_ufbufs > 0) bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs; if (reqq_size > 0) bfa_cfg->drvcfg.num_reqq_elems = reqq_size; if (rspq_size > 0) bfa_cfg->drvcfg.num_rspq_elems = rspq_size; if (num_sgpgs > 0) bfa_cfg->drvcfg.num_sgpgs = num_sgpgs; /* * populate the hal values back to the driver for sysfs use. * otherwise, the default values will be shown as 0 in sysfs */ num_rports = bfa_cfg->fwcfg.num_rports; num_ios = bfa_cfg->fwcfg.num_ioim_reqs; num_tms = bfa_cfg->fwcfg.num_tskim_reqs; num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs; num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs; reqq_size = bfa_cfg->drvcfg.num_reqq_elems; rspq_size = bfa_cfg->drvcfg.num_rspq_elems; num_sgpgs = bfa_cfg->drvcfg.num_sgpgs; } bfa_status_t bfad_hal_mem_alloc(struct bfad_s *bfad) { int i; struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; struct bfa_mem_elem_s *meminfo_elem; dma_addr_t phys_addr; void *kva; bfa_status_t rc = BFA_STATUS_OK; int retry_count = 0; int reset_value = 1; int min_num_sgpgs = 512; bfa_cfg_get_default(&bfad->ioc_cfg); retry: bfad_update_hal_cfg(&bfad->ioc_cfg); bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs; bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo); for (i = 0; i < BFA_MEM_TYPE_MAX; i++) { meminfo_elem = &hal_meminfo->meminfo[i]; switch (meminfo_elem->mem_type) { case BFA_MEM_TYPE_KVA: kva = vmalloc(meminfo_elem->mem_len); if (kva == NULL) { bfad_hal_mem_release(bfad); rc = BFA_STATUS_ENOMEM; goto ext; } memset(kva, 0, 
meminfo_elem->mem_len); meminfo_elem->kva = kva; break; case BFA_MEM_TYPE_DMA: kva = dma_alloc_coherent(&bfad->pcidev->dev, meminfo_elem->mem_len, &phys_addr, GFP_KERNEL); if (kva == NULL) { bfad_hal_mem_release(bfad); /* * If we cannot allocate with default * num_sgpages try with half the value. */ if (num_sgpgs > min_num_sgpgs) { printk(KERN_INFO "bfad[%d]: memory allocation failed" " with num_sgpgs: %d\n", bfad->inst_no, num_sgpgs); nextLowerInt(&num_sgpgs); printk(KERN_INFO "bfad[%d]: trying to allocate memory" " with num_sgpgs: %d\n", bfad->inst_no, num_sgpgs); retry_count++; goto retry; } else { if (num_sgpgs_parm > 0) num_sgpgs = num_sgpgs_parm; else { reset_value = (1 << retry_count); num_sgpgs *= reset_value; } rc = BFA_STATUS_ENOMEM; goto ext; } } if (num_sgpgs_parm > 0) num_sgpgs = num_sgpgs_parm; else { reset_value = (1 << retry_count); num_sgpgs *= reset_value; } memset(kva, 0, meminfo_elem->mem_len); meminfo_elem->kva = kva; meminfo_elem->dma = phys_addr; break; default: break; } } ext: return rc; } /* * Create a vport under a vf. 
*/ bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id, struct bfa_lport_cfg_s *port_cfg, struct device *dev) { struct bfad_vport_s *vport; int rc = BFA_STATUS_OK; unsigned long flags; struct completion fcomp; vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL); if (!vport) { rc = BFA_STATUS_ENOMEM; goto ext; } vport->drv_port.bfad = bfad; spin_lock_irqsave(&bfad->bfad_lock, flags); rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id, port_cfg, vport); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (rc != BFA_STATUS_OK) goto ext_free_vport; if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) { rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port, dev); if (rc != BFA_STATUS_OK) goto ext_free_fcs_vport; } spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_vport_start(&vport->fcs_vport); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return BFA_STATUS_OK; ext_free_fcs_vport: spin_lock_irqsave(&bfad->bfad_lock, flags); vport->comp_del = &fcomp; init_completion(vport->comp_del); bfa_fcs_vport_delete(&vport->fcs_vport); spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(vport->comp_del); ext_free_vport: kfree(vport); ext: return rc; } void bfad_bfa_tmo(unsigned long data) { struct bfad_s *bfad = (struct bfad_s *) data; unsigned long flags; struct list_head doneq; spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_timer_beat(&bfad->bfa.timer_mod); bfa_comp_deq(&bfad->bfa, &doneq); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (!list_empty(&doneq)) { bfa_comp_process(&bfad->bfa, &doneq); spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_comp_free(&bfad->bfa, &doneq); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); } void bfad_init_timer(struct bfad_s *bfad) { init_timer(&bfad->hal_tmo); bfad->hal_tmo.function = bfad_bfa_tmo; bfad->hal_tmo.data = (unsigned long)bfad; mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); } int 
bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) { int rc = -ENODEV; if (pci_enable_device(pdev)) { printk(KERN_ERR "pci_enable_device fail %p\n", pdev); goto out; } if (pci_request_regions(pdev, BFAD_DRIVER_NAME)) goto out_disable_device; pci_set_master(pdev); if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev); goto out_release_region; } bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); if (bfad->pci_bar0_kva == NULL) { printk(KERN_ERR "Fail to map bar0\n"); goto out_release_region; } bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn); bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn); bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva; bfad->hal_pcidev.device_id = pdev->device; bfad->pci_name = pci_name(pdev); bfad->pci_attr.vendor_id = pdev->vendor; bfad->pci_attr.device_id = pdev->device; bfad->pci_attr.ssid = pdev->subsystem_device; bfad->pci_attr.ssvid = pdev->subsystem_vendor; bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn); bfad->pcidev = pdev; /* Adjust PCIe Maximum Read Request Size */ if (pcie_max_read_reqsz > 0) { int pcie_cap_reg; u16 pcie_dev_ctl; u16 mask = 0xffff; switch (pcie_max_read_reqsz) { case 128: mask = 0x0; break; case 256: mask = 0x1000; break; case 512: mask = 0x2000; break; case 1024: mask = 0x3000; break; case 2048: mask = 0x4000; break; case 4096: mask = 0x5000; break; default: break; } pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP); if (mask != 0xffff && pcie_cap_reg) { pcie_cap_reg += 0x08; pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl); if ((pcie_dev_ctl & 0x7000) != mask) { printk(KERN_WARNING "BFA[%s]: " "pcie_max_read_request_size is %d, " "reset to %d\n", bfad->pci_name, (1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7, pcie_max_read_reqsz); pcie_dev_ctl &= ~0x7000; pci_write_config_word(pdev, pcie_cap_reg, pcie_dev_ctl | mask); } } } return 0; out_release_region: pci_release_regions(pdev); 
out_disable_device: pci_disable_device(pdev); out: return rc; } void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad) { pci_iounmap(pdev, bfad->pci_bar0_kva); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } bfa_status_t bfad_drv_init(struct bfad_s *bfad) { bfa_status_t rc; unsigned long flags; bfad->cfg_data.rport_del_timeout = rport_del_timeout; bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth; bfad->cfg_data.io_max_sge = bfa_io_max_sge; bfad->cfg_data.binding_method = FCP_PWWN_BINDING; rc = bfad_hal_mem_alloc(bfad); if (rc != BFA_STATUS_OK) { printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n", bfad->inst_no); printk(KERN_WARNING "Not enough memory to attach all Brocade HBA ports, %s", "System may need more memory.\n"); goto out_hal_mem_alloc_failure; } bfad->bfa.trcmod = bfad->trcmod; bfad->bfa.plog = &bfad->plog_buf; bfa_plog_init(&bfad->plog_buf); bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START, 0, "Driver Attach"); bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo, &bfad->hal_pcidev); /* FCS INIT */ spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->bfa_fcs.trcmod = bfad->trcmod; bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); bfad->bfa_fcs.fdmi_enabled = fdmi_enable; spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfad->bfad_flags |= BFAD_DRV_INIT_DONE; return BFA_STATUS_OK; out_hal_mem_alloc_failure: return BFA_STATUS_FAILED; } void bfad_drv_uninit(struct bfad_s *bfad) { unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); init_completion(&bfad->comp); bfa_iocfc_stop(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(&bfad->comp); del_timer_sync(&bfad->hal_tmo); bfa_isr_disable(&bfad->bfa); bfa_detach(&bfad->bfa); bfad_remove_intr(bfad); bfad_hal_mem_release(bfad); bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE; } void bfad_drv_start(struct bfad_s *bfad) { unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); 
bfa_iocfc_start(&bfad->bfa); bfa_fcs_fabric_modstart(&bfad->bfa_fcs); bfad->bfad_flags |= BFAD_HAL_START_DONE; spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (bfad->im) flush_workqueue(bfad->im->drv_workq); } void bfad_fcs_stop(struct bfad_s *bfad) { unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); init_completion(&bfad->comp); bfad->pport.flags |= BFAD_PORT_DELETE; bfa_fcs_exit(&bfad->bfa_fcs); spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(&bfad->comp); bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP); } void bfad_stop(struct bfad_s *bfad) { unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); init_completion(&bfad->comp); bfa_iocfc_stop(&bfad->bfa); bfad->bfad_flags &= ~BFAD_HAL_START_DONE; spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(&bfad->comp); bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP); } bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role) { int rc = BFA_STATUS_OK; /* Allocate scsi_host for the physical port */ if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) && (role & BFA_LPORT_ROLE_FCP_IM)) { if (bfad->pport.im_port == NULL) { rc = BFA_STATUS_FAILED; goto out; } rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port, &bfad->pcidev->dev); if (rc != BFA_STATUS_OK) goto out; bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM; } bfad->bfad_flags |= BFAD_CFG_PPORT_DONE; out: return rc; } void bfad_uncfg_pport(struct bfad_s *bfad) { if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) && (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) { bfad_im_scsi_host_free(bfad, bfad->pport.im_port); bfad_im_port_clean(bfad->pport.im_port); kfree(bfad->pport.im_port); bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM; } bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE; } bfa_status_t bfad_start_ops(struct bfad_s *bfad) { int retval; unsigned long flags; struct bfad_vport_s *vport, *vport_new; struct bfa_fcs_driver_info_s driver_info; /* Fill the driver_info info to fcs*/ memset(&driver_info, 0, 
sizeof(driver_info)); strncpy(driver_info.version, BFAD_DRIVER_VERSION, sizeof(driver_info.version) - 1); if (host_name) strncpy(driver_info.host_machine_name, host_name, sizeof(driver_info.host_machine_name) - 1); if (os_name) strncpy(driver_info.host_os_name, os_name, sizeof(driver_info.host_os_name) - 1); if (os_patch) strncpy(driver_info.host_os_patch, os_patch, sizeof(driver_info.host_os_patch) - 1); strncpy(driver_info.os_device_name, bfad->pci_name, sizeof(driver_info.os_device_name - 1)); /* FCS INIT */ spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info); bfa_fcs_init(&bfad->bfa_fcs); spin_unlock_irqrestore(&bfad->bfad_lock, flags); retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); if (retval != BFA_STATUS_OK) { if (bfa_sm_cmp_state(bfad, bfad_sm_initializing)) bfa_sm_set_state(bfad, bfad_sm_failed); bfad_stop(bfad); return BFA_STATUS_FAILED; } /* BFAD level FC4 IM specific resource allocation */ retval = bfad_im_probe(bfad); if (retval != BFA_STATUS_OK) { printk(KERN_WARNING "bfad_im_probe failed\n"); if (bfa_sm_cmp_state(bfad, bfad_sm_initializing)) bfa_sm_set_state(bfad, bfad_sm_failed); bfad_im_probe_undo(bfad); bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; bfad_uncfg_pport(bfad); bfad_stop(bfad); return BFA_STATUS_FAILED; } else bfad->bfad_flags |= BFAD_FC4_PROBE_DONE; bfad_drv_start(bfad); /* Complete pbc vport create */ list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list, list_entry) { struct fc_vport_identifiers vid; struct fc_vport *fc_vport; char pwwn_buf[BFA_STRING_32]; memset(&vid, 0, sizeof(vid)); vid.roles = FC_PORT_ROLE_FCP_INITIATOR; vid.vport_type = FC_PORTTYPE_NPIV; vid.disable = false; vid.node_name = wwn_to_u64((u8 *) (&((vport->fcs_vport).lport.port_cfg.nwwn))); vid.port_name = wwn_to_u64((u8 *) (&((vport->fcs_vport).lport.port_cfg.pwwn))); fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid); if (!fc_vport) { wwn2str(pwwn_buf, vid.port_name); printk(KERN_WARNING 
"bfad%d: failed to create pbc vport" " %s\n", bfad->inst_no, pwwn_buf); } list_del(&vport->list_entry); kfree(vport); } /* * If bfa_linkup_delay is set to -1 default; try to retrive the * value using the bfad_get_linkup_delay(); else use the * passed in module param value as the bfa_linkup_delay. */ if (bfa_linkup_delay < 0) { bfa_linkup_delay = bfad_get_linkup_delay(bfad); bfad_rport_online_wait(bfad); bfa_linkup_delay = -1; } else bfad_rport_online_wait(bfad); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n"); return BFA_STATUS_OK; } int bfad_worker(void *ptr) { struct bfad_s *bfad; unsigned long flags; bfad = (struct bfad_s *)ptr; while (!kthread_should_stop()) { /* Send event BFAD_E_INIT_SUCCESS */ bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS); spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->bfad_tsk = NULL; spin_unlock_irqrestore(&bfad->bfad_lock, flags); break; } return 0; } /* * BFA driver interrupt functions */ irqreturn_t bfad_intx(int irq, void *dev_id) { struct bfad_s *bfad = dev_id; struct list_head doneq; unsigned long flags; bfa_boolean_t rc; spin_lock_irqsave(&bfad->bfad_lock, flags); rc = bfa_intx(&bfad->bfa); if (!rc) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); return IRQ_NONE; } bfa_comp_deq(&bfad->bfa, &doneq); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (!list_empty(&doneq)) { bfa_comp_process(&bfad->bfa, &doneq); spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_comp_free(&bfad->bfa, &doneq); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } return IRQ_HANDLED; } static irqreturn_t bfad_msix(int irq, void *dev_id) { struct bfad_msix_s *vec = dev_id; struct bfad_s *bfad = vec->bfad; struct list_head doneq; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_msix(&bfad->bfa, vec->msix.entry); bfa_comp_deq(&bfad->bfa, &doneq); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (!list_empty(&doneq)) { bfa_comp_process(&bfad->bfa, &doneq); spin_lock_irqsave(&bfad->bfad_lock, flags); 
bfa_comp_free(&bfad->bfa, &doneq); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } return IRQ_HANDLED; } /* * Initialize the MSIX entry table. */ static void bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries, int mask, int max_bit) { int i; int match = 0x00000001; for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) { if (mask & match) { bfad->msix_tab[bfad->nvec].msix.entry = i; bfad->msix_tab[bfad->nvec].bfad = bfad; msix_entries[bfad->nvec].entry = i; bfad->nvec++; } match <<= 1; } } int bfad_install_msix_handler(struct bfad_s *bfad) { int i, error = 0; for (i = 0; i < bfad->nvec; i++) { sprintf(bfad->msix_tab[i].name, "bfa-%s-%s", bfad->pci_name, ((bfa_asic_id_ct(bfad->hal_pcidev.device_id)) ? msix_name_ct[i] : msix_name_cb[i])); error = request_irq(bfad->msix_tab[i].msix.vector, (irq_handler_t) bfad_msix, 0, bfad->msix_tab[i].name, &bfad->msix_tab[i]); bfa_trc(bfad, i); bfa_trc(bfad, bfad->msix_tab[i].msix.vector); if (error) { int j; for (j = 0; j < i; j++) free_irq(bfad->msix_tab[j].msix.vector, &bfad->msix_tab[j]); return 1; } } return 0; } /* * Setup MSIX based interrupt. */ int bfad_setup_intr(struct bfad_s *bfad) { int error = 0; u32 mask = 0, i, num_bit = 0, max_bit = 0; struct msix_entry msix_entries[MAX_MSIX_ENTRY]; struct pci_dev *pdev = bfad->pcidev; /* Call BFA to get the msix map for this PCI function. */ bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit); /* Set up the msix entry table */ bfad_init_msix_entry(bfad, msix_entries, mask, max_bit); if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) || (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) { error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec); if (error) { /* * Only error number of vector is available. * We don't have a mechanism to map multiple * interrupts into one vector, so even if we * can try to request less vectors, we don't * know how to associate interrupt events to * vectors. 
Linux doesn't duplicate vectors * in the MSIX table for this case. */ printk(KERN_WARNING "bfad%d: " "pci_enable_msix failed (%d)," " use line based.\n", bfad->inst_no, error); goto line_based; } /* Save the vectors */ for (i = 0; i < bfad->nvec; i++) { bfa_trc(bfad, msix_entries[i].vector); bfad->msix_tab[i].msix.vector = msix_entries[i].vector; } bfa_msix_init(&bfad->bfa, bfad->nvec); bfad->bfad_flags |= BFAD_MSIX_ON; return error; } line_based: error = 0; if (request_irq (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS, BFAD_DRIVER_NAME, bfad) != 0) { /* Enable interrupt handler failed */ return 1; } return error; } void bfad_remove_intr(struct bfad_s *bfad) { int i; if (bfad->bfad_flags & BFAD_MSIX_ON) { for (i = 0; i < bfad->nvec; i++) free_irq(bfad->msix_tab[i].msix.vector, &bfad->msix_tab[i]); pci_disable_msix(bfad->pcidev); bfad->bfad_flags &= ~BFAD_MSIX_ON; } else { free_irq(bfad->pcidev->irq, bfad); } } /* * PCI probe entry. */ int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) { struct bfad_s *bfad; int error = -ENODEV, retval; /* For single port cards - only claim function 0 */ if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) && (PCI_FUNC(pdev->devfn) != 0)) return -ENODEV; bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL); if (!bfad) { error = -ENOMEM; goto out; } bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL); if (!bfad->trcmod) { printk(KERN_WARNING "Error alloc trace buffer!\n"); error = -ENOMEM; goto out_alloc_trace_failure; } /* TRACE INIT */ bfa_trc_init(bfad->trcmod); bfa_trc(bfad, bfad_inst); if (!(bfad_load_fwimg(pdev))) { kfree(bfad->trcmod); goto out_alloc_trace_failure; } retval = bfad_pci_init(pdev, bfad); if (retval) { printk(KERN_WARNING "bfad_pci_init failure!\n"); error = retval; goto out_pci_init_failure; } mutex_lock(&bfad_mutex); bfad->inst_no = bfad_inst++; list_add_tail(&bfad->list_entry, &bfad_list); mutex_unlock(&bfad_mutex); /* Initializing the state machine: State set to 
uninit */ bfa_sm_set_state(bfad, bfad_sm_uninit); spin_lock_init(&bfad->bfad_lock); pci_set_drvdata(pdev, bfad); bfad->ref_count = 0; bfad->pport.bfad = bfad; INIT_LIST_HEAD(&bfad->pbc_vport_list); /* Setup the debugfs node for this bfad */ if (bfa_debugfs_enable) bfad_debugfs_init(&bfad->pport); retval = bfad_drv_init(bfad); if (retval != BFA_STATUS_OK) goto out_drv_init_failure; bfa_sm_send_event(bfad, BFAD_E_CREATE); if (bfa_sm_cmp_state(bfad, bfad_sm_uninit)) goto out_bfad_sm_failure; return 0; out_bfad_sm_failure: bfa_detach(&bfad->bfa); bfad_hal_mem_release(bfad); out_drv_init_failure: /* Remove the debugfs node for this bfad */ kfree(bfad->regdata); bfad_debugfs_exit(&bfad->pport); mutex_lock(&bfad_mutex); bfad_inst--; list_del(&bfad->list_entry); mutex_unlock(&bfad_mutex); bfad_pci_uninit(pdev, bfad); out_pci_init_failure: kfree(bfad->trcmod); out_alloc_trace_failure: kfree(bfad); out: return error; } /* * PCI remove entry. */ void bfad_pci_remove(struct pci_dev *pdev) { struct bfad_s *bfad = pci_get_drvdata(pdev); unsigned long flags; bfa_trc(bfad, bfad->inst_no); spin_lock_irqsave(&bfad->bfad_lock, flags); if (bfad->bfad_tsk != NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); kthread_stop(bfad->bfad_tsk); } else { spin_unlock_irqrestore(&bfad->bfad_lock, flags); } /* Send Event BFAD_E_STOP */ bfa_sm_send_event(bfad, BFAD_E_STOP); /* Driver detach and dealloc mem */ spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_detach(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfad_hal_mem_release(bfad); /* Remove the debugfs node for this bfad */ kfree(bfad->regdata); bfad_debugfs_exit(&bfad->pport); /* Cleaning the BFAD instance */ mutex_lock(&bfad_mutex); bfad_inst--; list_del(&bfad->list_entry); mutex_unlock(&bfad_mutex); bfad_pci_uninit(pdev, bfad); kfree(bfad->trcmod); kfree(bfad); } struct pci_device_id bfad_id_table[] = { { .vendor = BFA_PCI_VENDOR_ID_BROCADE, .device = BFA_PCI_DEVICE_ID_FC_8G2P, .subvendor = PCI_ANY_ID, .subdevice = 
PCI_ANY_ID, }, { .vendor = BFA_PCI_VENDOR_ID_BROCADE, .device = BFA_PCI_DEVICE_ID_FC_8G1P, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .vendor = BFA_PCI_VENDOR_ID_BROCADE, .device = BFA_PCI_DEVICE_ID_CT, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .class = (PCI_CLASS_SERIAL_FIBER << 8), .class_mask = ~0, }, { .vendor = BFA_PCI_VENDOR_ID_BROCADE, .device = BFA_PCI_DEVICE_ID_CT_FC, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .class = (PCI_CLASS_SERIAL_FIBER << 8), .class_mask = ~0, }, {0, 0}, }; MODULE_DEVICE_TABLE(pci, bfad_id_table); static struct pci_driver bfad_pci_driver = { .name = BFAD_DRIVER_NAME, .id_table = bfad_id_table, .probe = bfad_pci_probe, .remove = __devexit_p(bfad_pci_remove), }; /* * Driver module init. */ static int __init bfad_init(void) { int error = 0; printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n", BFAD_DRIVER_VERSION); if (num_sgpgs > 0) num_sgpgs_parm = num_sgpgs; error = bfad_im_module_init(); if (error) { error = -ENOMEM; printk(KERN_WARNING "bfad_im_module_init failure\n"); goto ext; } if (strcmp(FCPI_NAME, " fcpim") == 0) supported_fc4s |= BFA_LPORT_ROLE_FCP_IM; bfa_auto_recover = ioc_auto_recover; bfa_fcs_rport_set_del_timeout(rport_del_timeout); error = pci_register_driver(&bfad_pci_driver); if (error) { printk(KERN_WARNING "pci_register_driver failure\n"); goto ext; } return 0; ext: bfad_im_module_exit(); return error; } /* * Driver module exit. 
*/ static void __exit bfad_exit(void) { pci_unregister_driver(&bfad_pci_driver); bfad_im_module_exit(); bfad_free_fwimg(); } /* Firmware handling */ static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, u32 *bfi_image_size, char *fw_name) { const struct firmware *fw; if (request_firmware(&fw, fw_name, &pdev->dev)) { printk(KERN_ALERT "Can't locate firmware %s\n", fw_name); *bfi_image = NULL; goto out; } *bfi_image = vmalloc(fw->size); if (NULL == *bfi_image) { printk(KERN_ALERT "Fail to allocate buffer for fw image " "size=%x!\n", (u32) fw->size); goto out; } memcpy(*bfi_image, fw->data, fw->size); *bfi_image_size = fw->size/sizeof(u32); out: release_firmware(fw); } static u32 * bfad_load_fwimg(struct pci_dev *pdev) { if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) { if (bfi_image_ct_fc_size == 0) bfad_read_firmware(pdev, &bfi_image_ct_fc, &bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC); return bfi_image_ct_fc; } else if (pdev->device == BFA_PCI_DEVICE_ID_CT) { if (bfi_image_ct_cna_size == 0) bfad_read_firmware(pdev, &bfi_image_ct_cna, &bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA); return bfi_image_ct_cna; } else { if (bfi_image_cb_fc_size == 0) bfad_read_firmware(pdev, &bfi_image_cb_fc, &bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC); return bfi_image_cb_fc; } } static void bfad_free_fwimg(void) { if (bfi_image_ct_fc_size && bfi_image_ct_fc) vfree(bfi_image_ct_fc); if (bfi_image_ct_cna_size && bfi_image_ct_cna) vfree(bfi_image_ct_cna); if (bfi_image_cb_fc_size && bfi_image_cb_fc) vfree(bfi_image_cb_fc); } module_init(bfad_init); module_exit(bfad_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME); MODULE_AUTHOR("Brocade Communications Systems, Inc."); MODULE_VERSION(BFAD_DRIVER_VERSION);
gpl-2.0
onejay09/OLD----kernel_HTC_msm7x30_KK
net/atm/mpc.c
2379
39161
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include <linux/kernel.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/capability.h> #include <linux/seq_file.h> /* We are an ethernet device */ #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <net/sock.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <linux/uaccess.h> #include <asm/byteorder.h> #include <net/checksum.h> /* for ip_fast_csum() */ #include <net/arp.h> #include <net/dst.h> #include <linux/proc_fs.h> /* And atm device */ #include <linux/atmdev.h> #include <linux/atmlec.h> #include <linux/atmmpc.h> /* Modular too */ #include <linux/module.h> #include "lec.h" #include "mpc.h" #include "resources.h" /* * mpc.c: Implementation of MPOA client kernel part */ #if 0 #define dprintk(format, args...) \ printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args) #define dprintk_cont(format, args...) printk(KERN_CONT format, ##args) #else #define dprintk(format, args...) \ do { if (0) \ printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args);\ } while (0) #define dprintk_cont(format, args...) \ do { if (0) printk(KERN_CONT format, ##args); } while (0) #endif #if 0 #define ddprintk(format, args...) \ printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args) #define ddprintk_cont(format, args...) printk(KERN_CONT format, ##args) #else #define ddprintk(format, args...) \ do { if (0) \ printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args);\ } while (0) #define ddprintk_cont(format, args...) 
\ do { if (0) printk(KERN_CONT format, ##args); } while (0) #endif /* mpc_daemon -> kernel */ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc); static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc); static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc); static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc); static void mps_death(struct k_message *msg, struct mpoa_client *mpc); static void clean_up(struct k_message *msg, struct mpoa_client *mpc, int action); static void MPOA_cache_impos_rcvd(struct k_message *msg, struct mpoa_client *mpc); static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc); static void set_mps_mac_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc); static const uint8_t *copy_macs(struct mpoa_client *mpc, const uint8_t *router_mac, const uint8_t *tlvs, uint8_t mps_macs, uint8_t device_type); static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry); static void send_set_mps_ctrl_addr(const char *addr, struct mpoa_client *mpc); static void mpoad_close(struct atm_vcc *vcc); static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb); static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb); static netdev_tx_t mpc_send_packet(struct sk_buff *skb, struct net_device *dev); static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned long event, void *dev); static void mpc_timer_refresh(void); static void mpc_cache_check(unsigned long checking_time); static struct llc_snap_hdr llc_snap_mpoa_ctrl = { 0xaa, 0xaa, 0x03, {0x00, 0x00, 0x5e}, {0x00, 0x03} /* For MPOA control PDUs */ }; static struct llc_snap_hdr llc_snap_mpoa_data = { 0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}, {0x08, 0x00} /* This is for IP PDUs only */ }; static struct llc_snap_hdr llc_snap_mpoa_data_tagged = { 0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}, {0x88, 0x4c} /* This is for tagged data PDUs */ }; static 
struct notifier_block mpoa_notifier = { mpoa_event_listener, NULL, 0 }; struct mpoa_client *mpcs = NULL; /* FIXME */ static struct atm_mpoa_qos *qos_head = NULL; static DEFINE_TIMER(mpc_timer, NULL, 0, 0); static struct mpoa_client *find_mpc_by_itfnum(int itf) { struct mpoa_client *mpc; mpc = mpcs; /* our global linked list */ while (mpc != NULL) { if (mpc->dev_num == itf) return mpc; mpc = mpc->next; } return NULL; /* not found */ } static struct mpoa_client *find_mpc_by_vcc(struct atm_vcc *vcc) { struct mpoa_client *mpc; mpc = mpcs; /* our global linked list */ while (mpc != NULL) { if (mpc->mpoad_vcc == vcc) return mpc; mpc = mpc->next; } return NULL; /* not found */ } static struct mpoa_client *find_mpc_by_lec(struct net_device *dev) { struct mpoa_client *mpc; mpc = mpcs; /* our global linked list */ while (mpc != NULL) { if (mpc->dev == dev) return mpc; mpc = mpc->next; } return NULL; /* not found */ } /* * Functions for managing QoS list */ /* * Overwrites the old entry or makes a new one. 
*/ struct atm_mpoa_qos *atm_mpoa_add_qos(__be32 dst_ip, struct atm_qos *qos) { struct atm_mpoa_qos *entry; entry = atm_mpoa_search_qos(dst_ip); if (entry != NULL) { entry->qos = *qos; return entry; } entry = kmalloc(sizeof(struct atm_mpoa_qos), GFP_KERNEL); if (entry == NULL) { pr_info("mpoa: out of memory\n"); return entry; } entry->ipaddr = dst_ip; entry->qos = *qos; entry->next = qos_head; qos_head = entry; return entry; } struct atm_mpoa_qos *atm_mpoa_search_qos(__be32 dst_ip) { struct atm_mpoa_qos *qos; qos = qos_head; while (qos) { if (qos->ipaddr == dst_ip) break; qos = qos->next; } return qos; } /* * Returns 0 for failure */ int atm_mpoa_delete_qos(struct atm_mpoa_qos *entry) { struct atm_mpoa_qos *curr; if (entry == NULL) return 0; if (entry == qos_head) { qos_head = qos_head->next; kfree(entry); return 1; } curr = qos_head; while (curr != NULL) { if (curr->next == entry) { curr->next = entry->next; kfree(entry); return 1; } curr = curr->next; } return 0; } /* this is buggered - we need locking for qos_head */ void atm_mpoa_disp_qos(struct seq_file *m) { struct atm_mpoa_qos *qos; qos = qos_head; seq_printf(m, "QoS entries for shortcuts:\n"); seq_printf(m, "IP address\n TX:max_pcr pcr min_pcr max_cdv max_sdu\n RX:max_pcr pcr min_pcr max_cdv max_sdu\n"); while (qos != NULL) { seq_printf(m, "%pI4\n %-7d %-7d %-7d %-7d %-7d\n %-7d %-7d %-7d %-7d %-7d\n", &qos->ipaddr, qos->qos.txtp.max_pcr, qos->qos.txtp.pcr, qos->qos.txtp.min_pcr, qos->qos.txtp.max_cdv, qos->qos.txtp.max_sdu, qos->qos.rxtp.max_pcr, qos->qos.rxtp.pcr, qos->qos.rxtp.min_pcr, qos->qos.rxtp.max_cdv, qos->qos.rxtp.max_sdu); qos = qos->next; } } static struct net_device *find_lec_by_itfnum(int itf) { struct net_device *dev; char name[IFNAMSIZ]; sprintf(name, "lec%d", itf); dev = dev_get_by_name(&init_net, name); return dev; } static struct mpoa_client *alloc_mpc(void) { struct mpoa_client *mpc; mpc = kzalloc(sizeof(struct mpoa_client), GFP_KERNEL); if (mpc == NULL) return NULL; 
rwlock_init(&mpc->ingress_lock); rwlock_init(&mpc->egress_lock); mpc->next = mpcs; atm_mpoa_init_cache(mpc); mpc->parameters.mpc_p1 = MPC_P1; mpc->parameters.mpc_p2 = MPC_P2; memset(mpc->parameters.mpc_p3, 0, sizeof(mpc->parameters.mpc_p3)); mpc->parameters.mpc_p4 = MPC_P4; mpc->parameters.mpc_p5 = MPC_P5; mpc->parameters.mpc_p6 = MPC_P6; mpcs = mpc; return mpc; } /* * * start_mpc() puts the MPC on line. All the packets destined * to the lec underneath us are now being monitored and * shortcuts will be established. * */ static void start_mpc(struct mpoa_client *mpc, struct net_device *dev) { dprintk("(%s)\n", mpc->dev->name); if (!dev->netdev_ops) pr_info("(%s) not starting\n", dev->name); else { mpc->old_ops = dev->netdev_ops; mpc->new_ops = *mpc->old_ops; mpc->new_ops.ndo_start_xmit = mpc_send_packet; dev->netdev_ops = &mpc->new_ops; } } static void stop_mpc(struct mpoa_client *mpc) { struct net_device *dev = mpc->dev; dprintk("(%s)", mpc->dev->name); /* Lets not nullify lec device's dev->hard_start_xmit */ if (dev->netdev_ops != &mpc->new_ops) { dprintk_cont(" mpc already stopped, not fatal\n"); return; } dprintk_cont("\n"); dev->netdev_ops = mpc->old_ops; mpc->old_ops = NULL; /* close_shortcuts(mpc); ??? FIXME */ } static const char *mpoa_device_type_string(char type) __attribute__ ((unused)); static const char *mpoa_device_type_string(char type) { switch (type) { case NON_MPOA: return "non-MPOA device"; case MPS: return "MPS"; case MPC: return "MPC"; case MPS_AND_MPC: return "both MPS and MPC"; } return "unspecified (non-MPOA) device"; } /* * lec device calls this via its netdev_priv(dev)->lane2_ops * ->associate_indicator() when it sees a TLV in LE_ARP packet. * We fill in the pointer above when we see a LANE2 lec initializing * See LANE2 spec 3.1.5 * * Quite a big and ugly function but when you look at it * all it does is to try to locate and parse MPOA Device * Type TLV. 
* We give our lec a pointer to this function and when the * lec sees a TLV it uses the pointer to call this function. * */ static void lane2_assoc_ind(struct net_device *dev, const u8 *mac_addr, const u8 *tlvs, u32 sizeoftlvs) { uint32_t type; uint8_t length, mpoa_device_type, number_of_mps_macs; const uint8_t *end_of_tlvs; struct mpoa_client *mpc; mpoa_device_type = number_of_mps_macs = 0; /* silence gcc */ dprintk("(%s) received TLV(s), ", dev->name); dprintk("total length of all TLVs %d\n", sizeoftlvs); mpc = find_mpc_by_lec(dev); /* Sampo-Fix: moved here from below */ if (mpc == NULL) { pr_info("(%s) no mpc\n", dev->name); return; } end_of_tlvs = tlvs + sizeoftlvs; while (end_of_tlvs - tlvs >= 5) { type = ((tlvs[0] << 24) | (tlvs[1] << 16) | (tlvs[2] << 8) | tlvs[3]); length = tlvs[4]; tlvs += 5; dprintk(" type 0x%x length %02x\n", type, length); if (tlvs + length > end_of_tlvs) { pr_info("TLV value extends past its buffer, aborting parse\n"); return; } if (type == 0) { pr_info("mpoa: (%s) TLV type was 0, returning\n", dev->name); return; } if (type != TLV_MPOA_DEVICE_TYPE) { tlvs += length; continue; /* skip other TLVs */ } mpoa_device_type = *tlvs++; number_of_mps_macs = *tlvs++; dprintk("(%s) MPOA device type '%s', ", dev->name, mpoa_device_type_string(mpoa_device_type)); if (mpoa_device_type == MPS_AND_MPC && length < (42 + number_of_mps_macs*ETH_ALEN)) { /* :) */ pr_info("(%s) short MPOA Device Type TLV\n", dev->name); continue; } if ((mpoa_device_type == MPS || mpoa_device_type == MPC) && length < 22 + number_of_mps_macs*ETH_ALEN) { pr_info("(%s) short MPOA Device Type TLV\n", dev->name); continue; } if (mpoa_device_type != MPS && mpoa_device_type != MPS_AND_MPC) { dprintk("ignoring non-MPS device "); if (mpoa_device_type == MPC) tlvs += 20; continue; /* we are only interested in MPSs */ } if (number_of_mps_macs == 0 && mpoa_device_type == MPS_AND_MPC) { pr_info("(%s) MPS_AND_MPC has zero MACs\n", dev->name); continue; /* someone should read the spec */ } 
dprintk_cont("this MPS has %d MAC addresses\n", number_of_mps_macs); /* * ok, now we can go and tell our daemon * the control address of MPS */ send_set_mps_ctrl_addr(tlvs, mpc); tlvs = copy_macs(mpc, mac_addr, tlvs, number_of_mps_macs, mpoa_device_type); if (tlvs == NULL) return; } if (end_of_tlvs - tlvs != 0) pr_info("(%s) ignoring %Zd bytes of trailing TLV garbage\n", dev->name, end_of_tlvs - tlvs); } /* * Store at least advertizing router's MAC address * plus the possible MAC address(es) to mpc->mps_macs. * For a freshly allocated MPOA client mpc->mps_macs == 0. */ static const uint8_t *copy_macs(struct mpoa_client *mpc, const uint8_t *router_mac, const uint8_t *tlvs, uint8_t mps_macs, uint8_t device_type) { int num_macs; num_macs = (mps_macs > 1) ? mps_macs : 1; if (mpc->number_of_mps_macs != num_macs) { /* need to reallocate? */ if (mpc->number_of_mps_macs != 0) kfree(mpc->mps_macs); mpc->number_of_mps_macs = 0; mpc->mps_macs = kmalloc(num_macs * ETH_ALEN, GFP_KERNEL); if (mpc->mps_macs == NULL) { pr_info("(%s) out of mem\n", mpc->dev->name); return NULL; } } memcpy(mpc->mps_macs, router_mac, ETH_ALEN); tlvs += 20; if (device_type == MPS_AND_MPC) tlvs += 20; if (mps_macs > 0) memcpy(mpc->mps_macs, tlvs, mps_macs*ETH_ALEN); tlvs += mps_macs*ETH_ALEN; mpc->number_of_mps_macs = num_macs; return tlvs; } static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc) { in_cache_entry *entry; struct iphdr *iph; char *buff; __be32 ipaddr = 0; static struct { struct llc_snap_hdr hdr; __be32 tag; } tagged_llc_snap_hdr = { {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}, {0x88, 0x4c}}, 0 }; buff = skb->data + mpc->dev->hard_header_len; iph = (struct iphdr *)buff; ipaddr = iph->daddr; ddprintk("(%s) ipaddr 0x%x\n", mpc->dev->name, ipaddr); entry = mpc->in_ops->get(ipaddr, mpc); if (entry == NULL) { entry = mpc->in_ops->add_entry(ipaddr, mpc); if (entry != NULL) mpc->in_ops->put(entry); return 1; } /* threshold not exceeded or VCC not ready */ if 
(mpc->in_ops->cache_hit(entry, mpc) != OPEN) { ddprintk("(%s) cache_hit: returns != OPEN\n", mpc->dev->name); mpc->in_ops->put(entry); return 1; } ddprintk("(%s) using shortcut\n", mpc->dev->name); /* MPOA spec A.1.4, MPOA client must decrement IP ttl at least by one */ if (iph->ttl <= 1) { ddprintk("(%s) IP ttl = %u, using LANE\n", mpc->dev->name, iph->ttl); mpc->in_ops->put(entry); return 1; } iph->ttl--; iph->check = 0; iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); if (entry->ctrl_info.tag != 0) { ddprintk("(%s) adding tag 0x%x\n", mpc->dev->name, entry->ctrl_info.tag); tagged_llc_snap_hdr.tag = entry->ctrl_info.tag; skb_pull(skb, ETH_HLEN); /* get rid of Eth header */ skb_push(skb, sizeof(tagged_llc_snap_hdr)); /* add LLC/SNAP header */ skb_copy_to_linear_data(skb, &tagged_llc_snap_hdr, sizeof(tagged_llc_snap_hdr)); } else { skb_pull(skb, ETH_HLEN); /* get rid of Eth header */ skb_push(skb, sizeof(struct llc_snap_hdr)); /* add LLC/SNAP header + tag */ skb_copy_to_linear_data(skb, &llc_snap_mpoa_data, sizeof(struct llc_snap_hdr)); } atomic_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc); ATM_SKB(skb)->atm_options = entry->shortcut->atm_options; entry->shortcut->send(entry->shortcut, skb); entry->packets_fwded++; mpc->in_ops->put(entry); return 0; } /* * Probably needs some error checks and locking, not sure... */ static netdev_tx_t mpc_send_packet(struct sk_buff *skb, struct net_device *dev) { struct mpoa_client *mpc; struct ethhdr *eth; int i = 0; mpc = find_mpc_by_lec(dev); /* this should NEVER fail */ if (mpc == NULL) { pr_info("(%s) no MPC found\n", dev->name); goto non_ip; } eth = (struct ethhdr *)skb->data; if (eth->h_proto != htons(ETH_P_IP)) goto non_ip; /* Multi-Protocol Over ATM :-) */ /* Weed out funny packets (e.g., AF_PACKET or raw). 
*/ if (skb->len < ETH_HLEN + sizeof(struct iphdr)) goto non_ip; skb_set_network_header(skb, ETH_HLEN); if (skb->len < ETH_HLEN + ip_hdr(skb)->ihl * 4 || ip_hdr(skb)->ihl < 5) goto non_ip; while (i < mpc->number_of_mps_macs) { if (!compare_ether_addr(eth->h_dest, (mpc->mps_macs + i*ETH_ALEN))) if (send_via_shortcut(skb, mpc) == 0) /* try shortcut */ return NETDEV_TX_OK; i++; } non_ip: return mpc->old_ops->ndo_start_xmit(skb, dev); } static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg) { int bytes_left; struct mpoa_client *mpc; struct atmmpc_ioc ioc_data; in_cache_entry *in_entry; __be32 ipaddr; bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmmpc_ioc)); if (bytes_left != 0) { pr_info("mpoa:Short read (missed %d bytes) from userland\n", bytes_left); return -EFAULT; } ipaddr = ioc_data.ipaddr; if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF) return -EINVAL; mpc = find_mpc_by_itfnum(ioc_data.dev_num); if (mpc == NULL) return -EINVAL; if (ioc_data.type == MPC_SOCKET_INGRESS) { in_entry = mpc->in_ops->get(ipaddr, mpc); if (in_entry == NULL || in_entry->entry_state < INGRESS_RESOLVED) { pr_info("(%s) did not find RESOLVED entry from ingress cache\n", mpc->dev->name); if (in_entry != NULL) mpc->in_ops->put(in_entry); return -EINVAL; } pr_info("(%s) attaching ingress SVC, entry = %pI4\n", mpc->dev->name, &in_entry->ctrl_info.in_dst_ip); in_entry->shortcut = vcc; mpc->in_ops->put(in_entry); } else { pr_info("(%s) attaching egress SVC\n", mpc->dev->name); } vcc->proto_data = mpc->dev; vcc->push = mpc_push; return 0; } /* * */ static void mpc_vcc_close(struct atm_vcc *vcc, struct net_device *dev) { struct mpoa_client *mpc; in_cache_entry *in_entry; eg_cache_entry *eg_entry; mpc = find_mpc_by_lec(dev); if (mpc == NULL) { pr_info("(%s) close for unknown MPC\n", dev->name); return; } dprintk("(%s)\n", dev->name); in_entry = mpc->in_ops->get_by_vcc(vcc, mpc); if (in_entry) { dprintk("(%s) ingress SVC closed ip = %pI4\n", mpc->dev->name, 
&in_entry->ctrl_info.in_dst_ip); in_entry->shortcut = NULL; mpc->in_ops->put(in_entry); } eg_entry = mpc->eg_ops->get_by_vcc(vcc, mpc); if (eg_entry) { dprintk("(%s) egress SVC closed\n", mpc->dev->name); eg_entry->shortcut = NULL; mpc->eg_ops->put(eg_entry); } if (in_entry == NULL && eg_entry == NULL) dprintk("(%s) unused vcc closed\n", dev->name); } static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb) { struct net_device *dev = (struct net_device *)vcc->proto_data; struct sk_buff *new_skb; eg_cache_entry *eg; struct mpoa_client *mpc; __be32 tag; char *tmp; ddprintk("(%s)\n", dev->name); if (skb == NULL) { dprintk("(%s) null skb, closing VCC\n", dev->name); mpc_vcc_close(vcc, dev); return; } skb->dev = dev; if (memcmp(skb->data, &llc_snap_mpoa_ctrl, sizeof(struct llc_snap_hdr)) == 0) { struct sock *sk = sk_atm(vcc); dprintk("(%s) control packet arrived\n", dev->name); /* Pass control packets to daemon */ skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, skb->len); return; } /* data coming over the shortcut */ atm_return(vcc, skb->truesize); mpc = find_mpc_by_lec(dev); if (mpc == NULL) { pr_info("(%s) unknown MPC\n", dev->name); return; } if (memcmp(skb->data, &llc_snap_mpoa_data_tagged, sizeof(struct llc_snap_hdr)) == 0) { /* MPOA tagged data */ ddprintk("(%s) tagged data packet arrived\n", dev->name); } else if (memcmp(skb->data, &llc_snap_mpoa_data, sizeof(struct llc_snap_hdr)) == 0) { /* MPOA data */ pr_info("(%s) Unsupported non-tagged data packet arrived. 
Purging\n", dev->name); dev_kfree_skb_any(skb); return; } else { pr_info("(%s) garbage arrived, purging\n", dev->name); dev_kfree_skb_any(skb); return; } tmp = skb->data + sizeof(struct llc_snap_hdr); tag = *(__be32 *)tmp; eg = mpc->eg_ops->get_by_tag(tag, mpc); if (eg == NULL) { pr_info("mpoa: (%s) Didn't find egress cache entry, tag = %u\n", dev->name, tag); purge_egress_shortcut(vcc, NULL); dev_kfree_skb_any(skb); return; } /* * See if ingress MPC is using shortcut we opened as a return channel. * This means we have a bi-directional vcc opened by us. */ if (eg->shortcut == NULL) { eg->shortcut = vcc; pr_info("(%s) egress SVC in use\n", dev->name); } skb_pull(skb, sizeof(struct llc_snap_hdr) + sizeof(tag)); /* get rid of LLC/SNAP header */ new_skb = skb_realloc_headroom(skb, eg->ctrl_info.DH_length); /* LLC/SNAP is shorter than MAC header :( */ dev_kfree_skb_any(skb); if (new_skb == NULL) { mpc->eg_ops->put(eg); return; } skb_push(new_skb, eg->ctrl_info.DH_length); /* add MAC header */ skb_copy_to_linear_data(new_skb, eg->ctrl_info.DLL_header, eg->ctrl_info.DH_length); new_skb->protocol = eth_type_trans(new_skb, dev); skb_reset_network_header(new_skb); eg->latest_ip_addr = ip_hdr(new_skb)->saddr; eg->packets_rcvd++; mpc->eg_ops->put(eg); memset(ATM_SKB(new_skb), 0, sizeof(struct atm_skb_data)); netif_rx(new_skb); } static struct atmdev_ops mpc_ops = { /* only send is required */ .close = mpoad_close, .send = msg_from_mpoad }; static struct atm_dev mpc_dev = { .ops = &mpc_ops, .type = "mpc", .number = 42, .lock = __SPIN_LOCK_UNLOCKED(mpc_dev.lock) /* members not explicitly initialised will be 0 */ }; static int atm_mpoa_mpoad_attach(struct atm_vcc *vcc, int arg) { struct mpoa_client *mpc; struct lec_priv *priv; int err; if (mpcs == NULL) { init_timer(&mpc_timer); mpc_timer_refresh(); /* This lets us now how our LECs are doing */ err = register_netdevice_notifier(&mpoa_notifier); if (err < 0) { del_timer(&mpc_timer); return err; } } mpc = find_mpc_by_itfnum(arg); 
if (mpc == NULL) { dprintk("allocating new mpc for itf %d\n", arg); mpc = alloc_mpc(); if (mpc == NULL) return -ENOMEM; mpc->dev_num = arg; mpc->dev = find_lec_by_itfnum(arg); /* NULL if there was no lec */ } if (mpc->mpoad_vcc) { pr_info("mpoad is already present for itf %d\n", arg); return -EADDRINUSE; } if (mpc->dev) { /* check if the lec is LANE2 capable */ priv = netdev_priv(mpc->dev); if (priv->lane_version < 2) { dev_put(mpc->dev); mpc->dev = NULL; } else priv->lane2_ops->associate_indicator = lane2_assoc_ind; } mpc->mpoad_vcc = vcc; vcc->dev = &mpc_dev; vcc_insert_socket(sk_atm(vcc)); set_bit(ATM_VF_META, &vcc->flags); set_bit(ATM_VF_READY, &vcc->flags); if (mpc->dev) { char empty[ATM_ESA_LEN]; memset(empty, 0, ATM_ESA_LEN); start_mpc(mpc, mpc->dev); /* set address if mpcd e.g. gets killed and restarted. * If we do not do it now we have to wait for the next LE_ARP */ if (memcmp(mpc->mps_ctrl_addr, empty, ATM_ESA_LEN) != 0) send_set_mps_ctrl_addr(mpc->mps_ctrl_addr, mpc); } __module_get(THIS_MODULE); return arg; } static void send_set_mps_ctrl_addr(const char *addr, struct mpoa_client *mpc) { struct k_message mesg; memcpy(mpc->mps_ctrl_addr, addr, ATM_ESA_LEN); mesg.type = SET_MPS_CTRL_ADDR; memcpy(mesg.MPS_ctrl, addr, ATM_ESA_LEN); msg_to_mpoad(&mesg, mpc); } static void mpoad_close(struct atm_vcc *vcc) { struct mpoa_client *mpc; struct sk_buff *skb; mpc = find_mpc_by_vcc(vcc); if (mpc == NULL) { pr_info("did not find MPC\n"); return; } if (!mpc->mpoad_vcc) { pr_info("close for non-present mpoad\n"); return; } mpc->mpoad_vcc = NULL; if (mpc->dev) { struct lec_priv *priv = netdev_priv(mpc->dev); priv->lane2_ops->associate_indicator = NULL; stop_mpc(mpc); dev_put(mpc->dev); } mpc->in_ops->destroy_cache(mpc); mpc->eg_ops->destroy_cache(mpc); while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue))) { atm_return(vcc, skb->truesize); kfree_skb(skb); } pr_info("(%s) going down\n", (mpc->dev) ? 
mpc->dev->name : "<unknown>"); module_put(THIS_MODULE); } /* * */ static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb) { struct mpoa_client *mpc = find_mpc_by_vcc(vcc); struct k_message *mesg = (struct k_message *)skb->data; atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); if (mpc == NULL) { pr_info("no mpc found\n"); return 0; } dprintk("(%s)", mpc->dev ? mpc->dev->name : "<unknown>"); switch (mesg->type) { case MPOA_RES_REPLY_RCVD: dprintk_cont("mpoa_res_reply_rcvd\n"); MPOA_res_reply_rcvd(mesg, mpc); break; case MPOA_TRIGGER_RCVD: dprintk_cont("mpoa_trigger_rcvd\n"); MPOA_trigger_rcvd(mesg, mpc); break; case INGRESS_PURGE_RCVD: dprintk_cont("nhrp_purge_rcvd\n"); ingress_purge_rcvd(mesg, mpc); break; case EGRESS_PURGE_RCVD: dprintk_cont("egress_purge_reply_rcvd\n"); egress_purge_rcvd(mesg, mpc); break; case MPS_DEATH: dprintk_cont("mps_death\n"); mps_death(mesg, mpc); break; case CACHE_IMPOS_RCVD: dprintk_cont("cache_impos_rcvd\n"); MPOA_cache_impos_rcvd(mesg, mpc); break; case SET_MPC_CTRL_ADDR: dprintk_cont("set_mpc_ctrl_addr\n"); set_mpc_ctrl_addr_rcvd(mesg, mpc); break; case SET_MPS_MAC_ADDR: dprintk_cont("set_mps_mac_addr\n"); set_mps_mac_addr_rcvd(mesg, mpc); break; case CLEAN_UP_AND_EXIT: dprintk_cont("clean_up_and_exit\n"); clean_up(mesg, mpc, DIE); break; case RELOAD: dprintk_cont("reload\n"); clean_up(mesg, mpc, RELOAD); break; case SET_MPC_PARAMS: dprintk_cont("set_mpc_params\n"); mpc->parameters = mesg->content.params; break; default: dprintk_cont("unknown message %d\n", mesg->type); break; } kfree_skb(skb); return 0; } /* Remember that this function may not do things that sleep */ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc) { struct sk_buff *skb; struct sock *sk; if (mpc == NULL || !mpc->mpoad_vcc) { pr_info("mesg %d to a non-existent mpoad\n", mesg->type); return -ENXIO; } skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; skb_put(skb, sizeof(struct k_message)); 
skb_copy_to_linear_data(skb, mesg, sizeof(*mesg)); atm_force_charge(mpc->mpoad_vcc, skb->truesize); sk = sk_atm(mpc->mpoad_vcc); skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, skb->len); return 0; } static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned long event, void *dev_ptr) { struct net_device *dev; struct mpoa_client *mpc; struct lec_priv *priv; dev = (struct net_device *)dev_ptr; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (dev->name == NULL || strncmp(dev->name, "lec", 3)) return NOTIFY_DONE; /* we are only interested in lec:s */ switch (event) { case NETDEV_REGISTER: /* a new lec device was allocated */ priv = netdev_priv(dev); if (priv->lane_version < 2) break; priv->lane2_ops->associate_indicator = lane2_assoc_ind; mpc = find_mpc_by_itfnum(priv->itfnum); if (mpc == NULL) { dprintk("allocating new mpc for %s\n", dev->name); mpc = alloc_mpc(); if (mpc == NULL) { pr_info("no new mpc"); break; } } mpc->dev_num = priv->itfnum; mpc->dev = dev; dev_hold(dev); dprintk("(%s) was initialized\n", dev->name); break; case NETDEV_UNREGISTER: /* the lec device was deallocated */ mpc = find_mpc_by_lec(dev); if (mpc == NULL) break; dprintk("device (%s) was deallocated\n", dev->name); stop_mpc(mpc); dev_put(mpc->dev); mpc->dev = NULL; break; case NETDEV_UP: /* the dev was ifconfig'ed up */ mpc = find_mpc_by_lec(dev); if (mpc == NULL) break; if (mpc->mpoad_vcc != NULL) start_mpc(mpc, dev); break; case NETDEV_DOWN: /* the dev was ifconfig'ed down */ /* this means that the flow of packets from the * upper layer stops */ mpc = find_mpc_by_lec(dev); if (mpc == NULL) break; if (mpc->mpoad_vcc != NULL) stop_mpc(mpc); break; case NETDEV_REBOOT: case NETDEV_CHANGE: case NETDEV_CHANGEMTU: case NETDEV_CHANGEADDR: case NETDEV_GOING_DOWN: break; default: break; } return NOTIFY_DONE; } /* * Functions which are called after a message is received from mpcd. * Msg is reused on purpose. 
*/ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc) { __be32 dst_ip = msg->content.in_info.in_dst_ip; in_cache_entry *entry; entry = mpc->in_ops->get(dst_ip, mpc); if (entry == NULL) { entry = mpc->in_ops->add_entry(dst_ip, mpc); entry->entry_state = INGRESS_RESOLVING; msg->type = SND_MPOA_RES_RQST; msg->content.in_info = entry->ctrl_info; msg_to_mpoad(msg, mpc); do_gettimeofday(&(entry->reply_wait)); mpc->in_ops->put(entry); return; } if (entry->entry_state == INGRESS_INVALID) { entry->entry_state = INGRESS_RESOLVING; msg->type = SND_MPOA_RES_RQST; msg->content.in_info = entry->ctrl_info; msg_to_mpoad(msg, mpc); do_gettimeofday(&(entry->reply_wait)); mpc->in_ops->put(entry); return; } pr_info("(%s) entry already in resolving state\n", (mpc->dev) ? mpc->dev->name : "<unknown>"); mpc->in_ops->put(entry); } /* * Things get complicated because we have to check if there's an egress * shortcut with suitable traffic parameters we could use. */ static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_client *client, in_cache_entry *entry) { __be32 dst_ip = msg->content.in_info.in_dst_ip; struct atm_mpoa_qos *qos = atm_mpoa_search_qos(dst_ip); eg_cache_entry *eg_entry = client->eg_ops->get_by_src_ip(dst_ip, client); if (eg_entry && eg_entry->shortcut) { if (eg_entry->shortcut->qos.txtp.traffic_class & msg->qos.txtp.traffic_class & (qos ? 
qos->qos.txtp.traffic_class : ATM_UBR | ATM_CBR)) { if (eg_entry->shortcut->qos.txtp.traffic_class == ATM_UBR) entry->shortcut = eg_entry->shortcut; else if (eg_entry->shortcut->qos.txtp.max_pcr > 0) entry->shortcut = eg_entry->shortcut; } if (entry->shortcut) { dprintk("(%s) using egress SVC to reach %pI4\n", client->dev->name, &dst_ip); client->eg_ops->put(eg_entry); return; } } if (eg_entry != NULL) client->eg_ops->put(eg_entry); /* No luck in the egress cache we must open an ingress SVC */ msg->type = OPEN_INGRESS_SVC; if (qos && (qos->qos.txtp.traffic_class == msg->qos.txtp.traffic_class)) { msg->qos = qos->qos; pr_info("(%s) trying to get a CBR shortcut\n", client->dev->name); } else memset(&msg->qos, 0, sizeof(struct atm_qos)); msg_to_mpoad(msg, client); } static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc) { __be32 dst_ip = msg->content.in_info.in_dst_ip; in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc); dprintk("(%s) ip %pI4\n", mpc->dev->name, &dst_ip); ddprintk("(%s) entry = %p", mpc->dev->name, entry); if (entry == NULL) { pr_info("(%s) ARGH, received res. reply for an entry that doesn't exist.\n", mpc->dev->name); return; } ddprintk_cont(" entry_state = %d ", entry->entry_state); if (entry->entry_state == INGRESS_RESOLVED) { pr_info("(%s) RESOLVED entry!\n", mpc->dev->name); mpc->in_ops->put(entry); return; } entry->ctrl_info = msg->content.in_info; do_gettimeofday(&(entry->tv)); do_gettimeofday(&(entry->reply_wait)); /* Used in refreshing func from now on */ entry->refresh_time = 0; ddprintk_cont("entry->shortcut = %p\n", entry->shortcut); if (entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL) { entry->entry_state = INGRESS_RESOLVED; mpc->in_ops->put(entry); return; /* Shortcut already open... 
*/ } if (entry->shortcut != NULL) { pr_info("(%s) entry->shortcut != NULL, impossible!\n", mpc->dev->name); mpc->in_ops->put(entry); return; } check_qos_and_open_shortcut(msg, mpc, entry); entry->entry_state = INGRESS_RESOLVED; mpc->in_ops->put(entry); return; } static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc) { __be32 dst_ip = msg->content.in_info.in_dst_ip; __be32 mask = msg->ip_mask; in_cache_entry *entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask); if (entry == NULL) { pr_info("(%s) purge for a non-existing entry, ip = %pI4\n", mpc->dev->name, &dst_ip); return; } do { dprintk("(%s) removing an ingress entry, ip = %pI4\n", mpc->dev->name, &dst_ip); write_lock_bh(&mpc->ingress_lock); mpc->in_ops->remove_entry(entry, mpc); write_unlock_bh(&mpc->ingress_lock); mpc->in_ops->put(entry); entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask); } while (entry != NULL); } static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc) { __be32 cache_id = msg->content.eg_info.cache_id; eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(cache_id, mpc); if (entry == NULL) { dprintk("(%s) purge for a non-existing entry\n", mpc->dev->name); return; } write_lock_irq(&mpc->egress_lock); mpc->eg_ops->remove_entry(entry, mpc); write_unlock_irq(&mpc->egress_lock); mpc->eg_ops->put(entry); } static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry) { struct sock *sk; struct k_message *purge_msg; struct sk_buff *skb; dprintk("entering\n"); if (vcc == NULL) { pr_info("vcc == NULL\n"); return; } skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC); if (skb == NULL) { pr_info("out of memory\n"); return; } skb_put(skb, sizeof(struct k_message)); memset(skb->data, 0, sizeof(struct k_message)); purge_msg = (struct k_message *)skb->data; purge_msg->type = DATA_PLANE_PURGE; if (entry != NULL) purge_msg->content.eg_info = entry->ctrl_info; atm_force_charge(vcc, skb->truesize); sk = sk_atm(vcc); 
skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, skb->len); dprintk("exiting\n"); } /* * Our MPS died. Tell our daemon to send NHRP data plane purge to each * of the egress shortcuts we have. */ static void mps_death(struct k_message *msg, struct mpoa_client *mpc) { eg_cache_entry *entry; dprintk("(%s)\n", mpc->dev->name); if (memcmp(msg->MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN)) { pr_info("(%s) wrong MPS\n", mpc->dev->name); return; } /* FIXME: This knows too much of the cache structure */ read_lock_irq(&mpc->egress_lock); entry = mpc->eg_cache; while (entry != NULL) { purge_egress_shortcut(entry->shortcut, entry); entry = entry->next; } read_unlock_irq(&mpc->egress_lock); mpc->in_ops->destroy_cache(mpc); mpc->eg_ops->destroy_cache(mpc); } static void MPOA_cache_impos_rcvd(struct k_message *msg, struct mpoa_client *mpc) { uint16_t holding_time; eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(msg->content.eg_info.cache_id, mpc); holding_time = msg->content.eg_info.holding_time; dprintk("(%s) entry = %p, holding_time = %u\n", mpc->dev->name, entry, holding_time); if (entry == NULL && holding_time) { entry = mpc->eg_ops->add_entry(msg, mpc); mpc->eg_ops->put(entry); return; } if (holding_time) { mpc->eg_ops->update(entry, holding_time); return; } write_lock_irq(&mpc->egress_lock); mpc->eg_ops->remove_entry(entry, mpc); write_unlock_irq(&mpc->egress_lock); mpc->eg_ops->put(entry); } static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc) { struct lec_priv *priv; int i, retval ; uint8_t tlv[4 + 1 + 1 + 1 + ATM_ESA_LEN]; tlv[0] = 00; tlv[1] = 0xa0; tlv[2] = 0x3e; tlv[3] = 0x2a; /* type */ tlv[4] = 1 + 1 + ATM_ESA_LEN; /* length */ tlv[5] = 0x02; /* MPOA client */ tlv[6] = 0x00; /* number of MPS MAC addresses */ memcpy(&tlv[7], mesg->MPS_ctrl, ATM_ESA_LEN); /* MPC ctrl ATM addr */ memcpy(mpc->our_ctrl_addr, mesg->MPS_ctrl, ATM_ESA_LEN); dprintk("(%s) setting MPC ctrl ATM address to", mpc->dev ? 
mpc->dev->name : "<unknown>"); for (i = 7; i < sizeof(tlv); i++) dprintk_cont(" %02x", tlv[i]); dprintk_cont("\n"); if (mpc->dev) { priv = netdev_priv(mpc->dev); retval = priv->lane2_ops->associate_req(mpc->dev, mpc->dev->dev_addr, tlv, sizeof(tlv)); if (retval == 0) pr_info("(%s) MPOA device type TLV association failed\n", mpc->dev->name); retval = priv->lane2_ops->resolve(mpc->dev, NULL, 1, NULL, NULL); if (retval < 0) pr_info("(%s) targetless LE_ARP request failed\n", mpc->dev->name); } } static void set_mps_mac_addr_rcvd(struct k_message *msg, struct mpoa_client *client) { if (client->number_of_mps_macs) kfree(client->mps_macs); client->number_of_mps_macs = 0; client->mps_macs = kmemdup(msg->MPS_ctrl, ETH_ALEN, GFP_KERNEL); if (client->mps_macs == NULL) { pr_info("out of memory\n"); return; } client->number_of_mps_macs = 1; } /* * purge egress cache and tell daemon to 'action' (DIE, RELOAD) */ static void clean_up(struct k_message *msg, struct mpoa_client *mpc, int action) { eg_cache_entry *entry; msg->type = SND_EGRESS_PURGE; /* FIXME: This knows too much of the cache structure */ read_lock_irq(&mpc->egress_lock); entry = mpc->eg_cache; while (entry != NULL) { msg->content.eg_info = entry->ctrl_info; dprintk("cache_id %u\n", entry->ctrl_info.cache_id); msg_to_mpoad(msg, mpc); entry = entry->next; } read_unlock_irq(&mpc->egress_lock); msg->type = action; msg_to_mpoad(msg, mpc); } static void mpc_timer_refresh(void) { mpc_timer.expires = jiffies + (MPC_P2 * HZ); mpc_timer.data = mpc_timer.expires; mpc_timer.function = mpc_cache_check; add_timer(&mpc_timer); } static void mpc_cache_check(unsigned long checking_time) { struct mpoa_client *mpc = mpcs; static unsigned long previous_resolving_check_time; static unsigned long previous_refresh_time; while (mpc != NULL) { mpc->in_ops->clear_count(mpc); mpc->eg_ops->clear_expired(mpc); if (checking_time - previous_resolving_check_time > mpc->parameters.mpc_p4 * HZ) { mpc->in_ops->check_resolving(mpc); 
previous_resolving_check_time = checking_time; } if (checking_time - previous_refresh_time > mpc->parameters.mpc_p5 * HZ) { mpc->in_ops->refresh(mpc); previous_refresh_time = checking_time; } mpc = mpc->next; } mpc_timer_refresh(); } static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { int err = 0; struct atm_vcc *vcc = ATM_SD(sock); if (cmd != ATMMPC_CTRL && cmd != ATMMPC_DATA) return -ENOIOCTLCMD; if (!capable(CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case ATMMPC_CTRL: err = atm_mpoa_mpoad_attach(vcc, (int)arg); if (err >= 0) sock->state = SS_CONNECTED; break; case ATMMPC_DATA: err = atm_mpoa_vcc_attach(vcc, (void __user *)arg); break; default: break; } return err; } static struct atm_ioctl atm_ioctl_ops = { .owner = THIS_MODULE, .ioctl = atm_mpoa_ioctl, }; static __init int atm_mpoa_init(void) { register_atm_ioctl(&atm_ioctl_ops); if (mpc_proc_init() != 0) pr_info("failed to initialize /proc/mpoa\n"); pr_info("mpc.c: initialized\n"); return 0; } static void __exit atm_mpoa_cleanup(void) { struct mpoa_client *mpc, *tmp; struct atm_mpoa_qos *qos, *nextqos; struct lec_priv *priv; mpc_proc_clean(); del_timer(&mpc_timer); unregister_netdevice_notifier(&mpoa_notifier); deregister_atm_ioctl(&atm_ioctl_ops); mpc = mpcs; mpcs = NULL; while (mpc != NULL) { tmp = mpc->next; if (mpc->dev != NULL) { stop_mpc(mpc); priv = netdev_priv(mpc->dev); if (priv->lane2_ops != NULL) priv->lane2_ops->associate_indicator = NULL; } ddprintk("about to clear caches\n"); mpc->in_ops->destroy_cache(mpc); mpc->eg_ops->destroy_cache(mpc); ddprintk("caches cleared\n"); kfree(mpc->mps_macs); memset(mpc, 0, sizeof(struct mpoa_client)); ddprintk("about to kfree %p\n", mpc); kfree(mpc); ddprintk("next mpc is at %p\n", tmp); mpc = tmp; } qos = qos_head; qos_head = NULL; while (qos != NULL) { nextqos = qos->next; dprintk("freeing qos entry %p\n", qos); kfree(qos); qos = nextqos; } } module_init(atm_mpoa_init); module_exit(atm_mpoa_cleanup); MODULE_LICENSE("GPL");
gpl-2.0
rastomanchik/android_kernel_htc_primou_new
sound/usb/6fire/midi.c
3147
5009
/* * Linux driver for TerraTec DMX 6Fire USB * * Rawmidi driver * * Author: Torsten Schenk <torsten.schenk@zoho.com> * Created: Jan 01, 2011 * Version: 0.3.0 * Copyright: (C) Torsten Schenk * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <sound/rawmidi.h> #include "midi.h" #include "chip.h" #include "comm.h" static void usb6fire_midi_out_handler(struct urb *urb) { struct midi_runtime *rt = urb->context; int ret; unsigned long flags; spin_lock_irqsave(&rt->out_lock, flags); if (rt->out) { ret = snd_rawmidi_transmit(rt->out, rt->out_buffer + 4, MIDI_BUFSIZE - 4); if (ret > 0) { /* more data available, send next packet */ rt->out_buffer[1] = ret + 2; rt->out_buffer[3] = rt->out_serial++; urb->transfer_buffer_length = ret + 4; ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) snd_printk(KERN_ERR PREFIX "midi out urb " "submit failed: %d\n", ret); } else /* no more data to transmit */ rt->out = NULL; } spin_unlock_irqrestore(&rt->out_lock, flags); } static void usb6fire_midi_in_received( struct midi_runtime *rt, u8 *data, int length) { unsigned long flags; spin_lock_irqsave(&rt->in_lock, flags); if (rt->in) snd_rawmidi_receive(rt->in, data, length); spin_unlock_irqrestore(&rt->in_lock, flags); } static int usb6fire_midi_out_open(struct snd_rawmidi_substream *alsa_sub) { return 0; } static int usb6fire_midi_out_close(struct snd_rawmidi_substream *alsa_sub) { return 0; } static void usb6fire_midi_out_trigger( struct snd_rawmidi_substream *alsa_sub, int up) { struct midi_runtime *rt = alsa_sub->rmidi->private_data; struct urb *urb = &rt->out_urb; __s8 ret; unsigned long flags; spin_lock_irqsave(&rt->out_lock, flags); if (up) { /* start transfer */ if (rt->out) { /* we are already transmitting so just return */ spin_unlock_irqrestore(&rt->out_lock, flags); 
return; } ret = snd_rawmidi_transmit(alsa_sub, rt->out_buffer + 4, MIDI_BUFSIZE - 4); if (ret > 0) { rt->out_buffer[1] = ret + 2; rt->out_buffer[3] = rt->out_serial++; urb->transfer_buffer_length = ret + 4; ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) snd_printk(KERN_ERR PREFIX "midi out urb " "submit failed: %d\n", ret); else rt->out = alsa_sub; } } else if (rt->out == alsa_sub) rt->out = NULL; spin_unlock_irqrestore(&rt->out_lock, flags); } static void usb6fire_midi_out_drain(struct snd_rawmidi_substream *alsa_sub) { struct midi_runtime *rt = alsa_sub->rmidi->private_data; int retry = 0; while (rt->out && retry++ < 100) msleep(10); } static int usb6fire_midi_in_open(struct snd_rawmidi_substream *alsa_sub) { return 0; } static int usb6fire_midi_in_close(struct snd_rawmidi_substream *alsa_sub) { return 0; } static void usb6fire_midi_in_trigger( struct snd_rawmidi_substream *alsa_sub, int up) { struct midi_runtime *rt = alsa_sub->rmidi->private_data; unsigned long flags; spin_lock_irqsave(&rt->in_lock, flags); if (up) rt->in = alsa_sub; else rt->in = NULL; spin_unlock_irqrestore(&rt->in_lock, flags); } static struct snd_rawmidi_ops out_ops = { .open = usb6fire_midi_out_open, .close = usb6fire_midi_out_close, .trigger = usb6fire_midi_out_trigger, .drain = usb6fire_midi_out_drain }; static struct snd_rawmidi_ops in_ops = { .open = usb6fire_midi_in_open, .close = usb6fire_midi_in_close, .trigger = usb6fire_midi_in_trigger }; int __devinit usb6fire_midi_init(struct sfire_chip *chip) { int ret; struct midi_runtime *rt = kzalloc(sizeof(struct midi_runtime), GFP_KERNEL); struct comm_runtime *comm_rt = chip->comm; if (!rt) return -ENOMEM; rt->chip = chip; rt->in_received = usb6fire_midi_in_received; rt->out_buffer[0] = 0x80; /* 'send midi' command */ rt->out_buffer[1] = 0x00; /* size of data */ rt->out_buffer[2] = 0x00; /* always 0 */ spin_lock_init(&rt->in_lock); spin_lock_init(&rt->out_lock); comm_rt->init_urb(comm_rt, &rt->out_urb, rt->out_buffer, rt, 
usb6fire_midi_out_handler); ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance); if (ret < 0) { kfree(rt); snd_printk(KERN_ERR PREFIX "unable to create midi.\n"); return ret; } rt->instance->private_data = rt; strcpy(rt->instance->name, "DMX6FireUSB MIDI"); rt->instance->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT | SNDRV_RAWMIDI_INFO_INPUT | SNDRV_RAWMIDI_INFO_DUPLEX; snd_rawmidi_set_ops(rt->instance, SNDRV_RAWMIDI_STREAM_OUTPUT, &out_ops); snd_rawmidi_set_ops(rt->instance, SNDRV_RAWMIDI_STREAM_INPUT, &in_ops); chip->midi = rt; return 0; } void usb6fire_midi_abort(struct sfire_chip *chip) { struct midi_runtime *rt = chip->midi; if (rt) usb_poison_urb(&rt->out_urb); } void usb6fire_midi_destroy(struct sfire_chip *chip) { kfree(chip->midi); chip->midi = NULL; }
gpl-2.0
MoKee/android_kernel_motorola_ghost
drivers/media/video/tea6420.c
7243
4447
/* tea6420 - i2c-driver for the tea6420 by SGS Thomson Copyright (C) 1998-2003 Michael Hunold <michael@mihu.de> Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl> The tea6420 is a bus controlled audio-matrix with 5 stereo inputs, 4 stereo outputs and gain control for each output. It is cascadable, i.e. it can be found at the addresses 0x98 and 0x9a on the i2c-bus. For detailed informations download the specifications directly from SGS Thomson at http://www.st.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/ioctl.h> #include <linux/slab.h> #include <linux/i2c.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> #include "tea6420.h" MODULE_AUTHOR("Michael Hunold <michael@mihu.de>"); MODULE_DESCRIPTION("tea6420 driver"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level (0-1)"); /* make a connection between the input 'i' and the output 'o' with gain 'g' (note: i = 6 means 'mute') */ static int tea6420_s_routing(struct v4l2_subdev *sd, u32 i, u32 o, u32 config) { struct i2c_client *client = v4l2_get_subdevdata(sd); int g = (o >> 4) & 0xf; u8 byte; int ret; o &= 0xf; v4l2_dbg(1, debug, sd, "i=%d, o=%d, g=%d\n", i, o, g); /* check if the parameters are valid */ if (i < 1 || i > 6 || o < 1 || o > 4 || g < 0 || g > 6 || g % 2 != 0) return -EINVAL; byte = ((o - 1) << 5); byte |= (i - 1); /* to understand this, have a look at the tea6420-specs (p.5) */ switch (g) { case 0: byte |= (3 << 3); break; case 2: byte |= (2 << 3); break; case 4: byte |= (1 << 3); break; case 6: break; } ret = i2c_smbus_write_byte(client, byte); if (ret) { v4l2_dbg(1, debug, sd, "i2c_smbus_write_byte() failed, ret:%d\n", ret); return -EIO; } return 0; } static int tea6420_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_TEA6420, 0); } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops tea6420_core_ops = { .g_chip_ident = tea6420_g_chip_ident, }; static const struct v4l2_subdev_audio_ops tea6420_audio_ops = { .s_routing = tea6420_s_routing, }; static const struct v4l2_subdev_ops tea6420_ops = { .core = &tea6420_core_ops, .audio = &tea6420_audio_ops, }; static int tea6420_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct v4l2_subdev *sd; int err, i; /* let's see 
whether this adapter can support what we need */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE)) return -EIO; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); if (sd == NULL) return -ENOMEM; v4l2_i2c_subdev_init(sd, client, &tea6420_ops); /* set initial values: set "mute"-input to all outputs at gain 0 */ err = 0; for (i = 1; i < 5; i++) err += tea6420_s_routing(sd, 6, i, 0); if (err) { v4l_dbg(1, debug, client, "could not initialize tea6420\n"); return -ENODEV; } return 0; } static int tea6420_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); kfree(sd); return 0; } static const struct i2c_device_id tea6420_id[] = { { "tea6420", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, tea6420_id); static struct i2c_driver tea6420_driver = { .driver = { .owner = THIS_MODULE, .name = "tea6420", }, .probe = tea6420_probe, .remove = tea6420_remove, .id_table = tea6420_id, }; module_i2c_driver(tea6420_driver);
gpl-2.0
Bdaman80/BDA-Iconia
fs/ext4/mballoc.c
76
129748
/* * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com * Written by Alex Tomas <alex@clusterfs.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public Licens * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- */ /* * mballoc.c contains the multiblocks allocation routines */ #include "mballoc.h" #include <linux/debugfs.h> #include <linux/slab.h> #include <trace/events/ext4.h> /* * MUSTDO: * - test ext4_ext_search_left() and ext4_ext_search_right() * - search for metadata in few groups * * TODO v4: * - normalization should take into account whether file is still open * - discard preallocations if no free space left (policy?) * - don't normalize tails * - quota * - reservation for superuser * * TODO v3: * - bitmap read-ahead (proposed by Oleg Drokin aka green) * - track min/max extents in each group for better group selection * - mb_mark_used() may allocate chunk right after splitting buddy * - tree of groups sorted by number of free blocks * - error handling */ /* * The allocation request involve request for multiple number of blocks * near to the goal(block) value specified. * * During initialization phase of the allocator we decide to use the * group preallocation or inode preallocation depending on the size of * the file. The size of the file could be the resulting file size we * would have after allocation, or the current file size, which ever * is larger. 
If the size is less than sbi->s_mb_stream_request we * select to use the group preallocation. The default value of * s_mb_stream_request is 16 blocks. This can also be tuned via * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in * terms of number of blocks. * * The main motivation for having small file use group preallocation is to * ensure that we have small files closer together on the disk. * * First stage the allocator looks at the inode prealloc list, * ext4_inode_info->i_prealloc_list, which contains list of prealloc * spaces for this particular inode. The inode prealloc space is * represented as: * * pa_lstart -> the logical start block for this prealloc space * pa_pstart -> the physical start block for this prealloc space * pa_len -> length for this prealloc space * pa_free -> free space available in this prealloc space * * The inode preallocation space is used looking at the _logical_ start * block. If only the logical file block falls within the range of prealloc * space we will consume the particular prealloc space. This make sure that * that the we have contiguous physical blocks representing the file blocks * * The important thing to be noted in case of inode prealloc space is that * we don't modify the values associated to inode prealloc space except * pa_free. * * If we are not able to find blocks in the inode prealloc space and if we * have the group allocation flag set then we look at the locality group * prealloc space. These are per CPU prealloc list repreasented as * * ext4_sb_info.s_locality_groups[smp_processor_id()] * * The reason for having a per cpu locality group is to reduce the contention * between CPUs. It is possible to get scheduled at this point. * * The locality group prealloc space is used looking at whether we have * enough free space (pa_free) withing the prealloc space. * * If we can't allocate blocks via inode prealloc or/and locality group * prealloc then we look at the buddy cache. 
The buddy cache is represented * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets * mapped to the buddy and bitmap information regarding different * groups. The buddy information is attached to buddy cache inode so that * we can access them through the page cache. The information regarding * each group is loaded via ext4_mb_load_buddy. The information involve * block bitmap and buddy information. The information are stored in the * inode as: * * { page } * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... * * * one block each for bitmap and buddy information. So for each group we * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE / * blocksize) blocks. So it can have information regarding groups_per_page * which is blocks_per_page/2 * * The buddy cache inode is not stored on disk. The inode is thrown * away when the filesystem is unmounted. * * We look for count number of blocks in the buddy cache. If we were able * to locate that many free blocks we return with additional information * regarding rest of the contiguous physical block available * * Before allocating blocks via buddy cache we normalize the request * blocks. This ensure we ask for more blocks that we needed. The extra * blocks that we get after allocation is added to the respective prealloc * list. In case of inode preallocation we follow a list of heuristics * based on file size. This can be found in ext4_mb_normalize_request. If * we are doing a group prealloc we try to normalize the request to * sbi->s_mb_group_prealloc. Default value of s_mb_group_prealloc is * 512 blocks. This can be tuned via * /sys/fs/ext4/<partition/mb_group_prealloc. The value is represented in * terms of number of blocks. If we have mounted the file system with -O * stripe=<value> option the group prealloc request is normalized to the * stripe value (sbi->s_stripe) * * The regular allocator(using the buddy cache) supports few tunables. 
* * /sys/fs/ext4/<partition>/mb_min_to_scan * /sys/fs/ext4/<partition>/mb_max_to_scan * /sys/fs/ext4/<partition>/mb_order2_req * * The regular allocator uses buddy scan only if the request len is power of * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The * value of s_mb_order2_reqs can be tuned via * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to * stripe size (sbi->s_stripe), we try to search for contiguous block in * stripe size. This should result in better allocation on RAID setups. If * not, we search in the specific group using bitmap for best extents. The * tunable min_to_scan and max_to_scan control the behaviour here. * min_to_scan indicate how long the mballoc __must__ look for a best * extent and max_to_scan indicates how long the mballoc __can__ look for a * best extent in the found extents. Searching for the blocks starts with * the group specified as the goal value in allocation context via * ac_g_ex. Each group is first checked based on the criteria whether it * can used for allocation. ext4_mb_good_group explains how the groups are * checked. * * Both the prealloc space are getting populated as above. So for the first * request we will hit the buddy cache which will result in this prealloc * space getting filled. The prealloc space is then later used for the * subsequent request. */ /* * mballoc operates on the following data: * - on-disk bitmap * - in-core buddy (actually includes buddy and bitmap) * - preallocation descriptors (PAs) * * there are two types of preallocations: * - inode * assiged to specific inode and can be used for this inode only. * it describes part of inode's space preallocated to specific * physical blocks. any block from that preallocated can be used * independent. the descriptor just tracks number of blocks left * unused. so, before taking some block from descriptor, one must * make sure corresponded logical block isn't allocated yet. 
this also
 *    means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to a
 *    permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * relation between them can be expressed as:
 * in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count number of
 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA		on-disk += N; PA -= N
 *  - discard locality group PA		buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know actual used
 *        bits from PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while a block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     a bit set and PA claims the same block, it's OK.
IOW, one can set bit in * on-disk bitmap if buddy has same bit set or/and PA covers corresponded * block * * so, now we're building a concurrency table: * - init buddy vs. * - new PA * blocks for PA are allocated in the buddy, buddy must be referenced * until PA is linked to allocation group to avoid concurrent buddy init * - use inode PA * we need to make sure that either on-disk bitmap or PA has uptodate data * given (3) we care that PA-=N operation doesn't interfere with init * - discard inode PA * the simplest way would be to have buddy initialized by the discard * - use locality group PA * again PA-=N must be serialized with init * - discard locality group PA * the simplest way would be to have buddy initialized by the discard * - new PA vs. * - use inode PA * i_data_sem serializes them * - discard inode PA * discard process must wait until PA isn't used by another process * - use locality group PA * some mutex should serialize them * - discard locality group PA * discard process must wait until PA isn't used by another process * - use inode PA * - use inode PA * i_data_sem or another mutex should serializes them * - discard inode PA * discard process must wait until PA isn't used by another process * - use locality group PA * nothing wrong here -- they're different PAs covering different blocks * - discard locality group PA * discard process must wait until PA isn't used by another process * * now we're ready to make few consequences: * - PA is referenced and while it is no discard is possible * - PA is referenced until block isn't marked in on-disk bitmap * - PA changes only after on-disk bitmap * - discard must not compete with init. either init is done before * any discard or they're serialized somehow * - buddy init as sum of on-disk bitmap and PAs is done atomically * * a special case when we've used PA to emptiness. 
no need to modify buddy * in this case, but we should care about concurrent init * */ /* * Logic in few words: * * - allocation: * load group * find blocks * mark bits in on-disk bitmap * release group * * - use preallocation: * find proper PA (per-inode or group) * load group * mark bits in on-disk bitmap * release group * release PA * * - free: * load group * mark bits in on-disk bitmap * release group * * - discard preallocations in group: * mark PAs deleted * move them onto local list * load on-disk bitmap * load group * remove PA from object (inode or locality group) * mark free blocks in-core * * - discard inode's preallocations: */ /* * Locking rules * * Locks: * - bitlock on a group (group) * - object (inode/locality) (object) * - per-pa lock (pa) * * Paths: * - new pa * object * group * * - find and use pa: * pa * * - release consumed pa: * pa * group * object * * - generate in-core bitmap: * group * pa * * - discard all for given object (inode, locality group): * object * pa * group * * - discard all for given group: * group * pa * group * object * */ static struct kmem_cache *ext4_pspace_cachep; static struct kmem_cache *ext4_ac_cachep; static struct kmem_cache *ext4_free_ext_cachep; static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, ext4_group_t group); static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, ext4_group_t group); static void release_blocks_on_commit(journal_t *journal, transaction_t *txn); static inline void *mb_correct_addr_and_bit(int *bit, void *addr) { #if BITS_PER_LONG == 64 *bit += ((unsigned long) addr & 7UL) << 3; addr = (void *) ((unsigned long) addr & ~7UL); #elif BITS_PER_LONG == 32 *bit += ((unsigned long) addr & 3UL) << 3; addr = (void *) ((unsigned long) addr & ~3UL); #else #error "how many bits you are?!" 
#endif return addr; } static inline int mb_test_bit(int bit, void *addr) { /* * ext4_test_bit on architecture like powerpc * needs unsigned long aligned address */ addr = mb_correct_addr_and_bit(&bit, addr); return ext4_test_bit(bit, addr); } static inline void mb_set_bit(int bit, void *addr) { addr = mb_correct_addr_and_bit(&bit, addr); ext4_set_bit(bit, addr); } static inline void mb_clear_bit(int bit, void *addr) { addr = mb_correct_addr_and_bit(&bit, addr); ext4_clear_bit(bit, addr); } static inline int mb_find_next_zero_bit(void *addr, int max, int start) { int fix = 0, ret, tmpmax; addr = mb_correct_addr_and_bit(&fix, addr); tmpmax = max + fix; start += fix; ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix; if (ret > max) return max; return ret; } static inline int mb_find_next_bit(void *addr, int max, int start) { int fix = 0, ret, tmpmax; addr = mb_correct_addr_and_bit(&fix, addr); tmpmax = max + fix; start += fix; ret = ext4_find_next_bit(addr, tmpmax, start) - fix; if (ret > max) return max; return ret; } static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max) { char *bb; BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b)); BUG_ON(max == NULL); if (order > e4b->bd_blkbits + 1) { *max = 0; return NULL; } /* at order 0 we see each particular block */ *max = 1 << (e4b->bd_blkbits + 3); if (order == 0) return EXT4_MB_BITMAP(e4b); bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order]; *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order]; return bb; } #ifdef DOUBLE_CHECK static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b, int first, int count) { int i; struct super_block *sb = e4b->bd_sb; if (unlikely(e4b->bd_info->bb_bitmap == NULL)) return; assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); for (i = 0; i < count; i++) { if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) { ext4_fsblk_t blocknr; blocknr = ext4_group_first_block_no(sb, e4b->bd_group); blocknr += first + i; 
ext4_grp_locked_error(sb, e4b->bd_group, inode ? inode->i_ino : 0, blocknr, "freeing block already freed " "(bit %u)", first + i); } mb_clear_bit(first + i, e4b->bd_info->bb_bitmap); } } static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count) { int i; if (unlikely(e4b->bd_info->bb_bitmap == NULL)) return; assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); for (i = 0; i < count; i++) { BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap)); mb_set_bit(first + i, e4b->bd_info->bb_bitmap); } } static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) { if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) { unsigned char *b1, *b2; int i; b1 = (unsigned char *) e4b->bd_info->bb_bitmap; b2 = (unsigned char *) bitmap; for (i = 0; i < e4b->bd_sb->s_blocksize; i++) { if (b1[i] != b2[i]) { printk(KERN_ERR "corruption in group %u " "at byte %u(%u): %x in copy != %x " "on disk/prealloc\n", e4b->bd_group, i, i * 8, b1[i], b2[i]); BUG(); } } } } #else static inline void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b, int first, int count) { return; } static inline void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count) { return; } static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) { return; } #endif #ifdef AGGRESSIVE_CHECK #define MB_CHECK_ASSERT(assert) \ do { \ if (!(assert)) { \ printk(KERN_EMERG \ "Assertion failure in %s() at %s:%d: \"%s\"\n", \ function, file, line, # assert); \ BUG(); \ } \ } while (0) static int __mb_check_buddy(struct ext4_buddy *e4b, char *file, const char *function, int line) { struct super_block *sb = e4b->bd_sb; int order = e4b->bd_blkbits + 1; int max; int max2; int i; int j; int k; int count; struct ext4_group_info *grp; int fragments = 0; int fstart; struct list_head *cur; void *buddy; void *buddy2; { static int mb_check_counter; if (mb_check_counter++ % 100 != 0) return 0; } while (order > 1) { buddy = mb_find_buddy(e4b, order, 
&max); MB_CHECK_ASSERT(buddy); buddy2 = mb_find_buddy(e4b, order - 1, &max2); MB_CHECK_ASSERT(buddy2); MB_CHECK_ASSERT(buddy != buddy2); MB_CHECK_ASSERT(max * 2 == max2); count = 0; for (i = 0; i < max; i++) { if (mb_test_bit(i, buddy)) { /* only single bit in buddy2 may be 1 */ if (!mb_test_bit(i << 1, buddy2)) { MB_CHECK_ASSERT( mb_test_bit((i<<1)+1, buddy2)); } else if (!mb_test_bit((i << 1) + 1, buddy2)) { MB_CHECK_ASSERT( mb_test_bit(i << 1, buddy2)); } continue; } /* both bits in buddy2 must be 0 */ MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2)); MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2)); for (j = 0; j < (1 << order); j++) { k = (i * (1 << order)) + j; MB_CHECK_ASSERT( !mb_test_bit(k, EXT4_MB_BITMAP(e4b))); } count++; } MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count); order--; } fstart = -1; buddy = mb_find_buddy(e4b, 0, &max); for (i = 0; i < max; i++) { if (!mb_test_bit(i, buddy)) { MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free); if (fstart == -1) { fragments++; fstart = i; } continue; } fstart = -1; /* check used bits only */ for (j = 0; j < e4b->bd_blkbits + 1; j++) { buddy2 = mb_find_buddy(e4b, j, &max2); k = i >> j; MB_CHECK_ASSERT(k < max2); MB_CHECK_ASSERT(mb_test_bit(k, buddy2)); } } MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info)); MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments); grp = ext4_get_group_info(sb, e4b->bd_group); buddy = mb_find_buddy(e4b, 0, &max); list_for_each(cur, &grp->bb_prealloc_list) { ext4_group_t groupnr; struct ext4_prealloc_space *pa; pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); MB_CHECK_ASSERT(groupnr == e4b->bd_group); for (i = 0; i < pa->pa_len; i++) MB_CHECK_ASSERT(mb_test_bit(k + i, buddy)); } return 0; } #undef MB_CHECK_ASSERT #define mb_check_buddy(e4b) __mb_check_buddy(e4b, \ __FILE__, __func__, __LINE__) #else #define mb_check_buddy(e4b) #endif /* FIXME!! 
need more doc */ static void ext4_mb_mark_free_simple(struct super_block *sb, void *buddy, ext4_grpblk_t first, ext4_grpblk_t len, struct ext4_group_info *grp) { struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_grpblk_t min; ext4_grpblk_t max; ext4_grpblk_t chunk; unsigned short border; BUG_ON(len > EXT4_BLOCKS_PER_GROUP(sb)); border = 2 << sb->s_blocksize_bits; while (len > 0) { /* find how many blocks can be covered since this position */ max = ffs(first | border) - 1; /* find how many blocks of power 2 we need to mark */ min = fls(len) - 1; if (max < min) min = max; chunk = 1 << min; /* mark multiblock chunks only */ grp->bb_counters[min]++; if (min > 0) mb_clear_bit(first >> min, buddy + sbi->s_mb_offsets[min]); len -= chunk; first += chunk; } } /* * Cache the order of the largest free extent we have available in this block * group. */ static void mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) { int i; int bits; grp->bb_largest_free_order = -1; /* uninit */ bits = sb->s_blocksize_bits + 1; for (i = bits; i >= 0; i--) { if (grp->bb_counters[i] > 0) { grp->bb_largest_free_order = i; break; } } } static noinline_for_stack void ext4_mb_generate_buddy(struct super_block *sb, void *buddy, void *bitmap, ext4_group_t group) { struct ext4_group_info *grp = ext4_get_group_info(sb, group); ext4_grpblk_t max = EXT4_BLOCKS_PER_GROUP(sb); ext4_grpblk_t i = 0; ext4_grpblk_t first; ext4_grpblk_t len; unsigned free = 0; unsigned fragments = 0; unsigned long long period = get_cycles(); /* initialize buddy from bitmap which is aggregation * of on-disk bitmap and preallocations */ i = mb_find_next_zero_bit(bitmap, max, 0); grp->bb_first_free = i; while (i < max) { fragments++; first = i; i = mb_find_next_bit(bitmap, max, i); len = i - first; free += len; if (len > 1) ext4_mb_mark_free_simple(sb, buddy, first, len, grp); else grp->bb_counters[0]++; if (i < max) i = mb_find_next_zero_bit(bitmap, max, i); } grp->bb_fragments = fragments; if (free != 
grp->bb_free) { ext4_grp_locked_error(sb, group, 0, 0, "%u blocks in bitmap, %u in gd", free, grp->bb_free); /* * If we intent to continue, we consider group descritor * corrupt and update bb_free using bitmap value */ grp->bb_free = free; } mb_set_largest_free_order(sb, grp); clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); period = get_cycles() - period; spin_lock(&EXT4_SB(sb)->s_bal_lock); EXT4_SB(sb)->s_mb_buddies_generated++; EXT4_SB(sb)->s_mb_generation_time += period; spin_unlock(&EXT4_SB(sb)->s_bal_lock); } /* The buddy information is attached the buddy cache inode * for convenience. The information regarding each group * is loaded via ext4_mb_load_buddy. The information involve * block bitmap and buddy information. The information are * stored in the inode as * * { page } * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... * * * one block each for bitmap and buddy information. * So for each group we take up 2 blocks. A page can * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks. * So it can have information regarding groups_per_page which * is blocks_per_page/2 * * Locking note: This routine takes the block group lock of all groups * for this page; do not hold this lock when calling this routine! 
*/ static int ext4_mb_init_cache(struct page *page, char *incore) { ext4_group_t ngroups; int blocksize; int blocks_per_page; int groups_per_page; int err = 0; int i; ext4_group_t first_group; int first_block; struct super_block *sb; struct buffer_head *bhs; struct buffer_head **bh; struct inode *inode; char *data; char *bitmap; mb_debug(1, "init page %lu\n", page->index); inode = page->mapping->host; sb = inode->i_sb; ngroups = ext4_get_groups_count(sb); blocksize = 1 << inode->i_blkbits; blocks_per_page = PAGE_CACHE_SIZE / blocksize; groups_per_page = blocks_per_page >> 1; if (groups_per_page == 0) groups_per_page = 1; /* allocate buffer_heads to read bitmaps */ if (groups_per_page > 1) { err = -ENOMEM; i = sizeof(struct buffer_head *) * groups_per_page; bh = kzalloc(i, GFP_NOFS); if (bh == NULL) goto out; } else bh = &bhs; first_group = page->index * blocks_per_page / 2; /* read all groups the page covers into the cache */ for (i = 0; i < groups_per_page; i++) { struct ext4_group_desc *desc; if (first_group + i >= ngroups) break; err = -EIO; desc = ext4_get_group_desc(sb, first_group + i, NULL); if (desc == NULL) goto out; err = -ENOMEM; bh[i] = sb_getblk(sb, ext4_block_bitmap(sb, desc)); if (bh[i] == NULL) goto out; if (bitmap_uptodate(bh[i])) continue; lock_buffer(bh[i]); if (bitmap_uptodate(bh[i])) { unlock_buffer(bh[i]); continue; } ext4_lock_group(sb, first_group + i); if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { ext4_init_block_bitmap(sb, bh[i], first_group + i, desc); set_bitmap_uptodate(bh[i]); set_buffer_uptodate(bh[i]); ext4_unlock_group(sb, first_group + i); unlock_buffer(bh[i]); continue; } ext4_unlock_group(sb, first_group + i); if (buffer_uptodate(bh[i])) { /* * if not uninit if bh is uptodate, * bitmap is also uptodate */ set_bitmap_uptodate(bh[i]); unlock_buffer(bh[i]); continue; } get_bh(bh[i]); /* * submit the buffer_head for read. We can * safely mark the bitmap as uptodate now. 
* We do it here so the bitmap uptodate bit * get set with buffer lock held. */ set_bitmap_uptodate(bh[i]); bh[i]->b_end_io = end_buffer_read_sync; submit_bh(READ, bh[i]); mb_debug(1, "read bitmap for group %u\n", first_group + i); } /* wait for I/O completion */ for (i = 0; i < groups_per_page && bh[i]; i++) wait_on_buffer(bh[i]); err = -EIO; for (i = 0; i < groups_per_page && bh[i]; i++) if (!buffer_uptodate(bh[i])) goto out; err = 0; first_block = page->index * blocks_per_page; /* init the page */ memset(page_address(page), 0xff, PAGE_CACHE_SIZE); for (i = 0; i < blocks_per_page; i++) { int group; struct ext4_group_info *grinfo; group = (first_block + i) >> 1; if (group >= ngroups) break; /* * data carry information regarding this * particular group in the format specified * above * */ data = page_address(page) + (i * blocksize); bitmap = bh[group - first_group]->b_data; /* * We place the buddy block and bitmap block * close together */ if ((first_block + i) & 1) { /* this is block of buddy */ BUG_ON(incore == NULL); mb_debug(1, "put buddy for group %u in page %lu/%x\n", group, page->index, i * blocksize); trace_ext4_mb_buddy_bitmap_load(sb, group); grinfo = ext4_get_group_info(sb, group); grinfo->bb_fragments = 0; memset(grinfo->bb_counters, 0, sizeof(*grinfo->bb_counters) * (sb->s_blocksize_bits+2)); /* * incore got set to the group block bitmap below */ ext4_lock_group(sb, group); ext4_mb_generate_buddy(sb, data, incore, group); ext4_unlock_group(sb, group); incore = NULL; } else { /* this is block of bitmap */ BUG_ON(incore != NULL); mb_debug(1, "put bitmap for group %u in page %lu/%x\n", group, page->index, i * blocksize); trace_ext4_mb_bitmap_load(sb, group); /* see comments in ext4_mb_put_pa() */ ext4_lock_group(sb, group); memcpy(data, bitmap, blocksize); /* mark all preallocated blks used in in-core bitmap */ ext4_mb_generate_from_pa(sb, data, group); ext4_mb_generate_from_freelist(sb, data, group); ext4_unlock_group(sb, group); /* set incore so that the 
buddy information can be * generated using this */ incore = data; } } SetPageUptodate(page); out: if (bh) { for (i = 0; i < groups_per_page && bh[i]; i++) brelse(bh[i]); if (bh != &bhs) kfree(bh); } return err; } /* * Locking note: This routine calls ext4_mb_init_cache(), which takes the * block group lock of all groups for this page; do not hold the BG lock when * calling this routine! */ static noinline_for_stack int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) { int ret = 0; void *bitmap; int blocks_per_page; int block, pnum, poff; int num_grp_locked = 0; struct ext4_group_info *this_grp; struct ext4_sb_info *sbi = EXT4_SB(sb); struct inode *inode = sbi->s_buddy_cache; struct page *page = NULL, *bitmap_page = NULL; mb_debug(1, "init group %u\n", group); blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; this_grp = ext4_get_group_info(sb, group); /* * This ensures that we don't reinit the buddy cache * page which map to the group from which we are already * allocating. If we are looking at the buddy cache we would * have taken a reference using ext4_mb_load_buddy and that * would have taken the alloc_sem lock. */ num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, group); if (!EXT4_MB_GRP_NEED_INIT(this_grp)) { /* * somebody initialized the group * return without doing anything */ ret = 0; goto err; } /* * the buddy cache inode stores the block bitmap * and buddy information in consecutive blocks. * So for each group we need two blocks. 
*/ block = group * 2; pnum = block / blocks_per_page; poff = block % blocks_per_page; page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); if (page) { BUG_ON(page->mapping != inode->i_mapping); ret = ext4_mb_init_cache(page, NULL); if (ret) { unlock_page(page); goto err; } unlock_page(page); } if (page == NULL || !PageUptodate(page)) { ret = -EIO; goto err; } mark_page_accessed(page); bitmap_page = page; bitmap = page_address(page) + (poff * sb->s_blocksize); /* init buddy cache */ block++; pnum = block / blocks_per_page; poff = block % blocks_per_page; page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); if (page == bitmap_page) { /* * If both the bitmap and buddy are in * the same page we don't need to force * init the buddy */ unlock_page(page); } else if (page) { BUG_ON(page->mapping != inode->i_mapping); ret = ext4_mb_init_cache(page, bitmap); if (ret) { unlock_page(page); goto err; } unlock_page(page); } if (page == NULL || !PageUptodate(page)) { ret = -EIO; goto err; } mark_page_accessed(page); err: ext4_mb_put_buddy_cache_lock(sb, group, num_grp_locked); if (bitmap_page) page_cache_release(bitmap_page); if (page) page_cache_release(page); return ret; } /* * Locking note: This routine calls ext4_mb_init_cache(), which takes the * block group lock of all groups for this page; do not hold the BG lock when * calling this routine! 
*/ static noinline_for_stack int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, struct ext4_buddy *e4b) { int blocks_per_page; int block; int pnum; int poff; struct page *page; int ret; struct ext4_group_info *grp; struct ext4_sb_info *sbi = EXT4_SB(sb); struct inode *inode = sbi->s_buddy_cache; mb_debug(1, "load group %u\n", group); blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; grp = ext4_get_group_info(sb, group); e4b->bd_blkbits = sb->s_blocksize_bits; e4b->bd_info = ext4_get_group_info(sb, group); e4b->bd_sb = sb; e4b->bd_group = group; e4b->bd_buddy_page = NULL; e4b->bd_bitmap_page = NULL; e4b->alloc_semp = &grp->alloc_sem; /* Take the read lock on the group alloc * sem. This would make sure a parallel * ext4_mb_init_group happening on other * groups mapped by the page is blocked * till we are done with allocation */ repeat_load_buddy: down_read(e4b->alloc_semp); if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { /* we need to check for group need init flag * with alloc_semp held so that we can be sure * that new blocks didn't get added to the group * when we are loading the buddy cache */ up_read(e4b->alloc_semp); /* * we need full data about the group * to make a good selection */ ret = ext4_mb_init_group(sb, group); if (ret) return ret; goto repeat_load_buddy; } /* * the buddy cache inode stores the block bitmap * and buddy information in consecutive blocks. * So for each group we need two blocks. */ block = group * 2; pnum = block / blocks_per_page; poff = block % blocks_per_page; /* we could use find_or_create_page(), but it locks page * what we'd like to avoid in fast path ... */ page = find_get_page(inode->i_mapping, pnum); if (page == NULL || !PageUptodate(page)) { if (page) /* * drop the page reference and try * to get the page with lock. If we * are not uptodate that implies * somebody just created the page but * is yet to initialize the same. So * wait for it to initialize. 
*/ page_cache_release(page); page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); if (page) { BUG_ON(page->mapping != inode->i_mapping); if (!PageUptodate(page)) { ret = ext4_mb_init_cache(page, NULL); if (ret) { unlock_page(page); goto err; } mb_cmp_bitmaps(e4b, page_address(page) + (poff * sb->s_blocksize)); } unlock_page(page); } } if (page == NULL || !PageUptodate(page)) { ret = -EIO; goto err; } e4b->bd_bitmap_page = page; e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); mark_page_accessed(page); block++; pnum = block / blocks_per_page; poff = block % blocks_per_page; page = find_get_page(inode->i_mapping, pnum); if (page == NULL || !PageUptodate(page)) { if (page) page_cache_release(page); page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); if (page) { BUG_ON(page->mapping != inode->i_mapping); if (!PageUptodate(page)) { ret = ext4_mb_init_cache(page, e4b->bd_bitmap); if (ret) { unlock_page(page); goto err; } } unlock_page(page); } } if (page == NULL || !PageUptodate(page)) { ret = -EIO; goto err; } e4b->bd_buddy_page = page; e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); mark_page_accessed(page); BUG_ON(e4b->bd_bitmap_page == NULL); BUG_ON(e4b->bd_buddy_page == NULL); return 0; err: if (e4b->bd_bitmap_page) page_cache_release(e4b->bd_bitmap_page); if (e4b->bd_buddy_page) page_cache_release(e4b->bd_buddy_page); e4b->bd_buddy = NULL; e4b->bd_bitmap = NULL; /* Done with the buddy cache */ up_read(e4b->alloc_semp); return ret; } static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) { if (e4b->bd_bitmap_page) page_cache_release(e4b->bd_bitmap_page); if (e4b->bd_buddy_page) page_cache_release(e4b->bd_buddy_page); /* Done with the buddy cache */ if (e4b->alloc_semp) up_read(e4b->alloc_semp); } static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) { int order = 1; void *bb; BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b)); BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); bb = EXT4_MB_BUDDY(e4b); 
while (order <= e4b->bd_blkbits + 1) { block = block >> 1; if (!mb_test_bit(block, bb)) { /* this block is part of buddy of order 'order' */ return order; } bb += 1 << (e4b->bd_blkbits - order); order++; } return 0; } static void mb_clear_bits(void *bm, int cur, int len) { __u32 *addr; len = cur + len; while (cur < len) { if ((cur & 31) == 0 && (len - cur) >= 32) { /* fast path: clear whole word at once */ addr = bm + (cur >> 3); *addr = 0; cur += 32; continue; } mb_clear_bit(cur, bm); cur++; } } static void mb_set_bits(void *bm, int cur, int len) { __u32 *addr; len = cur + len; while (cur < len) { if ((cur & 31) == 0 && (len - cur) >= 32) { /* fast path: set whole word at once */ addr = bm + (cur >> 3); *addr = 0xffffffff; cur += 32; continue; } mb_set_bit(cur, bm); cur++; } } static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, int first, int count) { int block = 0; int max = 0; int order; void *buddy; void *buddy2; struct super_block *sb = e4b->bd_sb; BUG_ON(first + count > (sb->s_blocksize << 3)); assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); mb_check_buddy(e4b); mb_free_blocks_double(inode, e4b, first, count); e4b->bd_info->bb_free += count; if (first < e4b->bd_info->bb_first_free) e4b->bd_info->bb_first_free = first; /* let's maintain fragments counter */ if (first != 0) block = !mb_test_bit(first - 1, EXT4_MB_BITMAP(e4b)); if (first + count < EXT4_SB(sb)->s_mb_maxs[0]) max = !mb_test_bit(first + count, EXT4_MB_BITMAP(e4b)); if (block && max) e4b->bd_info->bb_fragments--; else if (!block && !max) e4b->bd_info->bb_fragments++; /* let's maintain buddy itself */ while (count-- > 0) { block = first++; order = 0; if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) { ext4_fsblk_t blocknr; blocknr = ext4_group_first_block_no(sb, e4b->bd_group); blocknr += block; ext4_grp_locked_error(sb, e4b->bd_group, inode ? 
inode->i_ino : 0, blocknr, "freeing already freed block " "(bit %u)", block); } mb_clear_bit(block, EXT4_MB_BITMAP(e4b)); e4b->bd_info->bb_counters[order]++; /* start of the buddy */ buddy = mb_find_buddy(e4b, order, &max); do { block &= ~1UL; if (mb_test_bit(block, buddy) || mb_test_bit(block + 1, buddy)) break; /* both the buddies are free, try to coalesce them */ buddy2 = mb_find_buddy(e4b, order + 1, &max); if (!buddy2) break; if (order > 0) { /* for special purposes, we don't set * free bits in bitmap */ mb_set_bit(block, buddy); mb_set_bit(block + 1, buddy); } e4b->bd_info->bb_counters[order]--; e4b->bd_info->bb_counters[order]--; block = block >> 1; order++; e4b->bd_info->bb_counters[order]++; mb_clear_bit(block, buddy2); buddy = buddy2; } while (1); } mb_set_largest_free_order(sb, e4b->bd_info); mb_check_buddy(e4b); } static int mb_find_extent(struct ext4_buddy *e4b, int order, int block, int needed, struct ext4_free_extent *ex) { int next = block; int max; int ord; void *buddy; assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); BUG_ON(ex == NULL); buddy = mb_find_buddy(e4b, order, &max); BUG_ON(buddy == NULL); BUG_ON(block >= max); if (mb_test_bit(block, buddy)) { ex->fe_len = 0; ex->fe_start = 0; ex->fe_group = 0; return 0; } /* FIXME dorp order completely ? 
*/
	if (likely(order == 0)) {
		/* find actual order */
		order = mb_find_order_for_block(e4b, block);
		block = block >> order;
	}

	ex->fe_len = 1 << order;
	ex->fe_start = block << order;
	ex->fe_group = e4b->bd_group;

	/* calc difference from given start */
	next = next - ex->fe_start;
	ex->fe_len -= next;
	ex->fe_start += next;

	/* keep absorbing the following free buddy chunks until the extent
	 * is long enough for the caller or the neighbour is in use */
	while (needed > ex->fe_len &&
	       (buddy = mb_find_buddy(e4b, order, &max))) {

		if (block + 1 >= max)
			break;

		next = (block + 1) * (1 << order);
		if (mb_test_bit(next, EXT4_MB_BITMAP(e4b)))
			break;

		ord = mb_find_order_for_block(e4b, next);

		order = ord;
		block = next >> order;
		ex->fe_len += 1 << order;
	}

	/* the extent must not run past the end of the group's bitmap */
	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
	return ex->fe_len;
}

/*
 * Mark the extent @ex as in-use in the buddy data of @e4b.
 *
 * Updates bb_free/bb_first_free and the fragments counter, flips the
 * covered bits at every buddy order (splitting larger buddies where the
 * range is not order-aligned) and finally sets the range in the
 * bitmap copy.  Caller must hold the group spinlock (asserted below).
 *
 * Returns a packed value recorded at the first buddy split: the low
 * 16 bits are the length still to mark at that point and the upper
 * bits the order being split (consumed as ac_tail/ac_buddy for
 * allocation-history statistics).
 */
static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
{
	int ord;
	int mlen = 0;
	int max = 0;
	int cur;
	int start = ex->fe_start;
	int len = ex->fe_len;
	unsigned ret = 0;
	int len0 = len;
	void *buddy;

	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
	BUG_ON(e4b->bd_group != ex->fe_group);
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_mark_used_double(e4b, start, len);

	e4b->bd_info->bb_free -= len;
	if (e4b->bd_info->bb_first_free == start)
		e4b->bd_info->bb_first_free += len;

	/* let's maintain fragments counter */
	if (start != 0)
		mlen = !mb_test_bit(start - 1, EXT4_MB_BITMAP(e4b));
	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
		max = !mb_test_bit(start + len, EXT4_MB_BITMAP(e4b));
	if (mlen && max)
		e4b->bd_info->bb_fragments++;
	else if (!mlen && !max)
		e4b->bd_info->bb_fragments--;

	/* let's maintain buddy itself */
	while (len) {
		ord = mb_find_order_for_block(e4b, start);

		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
			/* the whole chunk may be allocated at once! */
			mlen = 1 << ord;
			buddy = mb_find_buddy(e4b, ord, &max);
			BUG_ON((start >> ord) >= max);
			mb_set_bit(start >> ord, buddy);
			e4b->bd_info->bb_counters[ord]--;
			start += mlen;
			len -= mlen;
			BUG_ON(len < 0);
			continue;
		}

		/* store for history */
		if (ret == 0)
			ret = len | (ord << 16);

		/* we have to split large buddy */
		BUG_ON(ord <= 0);
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_set_bit(start >> ord, buddy);
		e4b->bd_info->bb_counters[ord]--;

		/* split into two order (ord-1) halves; both become free */
		ord--;
		cur = (start >> ord) & ~1U;
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_clear_bit(cur, buddy);
		mb_clear_bit(cur + 1, buddy);
		e4b->bd_info->bb_counters[ord]++;
		e4b->bd_info->bb_counters[ord]++;
	}
	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);

	mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
	mb_check_buddy(e4b);

	return ret;
}

/*
 * Commit the best-found extent (ac->ac_b_ex) as the allocation result:
 * mark it used in the buddy data, pin the bitmap/buddy pages and
 * remember the location for subsequent stream allocations.
 *
 * Must be called under the group lock.
 */
static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int ret;

	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	/* never take more than the caller's goal length */
	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
	ret = mb_mark_used(e4b, &ac->ac_b_ex);

	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_tail = ret & 0xffff;
	ac->ac_buddy = ret >> 16;

	/*
	 * take the page reference. We want the page to be pinned
	 * so that we don't get a ext4_mb_init_cache_call for this
	 * group until we update the bitmap. That would mean we
	 * double allocate blocks. The reference is dropped
	 * in ext4_mb_release_context
	 */
	ac->ac_bitmap_page = e4b->bd_bitmap_page;
	get_page(ac->ac_bitmap_page);
	ac->ac_buddy_page = e4b->bd_buddy_page;
	get_page(ac->ac_buddy_page);
	/* on allocation we use ac to track the held semaphore */
	ac->alloc_semp = e4b->alloc_semp;
	e4b->alloc_semp = NULL;
	/* store last allocated for subsequent stream allocation */
	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
		spin_lock(&sbi->s_md_lock);
		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
		spin_unlock(&sbi->s_md_lock);
	}
}

/*
 * regular allocator, for general purposes allocation
 */

/*
 * Decide whether scanning should stop: either the best extent found so
 * far satisfies the goal (take it), or too many extents have been
 * examined (set AC_STATUS_BREAK to abort the scan).
 */
static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b,
					int finish_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;
	struct ext4_free_extent ex;
	int max;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;
	/*
	 * We don't want to scan for a whole year
	 */
	if (ac->ac_found > sbi->s_mb_max_to_scan &&
			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		ac->ac_status = AC_STATUS_BREAK;
		return;
	}

	/*
	 * Haven't found good chunk so far, let's continue
	 */
	if (bex->fe_len < gex->fe_len)
		return;

	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
			&& bex->fe_group == e4b->bd_group) {
		/* recheck chunk's availability - we don't know
		 * when it was found (within this lock-unlock
		 * period or not) */
		max = mb_find_extent(e4b, 0, bex->fe_start, gex->fe_len, &ex);
		if (max >= gex->fe_len) {
			ext4_mb_use_best_found(ac, e4b);
			return;
		}
	}
}

/*
 * The routine checks whether found extent is good enough. If it is,
 * then the extent gets marked used and flag is set to the context
 * to stop scanning. Otherwise, the extent is compared with the
 * previous found extent and if new one is better, then it's stored
 * in the context. Later, the best found extent will be used, if
 * mballoc can't find good enough extent.
*
 * FIXME: real allocation policy is to be designed yet!
 */
static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
					struct ext4_free_extent *ex,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;

	BUG_ON(ex->fe_len <= 0);
	BUG_ON(ex->fe_len > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
	BUG_ON(ex->fe_start >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);

	ac->ac_found++;

	/*
	 * The special case - take what you catch first
	 */
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * Let's check whether the chunk is good enough
	 * (an exact match for the goal length is always taken)
	 */
	if (ex->fe_len == gex->fe_len) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * If this is first found extent, just store it in the context
	 */
	if (bex->fe_len == 0) {
		*bex = *ex;
		return;
	}

	/*
	 * If new found extent is better, store it in the context
	 */
	if (bex->fe_len < gex->fe_len) {
		/* if the request isn't satisfied, any found extent
		 * larger than previous best one is better */
		if (ex->fe_len > bex->fe_len)
			*bex = *ex;
	} else if (ex->fe_len > gex->fe_len) {
		/* if the request is satisfied, then we try to find
		 * an extent that still satisfy the request, but is
		 * smaller than previous one */
		if (ex->fe_len < bex->fe_len)
			*bex = *ex;
	}

	ext4_mb_check_limits(ac, e4b, 0);
}

/*
 * Re-check the previously remembered best extent (ac->ac_b_ex) and,
 * if it is still free, mark it used.  The group lock was dropped
 * since it was found, so the extent must be re-verified here.
 */
static noinline_for_stack
int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent ex = ac->ac_b_ex;
	ext4_group_t group = ex.fe_group;
	int max;
	int err;

	BUG_ON(ex.fe_len <= 0);
	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, 0, ex.fe_start, ex.fe_len, &ex);

	if (max > 0) {
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}

	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_unload_buddy(e4b);

	return 0;
}

/*
 * Try to allocate exactly at the goal block (honoured only when
 * EXT4_MB_HINT_TRY_GOAL is set).  For stripe-sized requests the hit is
 * only taken when the resulting physical start is stripe-aligned.
 * Returns 0 (with ac_status updated on success) or a load error.
 */
static noinline_for_stack
int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
				struct ext4_buddy *e4b)
{
	ext4_group_t group = ac->ac_g_ex.fe_group;
	int max;
	int err;
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_free_extent ex;

	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
		return 0;

	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
			     ac->ac_g_ex.fe_len, &ex);

	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
		ext4_fsblk_t start;

		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
			ex.fe_start;
		/* use do_div to get remainder (would be 64-bit modulo) */
		if (do_div(start, sbi->s_stripe) == 0) {
			ac->ac_found++;
			ac->ac_b_ex = ex;
			ext4_mb_use_best_found(ac, e4b);
		}
	} else if (max >= ac->ac_g_ex.fe_len) {
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
		/* Sometimes, caller may want to merge even small
		 * number of blocks to an existing extent */
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}
	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_unload_buddy(e4b);

	return 0;
}

/*
 * The routine scans buddy structures (not bitmap!)
from given order
 * to max order and tries to find big enough chunk to satisfy the req
 */
static noinline_for_stack
void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_group_info *grp = e4b->bd_info;
	void *buddy;
	int i;
	int k;
	int max;

	/* only used for power-of-two requests (cr == 0) */
	BUG_ON(ac->ac_2order <= 0);
	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
		if (grp->bb_counters[i] == 0)
			continue;

		buddy = mb_find_buddy(e4b, i, &max);
		BUG_ON(buddy == NULL);

		/* a zero bit in the order-i buddy is a free chunk */
		k = mb_find_next_zero_bit(buddy, max, 0);
		BUG_ON(k >= max);

		ac->ac_found++;

		ac->ac_b_ex.fe_len = 1 << i;
		ac->ac_b_ex.fe_start = k << i;
		ac->ac_b_ex.fe_group = e4b->bd_group;

		ext4_mb_use_best_found(ac, e4b);

		BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);

		if (EXT4_SB(sb)->s_mb_stats)
			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);

		break;
	}
}

/*
 * The routine scans the group and measures all found extents.
 * In order to optimize scanning, caller must pass number of
 * free blocks in the group, so the routine can know upper limit.
 */
static noinline_for_stack
void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	void *bitmap = EXT4_MB_BITMAP(e4b);
	struct ext4_free_extent ex;
	int i;
	int free;

	free = e4b->bd_info->bb_free;
	BUG_ON(free <= 0);

	i = e4b->bd_info->bb_first_free;

	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
		i = mb_find_next_zero_bit(bitmap,
						EXT4_BLOCKS_PER_GROUP(sb), i);
		if (i >= EXT4_BLOCKS_PER_GROUP(sb)) {
			/*
			 * IF we have corrupt bitmap, we won't find any
			 * free blocks even though group info says we
			 * have free blocks
			 */
			ext4_grp_locked_error(sb, e4b->bd_group,
					0, 0, "%d free blocks as per "
					"group info. But bitmap says 0",
					free);
			break;
		}

		mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
		BUG_ON(ex.fe_len <= 0);
		if (free < ex.fe_len) {
			ext4_grp_locked_error(sb, e4b->bd_group,
					0, 0, "%d free blocks as per "
					"group info. But got %d blocks",
					free, ex.fe_len);
			/*
			 * The number of free blocks differs. This mostly
			 * indicate that the bitmap is corrupt. So exit
			 * without claiming the space.
			 */
			break;
		}

		ext4_mb_measure_extent(ac, &ex, e4b);

		i += ex.fe_len;
		free -= ex.fe_len;
	}

	ext4_mb_check_limits(ac, e4b, 1);
}

/*
 * This is a special case for storages like raid5
 * we try to find stripe-aligned chunks for stripe-size-multiple requests
 */
static noinline_for_stack
void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
				 struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	void *bitmap = EXT4_MB_BITMAP(e4b);
	struct ext4_free_extent ex;
	ext4_fsblk_t first_group_block;
	ext4_fsblk_t a;
	ext4_grpblk_t i;
	int max;

	BUG_ON(sbi->s_stripe == 0);

	/* find first stripe-aligned block in group */
	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);

	/* round the group's first block up to a stripe boundary */
	a = first_group_block + sbi->s_stripe - 1;
	do_div(a, sbi->s_stripe);
	i = (a * sbi->s_stripe) - first_group_block;

	/* probe each stripe-aligned position for a stripe-long free run */
	while (i < EXT4_BLOCKS_PER_GROUP(sb)) {
		if (!mb_test_bit(i, bitmap)) {
			max = mb_find_extent(e4b, 0, i, sbi->s_stripe, &ex);
			if (max >= sbi->s_stripe) {
				ac->ac_found++;
				ac->ac_b_ex = ex;
				ext4_mb_use_best_found(ac, e4b);
				break;
			}
		}
		i += sbi->s_stripe;
	}
}

/* This is now called BEFORE we load the buddy bitmap.
*/
static int ext4_mb_good_group(struct ext4_allocation_context *ac,
				ext4_group_t group, int cr)
{
	unsigned free, fragments;
	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);

	BUG_ON(cr < 0 || cr >= 4);

	/* We only do this if the grp has never been initialized */
	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		int ret = ext4_mb_init_group(ac->ac_sb, group);
		if (ret)
			return 0;
	}

	free = grp->bb_free;
	fragments = grp->bb_fragments;
	if (free == 0)
		return 0;
	if (fragments == 0)
		return 0;

	/* criteria get progressively weaker: cr 0 needs an exact-order
	 * buddy, cr 3 accepts any group with free blocks */
	switch (cr) {
	case 0:
		BUG_ON(ac->ac_2order == 0);

		if (grp->bb_largest_free_order < ac->ac_2order)
			return 0;

		/* Avoid using the first bg of a flexgroup for data files */
		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
		    ((group % flex_size) == 0))
			return 0;

		return 1;
	case 1:
		/* average fragment must be at least the goal length */
		if ((free / fragments) >= ac->ac_g_ex.fe_len)
			return 1;
		break;
	case 2:
		if (free >= ac->ac_g_ex.fe_len)
			return 1;
		break;
	case 3:
		return 1;
	default:
		BUG();
	}

	return 0;
}

/*
 * lock the group_info alloc_sem of all the groups
 * belonging to the same buddy cache page. This
 * make sure other parallel operation on the buddy
 * cache doesn't happen while holding the buddy cache
 * lock
 */
int ext4_mb_get_buddy_cache_lock(struct super_block *sb, ext4_group_t group)
{
	int i;
	int block, pnum;
	int blocks_per_page;
	int groups_per_page;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	ext4_group_t first_group;
	struct ext4_group_info *grp;

	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	first_group = pnum * blocks_per_page / 2;

	groups_per_page = blocks_per_page >> 1;
	if (groups_per_page == 0)
		groups_per_page = 1;
	/* read all groups the page covers into the cache */
	for (i = 0; i < groups_per_page; i++) {

		if ((first_group + i) >= ngroups)
			break;
		grp = ext4_get_group_info(sb, first_group + i);
		/* take all groups write allocation
		 * semaphore.
This make sure there is
		 * no block allocation going on in any
		 * of that groups
		 */
		up_write(&grp->alloc_sem);
	}
}

/*
 * Main scanning loop of the regular (non-preallocated) allocator.
 *
 * Tries the goal block first, then walks all groups under each
 * criterion cr = 0..3, scanning qualifying groups with the strategy
 * matching the criterion (exact buddy order, stripe-aligned or
 * complex bitmap scan).  Returns 0 or a negative error; the result
 * extent, if any, is left in ac->ac_b_ex with AC_STATUS_FOUND set.
 */
static noinline_for_stack int
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{
	ext4_group_t ngroups, group, i;
	int cr;
	int err = 0;
	struct ext4_sb_info *sbi;
	struct super_block *sb;
	struct ext4_buddy e4b;

	sb = ac->ac_sb;
	sbi = EXT4_SB(sb);
	ngroups = ext4_get_groups_count(sb);
	/* non-extent files are limited to low blocks/groups */
	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
		ngroups = sbi->s_blockfile_groups;

	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	/* first, try the goal */
	err = ext4_mb_find_by_goal(ac, &e4b);
	if (err || ac->ac_status == AC_STATUS_FOUND)
		goto out;

	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		goto out;

	/*
	 * ac->ac2_order is set only if the fe_len is a power of 2
	 * if ac2_order is set we also set criteria to 0 so that we
	 * try exact allocation using buddy.
	 */
	i = fls(ac->ac_g_ex.fe_len);
	ac->ac_2order = 0;
	/*
	 * We search using buddy data only if the order of the request
	 * is greater than equal to the sbi_s_mb_order2_reqs
	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
	 */
	if (i >= sbi->s_mb_order2_reqs) {
		/*
		 * This should tell if fe_len is exactly power of 2
		 */
		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
			ac->ac_2order = i - 1;
	}

	/* if stream allocation is enabled, use global goal */
	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
		/* TBD: may be hot point */
		spin_lock(&sbi->s_md_lock);
		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
		spin_unlock(&sbi->s_md_lock);
	}

	/* Let's just scan groups to find more-less suitable blocks */
	cr = ac->ac_2order ? 0 : 1;
	/*
	 * cr == 0 try to get exact allocation,
	 * cr == 3 try to get anything
	 */
repeat:
	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
		ac->ac_criteria = cr;
		/*
		 * searching for the right group start
		 * from the goal value specified
		 */
		group = ac->ac_g_ex.fe_group;

		for (i = 0; i < ngroups; group++, i++) {
			/* wrap around to group 0 past the last group */
			if (group == ngroups)
				group = 0;

			/* This now checks without needing the buddy page */
			if (!ext4_mb_good_group(ac, group, cr))
				continue;

			err = ext4_mb_load_buddy(sb, group, &e4b);
			if (err)
				goto out;

			ext4_lock_group(sb, group);

			/*
			 * We need to check again after locking the
			 * block group
			 */
			if (!ext4_mb_good_group(ac, group, cr)) {
				ext4_unlock_group(sb, group);
				ext4_mb_unload_buddy(&e4b);
				continue;
			}

			ac->ac_groups_scanned++;
			if (cr == 0)
				ext4_mb_simple_scan_group(ac, &e4b);
			else if (cr == 1 && sbi->s_stripe &&
					!(ac->ac_g_ex.fe_len % sbi->s_stripe))
				ext4_mb_scan_aligned(ac, &e4b);
			else
				ext4_mb_complex_scan_group(ac, &e4b);

			ext4_unlock_group(sb, group);
			ext4_mb_unload_buddy(&e4b);

			if (ac->ac_status != AC_STATUS_CONTINUE)
				break;
		}
	}

	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		/*
		 * We've been searching too long. Let's try to allocate
		 * the best chunk we've found so far
		 */

		ext4_mb_try_best_found(ac, &e4b);
		if (ac->ac_status != AC_STATUS_FOUND) {
			/*
			 * Someone more lucky has already allocated it.
			 * The only thing we can do is just take first
			 * found block(s)
			printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
			 */
			ac->ac_b_ex.fe_group = 0;
			ac->ac_b_ex.fe_start = 0;
			ac->ac_b_ex.fe_len = 0;
			ac->ac_status = AC_STATUS_CONTINUE;
			ac->ac_flags |= EXT4_MB_HINT_FIRST;
			cr = 3;
			atomic_inc(&sbi->s_mb_lost_chunks);
			goto repeat;
		}
	}
out:
	return err;
}

/* seq_file iterator for /proc/fs/ext4/<dev>/mb_groups: positions are
 * group numbers biased by one so that NULL (0) means "stop" */
static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
{
	struct super_block *sb = seq->private;
	ext4_group_t group;

	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
		return NULL;
	group = *pos + 1;
	return (void *) ((unsigned long) group);
}

static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct super_block *sb = seq->private;
	ext4_group_t group;

	++*pos;
	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
		return NULL;
	group = *pos + 1;
	return (void *) ((unsigned long) group);
}

/* print one group's free/fragment/buddy-counter statistics */
static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
{
	struct super_block *sb = seq->private;
	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
	int i;
	int err;
	struct ext4_buddy e4b;
	struct sg {
		struct ext4_group_info info;
		ext4_grpblk_t counters[16];
	} sg;

	group--;
	if (group == 0)
		seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
				"[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
				  "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
			   "group", "free", "frags", "first",
			   "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
			   "2^7", "2^8", "2^9", "2^10", "2^11", "2^12",
			   "2^13");

	i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
		sizeof(struct ext4_group_info);
	err = ext4_mb_load_buddy(sb, group, &e4b);
	if (err) {
		seq_printf(seq, "#%-5u: I/O error\n", group);
		return 0;
	}
	/* snapshot the group info under the group lock */
	ext4_lock_group(sb, group);
	memcpy(&sg, ext4_get_group_info(sb, group), i);
	ext4_unlock_group(sb, group);
	ext4_mb_unload_buddy(&e4b);

	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
			sg.info.bb_fragments, sg.info.bb_first_free);
	for (i = 0; i <= 13; i++)
		seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
sg.info.bb_counters[i] : 0);
	seq_printf(seq, " ]\n");

	return 0;
}

static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations ext4_mb_seq_groups_ops = {
	.start  = ext4_mb_seq_groups_start,
	.next   = ext4_mb_seq_groups_next,
	.stop   = ext4_mb_seq_groups_stop,
	.show   = ext4_mb_seq_groups_show,
};

static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
{
	struct super_block *sb = PDE(inode)->data;
	int rc;

	rc = seq_open(file, &ext4_mb_seq_groups_ops);
	if (rc == 0) {
		struct seq_file *m = file->private_data;
		m->private = sb;
	}
	return rc;
}

static const struct file_operations ext4_mb_seq_groups_fops = {
	.owner		= THIS_MODULE,
	.open		= ext4_mb_seq_groups_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* Create and initialize ext4_group_info data for the given group. */
int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
			  struct ext4_group_desc *desc)
{
	int i, len;
	int metalen = 0;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_info **meta_group_info;

	/*
	 * First check if this group is the first of a reserved block.
	 * If it's true, we have to allocate a new table of pointers
	 * to ext4_group_info structures
	 */
	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
		metalen = sizeof(*meta_group_info) <<
			EXT4_DESC_PER_BLOCK_BITS(sb);
		meta_group_info = kmalloc(metalen, GFP_KERNEL);
		if (meta_group_info == NULL) {
			printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
			       "buddy group\n");
			goto exit_meta_group_info;
		}
		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
			meta_group_info;
	}

	/*
	 * calculate needed size. if change bb_counters size,
	 * don't forget about ext4_mb_generate_buddy()
	 */
	len = offsetof(typeof(**meta_group_info),
		       bb_counters[sb->s_blocksize_bits + 2]);

	meta_group_info =
		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);

	meta_group_info[i] = kzalloc(len, GFP_KERNEL);
	if (meta_group_info[i] == NULL) {
		printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
		goto exit_group_info;
	}
	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
		&(meta_group_info[i]->bb_state));

	/*
	 * initialize bb_free to be able to skip
	 * empty groups without initialization
	 */
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		meta_group_info[i]->bb_free =
			ext4_free_blocks_after_init(sb, group, desc);
	} else {
		meta_group_info[i]->bb_free =
			ext4_free_blks_count(sb, desc);
	}

	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
	init_rwsem(&meta_group_info[i]->alloc_sem);
	meta_group_info[i]->bb_free_root = RB_ROOT;
	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */

#ifdef DOUBLE_CHECK
	{
		struct buffer_head *bh;
		meta_group_info[i]->bb_bitmap =
			kmalloc(sb->s_blocksize, GFP_KERNEL);
		BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
		bh = ext4_read_block_bitmap(sb, group);
		BUG_ON(bh == NULL);
		memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
			sb->s_blocksize);
		put_bh(bh);
	}
#endif

	return 0;

exit_group_info:
	/* If a meta_group_info table has been allocated, release it now */
	if (group % EXT4_DESC_PER_BLOCK(sb) == 0)
		kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
exit_meta_group_info:
	return -ENOMEM;
} /* ext4_mb_add_groupinfo */

/*
 * Allocate the two-level s_group_info pointer table and the buddy
 * cache inode, then create per-group info for every group.
 * Returns 0 or -ENOMEM, unwinding all partial allocations on failure.
 */
static int ext4_mb_init_backend(struct super_block *sb)
{
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	ext4_group_t i;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int num_meta_group_infos;
	int num_meta_group_infos_max;
	int array_size;
	struct ext4_group_desc *desc;

	/* This is the number of blocks used by GDT */
	num_meta_group_infos = (ngroups + EXT4_DESC_PER_BLOCK(sb) -
				1) >> EXT4_DESC_PER_BLOCK_BITS(sb);

	/*
	 * This is the total number of blocks used by GDT including
	 * the number of reserved blocks for GDT.
	 * The s_group_info array is allocated with this value
	 * to allow a clean online resize without a complex
	 * manipulation of pointer.
	 * The drawback is the unused memory when no resize
	 * occurs but it's very low in terms of pages
	 * (see comments below)
	 * Need to handle this properly when META_BG resizing is allowed
	 */
	num_meta_group_infos_max = num_meta_group_infos +
				le16_to_cpu(es->s_reserved_gdt_blocks);

	/*
	 * array_size is the size of s_group_info array. We round it
	 * to the next power of two because this approximation is done
	 * internally by kmalloc so we can have some more memory
	 * for free here (e.g. may be used for META_BG resize).
	 */
	array_size = 1;
	while (array_size < sizeof(*sbi->s_group_info) *
	       num_meta_group_infos_max)
		array_size = array_size << 1;
	/* An 8TB filesystem with 64-bit pointers requires a 4096 byte
	 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
	 * So a two level scheme suffices for now. */
	sbi->s_group_info = kmalloc(array_size, GFP_KERNEL);
	if (sbi->s_group_info == NULL) {
		printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
		return -ENOMEM;
	}
	sbi->s_buddy_cache = new_inode(sb);
	if (sbi->s_buddy_cache == NULL) {
		printk(KERN_ERR "EXT4-fs: can't get new inode\n");
		goto err_freesgi;
	}
	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
	for (i = 0; i < ngroups; i++) {
		desc = ext4_get_group_desc(sb, i, NULL);
		if (desc == NULL) {
			printk(KERN_ERR
				"EXT4-fs: can't read descriptor %u\n", i);
			goto err_freebuddy;
		}
		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
			goto err_freebuddy;
	}

	return 0;

err_freebuddy:
	/* free the group infos created so far, then the pointer tables */
	while (i-- > 0)
		kfree(ext4_get_group_info(sb, i));
	i = num_meta_group_infos;
	while (i-- > 0)
		kfree(sbi->s_group_info[i]);
	iput(sbi->s_buddy_cache);
err_freesgi:
	kfree(sbi->s_group_info);
	return -ENOMEM;
}

/*
 * Set up the multi-block allocator at mount time: per-order
 * offset/max tables, the buddy cache backend, tunables, locality
 * groups and the /proc interface.  Returns 0 or a negative error.
 */
int ext4_mb_init(struct super_block *sb, int needs_recovery)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned i, j;
	unsigned offset;
	unsigned max;
	int ret;

	/* one slot per buddy order, plus order 0 (the plain bitmap) */
	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);

	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
	if (sbi->s_mb_offsets == NULL) {
		return -ENOMEM;
	}

	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
	if (sbi->s_mb_maxs == NULL) {
		kfree(sbi->s_mb_offsets);
		return -ENOMEM;
	}

	/* order 0 is regular bitmap */
	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
	sbi->s_mb_offsets[0] = 0;

	i = 1;
	offset = 0;
	max = sb->s_blocksize << 2;
	do {
		sbi->s_mb_offsets[i] = offset;
		sbi->s_mb_maxs[i] = max;
		offset += 1 << (sb->s_blocksize_bits - i);
		max = max >> 1;
		i++;
	} while (i <= sb->s_blocksize_bits + 1);

	/* init file for buddy data */
	ret = ext4_mb_init_backend(sb);
	if (ret != 0) {
		kfree(sbi->s_mb_offsets);
		kfree(sbi->s_mb_maxs);
		return ret;
	}

	spin_lock_init(&sbi->s_md_lock);
	spin_lock_init(&sbi->s_bal_lock);

	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
	sbi->s_mb_stats = MB_DEFAULT_STATS;
	sbi->s_mb_stream_request =
MB_DEFAULT_STREAM_THRESHOLD; sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC; sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); if (sbi->s_locality_groups == NULL) { kfree(sbi->s_mb_offsets); kfree(sbi->s_mb_maxs); return -ENOMEM; } for_each_possible_cpu(i) { struct ext4_locality_group *lg; lg = per_cpu_ptr(sbi->s_locality_groups, i); mutex_init(&lg->lg_mutex); for (j = 0; j < PREALLOC_TB_SIZE; j++) INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); spin_lock_init(&lg->lg_prealloc_lock); } if (sbi->s_proc) proc_create_data("mb_groups", S_IRUGO, sbi->s_proc, &ext4_mb_seq_groups_fops, sb); if (sbi->s_journal) sbi->s_journal->j_commit_callback = release_blocks_on_commit; return 0; } /* need to called with the ext4 group lock held */ static void ext4_mb_cleanup_pa(struct ext4_group_info *grp) { struct ext4_prealloc_space *pa; struct list_head *cur, *tmp; int count = 0; list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); list_del(&pa->pa_group_list); count++; kmem_cache_free(ext4_pspace_cachep, pa); } if (count) mb_debug(1, "mballoc: %u PAs left\n", count); } int ext4_mb_release(struct super_block *sb) { ext4_group_t ngroups = ext4_get_groups_count(sb); ext4_group_t i; int num_meta_group_infos; struct ext4_group_info *grinfo; struct ext4_sb_info *sbi = EXT4_SB(sb); if (sbi->s_group_info) { for (i = 0; i < ngroups; i++) { grinfo = ext4_get_group_info(sb, i); #ifdef DOUBLE_CHECK kfree(grinfo->bb_bitmap); #endif ext4_lock_group(sb, i); ext4_mb_cleanup_pa(grinfo); ext4_unlock_group(sb, i); kfree(grinfo); } num_meta_group_infos = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> EXT4_DESC_PER_BLOCK_BITS(sb); for (i = 0; i < num_meta_group_infos; i++) kfree(sbi->s_group_info[i]); kfree(sbi->s_group_info); } kfree(sbi->s_mb_offsets); kfree(sbi->s_mb_maxs); if (sbi->s_buddy_cache) iput(sbi->s_buddy_cache); if (sbi->s_mb_stats) { printk(KERN_INFO "EXT4-fs: 
mballoc: %u blocks %u reqs (%u success)\n", atomic_read(&sbi->s_bal_allocated), atomic_read(&sbi->s_bal_reqs), atomic_read(&sbi->s_bal_success)); printk(KERN_INFO "EXT4-fs: mballoc: %u extents scanned, %u goal hits, " "%u 2^N hits, %u breaks, %u lost\n", atomic_read(&sbi->s_bal_ex_scanned), atomic_read(&sbi->s_bal_goals), atomic_read(&sbi->s_bal_2orders), atomic_read(&sbi->s_bal_breaks), atomic_read(&sbi->s_mb_lost_chunks)); printk(KERN_INFO "EXT4-fs: mballoc: %lu generated and it took %Lu\n", sbi->s_mb_buddies_generated++, sbi->s_mb_generation_time); printk(KERN_INFO "EXT4-fs: mballoc: %u preallocated, %u discarded\n", atomic_read(&sbi->s_mb_preallocated), atomic_read(&sbi->s_mb_discarded)); } free_percpu(sbi->s_locality_groups); if (sbi->s_proc) remove_proc_entry("mb_groups", sbi->s_proc); return 0; } static inline void ext4_issue_discard(struct super_block *sb, ext4_group_t block_group, ext4_grpblk_t block, int count) { int ret; ext4_fsblk_t discard_block; discard_block = block + ext4_group_first_block_no(sb, block_group); trace_ext4_discard_blocks(sb, (unsigned long long) discard_block, count); ret = sb_issue_discard(sb, discard_block, count); if (ret == EOPNOTSUPP) { ext4_warning(sb, "discard not supported, disabling"); clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD); } } /* * This function is called by the jbd2 layer once the commit has finished, * so we know we can free the blocks that were released with that commit. 
*/
static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
{
	struct super_block *sb = journal->j_private;
	struct ext4_buddy e4b;
	struct ext4_group_info *db;
	int err, count = 0, count2 = 0;
	struct ext4_free_data *entry;
	struct list_head *l, *ltmp;

	list_for_each_safe(l, ltmp, &txn->t_private_list) {
		entry = list_entry(l, struct ext4_free_data, list);

		mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
			 entry->count, entry->group, entry);

		if (test_opt(sb, DISCARD))
			ext4_issue_discard(sb, entry->group,
					entry->start_blk, entry->count);

		err = ext4_mb_load_buddy(sb, entry->group, &e4b);
		/* we expect to find existing buddy because it's pinned */
		BUG_ON(err != 0);

		db = e4b.bd_info;
		/* there are blocks to put in buddy to make them really free */
		count += entry->count;
		count2++;
		ext4_lock_group(sb, entry->group);
		/* Take it out of per group rb tree */
		rb_erase(&entry->node, &(db->bb_free_root));
		mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);

		if (!db->bb_free_root.rb_node) {
			/* No more items in the per group rb tree
			 * balance refcounts from ext4_mb_free_metadata()
			 */
			page_cache_release(e4b.bd_buddy_page);
			page_cache_release(e4b.bd_bitmap_page);
		}
		ext4_unlock_group(sb, entry->group);
		kmem_cache_free(ext4_free_ext_cachep, entry);
		ext4_mb_unload_buddy(&e4b);
	}

	mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
}

#ifdef CONFIG_EXT4_DEBUG
u8 mb_enable_debug __read_mostly;

static struct dentry *debugfs_dir;
static struct dentry *debugfs_debug;

/* expose mb_enable_debug as <debugfs>/ext4/mballoc-debug */
static void __init ext4_create_debugfs_entry(void)
{
	debugfs_dir = debugfs_create_dir("ext4", NULL);
	if (debugfs_dir)
		debugfs_debug = debugfs_create_u8("mballoc-debug",
						  S_IRUGO | S_IWUSR,
						  debugfs_dir,
						  &mb_enable_debug);
}

static void ext4_remove_debugfs_entry(void)
{
	debugfs_remove(debugfs_debug);
	debugfs_remove(debugfs_dir);
}

#else

static void __init ext4_create_debugfs_entry(void)
{
}

static void ext4_remove_debugfs_entry(void)
{
}

#endif

/*
 * Module init: create the slab caches used by mballoc, tearing down
 * the earlier ones if a later allocation fails.
 */
int __init init_ext4_mballoc(void)
{
	ext4_pspace_cachep =
		kmem_cache_create("ext4_prealloc_space",
				     sizeof(struct ext4_prealloc_space),
				     0, SLAB_RECLAIM_ACCOUNT, NULL);
	if (ext4_pspace_cachep == NULL)
		return -ENOMEM;

	ext4_ac_cachep =
		kmem_cache_create("ext4_alloc_context",
				     sizeof(struct ext4_allocation_context),
				     0, SLAB_RECLAIM_ACCOUNT, NULL);
	if (ext4_ac_cachep == NULL) {
		kmem_cache_destroy(ext4_pspace_cachep);
		return -ENOMEM;
	}

	ext4_free_ext_cachep =
		kmem_cache_create("ext4_free_block_extents",
				     sizeof(struct ext4_free_data),
				     0, SLAB_RECLAIM_ACCOUNT, NULL);
	if (ext4_free_ext_cachep == NULL) {
		kmem_cache_destroy(ext4_pspace_cachep);
		kmem_cache_destroy(ext4_ac_cachep);
		return -ENOMEM;
	}
	ext4_create_debugfs_entry();
	return 0;
}

void exit_ext4_mballoc(void)
{
	/*
	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
	 * before destroying the slab cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_pspace_cachep);
	kmem_cache_destroy(ext4_ac_cachep);
	kmem_cache_destroy(ext4_free_ext_cachep);
	ext4_remove_debugfs_entry();
}


/*
 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
 * Returns 0 if success or error code
 */
static noinline_for_stack int
ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
				handle_t *handle, unsigned int reserv_blks)
{
	struct buffer_head *bitmap_bh = NULL;
	struct ext4_group_desc *gdp;
	struct buffer_head *gdp_bh;
	struct ext4_sb_info *sbi;
	struct super_block *sb;
	ext4_fsblk_t block;
	int err, len;

	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(ac->ac_b_ex.fe_len <= 0);

	sb = ac->ac_sb;
	sbi = EXT4_SB(sb);

	err = -EIO;
	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
	if (!bitmap_bh)
		goto out_err;

	err = ext4_journal_get_write_access(handle, bitmap_bh);
	if (err)
		goto out_err;

	err = -EIO;
	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
	if (!gdp)
		goto out_err;

	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
			ext4_free_blks_count(sb, gdp));

	err = ext4_journal_get_write_access(handle, gdp_bh);
	if (err)
		goto out_err;

	block =
ext4_grp_offs_to_block(sb, &ac->ac_b_ex);

	len = ac->ac_b_ex.fe_len;
	if (!ext4_data_block_valid(sbi, block, len)) {
		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
			   "fs metadata\n", block, block+len);
		/* File system mounted not to panic on error
		 * Fix the bitmap and repeat the block allocation
		 * We leak some of the blocks here.
		 */
		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
		mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
			    ac->ac_b_ex.fe_len);
		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!err)
			err = -EAGAIN;
		goto out_err;
	}

	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
#ifdef AGGRESSIVE_CHECK
	{
		int i;
		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
						bitmap_bh->b_data));
		}
	}
#endif
	/* mark the range in the on-disk bitmap and update the descriptor */
	mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,ac->ac_b_ex.fe_len);
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
		ext4_free_blks_set(sb, gdp,
					ext4_free_blocks_after_init(sb,
					ac->ac_b_ex.fe_group, gdp));
	}
	len = ext4_free_blks_count(sb, gdp) - ac->ac_b_ex.fe_len;
	ext4_free_blks_set(sb, gdp, len);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);

	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
	percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
	/*
	 * Now reduce the dirty block count also. Should not go negative
	 */
	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
		/* release all the reserved blocks if non delalloc */
		percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi,
							  ac->ac_b_ex.fe_group);
		atomic_sub(ac->ac_b_ex.fe_len,
			   &sbi->s_flex_groups[flex_group].free_blocks);
	}

	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
	if (err)
		goto out_err;
	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);

out_err:
	ext4_mark_super_dirty(sb);
	brelse(bitmap_bh);
	return err;
}

/*
 * here we normalize request for locality group
 * Group request are normalized to s_strip size if we set the same via mount
 * option. If not we set it to s_mb_group_prealloc which can be configured via
 * /sys/fs/ext4/<partition>/mb_group_prealloc
 *
 * XXX: should we try to preallocate more than the group has now?
 */
static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_locality_group *lg = ac->ac_lg;

	BUG_ON(lg == NULL);
	if (EXT4_SB(sb)->s_stripe)
		ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe;
	else
		ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
	mb_debug(1, "#%u: goal %u blocks for locality group\n",
		current->pid, ac->ac_g_ex.fe_len);
}

/*
 * Normalization means making request better in terms of
 * size and alignment
 */
static noinline_for_stack void
ext4_mb_normalize_request(struct ext4_allocation_context *ac,
				struct ext4_allocation_request *ar)
{
	int bsbits, max;
	ext4_lblk_t end;
	loff_t size, orig_size, start_off;
	ext4_lblk_t start;
	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
	struct ext4_prealloc_space *pa;

	/* do normalize only data requests, metadata requests
	   do not need preallocation */
	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
		return;

	/* sometime caller may want exact blocks */
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		return;

	/* caller may indicate that preallocation isn't
	 * required (it's a tail, for example) */
	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
		return;

	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
		ext4_mb_normalize_group_request(ac);
		return ;
	}

	bsbits = ac->ac_sb->s_blocksize_bits;

	/* first, let's learn actual file size
	 * given current request is allocated */
	size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
	size = size << bsbits;
	if (size < i_size_read(ac->ac_inode))
		size = i_size_read(ac->ac_inode);
	orig_size = size;

	/* max size of free chunks */
	max = 2 << bsbits;

#define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
		(req <= (size) || max <= (chunk_size))

	/* first, try to predict filesize */
	/* XXX: should this table be tunable? */
	start_off = 0;
	if (size <= 16 * 1024) {
		size = 16 * 1024;
	} else if (size <= 32 * 1024) {
		size = 32 * 1024;
	} else if (size <= 64 * 1024) {
		size = 64 * 1024;
	} else if (size <= 128 * 1024) {
		size = 128 * 1024;
	} else if (size <= 256 * 1024) {
		size = 256 * 1024;
	} else if (size <= 512 * 1024) {
		size = 512 * 1024;
	} else if (size <= 1024 * 1024) {
		size = 1024 * 1024;
	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
						(21 - bsbits)) << 21;
		size = 2 * 1024 * 1024;
	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
							(22 - bsbits)) << 22;
		size = 4 * 1024 * 1024;
	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
					(8<<20)>>bsbits, max, 8 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
							(23 - bsbits)) << 23;
		size = 8 * 1024 * 1024;
	} else {
		start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
		size	  = ac->ac_o_ex.fe_len << bsbits;
	}
	size = size >> bsbits;
	start = start_off >> bsbits;

	/* don't cover already allocated blocks in selected range */
	if (ar->pleft && start <= ar->lleft) {
		size -= ar->lleft + 1 - start;
		start = ar->lleft + 1;
	}
	if (ar->pright && start + size - 1 >= ar->lright)
		size -= start + size - ar->lright;

	end = start + size;

	/* check we don't cross already preallocated blocks
*/ rcu_read_lock(); list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { ext4_lblk_t pa_end; if (pa->pa_deleted) continue; spin_lock(&pa->pa_lock); if (pa->pa_deleted) { spin_unlock(&pa->pa_lock); continue; } pa_end = pa->pa_lstart + pa->pa_len; /* PA must not overlap original request */ BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end || ac->ac_o_ex.fe_logical < pa->pa_lstart)); /* skip PAs this normalized request doesn't overlap with */ if (pa->pa_lstart >= end || pa_end <= start) { spin_unlock(&pa->pa_lock); continue; } BUG_ON(pa->pa_lstart <= start && pa_end >= end); /* adjust start or end to be adjacent to this pa */ if (pa_end <= ac->ac_o_ex.fe_logical) { BUG_ON(pa_end < start); start = pa_end; } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) { BUG_ON(pa->pa_lstart > end); end = pa->pa_lstart; } spin_unlock(&pa->pa_lock); } rcu_read_unlock(); size = end - start; /* XXX: extra loop to check we really don't overlap preallocations */ rcu_read_lock(); list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { ext4_lblk_t pa_end; spin_lock(&pa->pa_lock); if (pa->pa_deleted == 0) { pa_end = pa->pa_lstart + pa->pa_len; BUG_ON(!(start >= pa_end || end <= pa->pa_lstart)); } spin_unlock(&pa->pa_lock); } rcu_read_unlock(); if (start + size <= ac->ac_o_ex.fe_logical && start > ac->ac_o_ex.fe_logical) { printk(KERN_ERR "start %lu, size %lu, fe_logical %lu\n", (unsigned long) start, (unsigned long) size, (unsigned long) ac->ac_o_ex.fe_logical); } BUG_ON(start + size <= ac->ac_o_ex.fe_logical && start > ac->ac_o_ex.fe_logical); BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); /* now prepare goal request */ /* XXX: is it better to align blocks WRT to logical * placement or satisfy big request as is */ ac->ac_g_ex.fe_logical = start; ac->ac_g_ex.fe_len = size; /* define goal start in order to merge */ if (ar->pright && (ar->lright == (start + size))) { /* merge to the right */ ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, 
				    &ac->ac_f_ex.fe_group,
				    &ac->ac_f_ex.fe_start);
		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
	}
	if (ar->pleft && (ar->lleft + 1 == start)) {
		/* merge to the left */
		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
						&ac->ac_f_ex.fe_group,
						&ac->ac_f_ex.fe_start);
		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
	}

	mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size,
		(unsigned) orig_size, (unsigned) start);
}

/*
 * Fold the outcome of one allocation into the per-sb mballoc statistics
 * (only when stats are enabled and the goal was more than a single block),
 * then fire the matching tracepoint for the allocation vs. preallocation
 * path.  Counters are atomics, so no extra locking is needed here.
 */
static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);

	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
		atomic_inc(&sbi->s_bal_reqs);
		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
		/* "success" == we got at least as much as originally asked */
		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
			atomic_inc(&sbi->s_bal_success);
		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
			atomic_inc(&sbi->s_bal_goals);
		if (ac->ac_found > sbi->s_mb_max_to_scan)
			atomic_inc(&sbi->s_bal_breaks);
	}

	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
		trace_ext4_mballoc_alloc(ac);
	else
		trace_ext4_mballoc_prealloc(ac);
}

/*
 * Called on failure; free up any blocks from the inode PA for this
 * context. We don't need this for MB_GROUP_PA because we only change
 * pa_free in ext4_mb_release_context(), but on failure, we've already
 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
 */
static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
{
	struct ext4_prealloc_space *pa = ac->ac_pa;
	int len;

	/* only inode PAs had pa_free decremented at allocation time */
	if (pa && pa->pa_type == MB_INODE_PA) {
		len = ac->ac_b_ex.fe_len;
		pa->pa_free += len;
	}
}

/*
 * use blocks preallocated to inode
 */
static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
				struct ext4_prealloc_space *pa)
{
	ext4_fsblk_t start;
	ext4_fsblk_t end;
	int len;

	/* found preallocated blocks, use them */
	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
	/* don't hand out more than the PA holds or the caller asked for */
	end = min(pa->pa_pstart + pa->pa_len, start + ac->ac_o_ex.fe_len);
	len = end - start;
	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
					&ac->ac_b_ex.fe_start);
	ac->ac_b_ex.fe_len = len;
	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_pa = pa;

	BUG_ON(start < pa->pa_pstart);
	BUG_ON(start + len > pa->pa_pstart + pa->pa_len);
	BUG_ON(pa->pa_free < len);
	pa->pa_free -= len;

	mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
}

/*
 * use blocks preallocated to locality group
 */
static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
				struct ext4_prealloc_space *pa)
{
	unsigned int len = ac->ac_o_ex.fe_len;

	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
					&ac->ac_b_ex.fe_group,
					&ac->ac_b_ex.fe_start);
	ac->ac_b_ex.fe_len = len;
	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_pa = pa;

	/* we don't correct pa_pstart or pa_plen here to avoid
	 * possible race when the group is being loaded concurrently
	 * instead we correct pa later, after blocks are marked
	 * in on-disk bitmap -- see ext4_mb_release_context()
	 * Other CPUs are prevented from allocating from this pa by lg_mutex
	 */
	mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
}

/*
 * Return the prealloc space that have minimal distance
 * from the goal block. @cpa is the prealloc
 * space that is having currently known minimal distance
 * from the goal block.
*/ static struct ext4_prealloc_space * ext4_mb_check_group_pa(ext4_fsblk_t goal_block, struct ext4_prealloc_space *pa, struct ext4_prealloc_space *cpa) { ext4_fsblk_t cur_distance, new_distance; if (cpa == NULL) { atomic_inc(&pa->pa_count); return pa; } cur_distance = abs(goal_block - cpa->pa_pstart); new_distance = abs(goal_block - pa->pa_pstart); if (cur_distance < new_distance) return cpa; /* drop the previous reference */ atomic_dec(&cpa->pa_count); atomic_inc(&pa->pa_count); return pa; } /* * search goal blocks in preallocated space */ static noinline_for_stack int ext4_mb_use_preallocated(struct ext4_allocation_context *ac) { int order, i; struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); struct ext4_locality_group *lg; struct ext4_prealloc_space *pa, *cpa = NULL; ext4_fsblk_t goal_block; /* only data can be preallocated */ if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) return 0; /* first, try per-file preallocation */ rcu_read_lock(); list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { /* all fields in this condition don't change, * so we can skip locking for them */ if (ac->ac_o_ex.fe_logical < pa->pa_lstart || ac->ac_o_ex.fe_logical >= pa->pa_lstart + pa->pa_len) continue; /* non-extent files can't have physical blocks past 2^32 */ if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && pa->pa_pstart + pa->pa_len > EXT4_MAX_BLOCK_FILE_PHYS) continue; /* found preallocated blocks, use them */ spin_lock(&pa->pa_lock); if (pa->pa_deleted == 0 && pa->pa_free) { atomic_inc(&pa->pa_count); ext4_mb_use_inode_pa(ac, pa); spin_unlock(&pa->pa_lock); ac->ac_criteria = 10; rcu_read_unlock(); return 1; } spin_unlock(&pa->pa_lock); } rcu_read_unlock(); /* can we use group allocation? 
*/ if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) return 0; /* inode may have no locality group for some reason */ lg = ac->ac_lg; if (lg == NULL) return 0; order = fls(ac->ac_o_ex.fe_len) - 1; if (order > PREALLOC_TB_SIZE - 1) /* The max size of hash table is PREALLOC_TB_SIZE */ order = PREALLOC_TB_SIZE - 1; goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); /* * search for the prealloc space that is having * minimal distance from the goal block. */ for (i = order; i < PREALLOC_TB_SIZE; i++) { rcu_read_lock(); list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i], pa_inode_list) { spin_lock(&pa->pa_lock); if (pa->pa_deleted == 0 && pa->pa_free >= ac->ac_o_ex.fe_len) { cpa = ext4_mb_check_group_pa(goal_block, pa, cpa); } spin_unlock(&pa->pa_lock); } rcu_read_unlock(); } if (cpa) { ext4_mb_use_group_pa(ac, cpa); ac->ac_criteria = 20; return 1; } return 0; } /* * the function goes through all block freed in the group * but not yet committed and marks them used in in-core bitmap. * buddy must be generated from this bitmap * Need to be called with the ext4 group lock held */ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, ext4_group_t group) { struct rb_node *n; struct ext4_group_info *grp; struct ext4_free_data *entry; grp = ext4_get_group_info(sb, group); n = rb_first(&(grp->bb_free_root)); while (n) { entry = rb_entry(n, struct ext4_free_data, node); mb_set_bits(bitmap, entry->start_blk, entry->count); n = rb_next(n); } return; } /* * the function goes through all preallocation in this group and marks them * used in in-core bitmap. 
 buddy must be generated from this bitmap
 * Need to be called with ext4 group lock held
 */
static noinline_for_stack
void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_prealloc_space *pa;
	struct list_head *cur;
	ext4_group_t groupnr;
	ext4_grpblk_t start;
	int preallocated = 0;
	int count = 0;
	int len;

	/* all form of preallocation discards first load group,
	 * so the only competing code is preallocation use.
	 * we don't need any locking here
	 * notice we do NOT ignore preallocations with pa_deleted
	 * otherwise we could leave used blocks available for
	 * allocation in buddy when concurrent ext4_mb_put_pa()
	 * is dropping preallocation
	 */
	list_for_each(cur, &grp->bb_prealloc_list) {
		pa = list_entry(cur, struct ext4_prealloc_space,
				pa_group_list);
		spin_lock(&pa->pa_lock);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
					     &groupnr, &start);
		len = pa->pa_len;
		spin_unlock(&pa->pa_lock);
		if (unlikely(len == 0))
			continue;
		BUG_ON(groupnr != group);
		mb_set_bits(bitmap, start, len);
		preallocated += len;
		count++;
	}
	mb_debug(1, "prellocated %u for group %u\n", preallocated, group);
}

/* RCU callback: the grace period has passed, free the PA descriptor. */
static void ext4_mb_pa_callback(struct rcu_head *head)
{
	struct ext4_prealloc_space *pa;
	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
	kmem_cache_free(ext4_pspace_cachep, pa);
}

/*
 * drops a reference to preallocated space descriptor
 * if this was the last reference and the space is consumed
 */
static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
			struct super_block *sb, struct ext4_prealloc_space *pa)
{
	ext4_group_t grp;
	ext4_fsblk_t grp_blk;

	/* last ref gone AND nothing left in the PA -> tear it down */
	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
		return;

	/* in this short window concurrent discard can set pa_deleted */
	spin_lock(&pa->pa_lock);
	if (pa->pa_deleted == 1) {
		spin_unlock(&pa->pa_lock);
		return;
	}

	pa->pa_deleted = 1;
	spin_unlock(&pa->pa_lock);

	grp_blk = pa->pa_pstart;
	/*
	 * If doing group-based preallocation,
pa_pstart may be in the * next group when pa is used up */ if (pa->pa_type == MB_GROUP_PA) grp_blk--; ext4_get_group_no_and_offset(sb, grp_blk, &grp, NULL); /* * possible race: * * P1 (buddy init) P2 (regular allocation) * find block B in PA * copy on-disk bitmap to buddy * mark B in on-disk bitmap * drop PA from group * mark all PAs in buddy * * thus, P1 initializes buddy with B available. to prevent this * we make "copy" and "mark all PAs" atomic and serialize "drop PA" * against that pair */ ext4_lock_group(sb, grp); list_del(&pa->pa_group_list); ext4_unlock_group(sb, grp); spin_lock(pa->pa_obj_lock); list_del_rcu(&pa->pa_inode_list); spin_unlock(pa->pa_obj_lock); call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); } /* * creates new preallocated space for given inode */ static noinline_for_stack int ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) { struct super_block *sb = ac->ac_sb; struct ext4_prealloc_space *pa; struct ext4_group_info *grp; struct ext4_inode_info *ei; /* preallocate only when found space is larger then requested */ BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); BUG_ON(ac->ac_status != AC_STATUS_FOUND); BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS); if (pa == NULL) return -ENOMEM; if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) { int winl; int wins; int win; int offs; /* we can't allocate as much as normalizer wants. 
* so, found space must get proper lstart * to cover original request */ BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); /* we're limited by original request in that * logical block must be covered any way * winl is window we can move our chunk within */ winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical; /* also, we should cover whole original request */ wins = ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len; /* the smallest one defines real window */ win = min(winl, wins); offs = ac->ac_o_ex.fe_logical % ac->ac_b_ex.fe_len; if (offs && offs < win) win = offs; ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - win; BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); } /* preallocation can change ac_b_ex, thus we store actually * allocated blocks for history */ ac->ac_f_ex = ac->ac_b_ex; pa->pa_lstart = ac->ac_b_ex.fe_logical; pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); pa->pa_len = ac->ac_b_ex.fe_len; pa->pa_free = pa->pa_len; atomic_set(&pa->pa_count, 1); spin_lock_init(&pa->pa_lock); INIT_LIST_HEAD(&pa->pa_inode_list); INIT_LIST_HEAD(&pa->pa_group_list); pa->pa_deleted = 0; pa->pa_type = MB_INODE_PA; mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa, pa->pa_pstart, pa->pa_len, pa->pa_lstart); trace_ext4_mb_new_inode_pa(ac, pa); ext4_mb_use_inode_pa(ac, pa); atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); ei = EXT4_I(ac->ac_inode); grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); pa->pa_obj_lock = &ei->i_prealloc_lock; pa->pa_inode = ac->ac_inode; ext4_lock_group(sb, ac->ac_b_ex.fe_group); list_add(&pa->pa_group_list, &grp->bb_prealloc_list); ext4_unlock_group(sb, ac->ac_b_ex.fe_group); spin_lock(pa->pa_obj_lock); list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); spin_unlock(pa->pa_obj_lock); return 0; } /* * creates new preallocated space for locality group inodes belongs to */ static noinline_for_stack int 
ext4_mb_new_group_pa(struct ext4_allocation_context *ac) { struct super_block *sb = ac->ac_sb; struct ext4_locality_group *lg; struct ext4_prealloc_space *pa; struct ext4_group_info *grp; /* preallocate only when found space is larger then requested */ BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); BUG_ON(ac->ac_status != AC_STATUS_FOUND); BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); BUG_ON(ext4_pspace_cachep == NULL); pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS); if (pa == NULL) return -ENOMEM; /* preallocation can change ac_b_ex, thus we store actually * allocated blocks for history */ ac->ac_f_ex = ac->ac_b_ex; pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); pa->pa_lstart = pa->pa_pstart; pa->pa_len = ac->ac_b_ex.fe_len; pa->pa_free = pa->pa_len; atomic_set(&pa->pa_count, 1); spin_lock_init(&pa->pa_lock); INIT_LIST_HEAD(&pa->pa_inode_list); INIT_LIST_HEAD(&pa->pa_group_list); pa->pa_deleted = 0; pa->pa_type = MB_GROUP_PA; mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa, pa->pa_pstart, pa->pa_len, pa->pa_lstart); trace_ext4_mb_new_group_pa(ac, pa); ext4_mb_use_group_pa(ac, pa); atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); lg = ac->ac_lg; BUG_ON(lg == NULL); pa->pa_obj_lock = &lg->lg_prealloc_lock; pa->pa_inode = NULL; ext4_lock_group(sb, ac->ac_b_ex.fe_group); list_add(&pa->pa_group_list, &grp->bb_prealloc_list); ext4_unlock_group(sb, ac->ac_b_ex.fe_group); /* * We will later add the new pa to the right bucket * after updating the pa_free in ext4_mb_release_context */ return 0; } static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac) { int err; if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) err = ext4_mb_new_group_pa(ac); else err = ext4_mb_new_inode_pa(ac); return err; } /* * finds all unused blocks in on-disk bitmap, frees them in * in-core bitmap and buddy. * @pa must be unlinked from inode and group lists, so that * nobody else can find/use it. 
* the caller MUST hold group/inode locks. * TODO: optimize the case when there are no in-core structures yet */ static noinline_for_stack int ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, struct ext4_prealloc_space *pa, struct ext4_allocation_context *ac) { struct super_block *sb = e4b->bd_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); unsigned int end; unsigned int next; ext4_group_t group; ext4_grpblk_t bit; unsigned long long grp_blk_start; int err = 0; int free = 0; BUG_ON(pa->pa_deleted == 0); ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); grp_blk_start = pa->pa_pstart - bit; BUG_ON(group != e4b->bd_group && pa->pa_len != 0); end = bit + pa->pa_len; if (ac) { ac->ac_sb = sb; ac->ac_inode = pa->pa_inode; } while (bit < end) { bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); if (bit >= end) break; next = mb_find_next_bit(bitmap_bh->b_data, end, bit); mb_debug(1, " free preallocated %u/%u in group %u\n", (unsigned) ext4_group_first_block_no(sb, group) + bit, (unsigned) next - bit, (unsigned) group); free += next - bit; if (ac) { ac->ac_b_ex.fe_group = group; ac->ac_b_ex.fe_start = bit; ac->ac_b_ex.fe_len = next - bit; ac->ac_b_ex.fe_logical = 0; trace_ext4_mballoc_discard(ac); } trace_ext4_mb_release_inode_pa(sb, ac, pa, grp_blk_start + bit, next - bit); mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); bit = next + 1; } if (free != pa->pa_free) { printk(KERN_CRIT "pa %p: logic %lu, phys. %lu, len %lu\n", pa, (unsigned long) pa->pa_lstart, (unsigned long) pa->pa_pstart, (unsigned long) pa->pa_len); ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", free, pa->pa_free); /* * pa is already deleted so we use the value obtained * from the bitmap and continue. 
*/ } atomic_add(free, &sbi->s_mb_discarded); return err; } static noinline_for_stack int ext4_mb_release_group_pa(struct ext4_buddy *e4b, struct ext4_prealloc_space *pa, struct ext4_allocation_context *ac) { struct super_block *sb = e4b->bd_sb; ext4_group_t group; ext4_grpblk_t bit; trace_ext4_mb_release_group_pa(sb, ac, pa); BUG_ON(pa->pa_deleted == 0); ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); BUG_ON(group != e4b->bd_group && pa->pa_len != 0); mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); if (ac) { ac->ac_sb = sb; ac->ac_inode = NULL; ac->ac_b_ex.fe_group = group; ac->ac_b_ex.fe_start = bit; ac->ac_b_ex.fe_len = pa->pa_len; ac->ac_b_ex.fe_logical = 0; trace_ext4_mballoc_discard(ac); } return 0; } /* * releases all preallocations in given group * * first, we need to decide discard policy: * - when do we discard * 1) ENOSPC * - how many do we discard * 1) how many requested */ static noinline_for_stack int ext4_mb_discard_group_preallocations(struct super_block *sb, ext4_group_t group, int needed) { struct ext4_group_info *grp = ext4_get_group_info(sb, group); struct buffer_head *bitmap_bh = NULL; struct ext4_prealloc_space *pa, *tmp; struct ext4_allocation_context *ac; struct list_head list; struct ext4_buddy e4b; int err; int busy = 0; int free = 0; mb_debug(1, "discard preallocation for group %u\n", group); if (list_empty(&grp->bb_prealloc_list)) return 0; bitmap_bh = ext4_read_block_bitmap(sb, group); if (bitmap_bh == NULL) { ext4_error(sb, "Error reading block bitmap for %u", group); return 0; } err = ext4_mb_load_buddy(sb, group, &e4b); if (err) { ext4_error(sb, "Error loading buddy information for %u", group); put_bh(bitmap_bh); return 0; } if (needed == 0) needed = EXT4_BLOCKS_PER_GROUP(sb) + 1; INIT_LIST_HEAD(&list); ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); if (ac) ac->ac_sb = sb; repeat: ext4_lock_group(sb, group); list_for_each_entry_safe(pa, tmp, 
&grp->bb_prealloc_list, pa_group_list) { spin_lock(&pa->pa_lock); if (atomic_read(&pa->pa_count)) { spin_unlock(&pa->pa_lock); busy = 1; continue; } if (pa->pa_deleted) { spin_unlock(&pa->pa_lock); continue; } /* seems this one can be freed ... */ pa->pa_deleted = 1; /* we can trust pa_free ... */ free += pa->pa_free; spin_unlock(&pa->pa_lock); list_del(&pa->pa_group_list); list_add(&pa->u.pa_tmp_list, &list); } /* if we still need more blocks and some PAs were used, try again */ if (free < needed && busy) { busy = 0; ext4_unlock_group(sb, group); /* * Yield the CPU here so that we don't get soft lockup * in non preempt case. */ yield(); goto repeat; } /* found anything to free? */ if (list_empty(&list)) { BUG_ON(free != 0); goto out; } /* now free all selected PAs */ list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { /* remove from object (inode or locality group) */ spin_lock(pa->pa_obj_lock); list_del_rcu(&pa->pa_inode_list); spin_unlock(pa->pa_obj_lock); if (pa->pa_type == MB_GROUP_PA) ext4_mb_release_group_pa(&e4b, pa, ac); else ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac); list_del(&pa->u.pa_tmp_list); call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); } out: ext4_unlock_group(sb, group); if (ac) kmem_cache_free(ext4_ac_cachep, ac); ext4_mb_unload_buddy(&e4b); put_bh(bitmap_bh); return free; } /* * releases all non-used preallocated blocks for given inode * * It's important to discard preallocations under i_data_sem * We don't want another block to be served from the prealloc * space when we are discarding the inode prealloc space. * * FIXME!! 
Make sure it is valid at all the call sites */ void ext4_discard_preallocations(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); struct super_block *sb = inode->i_sb; struct buffer_head *bitmap_bh = NULL; struct ext4_prealloc_space *pa, *tmp; struct ext4_allocation_context *ac; ext4_group_t group = 0; struct list_head list; struct ext4_buddy e4b; int err; if (!S_ISREG(inode->i_mode)) { /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/ return; } mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino); trace_ext4_discard_preallocations(inode); INIT_LIST_HEAD(&list); ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); if (ac) { ac->ac_sb = sb; ac->ac_inode = inode; } repeat: /* first, collect all pa's in the inode */ spin_lock(&ei->i_prealloc_lock); while (!list_empty(&ei->i_prealloc_list)) { pa = list_entry(ei->i_prealloc_list.next, struct ext4_prealloc_space, pa_inode_list); BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); spin_lock(&pa->pa_lock); if (atomic_read(&pa->pa_count)) { /* this shouldn't happen often - nobody should * use preallocation while we're discarding it */ spin_unlock(&pa->pa_lock); spin_unlock(&ei->i_prealloc_lock); printk(KERN_ERR "uh-oh! used pa while discarding\n"); WARN_ON(1); schedule_timeout_uninterruptible(HZ); goto repeat; } if (pa->pa_deleted == 0) { pa->pa_deleted = 1; spin_unlock(&pa->pa_lock); list_del_rcu(&pa->pa_inode_list); list_add(&pa->u.pa_tmp_list, &list); continue; } /* someone is deleting pa right now */ spin_unlock(&pa->pa_lock); spin_unlock(&ei->i_prealloc_lock); /* we have to wait here because pa_deleted * doesn't mean pa is already unlinked from * the list. 
as we might be called from * ->clear_inode() the inode will get freed * and concurrent thread which is unlinking * pa from inode's list may access already * freed memory, bad-bad-bad */ /* XXX: if this happens too often, we can * add a flag to force wait only in case * of ->clear_inode(), but not in case of * regular truncate */ schedule_timeout_uninterruptible(HZ); goto repeat; } spin_unlock(&ei->i_prealloc_lock); list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { BUG_ON(pa->pa_type != MB_INODE_PA); ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL); err = ext4_mb_load_buddy(sb, group, &e4b); if (err) { ext4_error(sb, "Error loading buddy information for %u", group); continue; } bitmap_bh = ext4_read_block_bitmap(sb, group); if (bitmap_bh == NULL) { ext4_error(sb, "Error reading block bitmap for %u", group); ext4_mb_unload_buddy(&e4b); continue; } ext4_lock_group(sb, group); list_del(&pa->pa_group_list); ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac); ext4_unlock_group(sb, group); ext4_mb_unload_buddy(&e4b); put_bh(bitmap_bh); list_del(&pa->u.pa_tmp_list); call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); } if (ac) kmem_cache_free(ext4_ac_cachep, ac); } /* * finds all preallocated spaces and return blocks being freed to them * if preallocated space becomes full (no block is used from the space) * then the function frees space in buddy * XXX: at the moment, truncate (which is the only way to free blocks) * discards all preallocations */ static void ext4_mb_return_to_preallocation(struct inode *inode, struct ext4_buddy *e4b, sector_t block, int count) { BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list)); } #ifdef CONFIG_EXT4_DEBUG static void ext4_mb_show_ac(struct ext4_allocation_context *ac) { struct super_block *sb = ac->ac_sb; ext4_group_t ngroups, i; if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED) return; printk(KERN_ERR "EXT4-fs: Can't allocate:" " Allocation context details:\n"); printk(KERN_ERR "EXT4-fs: status %d flags %d\n", 
ac->ac_status, ac->ac_flags); printk(KERN_ERR "EXT4-fs: orig %lu/%lu/%lu@%lu, goal %lu/%lu/%lu@%lu, " "best %lu/%lu/%lu@%lu cr %d\n", (unsigned long)ac->ac_o_ex.fe_group, (unsigned long)ac->ac_o_ex.fe_start, (unsigned long)ac->ac_o_ex.fe_len, (unsigned long)ac->ac_o_ex.fe_logical, (unsigned long)ac->ac_g_ex.fe_group, (unsigned long)ac->ac_g_ex.fe_start, (unsigned long)ac->ac_g_ex.fe_len, (unsigned long)ac->ac_g_ex.fe_logical, (unsigned long)ac->ac_b_ex.fe_group, (unsigned long)ac->ac_b_ex.fe_start, (unsigned long)ac->ac_b_ex.fe_len, (unsigned long)ac->ac_b_ex.fe_logical, (int)ac->ac_criteria); printk(KERN_ERR "EXT4-fs: %lu scanned, %d found\n", ac->ac_ex_scanned, ac->ac_found); printk(KERN_ERR "EXT4-fs: groups: \n"); ngroups = ext4_get_groups_count(sb); for (i = 0; i < ngroups; i++) { struct ext4_group_info *grp = ext4_get_group_info(sb, i); struct ext4_prealloc_space *pa; ext4_grpblk_t start; struct list_head *cur; ext4_lock_group(sb, i); list_for_each(cur, &grp->bb_prealloc_list) { pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); spin_lock(&pa->pa_lock); ext4_get_group_no_and_offset(sb, pa->pa_pstart, NULL, &start); spin_unlock(&pa->pa_lock); printk(KERN_ERR "PA:%u:%d:%u \n", i, start, pa->pa_len); } ext4_unlock_group(sb, i); if (grp->bb_free == 0) continue; printk(KERN_ERR "%u: %d/%d \n", i, grp->bb_free, grp->bb_fragments); } printk(KERN_ERR "\n"); } #else static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) { return; } #endif /* * We use locality group preallocation for small size file. 
The size of the * file is determined by the current size or the resulting size after * allocation which ever is larger * * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req */ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); int bsbits = ac->ac_sb->s_blocksize_bits; loff_t size, isize; if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) return; if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) return; size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len; isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1) >> bsbits; if ((size == isize) && !ext4_fs_is_busy(sbi) && (atomic_read(&ac->ac_inode->i_writecount) == 0)) { ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC; return; } /* don't use group allocation for large files */ size = max(size, isize); if (size > sbi->s_mb_stream_request) { ac->ac_flags |= EXT4_MB_STREAM_ALLOC; return; } BUG_ON(ac->ac_lg != NULL); /* * locality group prealloc space are per cpu. The reason for having * per cpu locality group is to reduce the contention between block * request from multiple CPUs. 
*/ ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups); /* we're going to use group allocation */ ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; /* serialize all allocations in the group */ mutex_lock(&ac->ac_lg->lg_mutex); } static noinline_for_stack int ext4_mb_initialize_context(struct ext4_allocation_context *ac, struct ext4_allocation_request *ar) { struct super_block *sb = ar->inode->i_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; ext4_group_t group; unsigned int len; ext4_fsblk_t goal; ext4_grpblk_t block; /* we can't allocate > group size */ len = ar->len; /* just a dirty hack to filter too big requests */ if (len >= EXT4_BLOCKS_PER_GROUP(sb) - 10) len = EXT4_BLOCKS_PER_GROUP(sb) - 10; /* start searching from the goal */ goal = ar->goal; if (goal < le32_to_cpu(es->s_first_data_block) || goal >= ext4_blocks_count(es)) goal = le32_to_cpu(es->s_first_data_block); ext4_get_group_no_and_offset(sb, goal, &group, &block); /* set up allocation goals */ memset(ac, 0, sizeof(struct ext4_allocation_context)); ac->ac_b_ex.fe_logical = ar->logical; ac->ac_status = AC_STATUS_CONTINUE; ac->ac_sb = sb; ac->ac_inode = ar->inode; ac->ac_o_ex.fe_logical = ar->logical; ac->ac_o_ex.fe_group = group; ac->ac_o_ex.fe_start = block; ac->ac_o_ex.fe_len = len; ac->ac_g_ex.fe_logical = ar->logical; ac->ac_g_ex.fe_group = group; ac->ac_g_ex.fe_start = block; ac->ac_g_ex.fe_len = len; ac->ac_flags = ar->flags; /* we have to define context: we'll we work with a file or * locality group. this is a policy, actually */ ext4_mb_group_or_file(ac); mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, " "left: %u/%u, right %u/%u to %swritable\n", (unsigned) ar->len, (unsigned) ar->logical, (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, (unsigned) ar->lleft, (unsigned) ar->pleft, (unsigned) ar->lright, (unsigned) ar->pright, atomic_read(&ar->inode->i_writecount) ? 
"" : "non-"); return 0; } static noinline_for_stack void ext4_mb_discard_lg_preallocations(struct super_block *sb, struct ext4_locality_group *lg, int order, int total_entries) { ext4_group_t group = 0; struct ext4_buddy e4b; struct list_head discard_list; struct ext4_prealloc_space *pa, *tmp; struct ext4_allocation_context *ac; mb_debug(1, "discard locality group preallocation\n"); INIT_LIST_HEAD(&discard_list); ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); if (ac) ac->ac_sb = sb; spin_lock(&lg->lg_prealloc_lock); list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], pa_inode_list) { spin_lock(&pa->pa_lock); if (atomic_read(&pa->pa_count)) { /* * This is the pa that we just used * for block allocation. So don't * free that */ spin_unlock(&pa->pa_lock); continue; } if (pa->pa_deleted) { spin_unlock(&pa->pa_lock); continue; } /* only lg prealloc space */ BUG_ON(pa->pa_type != MB_GROUP_PA); /* seems this one can be freed ... */ pa->pa_deleted = 1; spin_unlock(&pa->pa_lock); list_del_rcu(&pa->pa_inode_list); list_add(&pa->u.pa_tmp_list, &discard_list); total_entries--; if (total_entries <= 5) { /* * we want to keep only 5 entries * allowing it to grow to 8. This * mak sure we don't call discard * soon for this list. */ break; } } spin_unlock(&lg->lg_prealloc_lock); list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL); if (ext4_mb_load_buddy(sb, group, &e4b)) { ext4_error(sb, "Error loading buddy information for %u", group); continue; } ext4_lock_group(sb, group); list_del(&pa->pa_group_list); ext4_mb_release_group_pa(&e4b, pa, ac); ext4_unlock_group(sb, group); ext4_mb_unload_buddy(&e4b); list_del(&pa->u.pa_tmp_list); call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); } if (ac) kmem_cache_free(ext4_ac_cachep, ac); } /* * We have incremented pa_count. So it cannot be freed at this * point. Also we hold lg_mutex. So no parallel allocation is * possible from this lg. 
That means pa_free cannot be updated. * * A parallel ext4_mb_discard_group_preallocations is possible. * which can cause the lg_prealloc_list to be updated. */ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) { int order, added = 0, lg_prealloc_count = 1; struct super_block *sb = ac->ac_sb; struct ext4_locality_group *lg = ac->ac_lg; struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; order = fls(pa->pa_free) - 1; if (order > PREALLOC_TB_SIZE - 1) /* The max size of hash table is PREALLOC_TB_SIZE */ order = PREALLOC_TB_SIZE - 1; /* Add the prealloc space to lg */ rcu_read_lock(); list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], pa_inode_list) { spin_lock(&tmp_pa->pa_lock); if (tmp_pa->pa_deleted) { spin_unlock(&tmp_pa->pa_lock); continue; } if (!added && pa->pa_free < tmp_pa->pa_free) { /* Add to the tail of the previous entry */ list_add_tail_rcu(&pa->pa_inode_list, &tmp_pa->pa_inode_list); added = 1; /* * we want to count the total * number of entries in the list */ } spin_unlock(&tmp_pa->pa_lock); lg_prealloc_count++; } if (!added) list_add_tail_rcu(&pa->pa_inode_list, &lg->lg_prealloc_list[order]); rcu_read_unlock(); /* Now trim the list to be not more than 8 elements */ if (lg_prealloc_count > 8) { ext4_mb_discard_lg_preallocations(sb, lg, order, lg_prealloc_count); return; } return ; } /* * release all resource we used in allocation */ static int ext4_mb_release_context(struct ext4_allocation_context *ac) { struct ext4_prealloc_space *pa = ac->ac_pa; if (pa) { if (pa->pa_type == MB_GROUP_PA) { /* see comment in ext4_mb_use_group_pa() */ spin_lock(&pa->pa_lock); pa->pa_pstart += ac->ac_b_ex.fe_len; pa->pa_lstart += ac->ac_b_ex.fe_len; pa->pa_free -= ac->ac_b_ex.fe_len; pa->pa_len -= ac->ac_b_ex.fe_len; spin_unlock(&pa->pa_lock); } } if (ac->alloc_semp) up_read(ac->alloc_semp); if (pa) { /* * We want to add the pa to the right bucket. 
* Remove it from the list and while adding * make sure the list to which we are adding * doesn't grow big. We need to release * alloc_semp before calling ext4_mb_add_n_trim() */ if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) { spin_lock(pa->pa_obj_lock); list_del_rcu(&pa->pa_inode_list); spin_unlock(pa->pa_obj_lock); ext4_mb_add_n_trim(ac); } ext4_mb_put_pa(ac, ac->ac_sb, pa); } if (ac->ac_bitmap_page) page_cache_release(ac->ac_bitmap_page); if (ac->ac_buddy_page) page_cache_release(ac->ac_buddy_page); if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) mutex_unlock(&ac->ac_lg->lg_mutex); ext4_mb_collect_stats(ac); return 0; } static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) { ext4_group_t i, ngroups = ext4_get_groups_count(sb); int ret; int freed = 0; trace_ext4_mb_discard_preallocations(sb, needed); for (i = 0; i < ngroups && needed > 0; i++) { ret = ext4_mb_discard_group_preallocations(sb, i, needed); freed += ret; needed -= ret; } return freed; } /* * Main entry point into mballoc to allocate blocks * it tries to use preallocation first, then falls back * to usual allocation */ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, struct ext4_allocation_request *ar, int *errp) { int freed; struct ext4_allocation_context *ac = NULL; struct ext4_sb_info *sbi; struct super_block *sb; ext4_fsblk_t block = 0; unsigned int inquota = 0; unsigned int reserv_blks = 0; sb = ar->inode->i_sb; sbi = EXT4_SB(sb); trace_ext4_request_blocks(ar); /* * For delayed allocation, we could skip the ENOSPC and * EDQUOT check, as blocks and quotas have been already * reserved when data being copied into pagecache. */ if (EXT4_I(ar->inode)->i_delalloc_reserved_flag) ar->flags |= EXT4_MB_DELALLOC_RESERVED; else { /* Without delayed allocation we need to verify * there is enough free blocks to do block allocation * and verify allocation doesn't exceed the quota limits. 
*/ while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) { /* let others to free the space */ yield(); ar->len = ar->len >> 1; } if (!ar->len) { *errp = -ENOSPC; return 0; } reserv_blks = ar->len; while (ar->len && dquot_alloc_block(ar->inode, ar->len)) { ar->flags |= EXT4_MB_HINT_NOPREALLOC; ar->len--; } inquota = ar->len; if (ar->len == 0) { *errp = -EDQUOT; goto out; } } ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); if (!ac) { ar->len = 0; *errp = -ENOMEM; goto out; } *errp = ext4_mb_initialize_context(ac, ar); if (*errp) { ar->len = 0; goto out; } ac->ac_op = EXT4_MB_HISTORY_PREALLOC; if (!ext4_mb_use_preallocated(ac)) { ac->ac_op = EXT4_MB_HISTORY_ALLOC; ext4_mb_normalize_request(ac, ar); repeat: /* allocate space in core */ *errp = ext4_mb_regular_allocator(ac); if (*errp) goto errout; /* as we've just preallocated more space than * user requested orinally, we store allocated * space in a special descriptor */ if (ac->ac_status == AC_STATUS_FOUND && ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) ext4_mb_new_preallocation(ac); } if (likely(ac->ac_status == AC_STATUS_FOUND)) { *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_blks); if (*errp == -EAGAIN) { /* * drop the reference that we took * in ext4_mb_use_best_found */ ext4_mb_release_context(ac); ac->ac_b_ex.fe_group = 0; ac->ac_b_ex.fe_start = 0; ac->ac_b_ex.fe_len = 0; ac->ac_status = AC_STATUS_CONTINUE; goto repeat; } else if (*errp) errout: ext4_discard_allocated_blocks(ac); else { block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); ar->len = ac->ac_b_ex.fe_len; } } else { freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); if (freed) goto repeat; *errp = -ENOSPC; } if (*errp) { ac->ac_b_ex.fe_len = 0; ar->len = 0; ext4_mb_show_ac(ac); } ext4_mb_release_context(ac); out: if (ac) kmem_cache_free(ext4_ac_cachep, ac); if (inquota && ar->len < inquota) dquot_free_block(ar->inode, inquota - ar->len); if (!ar->len) { if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) /* release all the reserved 
blocks if non delalloc */ percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks); } trace_ext4_allocate_blocks(ar, (unsigned long long)block); return block; } /* * We can merge two free data extents only if the physical blocks * are contiguous, AND the extents were freed by the same transaction, * AND the blocks are associated with the same group. */ static int can_merge(struct ext4_free_data *entry1, struct ext4_free_data *entry2) { if ((entry1->t_tid == entry2->t_tid) && (entry1->group == entry2->group) && ((entry1->start_blk + entry1->count) == entry2->start_blk)) return 1; return 0; } static noinline_for_stack int ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, struct ext4_free_data *new_entry) { ext4_group_t group = e4b->bd_group; ext4_grpblk_t block; struct ext4_free_data *entry; struct ext4_group_info *db = e4b->bd_info; struct super_block *sb = e4b->bd_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); struct rb_node **n = &db->bb_free_root.rb_node, *node; struct rb_node *parent = NULL, *new_node; BUG_ON(!ext4_handle_valid(handle)); BUG_ON(e4b->bd_bitmap_page == NULL); BUG_ON(e4b->bd_buddy_page == NULL); new_node = &new_entry->node; block = new_entry->start_blk; if (!*n) { /* first free block exent. 
We need to protect buddy cache from being freed, * otherwise we'll refresh it from * on-disk bitmap and lose not-yet-available * blocks */ page_cache_get(e4b->bd_buddy_page); page_cache_get(e4b->bd_bitmap_page); } while (*n) { parent = *n; entry = rb_entry(parent, struct ext4_free_data, node); if (block < entry->start_blk) n = &(*n)->rb_left; else if (block >= (entry->start_blk + entry->count)) n = &(*n)->rb_right; else { ext4_grp_locked_error(sb, group, 0, ext4_group_first_block_no(sb, group) + block, "Block already on to-be-freed list"); return 0; } } rb_link_node(new_node, parent, n); rb_insert_color(new_node, &db->bb_free_root); /* Now try to see the extent can be merged to left and right */ node = rb_prev(new_node); if (node) { entry = rb_entry(node, struct ext4_free_data, node); if (can_merge(entry, new_entry)) { new_entry->start_blk = entry->start_blk; new_entry->count += entry->count; rb_erase(node, &(db->bb_free_root)); spin_lock(&sbi->s_md_lock); list_del(&entry->list); spin_unlock(&sbi->s_md_lock); kmem_cache_free(ext4_free_ext_cachep, entry); } } node = rb_next(new_node); if (node) { entry = rb_entry(node, struct ext4_free_data, node); if (can_merge(new_entry, entry)) { new_entry->count += entry->count; rb_erase(node, &(db->bb_free_root)); spin_lock(&sbi->s_md_lock); list_del(&entry->list); spin_unlock(&sbi->s_md_lock); kmem_cache_free(ext4_free_ext_cachep, entry); } } /* Add the extent to transaction's private list */ spin_lock(&sbi->s_md_lock); list_add(&new_entry->list, &handle->h_transaction->t_private_list); spin_unlock(&sbi->s_md_lock); return 0; } /** * ext4_free_blocks() -- Free given blocks and update quota * @handle: handle for this transaction * @inode: inode * @block: start physical block to free * @count: number of blocks to count * @metadata: Are these metadata blocks */ void ext4_free_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t block, unsigned long count, int flags) { struct buffer_head *bitmap_bh = 
NULL; struct super_block *sb = inode->i_sb; struct ext4_allocation_context *ac = NULL; struct ext4_group_desc *gdp; unsigned long freed = 0; unsigned int overflow; ext4_grpblk_t bit; struct buffer_head *gd_bh; ext4_group_t block_group; struct ext4_sb_info *sbi; struct ext4_buddy e4b; int err = 0; int ret; if (bh) { if (block) BUG_ON(block != bh->b_blocknr); else block = bh->b_blocknr; } sbi = EXT4_SB(sb); if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && !ext4_data_block_valid(sbi, block, count)) { ext4_error(sb, "Freeing blocks not in datazone - " "block = %llu, count = %lu", block, count); goto error_return; } ext4_debug("freeing block %llu\n", block); trace_ext4_free_blocks(inode, block, count, flags); if (flags & EXT4_FREE_BLOCKS_FORGET) { struct buffer_head *tbh = bh; int i; BUG_ON(bh && (count > 1)); for (i = 0; i < count; i++) { if (!bh) tbh = sb_find_get_block(inode->i_sb, block + i); ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, inode, tbh, block + i); } } /* * We need to make sure we don't reuse the freed block until * after the transaction is committed, which we can do by * treating the block as metadata, below. We make an * exception if the inode is to be written in writeback mode * since writeback mode has weak data consistency guarantees. */ if (!ext4_should_writeback_data(inode)) flags |= EXT4_FREE_BLOCKS_METADATA; ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); if (ac) { ac->ac_inode = inode; ac->ac_sb = sb; } do_more: overflow = 0; ext4_get_group_no_and_offset(sb, block, &block_group, &bit); /* * Check to see if we are freeing blocks across a group * boundary. 
*/ if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) { overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb); count -= overflow; } bitmap_bh = ext4_read_block_bitmap(sb, block_group); if (!bitmap_bh) { err = -EIO; goto error_return; } gdp = ext4_get_group_desc(sb, block_group, &gd_bh); if (!gdp) { err = -EIO; goto error_return; } if (in_range(ext4_block_bitmap(sb, gdp), block, count) || in_range(ext4_inode_bitmap(sb, gdp), block, count) || in_range(block, ext4_inode_table(sb, gdp), EXT4_SB(sb)->s_itb_per_group) || in_range(block + count - 1, ext4_inode_table(sb, gdp), EXT4_SB(sb)->s_itb_per_group)) { ext4_error(sb, "Freeing blocks in system zone - " "Block = %llu, count = %lu", block, count); /* err = 0. ext4_std_error should be a no op */ goto error_return; } BUFFER_TRACE(bitmap_bh, "getting write access"); err = ext4_journal_get_write_access(handle, bitmap_bh); if (err) goto error_return; /* * We are about to modify some metadata. Call the journal APIs * to unshare ->b_data if a currently-committing transaction is * using it */ BUFFER_TRACE(gd_bh, "get_write_access"); err = ext4_journal_get_write_access(handle, gd_bh); if (err) goto error_return; #ifdef AGGRESSIVE_CHECK { int i; for (i = 0; i < count; i++) BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); } #endif if (ac) { ac->ac_b_ex.fe_group = block_group; ac->ac_b_ex.fe_start = bit; ac->ac_b_ex.fe_len = count; trace_ext4_mballoc_free(ac); } err = ext4_mb_load_buddy(sb, block_group, &e4b); if (err) goto error_return; if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) { struct ext4_free_data *new_entry; /* * blocks being freed are metadata. 
these blocks shouldn't * be used until this transaction is committed */ new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS); new_entry->start_blk = bit; new_entry->group = block_group; new_entry->count = count; new_entry->t_tid = handle->h_transaction->t_tid; ext4_lock_group(sb, block_group); mb_clear_bits(bitmap_bh->b_data, bit, count); ext4_mb_free_metadata(handle, &e4b, new_entry); } else { /* need to update group_info->bb_free and bitmap * with group lock held. generate_buddy look at * them with group lock_held */ ext4_lock_group(sb, block_group); mb_clear_bits(bitmap_bh->b_data, bit, count); mb_free_blocks(inode, &e4b, bit, count); ext4_mb_return_to_preallocation(inode, &e4b, block, count); if (test_opt(sb, DISCARD)) ext4_issue_discard(sb, block_group, bit, count); } ret = ext4_free_blks_count(sb, gdp) + count; ext4_free_blks_set(sb, gdp, ret); gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp); ext4_unlock_group(sb, block_group); percpu_counter_add(&sbi->s_freeblocks_counter, count); if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, block_group); atomic_add(count, &sbi->s_flex_groups[flex_group].free_blocks); } ext4_mb_unload_buddy(&e4b); freed += count; /* We dirtied the bitmap block */ BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); /* And the group descriptor block */ BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); if (!err) err = ret; if (overflow && !err) { block += count; count = overflow; put_bh(bitmap_bh); goto do_more; } ext4_mark_super_dirty(sb); error_return: if (freed) dquot_free_block(inode, freed); brelse(bitmap_bh); ext4_std_error(sb, err); if (ac) kmem_cache_free(ext4_ac_cachep, ac); return; }
gpl-2.0
kakaroto/gst-plugins-bad
sys/decklink/win/DeckLinkAPIDispatch.cpp
76
1731
/* -LICENSE-START- ** Copyright (c) 2011 Blackmagic Design ** ** Permission is hereby granted, free of charge, to any person or organization ** obtaining a copy of the software and accompanying documentation covered by ** this license (the "Software") to use, reproduce, display, distribute, ** execute, and transmit the Software, and to prepare derivative works of the ** Software, and to permit third-parties to whom the Software is furnished to ** do so, all subject to the following: ** ** The copyright notices in the Software and this entire statement, including ** the above license grant, this restriction and the following disclaimer, ** must be included in all copies of the Software, in whole or in part, and ** all derivative works of the Software, unless such copies or derivative ** works are solely in the form of machine-executable object code generated by ** a source language processor. ** ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ** FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT ** SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE ** FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ** ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ** DEALINGS IN THE SOFTWARE. ** -LICENSE-END- **/ #include "DeckLinkAPI.h" extern "C" { IDeckLinkIterator* CreateDeckLinkIteratorInstance (void) { IDeckLinkIterator *deckLinkIterator = NULL; HRESULT res = CoCreateInstance(CLSID_CDeckLinkIterator, NULL, CLSCTX_ALL, IID_IDeckLinkIterator, (void**)&deckLinkIterator); return deckLinkIterator; } };
gpl-2.0
rjmccabe3701/LinuxViewPageTables
drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
76
39921
/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 only, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. * * GPL HEADER END */ /* * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * * Copyright (c) 2010, 2012, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. * * lustre/ldlm/ldlm_resource.c * * Author: Phil Schwan <phil@clusterfs.com> * Author: Peter Braam <braam@clusterfs.com> */ #define DEBUG_SUBSYSTEM S_LDLM # include <lustre_dlm.h> #include <lustre_fid.h> #include <obd_class.h> #include "ldlm_internal.h" struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab; int ldlm_srv_namespace_nr = 0; int ldlm_cli_namespace_nr = 0; struct mutex ldlm_srv_namespace_lock; LIST_HEAD(ldlm_srv_namespace_list); struct mutex ldlm_cli_namespace_lock; /* Client Namespaces that have active resources in them. 
* Once all resources go away, ldlm_poold moves such namespaces to the * inactive list */ LIST_HEAD(ldlm_cli_active_namespace_list); /* Client namespaces that don't have any locks in them */ LIST_HEAD(ldlm_cli_inactive_namespace_list); struct proc_dir_entry *ldlm_type_proc_dir = NULL; struct proc_dir_entry *ldlm_ns_proc_dir = NULL; struct proc_dir_entry *ldlm_svc_proc_dir = NULL; extern unsigned int ldlm_cancel_unused_locks_before_replay; /* during debug dump certain amount of granted locks for one resource to avoid * DDOS. */ unsigned int ldlm_dump_granted_max = 256; #ifdef LPROCFS static ssize_t lprocfs_wr_dump_ns(struct file *file, const char *buffer, size_t count, loff_t *off) { ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE); ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE); return count; } LPROC_SEQ_FOPS_WR_ONLY(ldlm, dump_ns); LPROC_SEQ_FOPS_RW_TYPE(ldlm_rw, uint); LPROC_SEQ_FOPS_RO_TYPE(ldlm, uint); int ldlm_proc_setup(void) { int rc; struct lprocfs_vars list[] = { { "dump_namespaces", &ldlm_dump_ns_fops, 0, 0222 }, { "dump_granted_max", &ldlm_rw_uint_fops, &ldlm_dump_granted_max }, { "cancel_unused_locks_before_replay", &ldlm_rw_uint_fops, &ldlm_cancel_unused_locks_before_replay }, { NULL }}; LASSERT(ldlm_ns_proc_dir == NULL); ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME, proc_lustre_root, NULL, NULL); if (IS_ERR(ldlm_type_proc_dir)) { CERROR("LProcFS failed in ldlm-init\n"); rc = PTR_ERR(ldlm_type_proc_dir); GOTO(err, rc); } ldlm_ns_proc_dir = lprocfs_register("namespaces", ldlm_type_proc_dir, NULL, NULL); if (IS_ERR(ldlm_ns_proc_dir)) { CERROR("LProcFS failed in ldlm-init\n"); rc = PTR_ERR(ldlm_ns_proc_dir); GOTO(err_type, rc); } ldlm_svc_proc_dir = lprocfs_register("services", ldlm_type_proc_dir, NULL, NULL); if (IS_ERR(ldlm_svc_proc_dir)) { CERROR("LProcFS failed in ldlm-init\n"); rc = PTR_ERR(ldlm_svc_proc_dir); GOTO(err_ns, rc); } rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL); return 0; err_ns: 
lprocfs_remove(&ldlm_ns_proc_dir); err_type: lprocfs_remove(&ldlm_type_proc_dir); err: ldlm_svc_proc_dir = NULL; ldlm_type_proc_dir = NULL; ldlm_ns_proc_dir = NULL; return rc; } void ldlm_proc_cleanup(void) { if (ldlm_svc_proc_dir) lprocfs_remove(&ldlm_svc_proc_dir); if (ldlm_ns_proc_dir) lprocfs_remove(&ldlm_ns_proc_dir); if (ldlm_type_proc_dir) lprocfs_remove(&ldlm_type_proc_dir); ldlm_svc_proc_dir = NULL; ldlm_type_proc_dir = NULL; ldlm_ns_proc_dir = NULL; } static int lprocfs_ns_resources_seq_show(struct seq_file *m, void *v) { struct ldlm_namespace *ns = m->private; __u64 res = 0; struct cfs_hash_bd bd; int i; /* result is not strictly consistant */ cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i) res += cfs_hash_bd_count_get(&bd); return lprocfs_rd_u64(m, &res); } LPROC_SEQ_FOPS_RO(lprocfs_ns_resources); static int lprocfs_ns_locks_seq_show(struct seq_file *m, void *v) { struct ldlm_namespace *ns = m->private; __u64 locks; locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS, LPROCFS_FIELDS_FLAGS_SUM); return lprocfs_rd_u64(m, &locks); } LPROC_SEQ_FOPS_RO(lprocfs_ns_locks); static int lprocfs_lru_size_seq_show(struct seq_file *m, void *v) { struct ldlm_namespace *ns = m->private; __u32 *nr = &ns->ns_max_unused; if (ns_connect_lru_resize(ns)) nr = &ns->ns_nr_unused; return lprocfs_rd_uint(m, nr); } static ssize_t lprocfs_lru_size_seq_write(struct file *file, const char *buffer, size_t count, loff_t *off) { struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private; char dummy[MAX_STRING_SIZE + 1], *end; unsigned long tmp; int lru_resize; dummy[MAX_STRING_SIZE] = '\0'; if (copy_from_user(dummy, buffer, MAX_STRING_SIZE)) return -EFAULT; if (strncmp(dummy, "clear", 5) == 0) { CDEBUG(D_DLMTRACE, "dropping all unused locks from namespace %s\n", ldlm_ns_name(ns)); if (ns_connect_lru_resize(ns)) { int canceled, unused = ns->ns_nr_unused; /* Try to cancel all @ns_nr_unused locks. 
*/ canceled = ldlm_cancel_lru(ns, unused, 0, LDLM_CANCEL_PASSED); if (canceled < unused) { CDEBUG(D_DLMTRACE, "not all requested locks are canceled, " "requested: %d, canceled: %d\n", unused, canceled); return -EINVAL; } } else { tmp = ns->ns_max_unused; ns->ns_max_unused = 0; ldlm_cancel_lru(ns, 0, 0, LDLM_CANCEL_PASSED); ns->ns_max_unused = tmp; } return count; } tmp = simple_strtoul(dummy, &end, 0); if (dummy == end) { CERROR("invalid value written\n"); return -EINVAL; } lru_resize = (tmp == 0); if (ns_connect_lru_resize(ns)) { if (!lru_resize) ns->ns_max_unused = (unsigned int)tmp; if (tmp > ns->ns_nr_unused) tmp = ns->ns_nr_unused; tmp = ns->ns_nr_unused - tmp; CDEBUG(D_DLMTRACE, "changing namespace %s unused locks from %u to %u\n", ldlm_ns_name(ns), ns->ns_nr_unused, (unsigned int)tmp); ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_CANCEL_PASSED); if (!lru_resize) { CDEBUG(D_DLMTRACE, "disable lru_resize for namespace %s\n", ldlm_ns_name(ns)); ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE; } } else { CDEBUG(D_DLMTRACE, "changing namespace %s max_unused from %u to %u\n", ldlm_ns_name(ns), ns->ns_max_unused, (unsigned int)tmp); ns->ns_max_unused = (unsigned int)tmp; ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED); /* Make sure that LRU resize was originally supported before * turning it on here. 
*/ if (lru_resize && (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) { CDEBUG(D_DLMTRACE, "enable lru_resize for namespace %s\n", ldlm_ns_name(ns)); ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE; } } return count; } LPROC_SEQ_FOPS(lprocfs_lru_size); static int lprocfs_elc_seq_show(struct seq_file *m, void *v) { struct ldlm_namespace *ns = m->private; unsigned int supp = ns_connect_cancelset(ns); return lprocfs_rd_uint(m, &supp); } static ssize_t lprocfs_elc_seq_write(struct file *file, const char *buffer, size_t count, loff_t *off) { struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private; unsigned int supp = -1; int rc; rc = lprocfs_wr_uint(file, buffer, count, &supp); if (rc < 0) return rc; if (supp == 0) ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET; else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET) ns->ns_connect_flags |= OBD_CONNECT_CANCELSET; return count; } LPROC_SEQ_FOPS(lprocfs_elc); void ldlm_namespace_proc_unregister(struct ldlm_namespace *ns) { if (ns->ns_proc_dir_entry == NULL) CERROR("dlm namespace %s has no procfs dir?\n", ldlm_ns_name(ns)); else lprocfs_remove(&ns->ns_proc_dir_entry); if (ns->ns_stats != NULL) lprocfs_free_stats(&ns->ns_stats); } #define LDLM_NS_ADD_VAR(name, var, ops) \ do { \ snprintf(lock_name, MAX_STRING_SIZE, name); \ lock_vars[0].data = var; \ lock_vars[0].fops = ops; \ lprocfs_add_vars(ns_pde, lock_vars, 0); \ } while (0) int ldlm_namespace_proc_register(struct ldlm_namespace *ns) { struct lprocfs_vars lock_vars[2]; char lock_name[MAX_STRING_SIZE + 1]; struct proc_dir_entry *ns_pde; LASSERT(ns != NULL); LASSERT(ns->ns_rs_hash != NULL); if (ns->ns_proc_dir_entry != NULL) { ns_pde = ns->ns_proc_dir_entry; } else { ns_pde = proc_mkdir(ldlm_ns_name(ns), ldlm_ns_proc_dir); if (ns_pde == NULL) return -ENOMEM; ns->ns_proc_dir_entry = ns_pde; } ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0); if (ns->ns_stats == NULL) return -ENOMEM; lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS, 
LPROCFS_CNTR_AVGMINMAX, "locks", "locks"); lock_name[MAX_STRING_SIZE] = '\0'; memset(lock_vars, 0, sizeof(lock_vars)); lock_vars[0].name = lock_name; LDLM_NS_ADD_VAR("resource_count", ns, &lprocfs_ns_resources_fops); LDLM_NS_ADD_VAR("lock_count", ns, &lprocfs_ns_locks_fops); if (ns_is_client(ns)) { LDLM_NS_ADD_VAR("lock_unused_count", &ns->ns_nr_unused, &ldlm_uint_fops); LDLM_NS_ADD_VAR("lru_size", ns, &lprocfs_lru_size_fops); LDLM_NS_ADD_VAR("lru_max_age", &ns->ns_max_age, &ldlm_rw_uint_fops); LDLM_NS_ADD_VAR("early_lock_cancel", ns, &lprocfs_elc_fops); } else { LDLM_NS_ADD_VAR("ctime_age_limit", &ns->ns_ctime_age_limit, &ldlm_rw_uint_fops); LDLM_NS_ADD_VAR("lock_timeouts", &ns->ns_timeouts, &ldlm_uint_fops); LDLM_NS_ADD_VAR("max_nolock_bytes", &ns->ns_max_nolock_size, &ldlm_rw_uint_fops); LDLM_NS_ADD_VAR("contention_seconds", &ns->ns_contention_time, &ldlm_rw_uint_fops); LDLM_NS_ADD_VAR("contended_locks", &ns->ns_contended_locks, &ldlm_rw_uint_fops); LDLM_NS_ADD_VAR("max_parallel_ast", &ns->ns_max_parallel_ast, &ldlm_rw_uint_fops); } return 0; } #undef MAX_STRING_SIZE #else /* LPROCFS */ #define ldlm_namespace_proc_unregister(ns) ({;}) #define ldlm_namespace_proc_register(ns) ({0;}) #endif /* LPROCFS */ static unsigned ldlm_res_hop_hash(struct cfs_hash *hs, const void *key, unsigned mask) { const struct ldlm_res_id *id = key; unsigned val = 0; unsigned i; for (i = 0; i < RES_NAME_SIZE; i++) val += id->name[i]; return val & mask; } static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs, const void *key, unsigned mask) { const struct ldlm_res_id *id = key; struct lu_fid fid; __u32 hash; __u32 val; fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF]; fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF]; fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32); hash = fid_flatten32(&fid); hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */ if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) { val = id->name[LUSTRE_RES_ID_HSH_OFF]; hash += (val >> 5) + (val << 11); 
} else { val = fid_oid(&fid); } hash = cfs_hash_long(hash, hs->hs_bkt_bits); /* give me another random factor */ hash -= cfs_hash_long((unsigned long)hs, val % 11 + 3); hash <<= hs->hs_cur_bits - hs->hs_bkt_bits; hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1); return hash & mask; } static void *ldlm_res_hop_key(struct hlist_node *hnode) { struct ldlm_resource *res; res = hlist_entry(hnode, struct ldlm_resource, lr_hash); return &res->lr_name; } static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode) { struct ldlm_resource *res; res = hlist_entry(hnode, struct ldlm_resource, lr_hash); return ldlm_res_eq((const struct ldlm_res_id *)key, (const struct ldlm_res_id *)&res->lr_name); } static void *ldlm_res_hop_object(struct hlist_node *hnode) { return hlist_entry(hnode, struct ldlm_resource, lr_hash); } static void ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnode) { struct ldlm_resource *res; res = hlist_entry(hnode, struct ldlm_resource, lr_hash); ldlm_resource_getref(res); } static void ldlm_res_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode) { struct ldlm_resource *res; res = hlist_entry(hnode, struct ldlm_resource, lr_hash); /* cfs_hash_for_each_nolock is the only chance we call it */ ldlm_resource_putref_locked(res); } static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode) { struct ldlm_resource *res; res = hlist_entry(hnode, struct ldlm_resource, lr_hash); ldlm_resource_putref(res); } cfs_hash_ops_t ldlm_ns_hash_ops = { .hs_hash = ldlm_res_hop_hash, .hs_key = ldlm_res_hop_key, .hs_keycmp = ldlm_res_hop_keycmp, .hs_keycpy = NULL, .hs_object = ldlm_res_hop_object, .hs_get = ldlm_res_hop_get_locked, .hs_put_locked = ldlm_res_hop_put_locked, .hs_put = ldlm_res_hop_put }; cfs_hash_ops_t ldlm_ns_fid_hash_ops = { .hs_hash = ldlm_res_hop_fid_hash, .hs_key = ldlm_res_hop_key, .hs_keycmp = ldlm_res_hop_keycmp, .hs_keycpy = NULL, .hs_object = ldlm_res_hop_object, .hs_get = 
ldlm_res_hop_get_locked, .hs_put_locked = ldlm_res_hop_put_locked, .hs_put = ldlm_res_hop_put }; typedef struct { ldlm_ns_type_t nsd_type; /** hash bucket bits */ unsigned nsd_bkt_bits; /** hash bits */ unsigned nsd_all_bits; /** hash operations */ cfs_hash_ops_t *nsd_hops; } ldlm_ns_hash_def_t; ldlm_ns_hash_def_t ldlm_ns_hash_defs[] = { { .nsd_type = LDLM_NS_TYPE_MDC, .nsd_bkt_bits = 11, .nsd_all_bits = 16, .nsd_hops = &ldlm_ns_fid_hash_ops, }, { .nsd_type = LDLM_NS_TYPE_MDT, .nsd_bkt_bits = 14, .nsd_all_bits = 21, .nsd_hops = &ldlm_ns_fid_hash_ops, }, { .nsd_type = LDLM_NS_TYPE_OSC, .nsd_bkt_bits = 8, .nsd_all_bits = 12, .nsd_hops = &ldlm_ns_hash_ops, }, { .nsd_type = LDLM_NS_TYPE_OST, .nsd_bkt_bits = 11, .nsd_all_bits = 17, .nsd_hops = &ldlm_ns_hash_ops, }, { .nsd_type = LDLM_NS_TYPE_MGC, .nsd_bkt_bits = 4, .nsd_all_bits = 4, .nsd_hops = &ldlm_ns_hash_ops, }, { .nsd_type = LDLM_NS_TYPE_MGT, .nsd_bkt_bits = 4, .nsd_all_bits = 4, .nsd_hops = &ldlm_ns_hash_ops, }, { .nsd_type = LDLM_NS_TYPE_UNKNOWN, }, }; /** * Create and initialize new empty namespace. 
 */
struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
					  ldlm_side_t client,
					  ldlm_appetite_t apt,
					  ldlm_ns_type_t ns_type)
{
	struct ldlm_namespace *ns = NULL;
	struct ldlm_ns_bucket *nsb;
	ldlm_ns_hash_def_t *nsd;
	struct cfs_hash_bd bd;
	int idx;
	int rc;

	LASSERT(obd != NULL);

	rc = ldlm_get_ref();
	if (rc) {
		CERROR("ldlm_get_ref failed: %d\n", rc);
		return NULL;
	}

	/* Pick hash sizing/ops for this namespace type; the defs table is
	 * terminated by an LDLM_NS_TYPE_UNKNOWN sentinel entry. */
	for (idx = 0;;idx++) {
		nsd = &ldlm_ns_hash_defs[idx];
		if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
			CERROR("Unknown type %d for ns %s\n", ns_type, name);
			GOTO(out_ref, NULL);
		}

		if (nsd->nsd_type == ns_type)
			break;
	}

	OBD_ALLOC_PTR(ns);
	if (!ns)
		GOTO(out_ref, NULL);

	ns->ns_rs_hash = cfs_hash_create(name,
					 nsd->nsd_all_bits, nsd->nsd_all_bits,
					 nsd->nsd_bkt_bits, sizeof(*nsb),
					 CFS_HASH_MIN_THETA,
					 CFS_HASH_MAX_THETA,
					 nsd->nsd_hops,
					 CFS_HASH_DEPTH |
					 CFS_HASH_BIGNAME |
					 CFS_HASH_SPIN_BKTLOCK |
					 CFS_HASH_NO_ITEMREF);
	if (ns->ns_rs_hash == NULL)
		GOTO(out_ns, NULL);

	/* Each hash bucket carries an ldlm_ns_bucket in its extra space;
	 * initialize its adaptive-timeout estimate and back-pointer. */
	cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
		nsb = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
		at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
		nsb->nsb_namespace = ns;
	}

	ns->ns_obd      = obd;
	ns->ns_appetite = apt;
	ns->ns_client   = client;

	INIT_LIST_HEAD(&ns->ns_list_chain);
	INIT_LIST_HEAD(&ns->ns_unused_list);
	spin_lock_init(&ns->ns_lock);
	atomic_set(&ns->ns_bref, 0);
	init_waitqueue_head(&ns->ns_waitq);

	/* Tunables start at their compile-time defaults. */
	ns->ns_max_nolock_size    = NS_DEFAULT_MAX_NOLOCK_BYTES;
	ns->ns_contention_time    = NS_DEFAULT_CONTENTION_SECONDS;
	ns->ns_contended_locks    = NS_DEFAULT_CONTENDED_LOCKS;
	ns->ns_max_parallel_ast   = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
	ns->ns_nr_unused          = 0;
	ns->ns_max_unused         = LDLM_DEFAULT_LRU_SIZE;
	ns->ns_max_age            = LDLM_DEFAULT_MAX_ALIVE;
	ns->ns_ctime_age_limit    = LDLM_CTIME_AGE_LIMIT;
	ns->ns_timeouts           = 0;
	ns->ns_orig_connect_flags = 0;
	ns->ns_connect_flags      = 0;
	ns->ns_stopping           = 0;

	rc = ldlm_namespace_proc_register(ns);
	if (rc != 0) {
		CERROR("Can't initialize ns proc, rc %d\n", rc);
		GOTO(out_hash, rc);
	}

	idx = ldlm_namespace_nr_read(client);
	rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
	if (rc) {
		CERROR("Can't initialize lock pool, rc %d\n", rc);
		GOTO(out_proc, rc);
	}

	ldlm_namespace_register(ns, client);
	return ns;
	/* Error unwinding in reverse order of construction. */
out_proc:
	ldlm_namespace_proc_unregister(ns);
	ldlm_namespace_cleanup(ns, 0);
out_hash:
	cfs_hash_putref(ns->ns_rs_hash);
out_ns:
	OBD_FREE_PTR(ns);
out_ref:
	ldlm_put_ref();
	return NULL;
}
EXPORT_SYMBOL(ldlm_namespace_new);

extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);

/**
 * Cancel and destroy all locks on a resource.
 *
 * If flags contains FL_LOCAL_ONLY, don't try to tell the server, just
 * clean up. This is currently only used for recovery, and we make
 * certain assumptions as a result--notably, that we shouldn't cancel
 * locks with refs.
 */
static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
			     __u64 flags)
{
	struct list_head *tmp;
	int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
	bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);

	do {
		struct ldlm_lock *lock = NULL;

		/* First, we look for non-cleaned-yet lock
		 * all cleaned locks are marked by CLEANED flag. */
		lock_res(res);
		list_for_each(tmp, q) {
			lock = list_entry(tmp, struct ldlm_lock,
					  l_res_link);
			if (lock->l_flags & LDLM_FL_CLEANED) {
				lock = NULL;
				continue;
			}
			LDLM_LOCK_GET(lock);
			lock->l_flags |= LDLM_FL_CLEANED;
			break;
		}

		/* No uncleaned locks left on this queue: done. */
		if (lock == NULL) {
			unlock_res(res);
			break;
		}

		/* Set CBPENDING so nothing in the cancellation path
		 * can match this lock. */
		lock->l_flags |= LDLM_FL_CBPENDING;
		lock->l_flags |= LDLM_FL_FAILED;
		lock->l_flags |= flags;

		/* ... without sending a CANCEL message for local_only. */
		if (local_only)
			lock->l_flags |= LDLM_FL_LOCAL_ONLY;

		if (local_only && (lock->l_readers || lock->l_writers)) {
			/* This is a little bit gross, but much better than the
			 * alternative: pretend that we got a blocking AST from
			 * the server, so that when the lock is decref'd, it
			 * will go away ...
			 */
			unlock_res(res);
			LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
			if (lock->l_completion_ast)
				lock->l_completion_ast(lock, 0, NULL);
			LDLM_LOCK_RELEASE(lock);
			continue;
		}

		if (client) {
			struct lustre_handle lockh;

			unlock_res(res);
			ldlm_lock2handle(lock, &lockh);
			rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
			if (rc)
				CERROR("ldlm_cli_cancel: %d\n", rc);
		} else {
			/* Server side: just unlink and destroy locally. */
			ldlm_resource_unlink_lock(lock);
			unlock_res(res);
			LDLM_DEBUG(lock, "Freeing a lock still held by a "
				   "client node");
			ldlm_lock_destroy(lock);
		}
		LDLM_LOCK_RELEASE(lock);
	} while (1);
}

/* cfs_hash iterator: clean all three lock queues of one resource. */
static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			       struct hlist_node *hnode, void *arg)
{
	struct ldlm_resource *res = cfs_hash_object(hs, hnode);
	__u64 flags = *(__u64 *)arg;

	cleanup_resource(res, &res->lr_granted, flags);
	cleanup_resource(res, &res->lr_converting, flags);
	cleanup_resource(res, &res->lr_waiting, flags);

	return 0;
}

/* cfs_hash iterator: report resources whose refcount survived cleanup. */
static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
				  struct hlist_node *hnode, void *arg)
{
	struct ldlm_resource *res = cfs_hash_object(hs, hnode);

	lock_res(res);
	CERROR("Namespace %s resource refcount nonzero "
	       "(%d) after lock cleanup; forcing "
	       "cleanup.\n",
	       ldlm_ns_name(ldlm_res_to_ns(res)),
	       atomic_read(&res->lr_refcount) - 1);
	CERROR("Resource: %p ("LPU64"/"LPU64"/"LPU64"/"
	       LPU64") (rc: %d)\n", res,
	       res->lr_name.name[0], res->lr_name.name[1],
	       res->lr_name.name[2], res->lr_name.name[3],
	       atomic_read(&res->lr_refcount) - 1);
	ldlm_resource_dump(D_ERROR, res);
	unlock_res(res);
	return 0;
}

/**
 * Cancel and destroy all locks in the namespace.
 *
 * Typically used during evictions when server notified client that it was
 * evicted and all of its state needs to be destroyed.
 * Also used during shutdown.
 */
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
{
	if (ns == NULL) {
		CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
		return ELDLM_OK;
	}

	/* First pass cancels/destroys locks, second pass complains about
	 * any resource still referenced afterwards. */
	cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean, &flags);
	cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain, NULL);
	return ELDLM_OK;
}
EXPORT_SYMBOL(ldlm_namespace_cleanup);

/**
 * Attempts to free namespace.
 *
 * Only used when namespace goes away, like during an unmount.
 */
static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
{
	/* At shutdown time, don't call the cancellation callback */
	ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);

	if (atomic_read(&ns->ns_bref) > 0) {
		struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
		int rc;

		CDEBUG(D_DLMTRACE,
		       "dlm namespace %s free waiting on refcount %d\n",
		       ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
force_wait:
		/* Forced frees only wait a bounded time per attempt. */
		if (force)
			lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);

		rc = l_wait_event(ns->ns_waitq,
				  atomic_read(&ns->ns_bref) == 0, &lwi);

		/* Forced cleanups should be able to reclaim all references,
		 * so it's safe to wait forever... we can't leak locks... */
		if (force && rc == -ETIMEDOUT) {
			LCONSOLE_ERROR("Forced cleanup waiting for %s "
				       "namespace with %d resources in use, "
				       "(rc=%d)\n", ldlm_ns_name(ns),
				       atomic_read(&ns->ns_bref), rc);
			GOTO(force_wait, rc);
		}

		if (atomic_read(&ns->ns_bref)) {
			LCONSOLE_ERROR("Cleanup waiting for %s namespace "
				       "with %d resources in use, (rc=%d)\n",
				       ldlm_ns_name(ns),
				       atomic_read(&ns->ns_bref), rc);
			return ELDLM_NAMESPACE_EXISTS;
		}
		CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
		       ldlm_ns_name(ns));
	}

	return ELDLM_OK;
}

/**
 * Performs various cleanups for passed \a ns to make it drop refc and be
 * ready for freeing. Waits for refc == 0.
 *
 * The following is done:
 * (0) Unregister \a ns from its list to make inaccessible for potential
 * users like pools thread and others;
 * (1) Clear all locks in \a ns.
 */
void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
			       struct obd_import *imp,
			       int force)
{
	int rc;

	if (!ns) {
		return;
	}

	spin_lock(&ns->ns_lock);
	ns->ns_stopping = 1;
	spin_unlock(&ns->ns_lock);

	/*
	 * Can fail with -EINTR when force == 0 in which case try harder.
	 */
	rc = __ldlm_namespace_free(ns, force);
	if (rc != ELDLM_OK) {
		if (imp) {
			ptlrpc_disconnect_import(imp, 0);
			ptlrpc_invalidate_import(imp);
		}

		/*
		 * With all requests dropped and the import inactive
		 * we are guaranteed all reference will be dropped.
		 */
		rc = __ldlm_namespace_free(ns, 1);
		LASSERT(rc == 0);
	}
}

/**
 * Performs freeing memory structures related to \a ns. This is only done
 * when ldlm_namespce_free_prior() successfully removed all resources
 * referencing \a ns and its refc == 0.
 */
void ldlm_namespace_free_post(struct ldlm_namespace *ns)
{
	if (!ns) {
		return;
	}

	/* Make sure that nobody can find this ns in its list. */
	ldlm_namespace_unregister(ns, ns->ns_client);
	/* Fini pool _before_ parent proc dir is removed. This is important as
	 * ldlm_pool_fini() removes own proc dir which is child to @dir.
	 * Removing it after @dir may cause oops. */
	ldlm_pool_fini(&ns->ns_pool);

	ldlm_namespace_proc_unregister(ns);
	cfs_hash_putref(ns->ns_rs_hash);
	/* Namespace \a ns should be not on list at this time, otherwise
	 * this will cause issues related to using freed \a ns in poold
	 * thread. */
	LASSERT(list_empty(&ns->ns_list_chain));
	OBD_FREE_PTR(ns);
	ldlm_put_ref();
}

/**
 * Cleanup the resource, and free namespace.
 * bug 12864:
 * Deadlock issue:
 * proc1: destroy import
 *        class_disconnect_export(grab cl_sem) ->
 *              -> ldlm_namespace_free ->
 *              -> lprocfs_remove(grab _lprocfs_lock).
 * proc2: read proc info
 *        lprocfs_fops_read(grab _lprocfs_lock) ->
 *              -> osc_rd_active, etc(grab cl_sem).
 *
 * So that I have to split the ldlm_namespace_free into two parts - the first
 * part ldlm_namespace_free_prior is used to cleanup the resource which is
 * being used; the 2nd part ldlm_namespace_free_post is used to unregister the
 * lprocfs entries, and then free memory. It will be called w/o cli->cl_sem
 * held.
 */
void ldlm_namespace_free(struct ldlm_namespace *ns,
			 struct obd_import *imp,
			 int force)
{
	ldlm_namespace_free_prior(ns, imp, force);
	ldlm_namespace_free_post(ns);
}
EXPORT_SYMBOL(ldlm_namespace_free);

/* Take a busy reference on the namespace (counted in ns_bref). */
void ldlm_namespace_get(struct ldlm_namespace *ns)
{
	atomic_inc(&ns->ns_bref);
}
EXPORT_SYMBOL(ldlm_namespace_get);

/* This is only for callers that care about refcount */
int ldlm_namespace_get_return(struct ldlm_namespace *ns)
{
	return atomic_inc_return(&ns->ns_bref);
}

/* Drop a busy reference; wake waiters (e.g. __ldlm_namespace_free) when
 * the count reaches zero. */
void ldlm_namespace_put(struct ldlm_namespace *ns)
{
	if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
		wake_up(&ns->ns_waitq);
		spin_unlock(&ns->ns_lock);
	}
}
EXPORT_SYMBOL(ldlm_namespace_put);

/** Register \a ns in the list of namespaces */
void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
{
	mutex_lock(ldlm_namespace_lock(client));
	LASSERT(list_empty(&ns->ns_list_chain));
	/* New namespaces start on the inactive list. */
	list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
	ldlm_namespace_nr_inc(client);
	mutex_unlock(ldlm_namespace_lock(client));
}

/** Unregister \a ns from the list of namespaces. */
void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
{
	mutex_lock(ldlm_namespace_lock(client));
	LASSERT(!list_empty(&ns->ns_list_chain));
	/* Some asserts and possibly other parts of the code are still
	 * using list_empty(&ns->ns_list_chain). This is why it is
	 * important to use list_del_init() here. */
	list_del_init(&ns->ns_list_chain);
	ldlm_namespace_nr_dec(client);
	mutex_unlock(ldlm_namespace_lock(client));
}

/** Should be called with ldlm_namespace_lock(client) taken. */
void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
					  ldlm_side_t client)
{
	LASSERT(!list_empty(&ns->ns_list_chain));
	LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
	list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
}

/** Should be called with ldlm_namespace_lock(client) taken. */
void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
					    ldlm_side_t client)
{
	LASSERT(!list_empty(&ns->ns_list_chain));
	LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
	list_move_tail(&ns->ns_list_chain,
		       ldlm_namespace_inactive_list(client));
}

/** Should be called with ldlm_namespace_lock(client) taken. */
struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
{
	LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
	LASSERT(!list_empty(ldlm_namespace_list(client)));
	return container_of(ldlm_namespace_list(client)->next,
			    struct ldlm_namespace, ns_list_chain);
}

/** Create and initialize new resource. */
static struct ldlm_resource *ldlm_resource_new(void)
{
	struct ldlm_resource *res;
	int idx;

	OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, __GFP_IO);
	if (res == NULL)
		return NULL;

	INIT_LIST_HEAD(&res->lr_granted);
	INIT_LIST_HEAD(&res->lr_converting);
	INIT_LIST_HEAD(&res->lr_waiting);

	/* Initialize interval trees for each lock mode. */
	for (idx = 0; idx < LCK_MODE_NUM; idx++) {
		res->lr_itree[idx].lit_size = 0;
		res->lr_itree[idx].lit_mode = 1 << idx;
		res->lr_itree[idx].lit_root = NULL;
	}

	atomic_set(&res->lr_refcount, 1);
	spin_lock_init(&res->lr_lock);
	lu_ref_init(&res->lr_reference);

	/* The creator of the resource must unlock the mutex after LVB
	 * initialization. */
	mutex_init(&res->lr_lvb_mutex);
	mutex_lock(&res->lr_lvb_mutex);

	return res;
}

/**
 * Return a reference to resource with given name, creating it if necessary.
 * Args: namespace with ns_lock unlocked
 * Locks: takes and releases NS hash-lock and res->lr_lock
 * Returns: referenced, unlocked ldlm_resource or NULL
 */
struct ldlm_resource *
ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
		  const struct ldlm_res_id *name, ldlm_type_t type,
		  int create)
{
	struct hlist_node *hnode;
	struct ldlm_resource *res;
	struct cfs_hash_bd bd;
	__u64 version;
	int ns_refcount = 0;

	LASSERT(ns != NULL);
	LASSERT(parent == NULL);
	LASSERT(ns->ns_rs_hash != NULL);
	LASSERT(name->name[0] != 0);

	/* Fast path: look the resource up under the bucket lock. */
	cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
	hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
	if (hnode != NULL) {
		cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
		res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
		/* Synchronize with regard to resource creation. */
		if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
			mutex_lock(&res->lr_lvb_mutex);
			mutex_unlock(&res->lr_lvb_mutex);
		}

		/* Negative lr_lvb_len records an earlier lvbo_init failure. */
		if (unlikely(res->lr_lvb_len < 0)) {
			ldlm_resource_putref(res);
			res = NULL;
		}
		return res;
	}

	/* Remember the bucket version so we can detect concurrent inserts
	 * that happen while the lock is dropped. */
	version = cfs_hash_bd_version_get(&bd);
	cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);

	if (create == 0)
		return NULL;

	LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
		 "type: %d\n", type);
	res = ldlm_resource_new();
	if (!res)
		return NULL;

	res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
	res->lr_name = *name;
	res->lr_type = type;
	res->lr_most_restr = LCK_NL;

	cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
	/* If the bucket version changed, somebody may have inserted the
	 * same name concurrently — look it up again. */
	hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
		cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);

	if (hnode != NULL) {
		/* Someone won the race and already added the resource. */
		cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
		/* Clean lu_ref for failed resource. */
		lu_ref_fini(&res->lr_reference);
		/* We have taken lr_lvb_mutex. Drop it. */
		mutex_unlock(&res->lr_lvb_mutex);
		OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));

		res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
		/* Synchronize with regard to resource creation. */
		if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
			mutex_lock(&res->lr_lvb_mutex);
			mutex_unlock(&res->lr_lvb_mutex);
		}

		if (unlikely(res->lr_lvb_len < 0)) {
			ldlm_resource_putref(res);
			res = NULL;
		}
		return res;
	}
	/* We won! Let's add the resource. */
	cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
	if (cfs_hash_bd_count_get(&bd) == 1)
		ns_refcount = ldlm_namespace_get_return(ns);

	cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
	if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
		int rc;

		OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
		rc = ns->ns_lvbo->lvbo_init(res);
		if (rc < 0) {
			CERROR("%s: lvbo_init failed for resource "LPX64":"
			       LPX64": rc = %d\n", ns->ns_obd->obd_name,
			       name->name[0], name->name[1], rc);
			if (res->lr_lvb_data) {
				OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
				res->lr_lvb_data = NULL;
			}
			/* Record the failure so later lookups can see it. */
			res->lr_lvb_len = rc;
			mutex_unlock(&res->lr_lvb_mutex);
			ldlm_resource_putref(res);
			return NULL;
		}
	}

	/* We create resource with locked lr_lvb_mutex. */
	mutex_unlock(&res->lr_lvb_mutex);

	/* Let's see if we happened to be the very first resource in this
	 * namespace. If so, and this is a client namespace, we need to move
	 * the namespace into the active namespaces list to be patrolled by
	 * the ldlm_poold.
	 */
	if (ns_is_client(ns) && ns_refcount == 1) {
		mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
		ldlm_namespace_move_to_active_locked(ns,
						     LDLM_NAMESPACE_CLIENT);
		mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
	}

	return res;
}
EXPORT_SYMBOL(ldlm_resource_get);

/* Take an extra reference on an already-referenced resource. */
struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
{
	LASSERT(res != NULL);
	LASSERT(res != LP_POISON);
	atomic_inc(&res->lr_refcount);
	CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
	       atomic_read(&res->lr_refcount));
	return res;
}

/* Final teardown of a resource whose refcount hit zero; called with the
 * hash bucket lock held. All lock queues must already be empty. */
static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
					 struct ldlm_resource *res)
{
	struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;

	if (!list_empty(&res->lr_granted)) {
		ldlm_resource_dump(D_ERROR, res);
		LBUG();
	}

	if (!list_empty(&res->lr_converting)) {
		ldlm_resource_dump(D_ERROR, res);
		LBUG();
	}

	if (!list_empty(&res->lr_waiting)) {
		ldlm_resource_dump(D_ERROR, res);
		LBUG();
	}

	cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
			       bd, &res->lr_hash);
	lu_ref_fini(&res->lr_reference);
	/* Last resource in the bucket drops the namespace busy ref taken
	 * in ldlm_resource_get(). */
	if (cfs_hash_bd_count_get(bd) == 0)
		ldlm_namespace_put(nsb->nsb_namespace);
}

/* Returns 1 if the resource was freed, 0 if it remains. */
int ldlm_resource_putref(struct ldlm_resource *res)
{
	struct ldlm_namespace *ns = ldlm_res_to_ns(res);
	struct cfs_hash_bd bd;

	LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
	CDEBUG(D_INFO, "putref res: %p count: %d\n",
	       res, atomic_read(&res->lr_refcount) - 1);

	cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
	if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
		__ldlm_resource_putref_final(&bd, res);
		cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
		if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
			ns->ns_lvbo->lvbo_free(res);
		OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(ldlm_resource_putref);

/* Returns 1 if the resource was freed, 0 if it remains.
 */
int ldlm_resource_putref_locked(struct ldlm_resource *res)
{
	struct ldlm_namespace *ns = ldlm_res_to_ns(res);

	LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
	CDEBUG(D_INFO, "putref res: %p count: %d\n",
	       res, atomic_read(&res->lr_refcount) - 1);

	if (atomic_dec_and_test(&res->lr_refcount)) {
		struct cfs_hash_bd bd;

		cfs_hash_bd_get(ldlm_res_to_ns(res)->ns_rs_hash,
				&res->lr_name, &bd);
		__ldlm_resource_putref_final(&bd, res);
		cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
		/* NB: ns_rs_hash is created with CFS_HASH_NO_ITEMREF,
		 * so we should never be here while calling cfs_hash_del,
		 * cfs_hash_for_each_nolock is the only case we can get
		 * here, which is safe to release cfs_hash_bd_lock.
		 */
		if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
			ns->ns_lvbo->lvbo_free(res);
		OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));
		/* Re-take the bucket lock so the caller's locking state is
		 * unchanged (see the NB above). */
		cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
		return 1;
	}
	return 0;
}

/**
 * Add a lock into a given resource into specified lock list.
 */
void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
			    struct ldlm_lock *lock)
{
	check_res_locked(res);

	LDLM_DEBUG(lock, "About to add this lock:\n");

	if (lock->l_flags & LDLM_FL_DESTROYED) {
		CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
		return;
	}

	LASSERT(list_empty(&lock->l_res_link));

	list_add_tail(&lock->l_res_link, head);
}

/**
 * Insert a lock into resource after specified lock.
 *
 * Obtain resource description from the lock we are inserting after.
 */
void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
				     struct ldlm_lock *new)
{
	struct ldlm_resource *res = original->l_resource;

	check_res_locked(res);

	ldlm_resource_dump(D_INFO, res);
	LDLM_DEBUG(new, "About to insert this lock after %p:\n", original);

	if (new->l_flags & LDLM_FL_DESTROYED) {
		CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
		goto out;
	}

	LASSERT(list_empty(&new->l_res_link));

	list_add(&new->l_res_link, &original->l_res_link);
out:;
}

/* Remove a lock from its resource's queue; must hold the resource lock. */
void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
{
	int type = lock->l_resource->lr_type;

	check_res_locked(lock->l_resource);
	/* Type-specific unlink first (skiplist / interval tree), then the
	 * generic list removal. */
	if (type == LDLM_IBITS || type == LDLM_PLAIN)
		ldlm_unlink_lock_skiplist(lock);
	else if (type == LDLM_EXTENT)
		ldlm_extent_unlink_lock(lock);
	list_del_init(&lock->l_res_link);
}
EXPORT_SYMBOL(ldlm_resource_unlink_lock);

/* Fill a wire resource descriptor from an in-memory resource. */
void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
{
	desc->lr_type = res->lr_type;
	desc->lr_name = res->lr_name;
}

/**
 * Print information about all locks in all namespaces on this node to debug
 * log.
 */
void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
{
	struct list_head *tmp;

	if (!((libcfs_debug | D_ERROR) & level))
		return;

	mutex_lock(ldlm_namespace_lock(client));

	list_for_each(tmp, ldlm_namespace_list(client)) {
		struct ldlm_namespace *ns;

		ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
		ldlm_namespace_dump(level, ns);
	}

	mutex_unlock(ldlm_namespace_lock(client));
}
EXPORT_SYMBOL(ldlm_dump_all_namespaces);

/* cfs_hash iterator: dump one resource at the level packed into @arg. */
static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			      struct hlist_node *hnode, void *arg)
{
	struct ldlm_resource *res = cfs_hash_object(hs, hnode);
	int    level = (int)(unsigned long)arg;

	lock_res(res);
	ldlm_resource_dump(level, res);
	unlock_res(res);

	return 0;
}

/**
 * Print information about all locks in this namespace on this node to debug
 * log.
 */
void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
{
	if (!((libcfs_debug | D_ERROR) & level))
		return;

	CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
	       ldlm_ns_name(ns), atomic_read(&ns->ns_bref),
	       ns_is_client(ns) ? "client" : "server");

	/* Rate-limit full dumps via ns_next_dump. */
	if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))
		return;

	cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_res_hash_dump,
				 (void *)(unsigned long)level);
	spin_lock(&ns->ns_lock);
	ns->ns_next_dump = cfs_time_shift(10);
	spin_unlock(&ns->ns_lock);
}
EXPORT_SYMBOL(ldlm_namespace_dump);

/**
 * Print information about all locks in this resource to debug log.
 */
void ldlm_resource_dump(int level, struct ldlm_resource *res)
{
	struct ldlm_lock *lock;
	unsigned int granted = 0;

	CLASSERT(RES_NAME_SIZE == 4);

	if (!((libcfs_debug | D_ERROR) & level))
		return;

	CDEBUG(level, "--- Resource: %p ("LPU64"/"LPU64"/"LPU64"/"LPU64
	       ") (rc: %d)\n", res, res->lr_name.name[0],
	       res->lr_name.name[1], res->lr_name.name[2],
	       res->lr_name.name[3], atomic_read(&res->lr_refcount));

	if (!list_empty(&res->lr_granted)) {
		CDEBUG(level, "Granted locks (in reverse order):\n");
		list_for_each_entry_reverse(lock, &res->lr_granted,
					    l_res_link) {
			LDLM_DEBUG_LIMIT(level, lock, "###");
			/* Cap the number of granted locks dumped unless the
			 * level cannot be masked. */
			if (!(level & D_CANTMASK) &&
			    ++granted > ldlm_dump_granted_max) {
				CDEBUG(level, "only dump %d granted locks to "
				       "avoid DDOS.\n", granted);
				break;
			}
		}
	}
	if (!list_empty(&res->lr_converting)) {
		CDEBUG(level, "Converting locks:\n");
		list_for_each_entry(lock, &res->lr_converting, l_res_link)
			LDLM_DEBUG_LIMIT(level, lock, "###");
	}
	if (!list_empty(&res->lr_waiting)) {
		CDEBUG(level, "Waiting locks:\n");
		list_for_each_entry(lock, &res->lr_waiting, l_res_link)
			LDLM_DEBUG_LIMIT(level, lock, "###");
	}
}
gpl-2.0
ciarancourtney/YAKM
drivers/net/phy/mdio-bcm-unimac.c
332
4914
/* * Broadcom UniMAC MDIO bus controller driver * * Copyright (C) 2014, Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/of_mdio.h> #define MDIO_CMD 0x00 #define MDIO_START_BUSY (1 << 29) #define MDIO_READ_FAIL (1 << 28) #define MDIO_RD (2 << 26) #define MDIO_WR (1 << 26) #define MDIO_PMD_SHIFT 21 #define MDIO_PMD_MASK 0x1F #define MDIO_REG_SHIFT 16 #define MDIO_REG_MASK 0x1F #define MDIO_CFG 0x04 #define MDIO_C22 (1 << 0) #define MDIO_C45 0 #define MDIO_CLK_DIV_SHIFT 4 #define MDIO_CLK_DIV_MASK 0x3F #define MDIO_SUPP_PREAMBLE (1 << 12) struct unimac_mdio_priv { struct mii_bus *mii_bus; void __iomem *base; }; static inline void unimac_mdio_start(struct unimac_mdio_priv *priv) { u32 reg; reg = __raw_readl(priv->base + MDIO_CMD); reg |= MDIO_START_BUSY; __raw_writel(reg, priv->base + MDIO_CMD); } static inline unsigned int unimac_mdio_busy(struct unimac_mdio_priv *priv) { return __raw_readl(priv->base + MDIO_CMD) & MDIO_START_BUSY; } static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg) { struct unimac_mdio_priv *priv = bus->priv; unsigned int timeout = 1000; u32 cmd; /* Prepare the read operation */ cmd = MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT); __raw_writel(cmd, priv->base + MDIO_CMD); /* Start MDIO transaction */ unimac_mdio_start(priv); do { if (!unimac_mdio_busy(priv)) break; usleep_range(1000, 2000); } while (timeout--); if (!timeout) return -ETIMEDOUT; cmd = __raw_readl(priv->base + MDIO_CMD); if (cmd & MDIO_READ_FAIL) return -EIO; return cmd & 0xffff; } 
static int unimac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) { struct unimac_mdio_priv *priv = bus->priv; unsigned int timeout = 1000; u32 cmd; /* Prepare the write operation */ cmd = MDIO_WR | (phy_id << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT) | (0xffff & val); __raw_writel(cmd, priv->base + MDIO_CMD); unimac_mdio_start(priv); do { if (!unimac_mdio_busy(priv)) break; usleep_range(1000, 2000); } while (timeout--); if (!timeout) return -ETIMEDOUT; return 0; } static int unimac_mdio_probe(struct platform_device *pdev) { struct unimac_mdio_priv *priv; struct device_node *np; struct mii_bus *bus; struct resource *r; int ret; np = pdev->dev.of_node; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); /* Just ioremap, as this MDIO block is usually integrated into an * Ethernet MAC controller register range */ priv->base = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (!priv->base) { dev_err(&pdev->dev, "failed to remap register\n"); return -ENOMEM; } priv->mii_bus = mdiobus_alloc(); if (!priv->mii_bus) return -ENOMEM; bus = priv->mii_bus; bus->priv = priv; bus->name = "unimac MII bus"; bus->parent = &pdev->dev; bus->read = unimac_mdio_read; bus->write = unimac_mdio_write; snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); if (!bus->irq) { ret = -ENOMEM; goto out_mdio_free; } ret = of_mdiobus_register(bus, np); if (ret) { dev_err(&pdev->dev, "MDIO bus registration failed\n"); goto out_mdio_irq; } platform_set_drvdata(pdev, priv); dev_info(&pdev->dev, "Broadcom UniMAC MDIO bus at 0x%p\n", priv->base); return 0; out_mdio_irq: kfree(bus->irq); out_mdio_free: mdiobus_free(bus); return ret; } static int unimac_mdio_remove(struct platform_device *pdev) { struct unimac_mdio_priv *priv = platform_get_drvdata(pdev); mdiobus_unregister(priv->mii_bus); kfree(priv->mii_bus->irq); 
mdiobus_free(priv->mii_bus); return 0; } static struct of_device_id unimac_mdio_ids[] = { { .compatible = "brcm,genet-mdio-v4", }, { .compatible = "brcm,genet-mdio-v3", }, { .compatible = "brcm,genet-mdio-v2", }, { .compatible = "brcm,genet-mdio-v1", }, { .compatible = "brcm,unimac-mdio", }, { /* sentinel */ }, }; static struct platform_driver unimac_mdio_driver = { .driver = { .name = "unimac-mdio", .owner = THIS_MODULE, .of_match_table = unimac_mdio_ids, }, .probe = unimac_mdio_probe, .remove = unimac_mdio_remove, }; module_platform_driver(unimac_mdio_driver); MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom UniMAC MDIO bus controller"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:unimac-mdio");
gpl-2.0
bgly/ibmvscsi_tgt
arch/arm/mach-omap2/voltage.c
1100
8640
/*
 * OMAP3/OMAP4 Voltage Management Routines
 *
 * Author: Thara Gopinath	<thara@ti.com>
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 * Lesly A M <x0080970@ti.com>
 *
 * Copyright (C) 2008, 2011 Nokia Corporation
 * Kalle Jokiniemi
 * Paul Walmsley
 *
 * Copyright (C) 2010 Texas Instruments, Inc.
 * Thara Gopinath <thara@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/clk.h>

#include "common.h"

#include "prm-regbits-34xx.h"
#include "prm-regbits-44xx.h"
#include "prm44xx.h"
#include "prcm44xx.h"
#include "prminst44xx.h"
#include "control.h"

#include "voltage.h"
#include "powerdomain.h"

#include "vc.h"
#include "vp.h"

/* All registered voltage domains, linked through voltagedomain.node. */
static LIST_HEAD(voltdm_list);

/* Public functions */
/**
 * voltdm_get_voltage() - Gets the current non-auto-compensated voltage
 * @voltdm:	pointer to the voltdm for which current voltage info is needed
 *
 * API to get the current non-auto-compensated voltage for a voltage domain.
 * Returns 0 in case of error else returns the current voltage.
 */
unsigned long voltdm_get_voltage(struct voltagedomain *voltdm)
{
	if (!voltdm || IS_ERR(voltdm)) {
		pr_warn("%s: VDD specified does not exist!\n", __func__);
		return 0;
	}

	return voltdm->nominal_volt;
}

/**
 * voltdm_scale() - API to scale voltage of a particular voltage domain.
 * @voltdm: pointer to the voltage domain which is to be scaled.
 * @target_volt: The target voltage of the voltage domain
 *
 * This API should be called by the kernel to do the voltage scaling
 * for a particular voltage domain during DVFS.
 */
int voltdm_scale(struct voltagedomain *voltdm,
		 unsigned long target_volt)
{
	int ret, i;
	unsigned long volt = 0;

	if (!voltdm || IS_ERR(voltdm)) {
		pr_warn("%s: VDD specified does not exist!\n", __func__);
		return -EINVAL;
	}

	if (!voltdm->scale) {
		pr_err("%s: No voltage scale API registered for vdd_%s\n",
			__func__, voltdm->name);
		return -ENODATA;
	}

	/* Adjust voltage to the exact voltage from the OPP table */
	for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) {
		if (voltdm->volt_data[i].volt_nominal >= target_volt) {
			volt = voltdm->volt_data[i].volt_nominal;
			break;
		}
	}

	if (!volt) {
		pr_warn("%s: not scaling. OPP voltage for %lu, not found.\n",
			__func__, target_volt);
		return -EINVAL;
	}

	/* Only record the new nominal voltage if the scale op succeeded. */
	ret = voltdm->scale(voltdm, volt);
	if (!ret)
		voltdm->nominal_volt = volt;

	return ret;
}

/**
 * voltdm_reset() - Resets the voltage of a particular voltage domain
 *		    to that of the current OPP.
 * @voltdm: pointer to the voltage domain whose voltage is to be reset.
 *
 * This API finds out the correct voltage the voltage domain is supposed
 * to be at and resets the voltage to that level. Should be used especially
 * while disabling any voltage compensation modules.
 */
void voltdm_reset(struct voltagedomain *voltdm)
{
	unsigned long target_volt;

	if (!voltdm || IS_ERR(voltdm)) {
		pr_warn("%s: VDD specified does not exist!\n", __func__);
		return;
	}

	target_volt = voltdm_get_voltage(voltdm);
	if (!target_volt) {
		pr_err("%s: unable to find current voltage for vdd_%s\n",
			__func__, voltdm->name);
		return;
	}

	voltdm_scale(voltdm, target_volt);
}

/**
 * omap_voltage_get_volttable() - API to get the voltage table associated with a
 *				particular voltage domain.
 * @voltdm:	pointer to the VDD for which the voltage table is required
 * @volt_data:	the voltage table for the particular vdd which is to be
 *		populated by this API
 *
 * This API populates the voltage table associated with a VDD into the
 * passed parameter pointer. Returns the count of distinct voltages
 * supported by this vdd.
 *
 */
void omap_voltage_get_volttable(struct voltagedomain *voltdm,
				struct omap_volt_data **volt_data)
{
	if (!voltdm || IS_ERR(voltdm)) {
		pr_warn("%s: VDD specified does not exist!\n", __func__);
		return;
	}

	*volt_data = voltdm->volt_data;
}

/**
 * omap_voltage_get_voltdata() - API to get the voltage table entry for a
 *				particular voltage
 * @voltdm:	pointer to the VDD whose voltage table has to be searched
 * @volt:	the voltage to be searched in the voltage table
 *
 * This API searches through the voltage table for the required voltage
 * domain and tries to find a matching entry for the passed voltage volt.
 * If a matching entry is found volt_data is populated with that entry.
 * This API searches only through the non-compensated voltages int the
 * voltage table.
 * Returns pointer to the voltage table entry corresponding to volt on
 * success. Returns -ENODATA if no voltage table exisits for the passed voltage
 * domain or if there is no matching entry.
 */
struct omap_volt_data *omap_voltage_get_voltdata(struct voltagedomain *voltdm,
						 unsigned long volt)
{
	int i;

	if (!voltdm || IS_ERR(voltdm)) {
		pr_warn("%s: VDD specified does not exist!\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	if (!voltdm->volt_data) {
		pr_warn("%s: voltage table does not exist for vdd_%s\n",
			__func__, voltdm->name);
		return ERR_PTR(-ENODATA);
	}

	/* Table is terminated by a zero volt_nominal entry. */
	for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) {
		if (voltdm->volt_data[i].volt_nominal == volt)
			return &voltdm->volt_data[i];
	}

	pr_notice("%s: Unable to match the current voltage with the voltage table for vdd_%s\n",
		  __func__, voltdm->name);

	return ERR_PTR(-ENODATA);
}

/**
 * omap_voltage_register_pmic() - API to register PMIC specific data
 * @voltdm:	pointer to the VDD for which the PMIC specific data is
 *		to be registered
 * @pmic:	the structure containing pmic info
 *
 * This API is to be called by the SOC/PMIC file to specify the
 * pmic specific info as present in omap_voltdm_pmic structure.
 */
int omap_voltage_register_pmic(struct voltagedomain *voltdm,
			       struct omap_voltdm_pmic *pmic)
{
	if (!voltdm || IS_ERR(voltdm)) {
		pr_warn("%s: VDD specified does not exist!\n", __func__);
		return -EINVAL;
	}

	voltdm->pmic = pmic;

	return 0;
}

/**
 * omap_voltage_late_init() - Init the various voltage parameters
 *
 * This API is to be called in the later stages of the
 * system boot to init the voltage controller and
 * voltage processors.
 */
int __init omap_voltage_late_init(void)
{
	struct voltagedomain *voltdm;

	if (list_empty(&voltdm_list)) {
		pr_err("%s: Voltage driver support not added\n",
			__func__);
		return -EINVAL;
	}

	list_for_each_entry(voltdm, &voltdm_list, node) {
		struct clk *sys_ck;

		if (!voltdm->scalable)
			continue;

		/* Cache the sys clock rate; the clk handle itself is not
		 * kept past this function. */
		sys_ck = clk_get(NULL, voltdm->sys_clk.name);
		if (IS_ERR(sys_ck)) {
			pr_warn("%s: Could not get sys clk.\n", __func__);
			return -EINVAL;
		}
		voltdm->sys_clk.rate = clk_get_rate(sys_ck);
		WARN_ON(!voltdm->sys_clk.rate);
		clk_put(sys_ck);

		/* VP (if present) overrides the VC bypass scale method. */
		if (voltdm->vc) {
			voltdm->scale = omap_vc_bypass_scale;
			omap_vc_init_channel(voltdm);
		}

		if (voltdm->vp) {
			voltdm->scale = omap_vp_forceupdate_scale;
			omap_vp_init(voltdm);
		}
	}

	return 0;
}

/* Find a registered domain by name; returns NULL when not found. */
static struct voltagedomain *_voltdm_lookup(const char *name)
{
	struct voltagedomain *voltdm, *temp_voltdm;

	voltdm = NULL;

	list_for_each_entry(temp_voltdm, &voltdm_list, node) {
		if (!strcmp(name, temp_voltdm->name)) {
			voltdm = temp_voltdm;
			break;
		}
	}

	return voltdm;
}

/* Add one domain to voltdm_list; requires a non-NULL domain and name. */
static int _voltdm_register(struct voltagedomain *voltdm)
{
	if (!voltdm || !voltdm->name)
		return -EINVAL;

	list_add(&voltdm->node, &voltdm_list);
	pr_debug("voltagedomain: registered %s\n", voltdm->name);

	return 0;
}

/**
 * voltdm_lookup - look up a voltagedomain by name, return a pointer
 * @name: name of voltagedomain
 *
 * Find a registered voltagedomain by its name @name. Returns a pointer
 * to the struct voltagedomain if found, or NULL otherwise.
 */
struct voltagedomain *voltdm_lookup(const char *name)
{
	struct voltagedomain *voltdm ;

	if (!name)
		return NULL;

	voltdm = _voltdm_lookup(name);

	return voltdm;
}

/**
 * voltdm_init - set up the voltagedomain layer
 * @voltdm_list: array of struct voltagedomain pointers to register
 *
 * Loop through the array of voltagedomains @voltdm_list, registering all
 * that are available on the current CPU. If voltdm_list is supplied
 * and not null, all of the referenced voltagedomains will be
 * registered.  No return value.
 */
void voltdm_init(struct voltagedomain **voltdms)
{
	struct voltagedomain **v;

	/* NULL-terminated array; NULL argument is a silent no-op. */
	if (voltdms) {
		for (v = voltdms; *v; v++)
			_voltdm_register(*v);
	}
}
gpl-2.0
hroark13/WARP_KERNEL
drivers/watchdog/pcwd_pci.c
1100
21272
/* * Berkshire PCI-PC Watchdog Card Driver * * (c) Copyright 2003-2007 Wim Van Sebroeck <wim@iguana.be>. * * Based on source code of the following authors: * Ken Hollis <kenji@bitgate.com>, * Lindsay Harris <lindsay@bluegum.com>, * Alan Cox <alan@lxorguk.ukuu.org.uk>, * Matt Domsch <Matt_Domsch@dell.com>, * Rob Radez <rob@osinvestor.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Neither Wim Van Sebroeck nor Iguana vzw. admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. */ /* * A bells and whistles driver is available from: * http://www.kernel.org/pub/linux/kernel/people/wim/pcwd/pcwd_pci/ * * More info available at * http://www.berkprod.com/ or http://www.pcwatchdog.com/ */ /* * Includes, defines, variables, module parameters, ... */ #include <linux/module.h> /* For module specific items */ #include <linux/moduleparam.h> /* For new moduleparam's */ #include <linux/types.h> /* For standard types (like size_t) */ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/panic/... */ #include <linux/delay.h> /* For mdelay function */ #include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/notifier.h> /* For notifier support */ #include <linux/reboot.h> /* For reboot_notifier stuff */ #include <linux/init.h> /* For __init/__exit/... */ #include <linux/fs.h> /* For file operations */ #include <linux/pci.h> /* For pci functions */ #include <linux/ioport.h> /* For io-port access */ #include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */ #include <linux/uaccess.h> /* For copy_to_user/put_user/... */ #include <linux/io.h> /* For inb/outb/... 
*/ /* Module and version information */ #define WATCHDOG_VERSION "1.03" #define WATCHDOG_DRIVER_NAME "PCI-PC Watchdog" #define WATCHDOG_NAME "pcwd_pci" #define PFX WATCHDOG_NAME ": " #define DRIVER_VERSION WATCHDOG_DRIVER_NAME " driver, v" WATCHDOG_VERSION "\n" /* Stuff for the PCI ID's */ #ifndef PCI_VENDOR_ID_QUICKLOGIC #define PCI_VENDOR_ID_QUICKLOGIC 0x11e3 #endif #ifndef PCI_DEVICE_ID_WATCHDOG_PCIPCWD #define PCI_DEVICE_ID_WATCHDOG_PCIPCWD 0x5030 #endif /* * These are the defines that describe the control status bits for the * PCI-PC Watchdog card. */ /* Port 1 : Control Status #1 */ #define WD_PCI_WTRP 0x01 /* Watchdog Trip status */ #define WD_PCI_HRBT 0x02 /* Watchdog Heartbeat */ #define WD_PCI_TTRP 0x04 /* Temperature Trip status */ #define WD_PCI_RL2A 0x08 /* Relay 2 Active */ #define WD_PCI_RL1A 0x10 /* Relay 1 Active */ #define WD_PCI_R2DS 0x40 /* Relay 2 Disable Temperature-trip / reset */ #define WD_PCI_RLY2 0x80 /* Activate Relay 2 on the board */ /* Port 2 : Control Status #2 */ #define WD_PCI_WDIS 0x10 /* Watchdog Disable */ #define WD_PCI_ENTP 0x20 /* Enable Temperature Trip Reset */ #define WD_PCI_WRSP 0x40 /* Watchdog wrote response */ #define WD_PCI_PCMD 0x80 /* PC has sent command */ /* according to documentation max. 
time to process a command for the pci * watchdog card is 100 ms, so we give it 150 ms to do it's job */ #define PCI_COMMAND_TIMEOUT 150 /* Watchdog's internal commands */ #define CMD_GET_STATUS 0x04 #define CMD_GET_FIRMWARE_VERSION 0x08 #define CMD_READ_WATCHDOG_TIMEOUT 0x18 #define CMD_WRITE_WATCHDOG_TIMEOUT 0x19 #define CMD_GET_CLEAR_RESET_COUNT 0x84 /* Watchdog's Dip Switch heartbeat values */ static const int heartbeat_tbl[] = { 5, /* OFF-OFF-OFF = 5 Sec */ 10, /* OFF-OFF-ON = 10 Sec */ 30, /* OFF-ON-OFF = 30 Sec */ 60, /* OFF-ON-ON = 1 Min */ 300, /* ON-OFF-OFF = 5 Min */ 600, /* ON-OFF-ON = 10 Min */ 1800, /* ON-ON-OFF = 30 Min */ 3600, /* ON-ON-ON = 1 hour */ }; /* We can only use 1 card due to the /dev/watchdog restriction */ static int cards_found; /* internal variables */ static int temp_panic; static unsigned long is_active; static char expect_release; /* this is private data for each PCI-PC watchdog card */ static struct { /* Wether or not the card has a temperature device */ int supports_temp; /* The card's boot status */ int boot_status; /* The cards I/O address */ unsigned long io_addr; /* the lock for io operations */ spinlock_t io_lock; /* the PCI-device */ struct pci_dev *pdev; } pcipcwd_private; /* module parameters */ #define QUIET 0 /* Default */ #define VERBOSE 1 /* Verbose */ #define DEBUG 2 /* print fancy stuff too */ static int debug = QUIET; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level: 0=Quiet, 1=Verbose, 2=Debug (default=0)"); #define WATCHDOG_HEARTBEAT 0 /* default heartbeat = delay-time from dip-switches */ static int heartbeat = WATCHDOG_HEARTBEAT; module_param(heartbeat, int, 0); MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. 
" "(0<heartbeat<65536 or 0=delay-time from dip-switches, default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")"); static int nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); /* * Internal functions */ static int send_command(int cmd, int *msb, int *lsb) { int got_response, count; if (debug >= DEBUG) printk(KERN_DEBUG PFX "sending following data " "cmd=0x%02x msb=0x%02x lsb=0x%02x\n", cmd, *msb, *lsb); spin_lock(&pcipcwd_private.io_lock); /* If a command requires data it should be written first. * Data for commands with 8 bits of data should be written to port 4. * Commands with 16 bits of data, should be written as LSB to port 4 * and MSB to port 5. * After the required data has been written then write the command to * port 6. */ outb_p(*lsb, pcipcwd_private.io_addr + 4); outb_p(*msb, pcipcwd_private.io_addr + 5); outb_p(cmd, pcipcwd_private.io_addr + 6); /* wait till the pci card processed the command, signaled by * the WRSP bit in port 2 and give it a max. 
timeout of * PCI_COMMAND_TIMEOUT to process */ got_response = inb_p(pcipcwd_private.io_addr + 2) & WD_PCI_WRSP; for (count = 0; (count < PCI_COMMAND_TIMEOUT) && (!got_response); count++) { mdelay(1); got_response = inb_p(pcipcwd_private.io_addr + 2) & WD_PCI_WRSP; } if (debug >= DEBUG) { if (got_response) { printk(KERN_DEBUG PFX "time to process command was: %d ms\n", count); } else { printk(KERN_DEBUG PFX "card did not respond on command!\n"); } } if (got_response) { /* read back response */ *lsb = inb_p(pcipcwd_private.io_addr + 4); *msb = inb_p(pcipcwd_private.io_addr + 5); /* clear WRSP bit */ inb_p(pcipcwd_private.io_addr + 6); if (debug >= DEBUG) printk(KERN_DEBUG PFX "received following data for " "cmd=0x%02x: msb=0x%02x lsb=0x%02x\n", cmd, *msb, *lsb); } spin_unlock(&pcipcwd_private.io_lock); return got_response; } static inline void pcipcwd_check_temperature_support(void) { if (inb_p(pcipcwd_private.io_addr) != 0xF0) pcipcwd_private.supports_temp = 1; } static int pcipcwd_get_option_switches(void) { int option_switches; option_switches = inb_p(pcipcwd_private.io_addr + 3); return option_switches; } static void pcipcwd_show_card_info(void) { int got_fw_rev, fw_rev_major, fw_rev_minor; char fw_ver_str[20]; /* The cards firmware version */ int option_switches; got_fw_rev = send_command(CMD_GET_FIRMWARE_VERSION, &fw_rev_major, &fw_rev_minor); if (got_fw_rev) sprintf(fw_ver_str, "%u.%02u", fw_rev_major, fw_rev_minor); else sprintf(fw_ver_str, "<card no answer>"); /* Get switch settings */ option_switches = pcipcwd_get_option_switches(); printk(KERN_INFO PFX "Found card at port " "0x%04x (Firmware: %s) %s temp option\n", (int) pcipcwd_private.io_addr, fw_ver_str, (pcipcwd_private.supports_temp ? "with" : "without")); printk(KERN_INFO PFX "Option switches (0x%02x): " "Temperature Reset Enable=%s, Power On Delay=%s\n", option_switches, ((option_switches & 0x10) ? "ON" : "OFF"), ((option_switches & 0x08) ? 
"ON" : "OFF")); if (pcipcwd_private.boot_status & WDIOF_CARDRESET) printk(KERN_INFO PFX "Previous reset was caused by the Watchdog card\n"); if (pcipcwd_private.boot_status & WDIOF_OVERHEAT) printk(KERN_INFO PFX "Card sensed a CPU Overheat\n"); if (pcipcwd_private.boot_status == 0) printk(KERN_INFO PFX "No previous trip detected - Cold boot or reset\n"); } static int pcipcwd_start(void) { int stat_reg; spin_lock(&pcipcwd_private.io_lock); outb_p(0x00, pcipcwd_private.io_addr + 3); udelay(1000); stat_reg = inb_p(pcipcwd_private.io_addr + 2); spin_unlock(&pcipcwd_private.io_lock); if (stat_reg & WD_PCI_WDIS) { printk(KERN_ERR PFX "Card timer not enabled\n"); return -1; } if (debug >= VERBOSE) printk(KERN_DEBUG PFX "Watchdog started\n"); return 0; } static int pcipcwd_stop(void) { int stat_reg; spin_lock(&pcipcwd_private.io_lock); outb_p(0xA5, pcipcwd_private.io_addr + 3); udelay(1000); outb_p(0xA5, pcipcwd_private.io_addr + 3); udelay(1000); stat_reg = inb_p(pcipcwd_private.io_addr + 2); spin_unlock(&pcipcwd_private.io_lock); if (!(stat_reg & WD_PCI_WDIS)) { printk(KERN_ERR PFX "Card did not acknowledge disable attempt\n"); return -1; } if (debug >= VERBOSE) printk(KERN_DEBUG PFX "Watchdog stopped\n"); return 0; } static int pcipcwd_keepalive(void) { /* Re-trigger watchdog by writing to port 0 */ spin_lock(&pcipcwd_private.io_lock); outb_p(0x42, pcipcwd_private.io_addr); /* send out any data */ spin_unlock(&pcipcwd_private.io_lock); if (debug >= DEBUG) printk(KERN_DEBUG PFX "Watchdog keepalive signal send\n"); return 0; } static int pcipcwd_set_heartbeat(int t) { int t_msb = t / 256; int t_lsb = t % 256; if ((t < 0x0001) || (t > 0xFFFF)) return -EINVAL; /* Write new heartbeat to watchdog */ send_command(CMD_WRITE_WATCHDOG_TIMEOUT, &t_msb, &t_lsb); heartbeat = t; if (debug >= VERBOSE) printk(KERN_DEBUG PFX "New heartbeat: %d\n", heartbeat); return 0; } static int pcipcwd_get_status(int *status) { int control_status; *status = 0; control_status = 
inb_p(pcipcwd_private.io_addr + 1); if (control_status & WD_PCI_WTRP) *status |= WDIOF_CARDRESET; if (control_status & WD_PCI_TTRP) { *status |= WDIOF_OVERHEAT; if (temp_panic) panic(PFX "Temperature overheat trip!\n"); } if (debug >= DEBUG) printk(KERN_DEBUG PFX "Control Status #1: 0x%02x\n", control_status); return 0; } static int pcipcwd_clear_status(void) { int control_status; int msb; int reset_counter; if (debug >= VERBOSE) printk(KERN_INFO PFX "clearing watchdog trip status & LED\n"); control_status = inb_p(pcipcwd_private.io_addr + 1); if (debug >= DEBUG) { printk(KERN_DEBUG PFX "status was: 0x%02x\n", control_status); printk(KERN_DEBUG PFX "sending: 0x%02x\n", (control_status & WD_PCI_R2DS) | WD_PCI_WTRP); } /* clear trip status & LED and keep mode of relay 2 */ outb_p((control_status & WD_PCI_R2DS) | WD_PCI_WTRP, pcipcwd_private.io_addr + 1); /* clear reset counter */ msb = 0; reset_counter = 0xff; send_command(CMD_GET_CLEAR_RESET_COUNT, &msb, &reset_counter); if (debug >= DEBUG) { printk(KERN_DEBUG PFX "reset count was: 0x%02x\n", reset_counter); } return 0; } static int pcipcwd_get_temperature(int *temperature) { *temperature = 0; if (!pcipcwd_private.supports_temp) return -ENODEV; spin_lock(&pcipcwd_private.io_lock); *temperature = inb_p(pcipcwd_private.io_addr); spin_unlock(&pcipcwd_private.io_lock); /* * Convert celsius to fahrenheit, since this was * the decided 'standard' for this return value. 
*/ *temperature = (*temperature * 9 / 5) + 32; if (debug >= DEBUG) { printk(KERN_DEBUG PFX "temperature is: %d F\n", *temperature); } return 0; } static int pcipcwd_get_timeleft(int *time_left) { int msb; int lsb; /* Read the time that's left before rebooting */ /* Note: if the board is not yet armed then we will read 0xFFFF */ send_command(CMD_READ_WATCHDOG_TIMEOUT, &msb, &lsb); *time_left = (msb << 8) + lsb; if (debug >= VERBOSE) printk(KERN_DEBUG PFX "Time left before next reboot: %d\n", *time_left); return 0; } /* * /dev/watchdog handling */ static ssize_t pcipcwd_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { /* See if we got the magic character 'V' and reload the timer */ if (len) { if (!nowayout) { size_t i; /* note: just in case someone wrote the magic character * five months ago... */ expect_release = 0; /* scan to see whether or not we got the * magic character */ for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') expect_release = 42; } } /* someone wrote to us, we should reload the timer */ pcipcwd_keepalive(); } return len; } static long pcipcwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int __user *p = argp; static const struct watchdog_info ident = { .options = WDIOF_OVERHEAT | WDIOF_CARDRESET | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 1, .identity = WATCHDOG_DRIVER_NAME, }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? 
-EFAULT : 0; case WDIOC_GETSTATUS: { int status; pcipcwd_get_status(&status); return put_user(status, p); } case WDIOC_GETBOOTSTATUS: return put_user(pcipcwd_private.boot_status, p); case WDIOC_GETTEMP: { int temperature; if (pcipcwd_get_temperature(&temperature)) return -EFAULT; return put_user(temperature, p); } case WDIOC_SETOPTIONS: { int new_options, retval = -EINVAL; if (get_user(new_options, p)) return -EFAULT; if (new_options & WDIOS_DISABLECARD) { if (pcipcwd_stop()) return -EIO; retval = 0; } if (new_options & WDIOS_ENABLECARD) { if (pcipcwd_start()) return -EIO; retval = 0; } if (new_options & WDIOS_TEMPPANIC) { temp_panic = 1; retval = 0; } return retval; } case WDIOC_KEEPALIVE: pcipcwd_keepalive(); return 0; case WDIOC_SETTIMEOUT: { int new_heartbeat; if (get_user(new_heartbeat, p)) return -EFAULT; if (pcipcwd_set_heartbeat(new_heartbeat)) return -EINVAL; pcipcwd_keepalive(); /* Fall */ } case WDIOC_GETTIMEOUT: return put_user(heartbeat, p); case WDIOC_GETTIMELEFT: { int time_left; if (pcipcwd_get_timeleft(&time_left)) return -EFAULT; return put_user(time_left, p); } default: return -ENOTTY; } } static int pcipcwd_open(struct inode *inode, struct file *file) { /* /dev/watchdog can only be opened once */ if (test_and_set_bit(0, &is_active)) { if (debug >= VERBOSE) printk(KERN_ERR PFX "Attempt to open already opened device.\n"); return -EBUSY; } /* Activate */ pcipcwd_start(); pcipcwd_keepalive(); return nonseekable_open(inode, file); } static int pcipcwd_release(struct inode *inode, struct file *file) { /* * Shut off the timer. 
*/ if (expect_release == 42) { pcipcwd_stop(); } else { printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); pcipcwd_keepalive(); } expect_release = 0; clear_bit(0, &is_active); return 0; } /* * /dev/temperature handling */ static ssize_t pcipcwd_temp_read(struct file *file, char __user *data, size_t len, loff_t *ppos) { int temperature; if (pcipcwd_get_temperature(&temperature)) return -EFAULT; if (copy_to_user(data, &temperature, 1)) return -EFAULT; return 1; } static int pcipcwd_temp_open(struct inode *inode, struct file *file) { if (!pcipcwd_private.supports_temp) return -ENODEV; return nonseekable_open(inode, file); } static int pcipcwd_temp_release(struct inode *inode, struct file *file) { return 0; } /* * Notify system */ static int pcipcwd_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) pcipcwd_stop(); /* Turn the WDT off */ return NOTIFY_DONE; } /* * Kernel Interfaces */ static const struct file_operations pcipcwd_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = pcipcwd_write, .unlocked_ioctl = pcipcwd_ioctl, .open = pcipcwd_open, .release = pcipcwd_release, }; static struct miscdevice pcipcwd_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &pcipcwd_fops, }; static const struct file_operations pcipcwd_temp_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = pcipcwd_temp_read, .open = pcipcwd_temp_open, .release = pcipcwd_temp_release, }; static struct miscdevice pcipcwd_temp_miscdev = { .minor = TEMP_MINOR, .name = "temperature", .fops = &pcipcwd_temp_fops, }; static struct notifier_block pcipcwd_notifier = { .notifier_call = pcipcwd_notify_sys, }; /* * Init & exit routines */ static int __devinit pcipcwd_card_init(struct pci_dev *pdev, const struct pci_device_id *ent) { int ret = -EIO; cards_found++; if (cards_found == 1) printk(KERN_INFO PFX DRIVER_VERSION); if (cards_found > 1) { printk(KERN_ERR PFX "This driver only supports 1 
device\n"); return -ENODEV; } if (pci_enable_device(pdev)) { printk(KERN_ERR PFX "Not possible to enable PCI Device\n"); return -ENODEV; } if (pci_resource_start(pdev, 0) == 0x0000) { printk(KERN_ERR PFX "No I/O-Address for card detected\n"); ret = -ENODEV; goto err_out_disable_device; } pcipcwd_private.pdev = pdev; pcipcwd_private.io_addr = pci_resource_start(pdev, 0); if (pci_request_regions(pdev, WATCHDOG_NAME)) { printk(KERN_ERR PFX "I/O address 0x%04x already in use\n", (int) pcipcwd_private.io_addr); ret = -EIO; goto err_out_disable_device; } /* get the boot_status */ pcipcwd_get_status(&pcipcwd_private.boot_status); /* clear the "card caused reboot" flag */ pcipcwd_clear_status(); /* disable card */ pcipcwd_stop(); /* Check whether or not the card supports the temperature device */ pcipcwd_check_temperature_support(); /* Show info about the card itself */ pcipcwd_show_card_info(); /* If heartbeat = 0 then we use the heartbeat from the dip-switches */ if (heartbeat == 0) heartbeat = heartbeat_tbl[(pcipcwd_get_option_switches() & 0x07)]; /* Check that the heartbeat value is within it's range ; * if not reset to the default */ if (pcipcwd_set_heartbeat(heartbeat)) { pcipcwd_set_heartbeat(WATCHDOG_HEARTBEAT); printk(KERN_INFO PFX "heartbeat value must be 0<heartbeat<65536, using %d\n", WATCHDOG_HEARTBEAT); } ret = register_reboot_notifier(&pcipcwd_notifier); if (ret != 0) { printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", ret); goto err_out_release_region; } if (pcipcwd_private.supports_temp) { ret = misc_register(&pcipcwd_temp_miscdev); if (ret != 0) { printk(KERN_ERR PFX "cannot register miscdev on " "minor=%d (err=%d)\n", TEMP_MINOR, ret); goto err_out_unregister_reboot; } } ret = misc_register(&pcipcwd_miscdev); if (ret != 0) { printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, ret); goto err_out_misc_deregister; } printk(KERN_INFO PFX "initialized. 
heartbeat=%d sec (nowayout=%d)\n", heartbeat, nowayout); return 0; err_out_misc_deregister: if (pcipcwd_private.supports_temp) misc_deregister(&pcipcwd_temp_miscdev); err_out_unregister_reboot: unregister_reboot_notifier(&pcipcwd_notifier); err_out_release_region: pci_release_regions(pdev); err_out_disable_device: pci_disable_device(pdev); return ret; } static void __devexit pcipcwd_card_exit(struct pci_dev *pdev) { /* Stop the timer before we leave */ if (!nowayout) pcipcwd_stop(); /* Deregister */ misc_deregister(&pcipcwd_miscdev); if (pcipcwd_private.supports_temp) misc_deregister(&pcipcwd_temp_miscdev); unregister_reboot_notifier(&pcipcwd_notifier); pci_release_regions(pdev); pci_disable_device(pdev); cards_found--; } static struct pci_device_id pcipcwd_pci_tbl[] = { { PCI_VENDOR_ID_QUICKLOGIC, PCI_DEVICE_ID_WATCHDOG_PCIPCWD, PCI_ANY_ID, PCI_ANY_ID, }, { 0 }, /* End of list */ }; MODULE_DEVICE_TABLE(pci, pcipcwd_pci_tbl); static struct pci_driver pcipcwd_driver = { .name = WATCHDOG_NAME, .id_table = pcipcwd_pci_tbl, .probe = pcipcwd_card_init, .remove = __devexit_p(pcipcwd_card_exit), }; static int __init pcipcwd_init_module(void) { spin_lock_init(&pcipcwd_private.io_lock); return pci_register_driver(&pcipcwd_driver); } static void __exit pcipcwd_cleanup_module(void) { pci_unregister_driver(&pcipcwd_driver); printk(KERN_INFO PFX "Watchdog Module Unloaded.\n"); } module_init(pcipcwd_init_module); module_exit(pcipcwd_cleanup_module); MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>"); MODULE_DESCRIPTION("Berkshire PCI-PC Watchdog driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS_MISCDEV(TEMP_MINOR);
gpl-2.0
froggy666uk/Froggy_SensMod_CM10.1
arch/arm/mach-omap2/board-cm-t35.c
1868
18337
/* * board-cm-t35.c (CompuLab CM-T35 module) * * Copyright (C) 2009 CompuLab, Ltd. * Author: Mike Rapoport <mike@compulab.co.il> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/input.h> #include <linux/input/matrix_keypad.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/i2c/at24.h> #include <linux/i2c/twl.h> #include <linux/regulator/machine.h> #include <linux/mmc/host.h> #include <linux/spi/spi.h> #include <linux/spi/tdo24m.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <plat/board.h> #include <plat/common.h> #include <plat/nand.h> #include <plat/gpmc.h> #include <plat/usb.h> #include <video/omapdss.h> #include <video/omap-panel-generic-dpi.h> #include <plat/mcspi.h> #include <mach/hardware.h> #include "mux.h" #include "sdram-micron-mt46h32m32lf-6.h" #include "hsmmc.h" #include "common-board-devices.h" #define CM_T35_GPIO_PENDOWN 57 #define CM_T35_SMSC911X_CS 5 #define CM_T35_SMSC911X_GPIO 163 #define SB_T35_SMSC911X_CS 4 #define SB_T35_SMSC911X_GPIO 65 #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) #include <linux/smsc911x.h> #include <plat/gpmc-smsc911x.h> static struct omap_smsc911x_platform_data cm_t35_smsc911x_cfg = { .id = 0, .cs = CM_T35_SMSC911X_CS, .gpio_irq = CM_T35_SMSC911X_GPIO, 
.gpio_reset = -EINVAL, .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS, }; static struct omap_smsc911x_platform_data sb_t35_smsc911x_cfg = { .id = 1, .cs = SB_T35_SMSC911X_CS, .gpio_irq = SB_T35_SMSC911X_GPIO, .gpio_reset = -EINVAL, .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS, }; static void __init cm_t35_init_ethernet(void) { gpmc_smsc911x_init(&cm_t35_smsc911x_cfg); gpmc_smsc911x_init(&sb_t35_smsc911x_cfg); } #else static inline void __init cm_t35_init_ethernet(void) { return; } #endif #if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE) #include <linux/leds.h> static struct gpio_led cm_t35_leds[] = { [0] = { .gpio = 186, .name = "cm-t35:green", .default_trigger = "heartbeat", .active_low = 0, }, }; static struct gpio_led_platform_data cm_t35_led_pdata = { .num_leds = ARRAY_SIZE(cm_t35_leds), .leds = cm_t35_leds, }; static struct platform_device cm_t35_led_device = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &cm_t35_led_pdata, }, }; static void __init cm_t35_init_led(void) { platform_device_register(&cm_t35_led_device); } #else static inline void cm_t35_init_led(void) {} #endif #if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE) #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> static struct mtd_partition cm_t35_nand_partitions[] = { { .name = "xloader", .offset = 0, /* Offset = 0x00000 */ .size = 4 * NAND_BLOCK_SIZE, .mask_flags = MTD_WRITEABLE }, { .name = "uboot", .offset = MTDPART_OFS_APPEND, /* Offset = 0x80000 */ .size = 15 * NAND_BLOCK_SIZE, }, { .name = "uboot environment", .offset = MTDPART_OFS_APPEND, /* Offset = 0x260000 */ .size = 2 * NAND_BLOCK_SIZE, }, { .name = "linux", .offset = MTDPART_OFS_APPEND, /* Offset = 0x280000 */ .size = 32 * NAND_BLOCK_SIZE, }, { .name = "rootfs", .offset = MTDPART_OFS_APPEND, /* Offset = 0x680000 */ .size = MTDPART_SIZ_FULL, }, }; static struct omap_nand_platform_data cm_t35_nand_data = { .parts = 
cm_t35_nand_partitions, .nr_parts = ARRAY_SIZE(cm_t35_nand_partitions), .dma_channel = -1, /* disable DMA in OMAP NAND driver */ .cs = 0, }; static void __init cm_t35_init_nand(void) { if (gpmc_nand_init(&cm_t35_nand_data) < 0) pr_err("CM-T35: Unable to register NAND device\n"); } #else static inline void cm_t35_init_nand(void) {} #endif #define CM_T35_LCD_EN_GPIO 157 #define CM_T35_LCD_BL_GPIO 58 #define CM_T35_DVI_EN_GPIO 54 static int lcd_enabled; static int dvi_enabled; static int cm_t35_panel_enable_lcd(struct omap_dss_device *dssdev) { if (dvi_enabled) { printk(KERN_ERR "cannot enable LCD, DVI is enabled\n"); return -EINVAL; } gpio_set_value(CM_T35_LCD_EN_GPIO, 1); gpio_set_value(CM_T35_LCD_BL_GPIO, 1); lcd_enabled = 1; return 0; } static void cm_t35_panel_disable_lcd(struct omap_dss_device *dssdev) { lcd_enabled = 0; gpio_set_value(CM_T35_LCD_BL_GPIO, 0); gpio_set_value(CM_T35_LCD_EN_GPIO, 0); } static int cm_t35_panel_enable_dvi(struct omap_dss_device *dssdev) { if (lcd_enabled) { printk(KERN_ERR "cannot enable DVI, LCD is enabled\n"); return -EINVAL; } gpio_set_value(CM_T35_DVI_EN_GPIO, 0); dvi_enabled = 1; return 0; } static void cm_t35_panel_disable_dvi(struct omap_dss_device *dssdev) { gpio_set_value(CM_T35_DVI_EN_GPIO, 1); dvi_enabled = 0; } static int cm_t35_panel_enable_tv(struct omap_dss_device *dssdev) { return 0; } static void cm_t35_panel_disable_tv(struct omap_dss_device *dssdev) { } static struct panel_generic_dpi_data lcd_panel = { .name = "toppoly_tdo35s", .platform_enable = cm_t35_panel_enable_lcd, .platform_disable = cm_t35_panel_disable_lcd, }; static struct omap_dss_device cm_t35_lcd_device = { .name = "lcd", .type = OMAP_DISPLAY_TYPE_DPI, .driver_name = "generic_dpi_panel", .data = &lcd_panel, .phy.dpi.data_lines = 18, }; static struct panel_generic_dpi_data dvi_panel = { .name = "generic", .platform_enable = cm_t35_panel_enable_dvi, .platform_disable = cm_t35_panel_disable_dvi, }; static struct omap_dss_device cm_t35_dvi_device = { 
.name = "dvi", .type = OMAP_DISPLAY_TYPE_DPI, .driver_name = "generic_dpi_panel", .data = &dvi_panel, .phy.dpi.data_lines = 24, }; static struct omap_dss_device cm_t35_tv_device = { .name = "tv", .driver_name = "venc", .type = OMAP_DISPLAY_TYPE_VENC, .phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO, .platform_enable = cm_t35_panel_enable_tv, .platform_disable = cm_t35_panel_disable_tv, }; static struct omap_dss_device *cm_t35_dss_devices[] = { &cm_t35_lcd_device, &cm_t35_dvi_device, &cm_t35_tv_device, }; static struct omap_dss_board_info cm_t35_dss_data = { .num_devices = ARRAY_SIZE(cm_t35_dss_devices), .devices = cm_t35_dss_devices, .default_device = &cm_t35_dvi_device, }; static struct omap2_mcspi_device_config tdo24m_mcspi_config = { .turbo_mode = 0, .single_channel = 1, /* 0: slave, 1: master */ }; static struct tdo24m_platform_data tdo24m_config = { .model = TDO35S, }; static struct spi_board_info cm_t35_lcd_spi_board_info[] __initdata = { { .modalias = "tdo24m", .bus_num = 4, .chip_select = 0, .max_speed_hz = 1000000, .controller_data = &tdo24m_mcspi_config, .platform_data = &tdo24m_config, }, }; static struct gpio cm_t35_dss_gpios[] __initdata = { { CM_T35_LCD_EN_GPIO, GPIOF_OUT_INIT_LOW, "lcd enable" }, { CM_T35_LCD_BL_GPIO, GPIOF_OUT_INIT_LOW, "lcd bl enable" }, { CM_T35_DVI_EN_GPIO, GPIOF_OUT_INIT_HIGH, "dvi enable" }, }; static void __init cm_t35_init_display(void) { int err; spi_register_board_info(cm_t35_lcd_spi_board_info, ARRAY_SIZE(cm_t35_lcd_spi_board_info)); err = gpio_request_array(cm_t35_dss_gpios, ARRAY_SIZE(cm_t35_dss_gpios)); if (err) { pr_err("CM-T35: failed to request DSS control GPIOs\n"); return; } gpio_export(CM_T35_LCD_EN_GPIO, 0); gpio_export(CM_T35_LCD_BL_GPIO, 0); gpio_export(CM_T35_DVI_EN_GPIO, 0); msleep(50); gpio_set_value(CM_T35_LCD_EN_GPIO, 1); err = omap_display_init(&cm_t35_dss_data); if (err) { pr_err("CM-T35: failed to register DSS device\n"); gpio_free_array(cm_t35_dss_gpios, ARRAY_SIZE(cm_t35_dss_gpios)); } } static struct 
regulator_consumer_supply cm_t35_vmmc1_supply = { .supply = "vmmc", }; static struct regulator_consumer_supply cm_t35_vsim_supply = { .supply = "vmmc_aux", }; static struct regulator_consumer_supply cm_t35_vdac_supply = REGULATOR_SUPPLY("vdda_dac", "omapdss_venc"); static struct regulator_consumer_supply cm_t35_vdvi_supply = REGULATOR_SUPPLY("vdvi", "omapdss"); /* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */ static struct regulator_init_data cm_t35_vmmc1 = { .constraints = { .min_uV = 1850000, .max_uV = 3150000, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = 1, .consumer_supplies = &cm_t35_vmmc1_supply, }; /* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */ static struct regulator_init_data cm_t35_vsim = { .constraints = { .min_uV = 1800000, .max_uV = 3000000, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = 1, .consumer_supplies = &cm_t35_vsim_supply, }; /* VDAC for DSS driving S-Video (8 mA unloaded, max 65 mA) */ static struct regulator_init_data cm_t35_vdac = { .constraints = { .min_uV = 1800000, .max_uV = 1800000, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = 1, .consumer_supplies = &cm_t35_vdac_supply, }; /* VPLL2 for digital video outputs */ static struct regulator_init_data cm_t35_vpll2 = { .constraints = { .name = "VDVI", .min_uV = 1800000, .max_uV = 1800000, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = 1, .consumer_supplies = &cm_t35_vdvi_supply, }; static struct twl4030_usb_data cm_t35_usb_data = { 
.usb_mode = T2_USB_MODE_ULPI, }; static uint32_t cm_t35_keymap[] = { KEY(0, 0, KEY_A), KEY(0, 1, KEY_B), KEY(0, 2, KEY_LEFT), KEY(1, 0, KEY_UP), KEY(1, 1, KEY_ENTER), KEY(1, 2, KEY_DOWN), KEY(2, 0, KEY_RIGHT), KEY(2, 1, KEY_C), KEY(2, 2, KEY_D), }; static struct matrix_keymap_data cm_t35_keymap_data = { .keymap = cm_t35_keymap, .keymap_size = ARRAY_SIZE(cm_t35_keymap), }; static struct twl4030_keypad_data cm_t35_kp_data = { .keymap_data = &cm_t35_keymap_data, .rows = 3, .cols = 3, .rep = 1, }; static struct omap2_hsmmc_info mmc[] = { { .mmc = 1, .caps = MMC_CAP_4_BIT_DATA, .gpio_cd = -EINVAL, .gpio_wp = -EINVAL, }, { .mmc = 2, .caps = MMC_CAP_4_BIT_DATA, .transceiver = 1, .gpio_cd = -EINVAL, .gpio_wp = -EINVAL, .ocr_mask = 0x00100000, /* 3.3V */ }, {} /* Terminator */ }; static struct usbhs_omap_board_data usbhs_bdata __initdata = { .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = OMAP_MAX_GPIO_LINES + 6, .reset_gpio_port[1] = OMAP_MAX_GPIO_LINES + 7, .reset_gpio_port[2] = -EINVAL }; static int cm_t35_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) { int wlan_rst = gpio + 2; if (gpio_request_one(wlan_rst, GPIOF_OUT_INIT_HIGH, "WLAN RST") == 0) { gpio_export(wlan_rst, 0); udelay(10); gpio_set_value(wlan_rst, 0); udelay(10); gpio_set_value(wlan_rst, 1); } else { pr_err("CM-T35: could not obtain gpio for WiFi reset\n"); } /* gpio + 0 is "mmc0_cd" (input/IRQ) */ mmc[0].gpio_cd = gpio + 0; omap2_hsmmc_init(mmc); /* link regulators to MMC adapters */ cm_t35_vmmc1_supply.dev = mmc[0].dev; cm_t35_vsim_supply.dev = mmc[0].dev; return 0; } static struct twl4030_gpio_platform_data cm_t35_gpio_data = { .gpio_base = OMAP_MAX_GPIO_LINES, .irq_base = TWL4030_GPIO_IRQ_BASE, .irq_end = TWL4030_GPIO_IRQ_END, .setup = cm_t35_twl_gpio_setup, }; static struct twl4030_platform_data cm_t35_twldata = { .irq_base = TWL4030_IRQ_BASE, .irq_end = 
TWL4030_IRQ_END, /* platform_data for children goes here */ .keypad = &cm_t35_kp_data, .usb = &cm_t35_usb_data, .gpio = &cm_t35_gpio_data, .vmmc1 = &cm_t35_vmmc1, .vsim = &cm_t35_vsim, .vdac = &cm_t35_vdac, .vpll2 = &cm_t35_vpll2, }; static void __init cm_t35_init_i2c(void) { omap3_pmic_init("tps65930", &cm_t35_twldata); } static void __init cm_t35_init_early(void) { omap2_init_common_infrastructure(); omap2_init_common_devices(mt46h32m32lf6_sdrc_params, mt46h32m32lf6_sdrc_params); } #ifdef CONFIG_OMAP_MUX static struct omap_board_mux board_mux[] __initdata = { /* nCS and IRQ for CM-T35 ethernet */ OMAP3_MUX(GPMC_NCS5, OMAP_MUX_MODE0), OMAP3_MUX(UART3_CTS_RCTX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP), /* nCS and IRQ for SB-T35 ethernet */ OMAP3_MUX(GPMC_NCS4, OMAP_MUX_MODE0), OMAP3_MUX(GPMC_WAIT3, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP), /* PENDOWN GPIO */ OMAP3_MUX(GPMC_NCS6, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), /* mUSB */ OMAP3_MUX(HSUSB0_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), OMAP3_MUX(HSUSB0_STP, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(HSUSB0_DIR, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), OMAP3_MUX(HSUSB0_NXT, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), OMAP3_MUX(HSUSB0_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), OMAP3_MUX(HSUSB0_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), OMAP3_MUX(HSUSB0_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), OMAP3_MUX(HSUSB0_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), OMAP3_MUX(HSUSB0_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), OMAP3_MUX(HSUSB0_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), OMAP3_MUX(HSUSB0_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), OMAP3_MUX(HSUSB0_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), /* MMC 2 */ OMAP3_MUX(SDMMC2_DAT4, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT), OMAP3_MUX(SDMMC2_DAT5, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT), OMAP3_MUX(SDMMC2_DAT6, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT), OMAP3_MUX(SDMMC2_DAT7, OMAP_MUX_MODE1 | OMAP_PIN_INPUT), /* McSPI 1 */ OMAP3_MUX(MCSPI1_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), OMAP3_MUX(MCSPI1_SIMO, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), 
OMAP3_MUX(MCSPI1_SOMI, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), OMAP3_MUX(MCSPI1_CS0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLDOWN), /* McSPI 4 */ OMAP3_MUX(MCBSP1_CLKR, OMAP_MUX_MODE1 | OMAP_PIN_INPUT), OMAP3_MUX(MCBSP1_DX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT), OMAP3_MUX(MCBSP1_DR, OMAP_MUX_MODE1 | OMAP_PIN_INPUT), OMAP3_MUX(MCBSP1_FSX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT_PULLUP), /* McBSP 2 */ OMAP3_MUX(MCBSP2_FSX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), OMAP3_MUX(MCBSP2_CLKX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), OMAP3_MUX(MCBSP2_DR, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), OMAP3_MUX(MCBSP2_DX, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), /* serial ports */ OMAP3_MUX(MCBSP3_CLKX, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT), OMAP3_MUX(MCBSP3_FSX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT), OMAP3_MUX(UART1_TX, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(UART1_RX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), /* DSS */ OMAP3_MUX(DSS_PCLK, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_HSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_VSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_ACBIAS, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA8, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA9, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA10, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA11, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA12, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA13, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA14, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA15, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), 
OMAP3_MUX(DSS_DATA16, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA17, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA18, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA19, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA20, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA21, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA22, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), OMAP3_MUX(DSS_DATA23, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), /* display controls */ OMAP3_MUX(MCBSP1_FSR, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT), OMAP3_MUX(GPMC_NCS7, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT), OMAP3_MUX(GPMC_NCS3, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT), /* TPS IRQ */ OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_WAKEUP_EN | \ OMAP_PIN_INPUT_PULLUP), { .reg_offset = OMAP_MUX_TERMINATOR }, }; #endif static struct omap_board_config_kernel cm_t35_config[] __initdata = { }; static void __init cm_t35_init(void) { omap_board_config = cm_t35_config; omap_board_config_size = ARRAY_SIZE(cm_t35_config); omap3_mux_init(board_mux, OMAP_PACKAGE_CUS); omap_serial_init(); cm_t35_init_i2c(); cm_t35_init_nand(); omap_ads7846_init(1, CM_T35_GPIO_PENDOWN, 0, NULL); cm_t35_init_ethernet(); cm_t35_init_led(); cm_t35_init_display(); usb_musb_init(NULL); usbhs_init(&usbhs_bdata); } MACHINE_START(CM_T35, "Compulab CM-T35") .boot_params = 0x80000100, .reserve = omap_reserve, .map_io = omap3_map_io, .init_early = cm_t35_init_early, .init_irq = omap_init_irq, .init_machine = cm_t35_init, .timer = &omap_timer, MACHINE_END
gpl-2.0
locnp13/linux
drivers/macintosh/macio_asic.c
1868
20774
/* * Bus & driver management routines for devices within * a MacIO ASIC. Interface to new driver model mostly * stolen from the PCI version. * * Copyright (C) 2005 Ben. Herrenschmidt (benh@kernel.crashing.org) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * TODO: * * - Don't probe below media bay by default, but instead provide * some hooks for media bay to dynamically add/remove it's own * sub-devices. */ #include <linux/string.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <asm/machdep.h> #include <asm/macio.h> #include <asm/pmac_feature.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #undef DEBUG #define MAX_NODE_NAME_SIZE (20 - 12) static struct macio_chip *macio_on_hold; static int macio_bus_match(struct device *dev, struct device_driver *drv) { const struct of_device_id * matches = drv->of_match_table; if (!matches) return 0; return of_match_device(matches, dev) != NULL; } struct macio_dev *macio_dev_get(struct macio_dev *dev) { struct device *tmp; if (!dev) return NULL; tmp = get_device(&dev->ofdev.dev); if (tmp) return to_macio_device(tmp); else return NULL; } void macio_dev_put(struct macio_dev *dev) { if (dev) put_device(&dev->ofdev.dev); } static int macio_device_probe(struct device *dev) { int error = -ENODEV; struct macio_driver *drv; struct macio_dev *macio_dev; const struct of_device_id *match; drv = to_macio_driver(dev->driver); macio_dev = to_macio_device(dev); if (!drv->probe) return error; macio_dev_get(macio_dev); match = of_match_device(drv->driver.of_match_table, dev); if (match) error = drv->probe(macio_dev, match); if (error) macio_dev_put(macio_dev); return error; } 
static int macio_device_remove(struct device *dev) { struct macio_dev * macio_dev = to_macio_device(dev); struct macio_driver * drv = to_macio_driver(dev->driver); if (dev->driver && drv->remove) drv->remove(macio_dev); macio_dev_put(macio_dev); return 0; } static void macio_device_shutdown(struct device *dev) { struct macio_dev * macio_dev = to_macio_device(dev); struct macio_driver * drv = to_macio_driver(dev->driver); if (dev->driver && drv->shutdown) drv->shutdown(macio_dev); } static int macio_device_suspend(struct device *dev, pm_message_t state) { struct macio_dev * macio_dev = to_macio_device(dev); struct macio_driver * drv = to_macio_driver(dev->driver); if (dev->driver && drv->suspend) return drv->suspend(macio_dev, state); return 0; } static int macio_device_resume(struct device * dev) { struct macio_dev * macio_dev = to_macio_device(dev); struct macio_driver * drv = to_macio_driver(dev->driver); if (dev->driver && drv->resume) return drv->resume(macio_dev); return 0; } extern struct device_attribute macio_dev_attrs[]; struct bus_type macio_bus_type = { .name = "macio", .match = macio_bus_match, .uevent = of_device_uevent_modalias, .probe = macio_device_probe, .remove = macio_device_remove, .shutdown = macio_device_shutdown, .suspend = macio_device_suspend, .resume = macio_device_resume, .dev_attrs = macio_dev_attrs, }; static int __init macio_bus_driver_init(void) { return bus_register(&macio_bus_type); } postcore_initcall(macio_bus_driver_init); /** * macio_release_dev - free a macio device structure when all users of it are * finished. * @dev: device that's been disconnected * * Will be called only by the device core when all users of this macio device * are done. This currently means never as we don't hot remove any macio * device yet, though that will happen with mediabay based devices in a later * implementation. 
*/ static void macio_release_dev(struct device *dev) { struct macio_dev *mdev; mdev = to_macio_device(dev); kfree(mdev); } /** * macio_resource_quirks - tweak or skip some resources for a device * @np: pointer to the device node * @res: resulting resource * @index: index of resource in node * * If this routine returns non-null, then the resource is completely * skipped. */ static int macio_resource_quirks(struct device_node *np, struct resource *res, int index) { /* Only quirks for memory resources for now */ if ((res->flags & IORESOURCE_MEM) == 0) return 0; /* Grand Central has too large resource 0 on some machines */ if (index == 0 && !strcmp(np->name, "gc")) res->end = res->start + 0x1ffff; /* Airport has bogus resource 2 */ if (index >= 2 && !strcmp(np->name, "radio")) return 1; #ifndef CONFIG_PPC64 /* DBDMAs may have bogus sizes */ if ((res->start & 0x0001f000) == 0x00008000) res->end = res->start + 0xff; #endif /* CONFIG_PPC64 */ /* ESCC parent eats child resources. We could have added a * level of hierarchy, but I don't really feel the need * for it */ if (!strcmp(np->name, "escc")) return 1; /* ESCC has bogus resources >= 3 */ if (index >= 3 && !(strcmp(np->name, "ch-a") && strcmp(np->name, "ch-b"))) return 1; /* Media bay has too many resources, keep only first one */ if (index > 0 && !strcmp(np->name, "media-bay")) return 1; /* Some older IDE resources have bogus sizes */ if (!(strcmp(np->name, "IDE") && strcmp(np->name, "ATA") && strcmp(np->type, "ide") && strcmp(np->type, "ata"))) { if (index == 0 && (res->end - res->start) > 0xfff) res->end = res->start + 0xfff; if (index == 1 && (res->end - res->start) > 0xff) res->end = res->start + 0xff; } return 0; } static void macio_create_fixup_irq(struct macio_dev *dev, int index, unsigned int line) { unsigned int irq; irq = irq_create_mapping(NULL, line); if (irq != NO_IRQ) { dev->interrupt[index].start = irq; dev->interrupt[index].flags = IORESOURCE_IRQ; dev->interrupt[index].name = dev_name(&dev->ofdev.dev); 
} if (dev->n_interrupts <= index) dev->n_interrupts = index + 1; } static void macio_add_missing_resources(struct macio_dev *dev) { struct device_node *np = dev->ofdev.dev.of_node; unsigned int irq_base; /* Gatwick has some missing interrupts on child nodes */ if (dev->bus->chip->type != macio_gatwick) return; /* irq_base is always 64 on gatwick. I have no cleaner way to get * that value from here at this point */ irq_base = 64; /* Fix SCC */ if (strcmp(np->name, "ch-a") == 0) { macio_create_fixup_irq(dev, 0, 15 + irq_base); macio_create_fixup_irq(dev, 1, 4 + irq_base); macio_create_fixup_irq(dev, 2, 5 + irq_base); printk(KERN_INFO "macio: fixed SCC irqs on gatwick\n"); } /* Fix media-bay */ if (strcmp(np->name, "media-bay") == 0) { macio_create_fixup_irq(dev, 0, 29 + irq_base); printk(KERN_INFO "macio: fixed media-bay irq on gatwick\n"); } /* Fix left media bay childs */ if (dev->media_bay != NULL && strcmp(np->name, "floppy") == 0) { macio_create_fixup_irq(dev, 0, 19 + irq_base); macio_create_fixup_irq(dev, 1, 1 + irq_base); printk(KERN_INFO "macio: fixed left floppy irqs\n"); } if (dev->media_bay != NULL && strcasecmp(np->name, "ata4") == 0) { macio_create_fixup_irq(dev, 0, 14 + irq_base); macio_create_fixup_irq(dev, 0, 3 + irq_base); printk(KERN_INFO "macio: fixed left ide irqs\n"); } } static void macio_setup_interrupts(struct macio_dev *dev) { struct device_node *np = dev->ofdev.dev.of_node; unsigned int irq; int i = 0, j = 0; for (;;) { struct resource *res; if (j >= MACIO_DEV_COUNT_IRQS) break; res = &dev->interrupt[j]; irq = irq_of_parse_and_map(np, i++); if (irq == NO_IRQ) break; res->start = irq; res->flags = IORESOURCE_IRQ; res->name = dev_name(&dev->ofdev.dev); if (macio_resource_quirks(np, res, i - 1)) { memset(res, 0, sizeof(struct resource)); continue; } else j++; } dev->n_interrupts = j; } static void macio_setup_resources(struct macio_dev *dev, struct resource *parent_res) { struct device_node *np = dev->ofdev.dev.of_node; struct resource r; int 
index; for (index = 0; of_address_to_resource(np, index, &r) == 0; index++) { struct resource *res; if (index >= MACIO_DEV_COUNT_RESOURCES) break; res = &dev->resource[index]; *res = r; res->name = dev_name(&dev->ofdev.dev); if (macio_resource_quirks(np, res, index)) { memset(res, 0, sizeof(struct resource)); continue; } /* Currently, we consider failure as harmless, this may * change in the future, once I've found all the device * tree bugs in older machines & worked around them */ if (insert_resource(parent_res, res)) { printk(KERN_WARNING "Can't request resource " "%d for MacIO device %s\n", index, dev_name(&dev->ofdev.dev)); } } dev->n_resources = index; } /** * macio_add_one_device - Add one device from OF node to the device tree * @chip: pointer to the macio_chip holding the device * @np: pointer to the device node in the OF tree * @in_bay: set to 1 if device is part of a media-bay * * When media-bay is changed to hotswap drivers, this function will * be exposed to the bay driver some way... 
*/ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, struct device *parent, struct device_node *np, struct macio_dev *in_bay, struct resource *parent_res) { struct macio_dev *dev; const u32 *reg; if (np == NULL) return NULL; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return NULL; dev->bus = &chip->lbus; dev->media_bay = in_bay; dev->ofdev.dev.of_node = np; dev->ofdev.archdata.dma_mask = 0xffffffffUL; dev->ofdev.dev.dma_mask = &dev->ofdev.archdata.dma_mask; dev->ofdev.dev.parent = parent; dev->ofdev.dev.bus = &macio_bus_type; dev->ofdev.dev.release = macio_release_dev; dev->ofdev.dev.dma_parms = &dev->dma_parms; /* Standard DMA paremeters */ dma_set_max_seg_size(&dev->ofdev.dev, 65536); dma_set_seg_boundary(&dev->ofdev.dev, 0xffffffff); #ifdef CONFIG_PCI /* Set the DMA ops to the ones from the PCI device, this could be * fishy if we didn't know that on PowerMac it's always direct ops * or iommu ops that will work fine * * To get all the fields, copy all archdata */ dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata; #endif /* CONFIG_PCI */ #ifdef DEBUG printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n", dev, &dev->ofdev, &dev->ofdev.dev, &dev->ofdev.dev.kobj); #endif /* MacIO itself has a different reg, we use it's PCI base */ if (np == chip->of_node) { dev_set_name(&dev->ofdev.dev, "%1d.%08x:%.*s", chip->lbus.index, #ifdef CONFIG_PCI (unsigned int)pci_resource_start(chip->lbus.pdev, 0), #else 0, /* NuBus may want to do something better here */ #endif MAX_NODE_NAME_SIZE, np->name); } else { reg = of_get_property(np, "reg", NULL); dev_set_name(&dev->ofdev.dev, "%1d.%08x:%.*s", chip->lbus.index, reg ? 
*reg : 0, MAX_NODE_NAME_SIZE, np->name); } /* Setup interrupts & resources */ macio_setup_interrupts(dev); macio_setup_resources(dev, parent_res); macio_add_missing_resources(dev); /* Register with core */ if (of_device_register(&dev->ofdev) != 0) { printk(KERN_DEBUG"macio: device registration error for %s!\n", dev_name(&dev->ofdev.dev)); kfree(dev); return NULL; } return dev; } static int macio_skip_device(struct device_node *np) { if (strncmp(np->name, "battery", 7) == 0) return 1; if (strncmp(np->name, "escc-legacy", 11) == 0) return 1; return 0; } /** * macio_pci_add_devices - Adds sub-devices of mac-io to the device tree * @chip: pointer to the macio_chip holding the devices * * This function will do the job of extracting devices from the * Open Firmware device tree, build macio_dev structures and add * them to the Linux device tree. * * For now, childs of media-bay are added now as well. This will * change rsn though. */ static void macio_pci_add_devices(struct macio_chip *chip) { struct device_node *np, *pnode; struct macio_dev *rdev, *mdev, *mbdev = NULL, *sdev = NULL; struct device *parent = NULL; struct resource *root_res = &iomem_resource; /* Add a node for the macio bus itself */ #ifdef CONFIG_PCI if (chip->lbus.pdev) { parent = &chip->lbus.pdev->dev; root_res = &chip->lbus.pdev->resource[0]; } #endif pnode = of_node_get(chip->of_node); if (pnode == NULL) return; /* Add macio itself to hierarchy */ rdev = macio_add_one_device(chip, parent, pnode, NULL, root_res); if (rdev == NULL) return; root_res = &rdev->resource[0]; /* First scan 1st level */ for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) { if (macio_skip_device(np)) continue; of_node_get(np); mdev = macio_add_one_device(chip, &rdev->ofdev.dev, np, NULL, root_res); if (mdev == NULL) of_node_put(np); else if (strncmp(np->name, "media-bay", 9) == 0) mbdev = mdev; else if (strncmp(np->name, "escc", 4) == 0) sdev = mdev; } /* Add media bay devices if any */ if (mbdev) { pnode = 
mbdev->ofdev.dev.of_node; for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) { if (macio_skip_device(np)) continue; of_node_get(np); if (macio_add_one_device(chip, &mbdev->ofdev.dev, np, mbdev, root_res) == NULL) of_node_put(np); } } /* Add serial ports if any */ if (sdev) { pnode = sdev->ofdev.dev.of_node; for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) { if (macio_skip_device(np)) continue; of_node_get(np); if (macio_add_one_device(chip, &sdev->ofdev.dev, np, NULL, root_res) == NULL) of_node_put(np); } } } /** * macio_register_driver - Registers a new MacIO device driver * @drv: pointer to the driver definition structure */ int macio_register_driver(struct macio_driver *drv) { /* initialize common driver fields */ drv->driver.bus = &macio_bus_type; /* register with core */ return driver_register(&drv->driver); } /** * macio_unregister_driver - Unregisters a new MacIO device driver * @drv: pointer to the driver definition structure */ void macio_unregister_driver(struct macio_driver *drv) { driver_unregister(&drv->driver); } /* Managed MacIO resources */ struct macio_devres { u32 res_mask; }; static void maciom_release(struct device *gendev, void *res) { struct macio_dev *dev = to_macio_device(gendev); struct macio_devres *dr = res; int i, max; max = min(dev->n_resources, 32); for (i = 0; i < max; i++) { if (dr->res_mask & (1 << i)) macio_release_resource(dev, i); } } int macio_enable_devres(struct macio_dev *dev) { struct macio_devres *dr; dr = devres_find(&dev->ofdev.dev, maciom_release, NULL, NULL); if (!dr) { dr = devres_alloc(maciom_release, sizeof(*dr), GFP_KERNEL); if (!dr) return -ENOMEM; } return devres_get(&dev->ofdev.dev, dr, NULL, NULL) != NULL; } static struct macio_devres * find_macio_dr(struct macio_dev *dev) { return devres_find(&dev->ofdev.dev, maciom_release, NULL, NULL); } /** * macio_request_resource - Request an MMIO resource * @dev: pointer to the device holding the resource * @resource_no: resource number to request 
* @name: resource name * * Mark memory region number @resource_no associated with MacIO * device @dev as being reserved by owner @name. Do not access * any address inside the memory regions unless this call returns * successfully. * * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure. */ int macio_request_resource(struct macio_dev *dev, int resource_no, const char *name) { struct macio_devres *dr = find_macio_dr(dev); if (macio_resource_len(dev, resource_no) == 0) return 0; if (!request_mem_region(macio_resource_start(dev, resource_no), macio_resource_len(dev, resource_no), name)) goto err_out; if (dr && resource_no < 32) dr->res_mask |= 1 << resource_no; return 0; err_out: printk (KERN_WARNING "MacIO: Unable to reserve resource #%d:%lx@%lx" " for device %s\n", resource_no, macio_resource_len(dev, resource_no), macio_resource_start(dev, resource_no), dev_name(&dev->ofdev.dev)); return -EBUSY; } /** * macio_release_resource - Release an MMIO resource * @dev: pointer to the device holding the resource * @resource_no: resource number to release */ void macio_release_resource(struct macio_dev *dev, int resource_no) { struct macio_devres *dr = find_macio_dr(dev); if (macio_resource_len(dev, resource_no) == 0) return; release_mem_region(macio_resource_start(dev, resource_no), macio_resource_len(dev, resource_no)); if (dr && resource_no < 32) dr->res_mask &= ~(1 << resource_no); } /** * macio_request_resources - Reserve all memory resources * @dev: MacIO device whose resources are to be reserved * @name: Name to be associated with resource. * * Mark all memory regions associated with MacIO device @dev as * being reserved by owner @name. Do not access any address inside * the memory regions unless this call returns successfully. * * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure. 
*/ int macio_request_resources(struct macio_dev *dev, const char *name) { int i; for (i = 0; i < dev->n_resources; i++) if (macio_request_resource(dev, i, name)) goto err_out; return 0; err_out: while(--i >= 0) macio_release_resource(dev, i); return -EBUSY; } /** * macio_release_resources - Release reserved memory resources * @dev: MacIO device whose resources were previously reserved */ void macio_release_resources(struct macio_dev *dev) { int i; for (i = 0; i < dev->n_resources; i++) macio_release_resource(dev, i); } #ifdef CONFIG_PCI static int macio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device_node* np; struct macio_chip* chip; if (ent->vendor != PCI_VENDOR_ID_APPLE) return -ENODEV; /* Note regarding refcounting: We assume pci_device_to_OF_node() is * ported to new OF APIs and returns a node with refcount incremented. */ np = pci_device_to_OF_node(pdev); if (np == NULL) return -ENODEV; /* The above assumption is wrong !!! * fix that here for now until I fix the arch code */ of_node_get(np); /* We also assume that pmac_feature will have done a get() on nodes * stored in the macio chips array */ chip = macio_find(np, macio_unknown); of_node_put(np); if (chip == NULL) return -ENODEV; /* XXX Need locking ??? */ if (chip->lbus.pdev == NULL) { chip->lbus.pdev = pdev; chip->lbus.chip = chip; pci_set_drvdata(pdev, &chip->lbus); pci_set_master(pdev); } printk(KERN_INFO "MacIO PCI driver attached to %s chipset\n", chip->name); /* * HACK ALERT: The WallStreet PowerBook and some OHare based machines * have 2 macio ASICs. I must probe the "main" one first or IDE * ordering will be incorrect. 
So I put on "hold" the second one since * it seem to appear first on PCI */ if (chip->type == macio_gatwick || chip->type == macio_ohareII) if (macio_chips[0].lbus.pdev == NULL) { macio_on_hold = chip; return 0; } macio_pci_add_devices(chip); if (macio_on_hold && macio_chips[0].lbus.pdev != NULL) { macio_pci_add_devices(macio_on_hold); macio_on_hold = NULL; } return 0; } static void macio_pci_remove(struct pci_dev* pdev) { panic("removing of macio-asic not supported !\n"); } /* * MacIO is matched against any Apple ID, it's probe() function * will then decide wether it applies or not */ static const struct pci_device_id pci_ids[] = { { .vendor = PCI_VENDOR_ID_APPLE, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { /* end: all zeroes */ } }; MODULE_DEVICE_TABLE (pci, pci_ids); /* pci driver glue; this is a "new style" PCI driver module */ static struct pci_driver macio_pci_driver = { .name = (char *) "macio", .id_table = pci_ids, .probe = macio_pci_probe, .remove = macio_pci_remove, }; #endif /* CONFIG_PCI */ static int __init macio_module_init (void) { #ifdef CONFIG_PCI int rc; rc = pci_register_driver(&macio_pci_driver); if (rc) return rc; #endif /* CONFIG_PCI */ return 0; } module_init(macio_module_init); EXPORT_SYMBOL(macio_register_driver); EXPORT_SYMBOL(macio_unregister_driver); EXPORT_SYMBOL(macio_dev_get); EXPORT_SYMBOL(macio_dev_put); EXPORT_SYMBOL(macio_request_resource); EXPORT_SYMBOL(macio_release_resource); EXPORT_SYMBOL(macio_request_resources); EXPORT_SYMBOL(macio_release_resources); EXPORT_SYMBOL(macio_enable_devres);
gpl-2.0
Insswer/kernel_imx
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2124
98487
/* * omap_hwmod_3xxx_data.c - hardware modules present on the OMAP3xxx chips * * Copyright (C) 2009-2010 Nokia Corporation * Paul Walmsley * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * The data in this file should be completely autogeneratable from * the TI hardware database or other technical documentation. * * XXX these should be marked initdata for multi-OMAP kernels */ #include <plat/omap_hwmod.h> #include <mach/irqs.h> #include <plat/cpu.h> #include <plat/dma.h> #include <plat/serial.h> #include <plat/l3_3xxx.h> #include <plat/l4_3xxx.h> #include <plat/i2c.h> #include <plat/gpio.h> #include <plat/mmc.h> #include <plat/mcbsp.h> #include <plat/mcspi.h> #include <plat/dmtimer.h> #include "omap_hwmod_common_data.h" #include "prm-regbits-34xx.h" #include "cm-regbits-34xx.h" #include "wd_timer.h" #include <mach/am35xx.h> /* * OMAP3xxx hardware module integration data * * ALl of the data in this section should be autogeneratable from the * TI hardware database or other technical documentation. Data that * is driver-specific or driver-kernel integration-specific belongs * elsewhere. 
*/ static struct omap_hwmod omap3xxx_mpu_hwmod; static struct omap_hwmod omap3xxx_iva_hwmod; static struct omap_hwmod omap3xxx_l3_main_hwmod; static struct omap_hwmod omap3xxx_l4_core_hwmod; static struct omap_hwmod omap3xxx_l4_per_hwmod; static struct omap_hwmod omap3xxx_wd_timer2_hwmod; static struct omap_hwmod omap3430es1_dss_core_hwmod; static struct omap_hwmod omap3xxx_dss_core_hwmod; static struct omap_hwmod omap3xxx_dss_dispc_hwmod; static struct omap_hwmod omap3xxx_dss_dsi1_hwmod; static struct omap_hwmod omap3xxx_dss_rfbi_hwmod; static struct omap_hwmod omap3xxx_dss_venc_hwmod; static struct omap_hwmod omap3xxx_i2c1_hwmod; static struct omap_hwmod omap3xxx_i2c2_hwmod; static struct omap_hwmod omap3xxx_i2c3_hwmod; static struct omap_hwmod omap3xxx_gpio1_hwmod; static struct omap_hwmod omap3xxx_gpio2_hwmod; static struct omap_hwmod omap3xxx_gpio3_hwmod; static struct omap_hwmod omap3xxx_gpio4_hwmod; static struct omap_hwmod omap3xxx_gpio5_hwmod; static struct omap_hwmod omap3xxx_gpio6_hwmod; static struct omap_hwmod omap34xx_sr1_hwmod; static struct omap_hwmod omap34xx_sr2_hwmod; static struct omap_hwmod omap34xx_mcspi1; static struct omap_hwmod omap34xx_mcspi2; static struct omap_hwmod omap34xx_mcspi3; static struct omap_hwmod omap34xx_mcspi4; static struct omap_hwmod omap3xxx_mmc1_hwmod; static struct omap_hwmod omap3xxx_mmc2_hwmod; static struct omap_hwmod omap3xxx_mmc3_hwmod; static struct omap_hwmod am35xx_usbhsotg_hwmod; static struct omap_hwmod omap3xxx_dma_system_hwmod; static struct omap_hwmod omap3xxx_mcbsp1_hwmod; static struct omap_hwmod omap3xxx_mcbsp2_hwmod; static struct omap_hwmod omap3xxx_mcbsp3_hwmod; static struct omap_hwmod omap3xxx_mcbsp4_hwmod; static struct omap_hwmod omap3xxx_mcbsp5_hwmod; static struct omap_hwmod omap3xxx_mcbsp2_sidetone_hwmod; static struct omap_hwmod omap3xxx_mcbsp3_sidetone_hwmod; /* L3 -> L4_CORE interface */ static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_core = { .master = &omap3xxx_l3_main_hwmod, .slave = 
&omap3xxx_l4_core_hwmod, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* L3 -> L4_PER interface */ static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_per = { .master = &omap3xxx_l3_main_hwmod, .slave = &omap3xxx_l4_per_hwmod, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* L3 taret configuration and error log registers */ static struct omap_hwmod_irq_info omap3xxx_l3_main_irqs[] = { { .irq = INT_34XX_L3_DBG_IRQ }, { .irq = INT_34XX_L3_APP_IRQ }, }; static struct omap_hwmod_addr_space omap3xxx_l3_main_addrs[] = { { .pa_start = 0x68000000, .pa_end = 0x6800ffff, .flags = ADDR_TYPE_RT, }, }; /* MPU -> L3 interface */ static struct omap_hwmod_ocp_if omap3xxx_mpu__l3_main = { .master = &omap3xxx_mpu_hwmod, .slave = &omap3xxx_l3_main_hwmod, .addr = omap3xxx_l3_main_addrs, .addr_cnt = ARRAY_SIZE(omap3xxx_l3_main_addrs), .user = OCP_USER_MPU, }; /* Slave interfaces on the L3 interconnect */ static struct omap_hwmod_ocp_if *omap3xxx_l3_main_slaves[] = { &omap3xxx_mpu__l3_main, }; /* DSS -> l3 */ static struct omap_hwmod_ocp_if omap3xxx_dss__l3 = { .master = &omap3xxx_dss_core_hwmod, .slave = &omap3xxx_l3_main_hwmod, .fw = { .omap2 = { .l3_perm_bit = OMAP3_L3_CORE_FW_INIT_ID_DSS, .flags = OMAP_FIREWALL_L3, } }, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* Master interfaces on the L3 interconnect */ static struct omap_hwmod_ocp_if *omap3xxx_l3_main_masters[] = { &omap3xxx_l3_main__l4_core, &omap3xxx_l3_main__l4_per, }; /* L3 */ static struct omap_hwmod omap3xxx_l3_main_hwmod = { .name = "l3_main", .class = &l3_hwmod_class, .mpu_irqs = omap3xxx_l3_main_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_l3_main_irqs), .masters = omap3xxx_l3_main_masters, .masters_cnt = ARRAY_SIZE(omap3xxx_l3_main_masters), .slaves = omap3xxx_l3_main_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_l3_main_slaves), .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), .flags = HWMOD_NO_IDLEST, }; static struct omap_hwmod omap3xxx_l4_wkup_hwmod; static struct omap_hwmod omap3xxx_uart1_hwmod; static struct omap_hwmod 
omap3xxx_uart2_hwmod; static struct omap_hwmod omap3xxx_uart3_hwmod; static struct omap_hwmod omap3xxx_uart4_hwmod; static struct omap_hwmod omap3xxx_usbhsotg_hwmod; /* l3_core -> usbhsotg interface */ static struct omap_hwmod_ocp_if omap3xxx_usbhsotg__l3 = { .master = &omap3xxx_usbhsotg_hwmod, .slave = &omap3xxx_l3_main_hwmod, .clk = "core_l3_ick", .user = OCP_USER_MPU, }; /* l3_core -> am35xx_usbhsotg interface */ static struct omap_hwmod_ocp_if am35xx_usbhsotg__l3 = { .master = &am35xx_usbhsotg_hwmod, .slave = &omap3xxx_l3_main_hwmod, .clk = "core_l3_ick", .user = OCP_USER_MPU, }; /* L4_CORE -> L4_WKUP interface */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__l4_wkup = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_l4_wkup_hwmod, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* L4 CORE -> MMC1 interface */ static struct omap_hwmod_addr_space omap3xxx_mmc1_addr_space[] = { { .pa_start = 0x4809c000, .pa_end = 0x4809c1ff, .flags = ADDR_TYPE_RT, }, }; static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc1 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_mmc1_hwmod, .clk = "mmchs1_ick", .addr = omap3xxx_mmc1_addr_space, .addr_cnt = ARRAY_SIZE(omap3xxx_mmc1_addr_space), .user = OCP_USER_MPU | OCP_USER_SDMA, .flags = OMAP_FIREWALL_L4 }; /* L4 CORE -> MMC2 interface */ static struct omap_hwmod_addr_space omap3xxx_mmc2_addr_space[] = { { .pa_start = 0x480b4000, .pa_end = 0x480b41ff, .flags = ADDR_TYPE_RT, }, }; static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc2 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_mmc2_hwmod, .clk = "mmchs2_ick", .addr = omap3xxx_mmc2_addr_space, .addr_cnt = ARRAY_SIZE(omap3xxx_mmc2_addr_space), .user = OCP_USER_MPU | OCP_USER_SDMA, .flags = OMAP_FIREWALL_L4 }; /* L4 CORE -> MMC3 interface */ static struct omap_hwmod_addr_space omap3xxx_mmc3_addr_space[] = { { .pa_start = 0x480ad000, .pa_end = 0x480ad1ff, .flags = ADDR_TYPE_RT, }, }; static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc3 = { .master = 
&omap3xxx_l4_core_hwmod, .slave = &omap3xxx_mmc3_hwmod, .clk = "mmchs3_ick", .addr = omap3xxx_mmc3_addr_space, .addr_cnt = ARRAY_SIZE(omap3xxx_mmc3_addr_space), .user = OCP_USER_MPU | OCP_USER_SDMA, .flags = OMAP_FIREWALL_L4 }; /* L4 CORE -> UART1 interface */ static struct omap_hwmod_addr_space omap3xxx_uart1_addr_space[] = { { .pa_start = OMAP3_UART1_BASE, .pa_end = OMAP3_UART1_BASE + SZ_8K - 1, .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT, }, }; static struct omap_hwmod_ocp_if omap3_l4_core__uart1 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_uart1_hwmod, .clk = "uart1_ick", .addr = omap3xxx_uart1_addr_space, .addr_cnt = ARRAY_SIZE(omap3xxx_uart1_addr_space), .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* L4 CORE -> UART2 interface */ static struct omap_hwmod_addr_space omap3xxx_uart2_addr_space[] = { { .pa_start = OMAP3_UART2_BASE, .pa_end = OMAP3_UART2_BASE + SZ_1K - 1, .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT, }, }; static struct omap_hwmod_ocp_if omap3_l4_core__uart2 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_uart2_hwmod, .clk = "uart2_ick", .addr = omap3xxx_uart2_addr_space, .addr_cnt = ARRAY_SIZE(omap3xxx_uart2_addr_space), .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* L4 PER -> UART3 interface */ static struct omap_hwmod_addr_space omap3xxx_uart3_addr_space[] = { { .pa_start = OMAP3_UART3_BASE, .pa_end = OMAP3_UART3_BASE + SZ_1K - 1, .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT, }, }; static struct omap_hwmod_ocp_if omap3_l4_per__uart3 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_uart3_hwmod, .clk = "uart3_ick", .addr = omap3xxx_uart3_addr_space, .addr_cnt = ARRAY_SIZE(omap3xxx_uart3_addr_space), .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* L4 PER -> UART4 interface */ static struct omap_hwmod_addr_space omap3xxx_uart4_addr_space[] = { { .pa_start = OMAP3_UART4_BASE, .pa_end = OMAP3_UART4_BASE + SZ_1K - 1, .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT, }, }; static struct omap_hwmod_ocp_if omap3_l4_per__uart4 = { .master = 
&omap3xxx_l4_per_hwmod, .slave = &omap3xxx_uart4_hwmod, .clk = "uart4_ick", .addr = omap3xxx_uart4_addr_space, .addr_cnt = ARRAY_SIZE(omap3xxx_uart4_addr_space), .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* I2C IP block address space length (in bytes) */ #define OMAP2_I2C_AS_LEN 128 /* L4 CORE -> I2C1 interface */ static struct omap_hwmod_addr_space omap3xxx_i2c1_addr_space[] = { { .pa_start = 0x48070000, .pa_end = 0x48070000 + OMAP2_I2C_AS_LEN - 1, .flags = ADDR_TYPE_RT, }, }; static struct omap_hwmod_ocp_if omap3_l4_core__i2c1 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_i2c1_hwmod, .clk = "i2c1_ick", .addr = omap3xxx_i2c1_addr_space, .addr_cnt = ARRAY_SIZE(omap3xxx_i2c1_addr_space), .fw = { .omap2 = { .l4_fw_region = OMAP3_L4_CORE_FW_I2C1_REGION, .l4_prot_group = 7, .flags = OMAP_FIREWALL_L4, } }, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* L4 CORE -> I2C2 interface */ static struct omap_hwmod_addr_space omap3xxx_i2c2_addr_space[] = { { .pa_start = 0x48072000, .pa_end = 0x48072000 + OMAP2_I2C_AS_LEN - 1, .flags = ADDR_TYPE_RT, }, }; static struct omap_hwmod_ocp_if omap3_l4_core__i2c2 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_i2c2_hwmod, .clk = "i2c2_ick", .addr = omap3xxx_i2c2_addr_space, .addr_cnt = ARRAY_SIZE(omap3xxx_i2c2_addr_space), .fw = { .omap2 = { .l4_fw_region = OMAP3_L4_CORE_FW_I2C2_REGION, .l4_prot_group = 7, .flags = OMAP_FIREWALL_L4, } }, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* L4 CORE -> I2C3 interface */ static struct omap_hwmod_addr_space omap3xxx_i2c3_addr_space[] = { { .pa_start = 0x48060000, .pa_end = 0x48060000 + OMAP2_I2C_AS_LEN - 1, .flags = ADDR_TYPE_RT, }, }; static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_i2c3_hwmod, .clk = "i2c3_ick", .addr = omap3xxx_i2c3_addr_space, .addr_cnt = ARRAY_SIZE(omap3xxx_i2c3_addr_space), .fw = { .omap2 = { .l4_fw_region = OMAP3_L4_CORE_FW_I2C3_REGION, .l4_prot_group = 7, .flags = OMAP_FIREWALL_L4, } }, .user = 
OCP_USER_MPU | OCP_USER_SDMA, }; /* L4 CORE -> SR1 interface */ static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = { { .pa_start = OMAP34XX_SR1_BASE, .pa_end = OMAP34XX_SR1_BASE + SZ_1K - 1, .flags = ADDR_TYPE_RT, }, }; static struct omap_hwmod_ocp_if omap3_l4_core__sr1 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap34xx_sr1_hwmod, .clk = "sr_l4_ick", .addr = omap3_sr1_addr_space, .addr_cnt = ARRAY_SIZE(omap3_sr1_addr_space), .user = OCP_USER_MPU, }; /* L4 CORE -> SR1 interface */ static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = { { .pa_start = OMAP34XX_SR2_BASE, .pa_end = OMAP34XX_SR2_BASE + SZ_1K - 1, .flags = ADDR_TYPE_RT, }, }; static struct omap_hwmod_ocp_if omap3_l4_core__sr2 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap34xx_sr2_hwmod, .clk = "sr_l4_ick", .addr = omap3_sr2_addr_space, .addr_cnt = ARRAY_SIZE(omap3_sr2_addr_space), .user = OCP_USER_MPU, }; /* * usbhsotg interface data */ static struct omap_hwmod_addr_space omap3xxx_usbhsotg_addrs[] = { { .pa_start = OMAP34XX_HSUSB_OTG_BASE, .pa_end = OMAP34XX_HSUSB_OTG_BASE + SZ_4K - 1, .flags = ADDR_TYPE_RT }, }; /* l4_core -> usbhsotg */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__usbhsotg = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_usbhsotg_hwmod, .clk = "l4_ick", .addr = omap3xxx_usbhsotg_addrs, .addr_cnt = ARRAY_SIZE(omap3xxx_usbhsotg_addrs), .user = OCP_USER_MPU, }; static struct omap_hwmod_ocp_if *omap3xxx_usbhsotg_masters[] = { &omap3xxx_usbhsotg__l3, }; static struct omap_hwmod_ocp_if *omap3xxx_usbhsotg_slaves[] = { &omap3xxx_l4_core__usbhsotg, }; static struct omap_hwmod_addr_space am35xx_usbhsotg_addrs[] = { { .pa_start = AM35XX_IPSS_USBOTGSS_BASE, .pa_end = AM35XX_IPSS_USBOTGSS_BASE + SZ_4K - 1, .flags = ADDR_TYPE_RT }, }; /* l4_core -> usbhsotg */ static struct omap_hwmod_ocp_if am35xx_l4_core__usbhsotg = { .master = &omap3xxx_l4_core_hwmod, .slave = &am35xx_usbhsotg_hwmod, .clk = "l4_ick", .addr = am35xx_usbhsotg_addrs, .addr_cnt = 
ARRAY_SIZE(am35xx_usbhsotg_addrs), .user = OCP_USER_MPU, }; static struct omap_hwmod_ocp_if *am35xx_usbhsotg_masters[] = { &am35xx_usbhsotg__l3, }; static struct omap_hwmod_ocp_if *am35xx_usbhsotg_slaves[] = { &am35xx_l4_core__usbhsotg, }; /* Slave interfaces on the L4_CORE interconnect */ static struct omap_hwmod_ocp_if *omap3xxx_l4_core_slaves[] = { &omap3xxx_l3_main__l4_core, }; /* L4 CORE */ static struct omap_hwmod omap3xxx_l4_core_hwmod = { .name = "l4_core", .class = &l4_hwmod_class, .slaves = omap3xxx_l4_core_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_l4_core_slaves), .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), .flags = HWMOD_NO_IDLEST, }; /* Slave interfaces on the L4_PER interconnect */ static struct omap_hwmod_ocp_if *omap3xxx_l4_per_slaves[] = { &omap3xxx_l3_main__l4_per, }; /* L4 PER */ static struct omap_hwmod omap3xxx_l4_per_hwmod = { .name = "l4_per", .class = &l4_hwmod_class, .slaves = omap3xxx_l4_per_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_l4_per_slaves), .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), .flags = HWMOD_NO_IDLEST, }; /* Slave interfaces on the L4_WKUP interconnect */ static struct omap_hwmod_ocp_if *omap3xxx_l4_wkup_slaves[] = { &omap3xxx_l4_core__l4_wkup, }; /* L4 WKUP */ static struct omap_hwmod omap3xxx_l4_wkup_hwmod = { .name = "l4_wkup", .class = &l4_hwmod_class, .slaves = omap3xxx_l4_wkup_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_l4_wkup_slaves), .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), .flags = HWMOD_NO_IDLEST, }; /* Master interfaces on the MPU device */ static struct omap_hwmod_ocp_if *omap3xxx_mpu_masters[] = { &omap3xxx_mpu__l3_main, }; /* MPU */ static struct omap_hwmod omap3xxx_mpu_hwmod = { .name = "mpu", .class = &mpu_hwmod_class, .main_clk = "arm_fck", .masters = omap3xxx_mpu_masters, .masters_cnt = ARRAY_SIZE(omap3xxx_mpu_masters), .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), }; /* * IVA2_2 interface data */ /* IVA2 <- L3 interface */ static struct omap_hwmod_ocp_if omap3xxx_l3__iva = { .master = 
&omap3xxx_l3_main_hwmod, .slave = &omap3xxx_iva_hwmod, .clk = "iva2_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; static struct omap_hwmod_ocp_if *omap3xxx_iva_masters[] = { &omap3xxx_l3__iva, }; /* * IVA2 (IVA2) */ static struct omap_hwmod omap3xxx_iva_hwmod = { .name = "iva", .class = &iva_hwmod_class, .masters = omap3xxx_iva_masters, .masters_cnt = ARRAY_SIZE(omap3xxx_iva_masters), .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430) }; /* timer class */ static struct omap_hwmod_class_sysconfig omap3xxx_timer_1ms_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_EMUFREE | SYSC_HAS_AUTOIDLE), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_timer_1ms_hwmod_class = { .name = "timer", .sysc = &omap3xxx_timer_1ms_sysc, .rev = OMAP_TIMER_IP_VERSION_1, }; static struct omap_hwmod_class_sysconfig omap3xxx_timer_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_timer_hwmod_class = { .name = "timer", .sysc = &omap3xxx_timer_sysc, .rev = OMAP_TIMER_IP_VERSION_1, }; /* timer1 */ static struct omap_hwmod omap3xxx_timer1_hwmod; static struct omap_hwmod_irq_info omap3xxx_timer1_mpu_irqs[] = { { .irq = 37, }, }; static struct omap_hwmod_addr_space omap3xxx_timer1_addrs[] = { { .pa_start = 0x48318000, .pa_end = 0x48318000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, }; /* l4_wkup -> timer1 */ static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__timer1 = { .master = &omap3xxx_l4_wkup_hwmod, .slave = &omap3xxx_timer1_hwmod, .clk = "gpt1_ick", .addr = omap3xxx_timer1_addrs, .addr_cnt = ARRAY_SIZE(omap3xxx_timer1_addrs), .user = 
OCP_USER_MPU | OCP_USER_SDMA, }; /* timer1 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer1_slaves[] = { &omap3xxx_l4_wkup__timer1, }; /* timer1 hwmod */ static struct omap_hwmod omap3xxx_timer1_hwmod = { .name = "timer1", .mpu_irqs = omap3xxx_timer1_mpu_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer1_mpu_irqs), .main_clk = "gpt1_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT1_SHIFT, .module_offs = WKUP_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT1_SHIFT, }, }, .slaves = omap3xxx_timer1_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer1_slaves), .class = &omap3xxx_timer_1ms_hwmod_class, .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430) }; /* timer2 */ static struct omap_hwmod omap3xxx_timer2_hwmod; static struct omap_hwmod_irq_info omap3xxx_timer2_mpu_irqs[] = { { .irq = 38, }, }; static struct omap_hwmod_addr_space omap3xxx_timer2_addrs[] = { { .pa_start = 0x49032000, .pa_end = 0x49032000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, }; /* l4_per -> timer2 */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer2 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_timer2_hwmod, .clk = "gpt2_ick", .addr = omap3xxx_timer2_addrs, .addr_cnt = ARRAY_SIZE(omap3xxx_timer2_addrs), .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer2 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer2_slaves[] = { &omap3xxx_l4_per__timer2, }; /* timer2 hwmod */ static struct omap_hwmod omap3xxx_timer2_hwmod = { .name = "timer2", .mpu_irqs = omap3xxx_timer2_mpu_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer2_mpu_irqs), .main_clk = "gpt2_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT2_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT2_SHIFT, }, }, .slaves = omap3xxx_timer2_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer2_slaves), .class = &omap3xxx_timer_1ms_hwmod_class, .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430) }; /* timer3 */ static struct omap_hwmod 
omap3xxx_timer3_hwmod; static struct omap_hwmod_irq_info omap3xxx_timer3_mpu_irqs[] = { { .irq = 39, }, }; static struct omap_hwmod_addr_space omap3xxx_timer3_addrs[] = { { .pa_start = 0x49034000, .pa_end = 0x49034000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, }; /* l4_per -> timer3 */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer3 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_timer3_hwmod, .clk = "gpt3_ick", .addr = omap3xxx_timer3_addrs, .addr_cnt = ARRAY_SIZE(omap3xxx_timer3_addrs), .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer3 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer3_slaves[] = { &omap3xxx_l4_per__timer3, }; /* timer3 hwmod */ static struct omap_hwmod omap3xxx_timer3_hwmod = { .name = "timer3", .mpu_irqs = omap3xxx_timer3_mpu_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer3_mpu_irqs), .main_clk = "gpt3_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT3_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT3_SHIFT, }, }, .slaves = omap3xxx_timer3_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer3_slaves), .class = &omap3xxx_timer_hwmod_class, .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430) }; /* timer4 */ static struct omap_hwmod omap3xxx_timer4_hwmod; static struct omap_hwmod_irq_info omap3xxx_timer4_mpu_irqs[] = { { .irq = 40, }, }; static struct omap_hwmod_addr_space omap3xxx_timer4_addrs[] = { { .pa_start = 0x49036000, .pa_end = 0x49036000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, }; /* l4_per -> timer4 */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer4 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_timer4_hwmod, .clk = "gpt4_ick", .addr = omap3xxx_timer4_addrs, .addr_cnt = ARRAY_SIZE(omap3xxx_timer4_addrs), .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer4 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer4_slaves[] = { &omap3xxx_l4_per__timer4, }; /* timer4 hwmod */ static struct omap_hwmod omap3xxx_timer4_hwmod = { .name = 
"timer4", .mpu_irqs = omap3xxx_timer4_mpu_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer4_mpu_irqs), .main_clk = "gpt4_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT4_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT4_SHIFT, }, }, .slaves = omap3xxx_timer4_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer4_slaves), .class = &omap3xxx_timer_hwmod_class, .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430) }; /* timer5 */ static struct omap_hwmod omap3xxx_timer5_hwmod; static struct omap_hwmod_irq_info omap3xxx_timer5_mpu_irqs[] = { { .irq = 41, }, }; static struct omap_hwmod_addr_space omap3xxx_timer5_addrs[] = { { .pa_start = 0x49038000, .pa_end = 0x49038000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, }; /* l4_per -> timer5 */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer5 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_timer5_hwmod, .clk = "gpt5_ick", .addr = omap3xxx_timer5_addrs, .addr_cnt = ARRAY_SIZE(omap3xxx_timer5_addrs), .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer5 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer5_slaves[] = { &omap3xxx_l4_per__timer5, }; /* timer5 hwmod */ static struct omap_hwmod omap3xxx_timer5_hwmod = { .name = "timer5", .mpu_irqs = omap3xxx_timer5_mpu_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer5_mpu_irqs), .main_clk = "gpt5_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT5_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT5_SHIFT, }, }, .slaves = omap3xxx_timer5_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer5_slaves), .class = &omap3xxx_timer_hwmod_class, .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430) }; /* timer6 */ static struct omap_hwmod omap3xxx_timer6_hwmod; static struct omap_hwmod_irq_info omap3xxx_timer6_mpu_irqs[] = { { .irq = 42, }, }; static struct omap_hwmod_addr_space omap3xxx_timer6_addrs[] = { { .pa_start = 0x4903A000, .pa_end = 0x4903A000 + SZ_1K - 1, 
.flags = ADDR_TYPE_RT }, }; /* l4_per -> timer6 */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer6 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_timer6_hwmod, .clk = "gpt6_ick", .addr = omap3xxx_timer6_addrs, .addr_cnt = ARRAY_SIZE(omap3xxx_timer6_addrs), .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer6 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer6_slaves[] = { &omap3xxx_l4_per__timer6, }; /* timer6 hwmod */ static struct omap_hwmod omap3xxx_timer6_hwmod = { .name = "timer6", .mpu_irqs = omap3xxx_timer6_mpu_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer6_mpu_irqs), .main_clk = "gpt6_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT6_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT6_SHIFT, }, }, .slaves = omap3xxx_timer6_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer6_slaves), .class = &omap3xxx_timer_hwmod_class, .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430) }; /* timer7 */ static struct omap_hwmod omap3xxx_timer7_hwmod; static struct omap_hwmod_irq_info omap3xxx_timer7_mpu_irqs[] = { { .irq = 43, }, }; static struct omap_hwmod_addr_space omap3xxx_timer7_addrs[] = { { .pa_start = 0x4903C000, .pa_end = 0x4903C000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, }; /* l4_per -> timer7 */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer7 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_timer7_hwmod, .clk = "gpt7_ick", .addr = omap3xxx_timer7_addrs, .addr_cnt = ARRAY_SIZE(omap3xxx_timer7_addrs), .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer7 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer7_slaves[] = { &omap3xxx_l4_per__timer7, }; /* timer7 hwmod */ static struct omap_hwmod omap3xxx_timer7_hwmod = { .name = "timer7", .mpu_irqs = omap3xxx_timer7_mpu_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer7_mpu_irqs), .main_clk = "gpt7_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT7_SHIFT, .module_offs = OMAP3430_PER_MOD, 
.idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT7_SHIFT, }, }, .slaves = omap3xxx_timer7_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer7_slaves), .class = &omap3xxx_timer_hwmod_class, .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430) }; /* timer8 */ static struct omap_hwmod omap3xxx_timer8_hwmod; static struct omap_hwmod_irq_info omap3xxx_timer8_mpu_irqs[] = { { .irq = 44, }, }; static struct omap_hwmod_addr_space omap3xxx_timer8_addrs[] = { { .pa_start = 0x4903E000, .pa_end = 0x4903E000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, }; /* l4_per -> timer8 */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer8 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_timer8_hwmod, .clk = "gpt8_ick", .addr = omap3xxx_timer8_addrs, .addr_cnt = ARRAY_SIZE(omap3xxx_timer8_addrs), .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer8 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer8_slaves[] = { &omap3xxx_l4_per__timer8, }; /* timer8 hwmod */ static struct omap_hwmod omap3xxx_timer8_hwmod = { .name = "timer8", .mpu_irqs = omap3xxx_timer8_mpu_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer8_mpu_irqs), .main_clk = "gpt8_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT8_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT8_SHIFT, }, }, .slaves = omap3xxx_timer8_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer8_slaves), .class = &omap3xxx_timer_hwmod_class, .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430) }; /* timer9 */ static struct omap_hwmod omap3xxx_timer9_hwmod; static struct omap_hwmod_irq_info omap3xxx_timer9_mpu_irqs[] = { { .irq = 45, }, }; static struct omap_hwmod_addr_space omap3xxx_timer9_addrs[] = { { .pa_start = 0x49040000, .pa_end = 0x49040000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, }; /* l4_per -> timer9 */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer9 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_timer9_hwmod, .clk = "gpt9_ick", .addr = omap3xxx_timer9_addrs, 
.addr_cnt = ARRAY_SIZE(omap3xxx_timer9_addrs), .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer9 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer9_slaves[] = { &omap3xxx_l4_per__timer9, }; /* timer9 hwmod */ static struct omap_hwmod omap3xxx_timer9_hwmod = { .name = "timer9", .mpu_irqs = omap3xxx_timer9_mpu_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer9_mpu_irqs), .main_clk = "gpt9_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT9_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT9_SHIFT, }, }, .slaves = omap3xxx_timer9_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer9_slaves), .class = &omap3xxx_timer_hwmod_class, .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430) }; /* timer10 */ static struct omap_hwmod omap3xxx_timer10_hwmod; static struct omap_hwmod_irq_info omap3xxx_timer10_mpu_irqs[] = { { .irq = 46, }, }; static struct omap_hwmod_addr_space omap3xxx_timer10_addrs[] = { { .pa_start = 0x48086000, .pa_end = 0x48086000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, }; /* l4_core -> timer10 */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer10 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_timer10_hwmod, .clk = "gpt10_ick", .addr = omap3xxx_timer10_addrs, .addr_cnt = ARRAY_SIZE(omap3xxx_timer10_addrs), .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer10 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer10_slaves[] = { &omap3xxx_l4_core__timer10, }; /* timer10 hwmod */ static struct omap_hwmod omap3xxx_timer10_hwmod = { .name = "timer10", .mpu_irqs = omap3xxx_timer10_mpu_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer10_mpu_irqs), .main_clk = "gpt10_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT10_SHIFT, .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT10_SHIFT, }, }, .slaves = omap3xxx_timer10_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer10_slaves), .class = &omap3xxx_timer_1ms_hwmod_class, .omap_chip = 
OMAP_CHIP_INIT(CHIP_IS_OMAP3430) }; /* timer11 */ static struct omap_hwmod omap3xxx_timer11_hwmod; static struct omap_hwmod_irq_info omap3xxx_timer11_mpu_irqs[] = { { .irq = 47, }, }; static struct omap_hwmod_addr_space omap3xxx_timer11_addrs[] = { { .pa_start = 0x48088000, .pa_end = 0x48088000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, }; /* l4_core -> timer11 */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer11 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_timer11_hwmod, .clk = "gpt11_ick", .addr = omap3xxx_timer11_addrs, .addr_cnt = ARRAY_SIZE(omap3xxx_timer11_addrs), .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer11 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer11_slaves[] = { &omap3xxx_l4_core__timer11, }; /* timer11 hwmod */ static struct omap_hwmod omap3xxx_timer11_hwmod = { .name = "timer11", .mpu_irqs = omap3xxx_timer11_mpu_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer11_mpu_irqs), .main_clk = "gpt11_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT11_SHIFT, .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT11_SHIFT, }, }, .slaves = omap3xxx_timer11_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer11_slaves), .class = &omap3xxx_timer_hwmod_class, .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430) }; /* timer12*/ static struct omap_hwmod omap3xxx_timer12_hwmod; static struct omap_hwmod_irq_info omap3xxx_timer12_mpu_irqs[] = { { .irq = 95, }, }; static struct omap_hwmod_addr_space omap3xxx_timer12_addrs[] = { { .pa_start = 0x48304000, .pa_end = 0x48304000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, }; /* l4_core -> timer12 */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer12 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_timer12_hwmod, .clk = "gpt12_ick", .addr = omap3xxx_timer12_addrs, .addr_cnt = ARRAY_SIZE(omap3xxx_timer12_addrs), .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer12 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer12_slaves[] = { 
&omap3xxx_l4_core__timer12, }; /* timer12 hwmod */ static struct omap_hwmod omap3xxx_timer12_hwmod = { .name = "timer12", .mpu_irqs = omap3xxx_timer12_mpu_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer12_mpu_irqs), .main_clk = "gpt12_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT12_SHIFT, .module_offs = WKUP_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT12_SHIFT, }, }, .slaves = omap3xxx_timer12_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer12_slaves), .class = &omap3xxx_timer_hwmod_class, .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430) }; /* l4_wkup -> wd_timer2 */ static struct omap_hwmod_addr_space omap3xxx_wd_timer2_addrs[] = { { .pa_start = 0x48314000, .pa_end = 0x4831407f, .flags = ADDR_TYPE_RT }, }; static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__wd_timer2 = { .master = &omap3xxx_l4_wkup_hwmod, .slave = &omap3xxx_wd_timer2_hwmod, .clk = "wdt2_ick", .addr = omap3xxx_wd_timer2_addrs, .addr_cnt = ARRAY_SIZE(omap3xxx_wd_timer2_addrs), .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* * 'wd_timer' class * 32-bit watchdog upward counter that generates a pulse on the reset pin on * overflow condition */ static struct omap_hwmod_class_sysconfig omap3xxx_wd_timer_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_EMUFREE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; /* I2C common */ static struct omap_hwmod_class_sysconfig i2c_sysc = { .rev_offs = 0x00, .sysc_offs = 0x20, .syss_offs = 0x10, .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_wd_timer_hwmod_class = { .name = 
"wd_timer", .sysc = &omap3xxx_wd_timer_sysc, .pre_shutdown = &omap2_wd_timer_disable }; /* wd_timer2 */ static struct omap_hwmod_ocp_if *omap3xxx_wd_timer2_slaves[] = { &omap3xxx_l4_wkup__wd_timer2, }; static struct omap_hwmod omap3xxx_wd_timer2_hwmod = { .name = "wd_timer2", .class = &omap3xxx_wd_timer_hwmod_class, .main_clk = "wdt2_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_WDT2_SHIFT, .module_offs = WKUP_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_WDT2_SHIFT, }, }, .slaves = omap3xxx_wd_timer2_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_wd_timer2_slaves), .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), /* * XXX: Use software supervised mode, HW supervised smartidle seems to * block CORE power domain idle transitions. Maybe a HW bug in wdt2? */ .flags = HWMOD_SWSUP_SIDLE, }; /* UART common */ static struct omap_hwmod_class_sysconfig uart_sysc = { .rev_offs = 0x50, .sysc_offs = 0x54, .syss_offs = 0x58, .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class uart_class = { .name = "uart", .sysc = &uart_sysc, }; /* UART1 */ static struct omap_hwmod_irq_info uart1_mpu_irqs[] = { { .irq = INT_24XX_UART1_IRQ, }, }; static struct omap_hwmod_dma_info uart1_sdma_reqs[] = { { .name = "tx", .dma_req = OMAP24XX_DMA_UART1_TX, }, { .name = "rx", .dma_req = OMAP24XX_DMA_UART1_RX, }, }; static struct omap_hwmod_ocp_if *omap3xxx_uart1_slaves[] = { &omap3_l4_core__uart1, }; static struct omap_hwmod omap3xxx_uart1_hwmod = { .name = "uart1", .mpu_irqs = uart1_mpu_irqs, .mpu_irqs_cnt = ARRAY_SIZE(uart1_mpu_irqs), .sdma_reqs = uart1_sdma_reqs, .sdma_reqs_cnt = ARRAY_SIZE(uart1_sdma_reqs), .main_clk = "uart1_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_UART1_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = 
OMAP3430_EN_UART1_SHIFT, }, }, .slaves = omap3xxx_uart1_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_uart1_slaves), .class = &uart_class, .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), }; /* UART2 */ static struct omap_hwmod_irq_info uart2_mpu_irqs[] = { { .irq = INT_24XX_UART2_IRQ, }, }; static struct omap_hwmod_dma_info uart2_sdma_reqs[] = { { .name = "tx", .dma_req = OMAP24XX_DMA_UART2_TX, }, { .name = "rx", .dma_req = OMAP24XX_DMA_UART2_RX, }, }; static struct omap_hwmod_ocp_if *omap3xxx_uart2_slaves[] = { &omap3_l4_core__uart2, }; static struct omap_hwmod omap3xxx_uart2_hwmod = { .name = "uart2", .mpu_irqs = uart2_mpu_irqs, .mpu_irqs_cnt = ARRAY_SIZE(uart2_mpu_irqs), .sdma_reqs = uart2_sdma_reqs, .sdma_reqs_cnt = ARRAY_SIZE(uart2_sdma_reqs), .main_clk = "uart2_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_UART2_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_EN_UART2_SHIFT, }, }, .slaves = omap3xxx_uart2_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_uart2_slaves), .class = &uart_class, .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430), }; /* UART3 */ static struct omap_hwmod_irq_info uart3_mpu_irqs[] = { { .irq = INT_24XX_UART3_IRQ, }, }; static struct omap_hwmod_dma_info uart3_sdma_reqs[] = { { .name = "tx", .dma_req = OMAP24XX_DMA_UART3_TX, }, { .name = "rx", .dma_req = OMAP24XX_DMA_UART3_RX, }, }; static struct omap_hwmod_ocp_if *omap3xxx_uart3_slaves[] = { &omap3_l4_per__uart3, }; static struct omap_hwmod omap3xxx_uart3_hwmod = { .name = "uart3", .mpu_irqs = uart3_mpu_irqs, .mpu_irqs_cnt = ARRAY_SIZE(uart3_mpu_irqs), .sdma_reqs = uart3_sdma_reqs, .sdma_reqs_cnt = ARRAY_SIZE(uart3_sdma_reqs), .main_clk = "uart3_fck", .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_UART3_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_EN_UART3_SHIFT, }, }, .slaves = omap3xxx_uart3_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_uart3_slaves), .class = &uart_class, .omap_chip = 
OMAP_CHIP_INIT(CHIP_IS_OMAP3430), }; /* UART4 */ static struct omap_hwmod_irq_info uart4_mpu_irqs[] = { { .irq = INT_36XX_UART4_IRQ, }, }; static struct omap_hwmod_dma_info uart4_sdma_reqs[] = { { .name = "rx", .dma_req = OMAP36XX_DMA_UART4_RX, }, { .name = "tx", .dma_req = OMAP36XX_DMA_UART4_TX, }, }; static struct omap_hwmod_ocp_if *omap3xxx_uart4_slaves[] = { &omap3_l4_per__uart4, }; static struct omap_hwmod omap3xxx_uart4_hwmod = { .name = "uart4", .mpu_irqs = uart4_mpu_irqs, .mpu_irqs_cnt = ARRAY_SIZE(uart4_mpu_irqs), .sdma_reqs = uart4_sdma_reqs, .sdma_reqs_cnt = ARRAY_SIZE(uart4_sdma_reqs), .main_clk = "uart4_fck", .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .prcm_reg_id = 1, .module_bit = OMAP3630_EN_UART4_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3630_EN_UART4_SHIFT, }, }, .slaves = omap3xxx_uart4_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_uart4_slaves), .class = &uart_class, .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3630ES1), }; static struct omap_hwmod_class i2c_class = { .name = "i2c", .sysc = &i2c_sysc, }; /* * 'dss' class * display sub-system */ static struct omap_hwmod_class_sysconfig omap3xxx_dss_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_dss_hwmod_class = { .name = "dss", .sysc = &omap3xxx_dss_sysc, }; static struct omap_hwmod_dma_info omap3xxx_dss_sdma_chs[] = { { .name = "dispc", .dma_req = 5 }, { .name = "dsi1", .dma_req = 74 }, }; /* dss */ /* dss master ports */ static struct omap_hwmod_ocp_if *omap3xxx_dss_masters[] = { &omap3xxx_dss__l3, }; static struct omap_hwmod_addr_space omap3xxx_dss_addrs[] = { { .pa_start = 0x48050000, .pa_end = 0x480503FF, .flags = ADDR_TYPE_RT }, }; /* l4_core -> dss */ static struct omap_hwmod_ocp_if omap3430es1_l4_core__dss = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3430es1_dss_core_hwmod, .clk = "dss_ick", .addr = 
omap3xxx_dss_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_dss_addrs),
	.fw = {
		.omap2 = {
			/* ES1 has its own L4 firewall region for DSS core */
			.l4_fw_region  = OMAP3ES1_L4_CORE_FW_DSS_CORE_REGION,
			.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
			.flags	= OMAP_FIREWALL_L4,
		}
	},
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_core -> dss interface for ES2+ parts */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss = {
	.master		= &omap3xxx_l4_core_hwmod,
	.slave		= &omap3xxx_dss_core_hwmod,
	.clk		= "dss_ick",
	.addr		= omap3xxx_dss_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_dss_addrs),
	.fw = {
		.omap2 = {
			.l4_fw_region  = OMAP3_L4_CORE_FW_DSS_CORE_REGION,
			.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
			.flags	= OMAP_FIREWALL_L4,
		}
	},
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* dss slave ports */
static struct omap_hwmod_ocp_if *omap3430es1_dss_slaves[] = {
	&omap3430es1_l4_core__dss,
};

static struct omap_hwmod_ocp_if *omap3xxx_dss_slaves[] = {
	&omap3xxx_l4_core__dss,
};

/* optional clocks managed alongside the DSS main clock */
static struct omap_hwmod_opt_clk dss_opt_clks[] = {
	{ .role = "tv_clk", .clk = "dss_tv_fck" },
	{ .role = "video_clk", .clk = "dss_96m_fck" },
	{ .role = "sys_clk", .clk = "dss2_alwon_fck" },
};

/* DSS core hwmod for 3430 ES1 (no idle-status bit; standby only) */
static struct omap_hwmod omap3430es1_dss_core_hwmod = {
	.name		= "dss_core",
	.class		= &omap3xxx_dss_hwmod_class,
	.main_clk	= "dss1_alwon_fck", /* instead of dss_fck */
	.sdma_reqs	= omap3xxx_dss_sdma_chs,
	.sdma_reqs_cnt	= ARRAY_SIZE(omap3xxx_dss_sdma_chs),
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_DSS1_SHIFT,
			.module_offs = OMAP3430_DSS_MOD,
			.idlest_reg_id = 1,
			.idlest_stdby_bit = OMAP3430ES1_ST_DSS_SHIFT,
		},
	},
	.opt_clks	= dss_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(dss_opt_clks),
	.slaves		= omap3430es1_dss_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3430es1_dss_slaves),
	.masters	= omap3xxx_dss_masters,
	.masters_cnt	= ARRAY_SIZE(omap3xxx_dss_masters),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1),
	.flags		= HWMOD_NO_IDLEST,
};

/* DSS core hwmod for ES2+/3630 (has both idle and standby bits) */
static struct omap_hwmod omap3xxx_dss_core_hwmod = {
	.name		= "dss_core",
	.class		= &omap3xxx_dss_hwmod_class,
	.main_clk	= "dss1_alwon_fck", /* instead of dss_fck */
	.sdma_reqs	= omap3xxx_dss_sdma_chs,
	.sdma_reqs_cnt	= ARRAY_SIZE(omap3xxx_dss_sdma_chs),
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_DSS1_SHIFT,
			.module_offs = OMAP3430_DSS_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT,
			.idlest_stdby_bit = OMAP3430ES2_ST_DSS_STDBY_SHIFT,
		},
	},
	.opt_clks	= dss_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(dss_opt_clks),
	.slaves		= omap3xxx_dss_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_dss_slaves),
	.masters	= omap3xxx_dss_masters,
	.masters_cnt	= ARRAY_SIZE(omap3xxx_dss_masters),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2 |
				CHIP_IS_OMAP3630ES1 | CHIP_GE_OMAP3630ES1_1),
};

/*
 * 'dispc' class
 * display controller
 */

static struct omap_hwmod_class_sysconfig omap3xxx_dispc_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
			   SYSC_HAS_MIDLEMODE | SYSC_HAS_ENAWAKEUP |
			   SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
			   MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap3xxx_dispc_hwmod_class = {
	.name	= "dispc",
	.sysc	= &omap3xxx_dispc_sysc,
};

static struct omap_hwmod_irq_info omap3xxx_dispc_irqs[] = {
	{ .irq = 25 },
};

static struct omap_hwmod_addr_space omap3xxx_dss_dispc_addrs[] = {
	{
		.pa_start	= 0x48050400,
		.pa_end		= 0x480507FF,
		.flags		= ADDR_TYPE_RT
	},
};

/* l4_core -> dss_dispc */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dispc = {
	.master		= &omap3xxx_l4_core_hwmod,
	.slave		= &omap3xxx_dss_dispc_hwmod,
	.clk		= "dss_ick",
	.addr		= omap3xxx_dss_dispc_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_dss_dispc_addrs),
	.fw = {
		.omap2 = {
			.l4_fw_region  = OMAP3_L4_CORE_FW_DSS_DISPC_REGION,
			.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
			.flags	= OMAP_FIREWALL_L4,
		}
	},
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* dss_dispc slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_dss_dispc_slaves[] = {
	&omap3xxx_l4_core__dss_dispc,
};

static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
	.name		= "dss_dispc",
	.class		= &omap3xxx_dispc_hwmod_class,
	.mpu_irqs	= omap3xxx_dispc_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_dispc_irqs),
	.main_clk	= "dss1_alwon_fck",
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_DSS1_SHIFT,
			.module_offs = OMAP3430_DSS_MOD,
		},
	},
	.slaves		= omap3xxx_dss_dispc_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_dss_dispc_slaves),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1 |
				CHIP_GE_OMAP3430ES2 | CHIP_IS_OMAP3630ES1 |
				CHIP_GE_OMAP3630ES1_1),
	.flags		= HWMOD_NO_IDLEST,
};

/*
 * 'dsi' class
 * display serial interface controller
 */

static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = {
	.name = "dsi",
};

static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = {
	{ .irq = 25 },
};

/* dss_dsi1 */
static struct omap_hwmod_addr_space omap3xxx_dss_dsi1_addrs[] = {
	{
		.pa_start	= 0x4804FC00,
		.pa_end		= 0x4804FFFF,
		.flags		= ADDR_TYPE_RT
	},
};

/* l4_core -> dss_dsi1 (no interface clock specified here) */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dsi1 = {
	.master		= &omap3xxx_l4_core_hwmod,
	.slave		= &omap3xxx_dss_dsi1_hwmod,
	.addr		= omap3xxx_dss_dsi1_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_dss_dsi1_addrs),
	.fw = {
		.omap2 = {
			.l4_fw_region  = OMAP3_L4_CORE_FW_DSS_DSI_REGION,
			.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
			.flags	= OMAP_FIREWALL_L4,
		}
	},
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* dss_dsi1 slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_dss_dsi1_slaves[] = {
	&omap3xxx_l4_core__dss_dsi1,
};

static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
	.name		= "dss_dsi1",
	.class		= &omap3xxx_dsi_hwmod_class,
	.mpu_irqs	= omap3xxx_dsi1_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_dsi1_irqs),
	.main_clk	= "dss1_alwon_fck",
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_DSS1_SHIFT,
			.module_offs = OMAP3430_DSS_MOD,
		},
	},
	.slaves		= omap3xxx_dss_dsi1_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_dss_dsi1_slaves),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1 |
				CHIP_GE_OMAP3430ES2 | CHIP_IS_OMAP3630ES1 |
				CHIP_GE_OMAP3630ES1_1),
	.flags		= HWMOD_NO_IDLEST,
};

/*
 * 'rfbi' class
 * remote frame buffer interface
 */

static struct omap_hwmod_class_sysconfig omap3xxx_rfbi_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
			   SYSC_HAS_AUTOIDLE),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap3xxx_rfbi_hwmod_class = {
	.name	= "rfbi",
	.sysc	= &omap3xxx_rfbi_sysc,
};

static struct omap_hwmod_addr_space omap3xxx_dss_rfbi_addrs[] = {
	{
		.pa_start	= 0x48050800,
		.pa_end		= 0x48050BFF,
		.flags		= ADDR_TYPE_RT
	},
};

/* l4_core -> dss_rfbi */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_rfbi = {
	.master		= &omap3xxx_l4_core_hwmod,
	.slave		= &omap3xxx_dss_rfbi_hwmod,
	.clk		= "dss_ick",
	.addr		= omap3xxx_dss_rfbi_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_dss_rfbi_addrs),
	.fw = {
		.omap2 = {
			.l4_fw_region  = OMAP3_L4_CORE_FW_DSS_RFBI_REGION,
			.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
			.flags	= OMAP_FIREWALL_L4,
		}
	},
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* dss_rfbi slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_dss_rfbi_slaves[] = {
	&omap3xxx_l4_core__dss_rfbi,
};

static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
	.name		= "dss_rfbi",
	.class		= &omap3xxx_rfbi_hwmod_class,
	.main_clk	= "dss1_alwon_fck",
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_DSS1_SHIFT,
			.module_offs = OMAP3430_DSS_MOD,
		},
	},
	.slaves		= omap3xxx_dss_rfbi_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_dss_rfbi_slaves),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1 |
				CHIP_GE_OMAP3430ES2 | CHIP_IS_OMAP3630ES1 |
				CHIP_GE_OMAP3630ES1_1),
	.flags		= HWMOD_NO_IDLEST,
};

/*
 * 'venc' class
 * video encoder
 */

static struct omap_hwmod_class omap3xxx_venc_hwmod_class = {
	.name = "venc",
};

/* dss_venc */
static struct omap_hwmod_addr_space omap3xxx_dss_venc_addrs[] = {
{
		.pa_start	= 0x48050C00,
		.pa_end		= 0x48050FFF,
		.flags		= ADDR_TYPE_RT
	},
};

/* l4_core -> dss_venc */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = {
	.master		= &omap3xxx_l4_core_hwmod,
	.slave		= &omap3xxx_dss_venc_hwmod,
	.clk		= "dss_tv_fck",
	.addr		= omap3xxx_dss_venc_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_dss_venc_addrs),
	.fw = {
		.omap2 = {
			.l4_fw_region  = OMAP3_L4_CORE_FW_DSS_VENC_REGION,
			.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
			.flags	= OMAP_FIREWALL_L4,
		}
	},
	/* interface idle is software-supervised for this port */
	.flags		= OCPIF_SWSUP_IDLE,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* dss_venc slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_dss_venc_slaves[] = {
	&omap3xxx_l4_core__dss_venc,
};

static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
	.name		= "dss_venc",
	.class		= &omap3xxx_venc_hwmod_class,
	.main_clk	= "dss1_alwon_fck",
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_DSS1_SHIFT,
			.module_offs = OMAP3430_DSS_MOD,
		},
	},
	.slaves		= omap3xxx_dss_venc_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_dss_venc_slaves),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1 |
				CHIP_GE_OMAP3430ES2 | CHIP_IS_OMAP3630ES1 |
				CHIP_GE_OMAP3630ES1_1),
	.flags		= HWMOD_NO_IDLEST,
};

/* I2C1 */

static struct omap_i2c_dev_attr i2c1_dev_attr = {
	.fifo_depth	= 8, /* bytes */
};

static struct omap_hwmod_irq_info i2c1_mpu_irqs[] = {
	{ .irq = INT_24XX_I2C1_IRQ, },
};

static struct omap_hwmod_dma_info i2c1_sdma_reqs[] = {
	{ .name = "tx", .dma_req = OMAP24XX_DMA_I2C1_TX },
	{ .name = "rx", .dma_req = OMAP24XX_DMA_I2C1_RX },
};

static struct omap_hwmod_ocp_if *omap3xxx_i2c1_slaves[] = {
	&omap3_l4_core__i2c1,
};

static struct omap_hwmod omap3xxx_i2c1_hwmod = {
	.name		= "i2c1",
	.mpu_irqs	= i2c1_mpu_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(i2c1_mpu_irqs),
	.sdma_reqs	= i2c1_sdma_reqs,
	.sdma_reqs_cnt	= ARRAY_SIZE(i2c1_sdma_reqs),
	.main_clk	= "i2c1_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_I2C1_SHIFT,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_I2C1_SHIFT,
		},
	},
	.slaves		= omap3xxx_i2c1_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_i2c1_slaves),
	.class		= &i2c_class,
	.dev_attr	= &i2c1_dev_attr,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* I2C2 */

static struct omap_i2c_dev_attr i2c2_dev_attr = {
	.fifo_depth	= 8, /* bytes */
};

static struct omap_hwmod_irq_info i2c2_mpu_irqs[] = {
	{ .irq = INT_24XX_I2C2_IRQ, },
};

static struct omap_hwmod_dma_info i2c2_sdma_reqs[] = {
	{ .name = "tx", .dma_req = OMAP24XX_DMA_I2C2_TX },
	{ .name = "rx", .dma_req = OMAP24XX_DMA_I2C2_RX },
};

static struct omap_hwmod_ocp_if *omap3xxx_i2c2_slaves[] = {
	&omap3_l4_core__i2c2,
};

static struct omap_hwmod omap3xxx_i2c2_hwmod = {
	.name		= "i2c2",
	.mpu_irqs	= i2c2_mpu_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(i2c2_mpu_irqs),
	.sdma_reqs	= i2c2_sdma_reqs,
	.sdma_reqs_cnt	= ARRAY_SIZE(i2c2_sdma_reqs),
	.main_clk	= "i2c2_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_I2C2_SHIFT,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_I2C2_SHIFT,
		},
	},
	.slaves		= omap3xxx_i2c2_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_i2c2_slaves),
	.class		= &i2c_class,
	.dev_attr	= &i2c2_dev_attr,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* I2C3 */

/* I2C3 has a deeper FIFO than I2C1/I2C2 */
static struct omap_i2c_dev_attr i2c3_dev_attr = {
	.fifo_depth	= 64, /* bytes */
};

static struct omap_hwmod_irq_info i2c3_mpu_irqs[] = {
	{ .irq = INT_34XX_I2C3_IRQ, },
};

static struct omap_hwmod_dma_info i2c3_sdma_reqs[] = {
	{ .name = "tx", .dma_req = OMAP34XX_DMA_I2C3_TX },
	{ .name = "rx", .dma_req = OMAP34XX_DMA_I2C3_RX },
};

static struct omap_hwmod_ocp_if *omap3xxx_i2c3_slaves[] = {
	&omap3_l4_core__i2c3,
};

static struct omap_hwmod omap3xxx_i2c3_hwmod = {
	.name		= "i2c3",
	.mpu_irqs	= i2c3_mpu_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(i2c3_mpu_irqs),
	.sdma_reqs	= i2c3_sdma_reqs,
	.sdma_reqs_cnt	= ARRAY_SIZE(i2c3_sdma_reqs),
	.main_clk	= "i2c3_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_I2C3_SHIFT,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_I2C3_SHIFT,
		},
	},
	.slaves		= omap3xxx_i2c3_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_i2c3_slaves),
	.class		= &i2c_class,
	.dev_attr	= &i2c3_dev_attr,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* l4_wkup -> gpio1 (bank 1 lives on the wakeup L4) */
static struct omap_hwmod_addr_space omap3xxx_gpio1_addrs[] = {
	{
		.pa_start	= 0x48310000,
		.pa_end		= 0x483101ff,
		.flags		= ADDR_TYPE_RT
	},
};

static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__gpio1 = {
	.master		= &omap3xxx_l4_wkup_hwmod,
	.slave		= &omap3xxx_gpio1_hwmod,
	.addr		= omap3xxx_gpio1_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_gpio1_addrs),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> gpio2 */
static struct omap_hwmod_addr_space omap3xxx_gpio2_addrs[] = {
	{
		.pa_start	= 0x49050000,
		.pa_end		= 0x490501ff,
		.flags		= ADDR_TYPE_RT
	},
};

static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio2 = {
	.master		= &omap3xxx_l4_per_hwmod,
	.slave		= &omap3xxx_gpio2_hwmod,
	.addr		= omap3xxx_gpio2_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_gpio2_addrs),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> gpio3 */
static struct omap_hwmod_addr_space omap3xxx_gpio3_addrs[] = {
	{
		.pa_start	= 0x49052000,
		.pa_end		= 0x490521ff,
		.flags		= ADDR_TYPE_RT
	},
};

static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio3 = {
	.master		= &omap3xxx_l4_per_hwmod,
	.slave		= &omap3xxx_gpio3_hwmod,
	.addr		= omap3xxx_gpio3_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_gpio3_addrs),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> gpio4 */
static struct omap_hwmod_addr_space omap3xxx_gpio4_addrs[] = {
	{
		.pa_start	= 0x49054000,
		.pa_end		= 0x490541ff,
		.flags		= ADDR_TYPE_RT
	},
};

static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio4 = {
	.master		= &omap3xxx_l4_per_hwmod,
	.slave		= &omap3xxx_gpio4_hwmod,
	.addr		= omap3xxx_gpio4_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_gpio4_addrs),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> gpio5 */
static struct omap_hwmod_addr_space omap3xxx_gpio5_addrs[] = {
	{
		.pa_start	= 0x49056000,
		.pa_end		= 0x490561ff,
		.flags		= ADDR_TYPE_RT
	},
};

static struct
omap_hwmod_ocp_if omap3xxx_l4_per__gpio5 = {
	.master		= &omap3xxx_l4_per_hwmod,
	.slave		= &omap3xxx_gpio5_hwmod,
	.addr		= omap3xxx_gpio5_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_gpio5_addrs),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4_per -> gpio6 */
static struct omap_hwmod_addr_space omap3xxx_gpio6_addrs[] = {
	{
		.pa_start	= 0x49058000,
		.pa_end		= 0x490581ff,
		.flags		= ADDR_TYPE_RT
	},
};

static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio6 = {
	.master		= &omap3xxx_l4_per_hwmod,
	.slave		= &omap3xxx_gpio6_hwmod,
	.addr		= omap3xxx_gpio6_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_gpio6_addrs),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/*
 * 'gpio' class
 * general purpose io module
 */
static struct omap_hwmod_class_sysconfig omap3xxx_gpio_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
			   SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
			   SYSS_HAS_RESET_STATUS),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap3xxx_gpio_hwmod_class = {
	.name	= "gpio",
	.sysc	= &omap3xxx_gpio_sysc,
	.rev	= 1,
};

/* gpio_dev_attr*/
/* shared by all six banks: 32-bit wide, with a debounce clock */
static struct omap_gpio_dev_attr gpio_dev_attr = {
	.bank_width	= 32,
	.dbck_flag	= true,
};

/* gpio1 */
static struct omap_hwmod_irq_info omap3xxx_gpio1_irqs[] = {
	{ .irq = 29 }, /* INT_34XX_GPIO_BANK1 */
};

static struct omap_hwmod_opt_clk gpio1_opt_clks[] = {
	{ .role = "dbclk", .clk = "gpio1_dbck", },
};

static struct omap_hwmod_ocp_if *omap3xxx_gpio1_slaves[] = {
	&omap3xxx_l4_wkup__gpio1,
};

static struct omap_hwmod omap3xxx_gpio1_hwmod = {
	.name		= "gpio1",
	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
	.mpu_irqs	= omap3xxx_gpio1_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_gpio1_irqs),
	.main_clk	= "gpio1_ick",
	.opt_clks	= gpio1_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(gpio1_opt_clks),
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_GPIO1_SHIFT,
			/* bank 1 is in the wakeup powerdomain */
			.module_offs = WKUP_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_GPIO1_SHIFT,
		},
	},
	.slaves		= omap3xxx_gpio1_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_gpio1_slaves),
	.class		= &omap3xxx_gpio_hwmod_class,
	.dev_attr	= &gpio_dev_attr,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* gpio2 */
static struct omap_hwmod_irq_info omap3xxx_gpio2_irqs[] = {
	{ .irq = 30 }, /* INT_34XX_GPIO_BANK2 */
};

static struct omap_hwmod_opt_clk gpio2_opt_clks[] = {
	{ .role = "dbclk", .clk = "gpio2_dbck", },
};

static struct omap_hwmod_ocp_if *omap3xxx_gpio2_slaves[] = {
	&omap3xxx_l4_per__gpio2,
};

static struct omap_hwmod omap3xxx_gpio2_hwmod = {
	.name		= "gpio2",
	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
	.mpu_irqs	= omap3xxx_gpio2_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_gpio2_irqs),
	.main_clk	= "gpio2_ick",
	.opt_clks	= gpio2_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(gpio2_opt_clks),
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_GPIO2_SHIFT,
			.module_offs = OMAP3430_PER_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_GPIO2_SHIFT,
		},
	},
	.slaves		= omap3xxx_gpio2_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_gpio2_slaves),
	.class		= &omap3xxx_gpio_hwmod_class,
	.dev_attr	= &gpio_dev_attr,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* gpio3 */
static struct omap_hwmod_irq_info omap3xxx_gpio3_irqs[] = {
	{ .irq = 31 }, /* INT_34XX_GPIO_BANK3 */
};

static struct omap_hwmod_opt_clk gpio3_opt_clks[] = {
	{ .role = "dbclk", .clk = "gpio3_dbck", },
};

static struct omap_hwmod_ocp_if *omap3xxx_gpio3_slaves[] = {
	&omap3xxx_l4_per__gpio3,
};

static struct omap_hwmod omap3xxx_gpio3_hwmod = {
	.name		= "gpio3",
	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
	.mpu_irqs	= omap3xxx_gpio3_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_gpio3_irqs),
	.main_clk	= "gpio3_ick",
	.opt_clks	= gpio3_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(gpio3_opt_clks),
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_GPIO3_SHIFT,
			.module_offs = OMAP3430_PER_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_GPIO3_SHIFT,
		},
	},
	.slaves		= omap3xxx_gpio3_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_gpio3_slaves),
	.class		= &omap3xxx_gpio_hwmod_class,
	.dev_attr	= &gpio_dev_attr,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* gpio4 */
static struct omap_hwmod_irq_info omap3xxx_gpio4_irqs[] = {
	{ .irq = 32 }, /* INT_34XX_GPIO_BANK4 */
};

static struct omap_hwmod_opt_clk gpio4_opt_clks[] = {
	{ .role = "dbclk", .clk = "gpio4_dbck", },
};

static struct omap_hwmod_ocp_if *omap3xxx_gpio4_slaves[] = {
	&omap3xxx_l4_per__gpio4,
};

static struct omap_hwmod omap3xxx_gpio4_hwmod = {
	.name		= "gpio4",
	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
	.mpu_irqs	= omap3xxx_gpio4_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_gpio4_irqs),
	.main_clk	= "gpio4_ick",
	.opt_clks	= gpio4_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(gpio4_opt_clks),
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_GPIO4_SHIFT,
			.module_offs = OMAP3430_PER_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_GPIO4_SHIFT,
		},
	},
	.slaves		= omap3xxx_gpio4_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_gpio4_slaves),
	.class		= &omap3xxx_gpio_hwmod_class,
	.dev_attr	= &gpio_dev_attr,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* gpio5 */
static struct omap_hwmod_irq_info omap3xxx_gpio5_irqs[] = {
	{ .irq = 33 }, /* INT_34XX_GPIO_BANK5 */
};

static struct omap_hwmod_opt_clk gpio5_opt_clks[] = {
	{ .role = "dbclk", .clk = "gpio5_dbck", },
};

static struct omap_hwmod_ocp_if *omap3xxx_gpio5_slaves[] = {
	&omap3xxx_l4_per__gpio5,
};

static struct omap_hwmod omap3xxx_gpio5_hwmod = {
	.name		= "gpio5",
	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
	.mpu_irqs	= omap3xxx_gpio5_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_gpio5_irqs),
	.main_clk	= "gpio5_ick",
	.opt_clks	= gpio5_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(gpio5_opt_clks),
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_GPIO5_SHIFT,
			.module_offs = OMAP3430_PER_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_GPIO5_SHIFT,
		},
	},
	.slaves		= omap3xxx_gpio5_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_gpio5_slaves),
	.class		= &omap3xxx_gpio_hwmod_class,
	.dev_attr	= &gpio_dev_attr,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* gpio6 */
static struct omap_hwmod_irq_info omap3xxx_gpio6_irqs[] = {
	{ .irq = 34 }, /* INT_34XX_GPIO_BANK6 */
};

static struct omap_hwmod_opt_clk gpio6_opt_clks[] = {
	{ .role = "dbclk", .clk = "gpio6_dbck", },
};

static struct omap_hwmod_ocp_if *omap3xxx_gpio6_slaves[] = {
	&omap3xxx_l4_per__gpio6,
};

static struct omap_hwmod omap3xxx_gpio6_hwmod = {
	.name		= "gpio6",
	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
	.mpu_irqs	= omap3xxx_gpio6_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_gpio6_irqs),
	.main_clk	= "gpio6_ick",
	.opt_clks	= gpio6_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(gpio6_opt_clks),
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_GPIO6_SHIFT,
			.module_offs = OMAP3430_PER_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_GPIO6_SHIFT,
		},
	},
	.slaves		= omap3xxx_gpio6_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_gpio6_slaves),
	.class		= &omap3xxx_gpio_hwmod_class,
	.dev_attr	= &gpio_dev_attr,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* dma_system -> L3 */
static struct omap_hwmod_ocp_if omap3xxx_dma_system__l3 = {
	.master		= &omap3xxx_dma_system_hwmod,
	.slave		= &omap3xxx_l3_main_hwmod,
	.clk		= "core_l3_ick",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* dma attributes */
static struct omap_dma_dev_attr dma_dev_attr = {
	.dev_caps	= RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY |
			  IS_CSSA_32 | IS_CDSA_32 | IS_RW_PRIORITY,
	.lch_count	= 32,
};

static struct omap_hwmod_class_sysconfig omap3xxx_dma_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x002c,
	.syss_offs	= 0x0028,
	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
			   SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
			   SYSC_HAS_EMUFREE | SYSC_HAS_AUTOIDLE |
			   SYSS_HAS_RESET_STATUS),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
			   MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct
omap_hwmod_class omap3xxx_dma_hwmod_class = {
	.name	= "dma",
	.sysc	= &omap3xxx_dma_sysc,
};

/* dma_system */
static struct omap_hwmod_irq_info omap3xxx_dma_system_irqs[] = {
	{ .name = "0", .irq = 12 }, /* INT_24XX_SDMA_IRQ0 */
	{ .name = "1", .irq = 13 }, /* INT_24XX_SDMA_IRQ1 */
	{ .name = "2", .irq = 14 }, /* INT_24XX_SDMA_IRQ2 */
	{ .name = "3", .irq = 15 }, /* INT_24XX_SDMA_IRQ3 */
};

static struct omap_hwmod_addr_space omap3xxx_dma_system_addrs[] = {
	{
		.pa_start	= 0x48056000,
		.pa_end		= 0x48056fff,
		.flags		= ADDR_TYPE_RT
	},
};

/* dma_system master ports */
static struct omap_hwmod_ocp_if *omap3xxx_dma_system_masters[] = {
	&omap3xxx_dma_system__l3,
};

/* l4_cfg -> dma_system */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dma_system = {
	.master		= &omap3xxx_l4_core_hwmod,
	.slave		= &omap3xxx_dma_system_hwmod,
	.clk		= "core_l4_ick",
	.addr		= omap3xxx_dma_system_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_dma_system_addrs),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* dma_system slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_dma_system_slaves[] = {
	&omap3xxx_l4_core__dma_system,
};

static struct omap_hwmod omap3xxx_dma_system_hwmod = {
	.name		= "dma",
	.class		= &omap3xxx_dma_hwmod_class,
	.mpu_irqs	= omap3xxx_dma_system_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_dma_system_irqs),
	.main_clk	= "core_l3_ick",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.prcm_reg_id = 1,
			/*
			 * NOTE(review): module_bit uses the ST_ (status)
			 * shift rather than an EN_ shift; HWMOD_NO_IDLEST
			 * is set below, but confirm against the PRCM
			 * register layout.
			 */
			.module_bit = OMAP3430_ST_SDMA_SHIFT,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_SDMA_SHIFT,
		},
	},
	.slaves		= omap3xxx_dma_system_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_dma_system_slaves),
	.masters	= omap3xxx_dma_system_masters,
	.masters_cnt	= ARRAY_SIZE(omap3xxx_dma_system_masters),
	.dev_attr	= &dma_dev_attr,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
	.flags		= HWMOD_NO_IDLEST,
};

/*
 * 'mcbsp' class
 * multi channel buffered serial port controller
 */

static struct omap_hwmod_class_sysconfig omap3xxx_mcbsp_sysc = {
	.sysc_offs	= 0x008c,
	.sysc_flags	= (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_ENAWAKEUP |
			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
	.clockact	= 0x2,
};

static struct omap_hwmod_class omap3xxx_mcbsp_hwmod_class = {
	.name	= "mcbsp",
	.sysc	= &omap3xxx_mcbsp_sysc,
	.rev	= MCBSP_CONFIG_TYPE3,
};

/* mcbsp1 */
static struct omap_hwmod_irq_info omap3xxx_mcbsp1_irqs[] = {
	{ .name = "irq", .irq = 16 },
	{ .name = "tx", .irq = 59 },
	{ .name = "rx", .irq = 60 },
};

static struct omap_hwmod_dma_info omap3xxx_mcbsp1_sdma_chs[] = {
	{ .name = "rx", .dma_req = 32 },
	{ .name = "tx", .dma_req = 31 },
};

static struct omap_hwmod_addr_space omap3xxx_mcbsp1_addrs[] = {
	{
		.name		= "mpu",
		.pa_start	= 0x48074000,
		.pa_end		= 0x480740ff,
		.flags		= ADDR_TYPE_RT
	},
};

/* l4_core -> mcbsp1 */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__mcbsp1 = {
	.master		= &omap3xxx_l4_core_hwmod,
	.slave		= &omap3xxx_mcbsp1_hwmod,
	.clk		= "mcbsp1_ick",
	.addr		= omap3xxx_mcbsp1_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_mcbsp1_addrs),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* mcbsp1 slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_mcbsp1_slaves[] = {
	&omap3xxx_l4_core__mcbsp1,
};

static struct omap_hwmod omap3xxx_mcbsp1_hwmod = {
	.name		= "mcbsp1",
	.class		= &omap3xxx_mcbsp_hwmod_class,
	.mpu_irqs	= omap3xxx_mcbsp1_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_mcbsp1_irqs),
	.sdma_reqs	= omap3xxx_mcbsp1_sdma_chs,
	.sdma_reqs_cnt	= ARRAY_SIZE(omap3xxx_mcbsp1_sdma_chs),
	.main_clk	= "mcbsp1_fck",
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_MCBSP1_SHIFT,
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_MCBSP1_SHIFT,
		},
	},
	.slaves		= omap3xxx_mcbsp1_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_mcbsp1_slaves),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* mcbsp2 */
static struct omap_hwmod_irq_info omap3xxx_mcbsp2_irqs[] = {
	{ .name = "irq", .irq = 17 },
	{ .name = "tx", .irq = 62 },
	{ .name = "rx", .irq = 63 },
};

static struct omap_hwmod_dma_info omap3xxx_mcbsp2_sdma_chs[] = {
	{ .name = "rx", .dma_req = 34 },
	{ .name = "tx", .dma_req = 33 },
};

static struct omap_hwmod_addr_space omap3xxx_mcbsp2_addrs[] = {
	{
		.name		= "mpu",
		.pa_start	= 0x49022000,
		.pa_end		= 0x490220ff,
		.flags		= ADDR_TYPE_RT
	},
};

/* l4_per -> mcbsp2 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp2 = {
	.master		= &omap3xxx_l4_per_hwmod,
	.slave		= &omap3xxx_mcbsp2_hwmod,
	.clk		= "mcbsp2_ick",
	.addr		= omap3xxx_mcbsp2_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_mcbsp2_addrs),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* mcbsp2 slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_mcbsp2_slaves[] = {
	&omap3xxx_l4_per__mcbsp2,
};

/* mcbsp2 has a companion sidetone block (see mcbsp2_sidetone below) */
static struct omap_mcbsp_dev_attr omap34xx_mcbsp2_dev_attr = {
	.sidetone	= "mcbsp2_sidetone",
};

static struct omap_hwmod omap3xxx_mcbsp2_hwmod = {
	.name		= "mcbsp2",
	.class		= &omap3xxx_mcbsp_hwmod_class,
	.mpu_irqs	= omap3xxx_mcbsp2_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_mcbsp2_irqs),
	.sdma_reqs	= omap3xxx_mcbsp2_sdma_chs,
	.sdma_reqs_cnt	= ARRAY_SIZE(omap3xxx_mcbsp2_sdma_chs),
	.main_clk	= "mcbsp2_fck",
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_MCBSP2_SHIFT,
			.module_offs = OMAP3430_PER_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_MCBSP2_SHIFT,
		},
	},
	.slaves		= omap3xxx_mcbsp2_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_mcbsp2_slaves),
	.dev_attr	= &omap34xx_mcbsp2_dev_attr,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* mcbsp3 */
static struct omap_hwmod_irq_info omap3xxx_mcbsp3_irqs[] = {
	{ .name = "irq", .irq = 22 },
	{ .name = "tx", .irq = 89 },
	{ .name = "rx", .irq = 90 },
};

static struct omap_hwmod_dma_info omap3xxx_mcbsp3_sdma_chs[] = {
	{ .name = "rx", .dma_req = 18 },
	{ .name = "tx", .dma_req = 17 },
};

static struct omap_hwmod_addr_space omap3xxx_mcbsp3_addrs[] = {
	{
		.name		= "mpu",
		.pa_start	= 0x49024000,
		.pa_end		= 0x490240ff,
		.flags		= ADDR_TYPE_RT
	},
};

/* l4_per -> mcbsp3 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp3 = {
	.master		= &omap3xxx_l4_per_hwmod,
	.slave		= &omap3xxx_mcbsp3_hwmod,
	.clk		= "mcbsp3_ick",
	.addr		= omap3xxx_mcbsp3_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_mcbsp3_addrs),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* mcbsp3 slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_mcbsp3_slaves[] = {
	&omap3xxx_l4_per__mcbsp3,
};

/* mcbsp3 also has a sidetone companion block */
static struct omap_mcbsp_dev_attr omap34xx_mcbsp3_dev_attr = {
	.sidetone	= "mcbsp3_sidetone",
};

static struct omap_hwmod omap3xxx_mcbsp3_hwmod = {
	.name		= "mcbsp3",
	.class		= &omap3xxx_mcbsp_hwmod_class,
	.mpu_irqs	= omap3xxx_mcbsp3_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_mcbsp3_irqs),
	.sdma_reqs	= omap3xxx_mcbsp3_sdma_chs,
	.sdma_reqs_cnt	= ARRAY_SIZE(omap3xxx_mcbsp3_sdma_chs),
	.main_clk	= "mcbsp3_fck",
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_MCBSP3_SHIFT,
			.module_offs = OMAP3430_PER_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_MCBSP3_SHIFT,
		},
	},
	.slaves		= omap3xxx_mcbsp3_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_mcbsp3_slaves),
	.dev_attr	= &omap34xx_mcbsp3_dev_attr,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* mcbsp4 */
static struct omap_hwmod_irq_info omap3xxx_mcbsp4_irqs[] = {
	{ .name = "irq", .irq = 23 },
	{ .name = "tx", .irq = 54 },
	{ .name = "rx", .irq = 55 },
};

static struct omap_hwmod_dma_info omap3xxx_mcbsp4_sdma_chs[] = {
	{ .name = "rx", .dma_req = 20 },
	{ .name = "tx", .dma_req = 19 },
};

static struct omap_hwmod_addr_space omap3xxx_mcbsp4_addrs[] = {
	{
		.name		= "mpu",
		.pa_start	= 0x49026000,
		.pa_end		= 0x490260ff,
		.flags		= ADDR_TYPE_RT
	},
};

/* l4_per -> mcbsp4 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp4 = {
	.master		= &omap3xxx_l4_per_hwmod,
	.slave		= &omap3xxx_mcbsp4_hwmod,
	.clk		= "mcbsp4_ick",
	.addr		= omap3xxx_mcbsp4_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_mcbsp4_addrs),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* mcbsp4 slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_mcbsp4_slaves[] = {
	&omap3xxx_l4_per__mcbsp4,
};

static struct omap_hwmod omap3xxx_mcbsp4_hwmod = {
	.name		= "mcbsp4",
	.class		= &omap3xxx_mcbsp_hwmod_class,
	.mpu_irqs	= omap3xxx_mcbsp4_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_mcbsp4_irqs),
	.sdma_reqs	= omap3xxx_mcbsp4_sdma_chs,
	.sdma_reqs_cnt	= ARRAY_SIZE(omap3xxx_mcbsp4_sdma_chs),
	.main_clk	= "mcbsp4_fck",
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_MCBSP4_SHIFT,
			.module_offs = OMAP3430_PER_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_MCBSP4_SHIFT,
		},
	},
	.slaves		= omap3xxx_mcbsp4_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_mcbsp4_slaves),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* mcbsp5 */
static struct omap_hwmod_irq_info omap3xxx_mcbsp5_irqs[] = {
	{ .name = "irq", .irq = 27 },
	{ .name = "tx", .irq = 81 },
	{ .name = "rx", .irq = 82 },
};

static struct omap_hwmod_dma_info omap3xxx_mcbsp5_sdma_chs[] = {
	{ .name = "rx", .dma_req = 22 },
	{ .name = "tx", .dma_req = 21 },
};

static struct omap_hwmod_addr_space omap3xxx_mcbsp5_addrs[] = {
	{
		.name		= "mpu",
		.pa_start	= 0x48096000,
		.pa_end		= 0x480960ff,
		.flags		= ADDR_TYPE_RT
	},
};

/* l4_core -> mcbsp5 */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__mcbsp5 = {
	.master		= &omap3xxx_l4_core_hwmod,
	.slave		= &omap3xxx_mcbsp5_hwmod,
	.clk		= "mcbsp5_ick",
	.addr		= omap3xxx_mcbsp5_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_mcbsp5_addrs),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* mcbsp5 slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_mcbsp5_slaves[] = {
	&omap3xxx_l4_core__mcbsp5,
};

static struct omap_hwmod omap3xxx_mcbsp5_hwmod = {
	.name		= "mcbsp5",
	.class		= &omap3xxx_mcbsp_hwmod_class,
	.mpu_irqs	= omap3xxx_mcbsp5_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_mcbsp5_irqs),
	.sdma_reqs	= omap3xxx_mcbsp5_sdma_chs,
	.sdma_reqs_cnt	= ARRAY_SIZE(omap3xxx_mcbsp5_sdma_chs),
	.main_clk	= "mcbsp5_fck",
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_MCBSP5_SHIFT,
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_MCBSP5_SHIFT,
		},
	},
	.slaves		= omap3xxx_mcbsp5_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_mcbsp5_slaves),
	.omap_chip	=
OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* 'mcbsp sidetone' class */

static struct omap_hwmod_class_sysconfig omap3xxx_mcbsp_sidetone_sysc = {
	.sysc_offs	= 0x0010,
	.sysc_flags	= SYSC_HAS_AUTOIDLE,
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap3xxx_mcbsp_sidetone_hwmod_class = {
	.name	= "mcbsp_sidetone",
	.sysc	= &omap3xxx_mcbsp_sidetone_sysc,
};

/* mcbsp2_sidetone */
static struct omap_hwmod_irq_info omap3xxx_mcbsp2_sidetone_irqs[] = {
	{ .name = "irq", .irq = 4 },
};

static struct omap_hwmod_addr_space omap3xxx_mcbsp2_sidetone_addrs[] = {
	{
		.name		= "sidetone",
		.pa_start	= 0x49028000,
		.pa_end		= 0x490280ff,
		.flags		= ADDR_TYPE_RT
	},
};

/* l4_per -> mcbsp2_sidetone */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp2_sidetone = {
	.master		= &omap3xxx_l4_per_hwmod,
	.slave		= &omap3xxx_mcbsp2_sidetone_hwmod,
	.clk		= "mcbsp2_ick",
	.addr		= omap3xxx_mcbsp2_sidetone_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_mcbsp2_sidetone_addrs),
	.user		= OCP_USER_MPU,
};

/* mcbsp2_sidetone slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_mcbsp2_sidetone_slaves[] = {
	&omap3xxx_l4_per__mcbsp2_sidetone,
};

static struct omap_hwmod omap3xxx_mcbsp2_sidetone_hwmod = {
	.name		= "mcbsp2_sidetone",
	.class		= &omap3xxx_mcbsp_sidetone_hwmod_class,
	.mpu_irqs	= omap3xxx_mcbsp2_sidetone_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_mcbsp2_sidetone_irqs),
	.main_clk	= "mcbsp2_fck",
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_MCBSP2_SHIFT,
			.module_offs = OMAP3430_PER_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_MCBSP2_SHIFT,
		},
	},
	.slaves		= omap3xxx_mcbsp2_sidetone_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_mcbsp2_sidetone_slaves),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* mcbsp3_sidetone */
static struct omap_hwmod_irq_info omap3xxx_mcbsp3_sidetone_irqs[] = {
	{ .name = "irq", .irq = 5 },
};

static struct omap_hwmod_addr_space omap3xxx_mcbsp3_sidetone_addrs[] = {
	{
		.name		= "sidetone",
		.pa_start	= 0x4902A000,
		.pa_end		= 0x4902A0ff,
		.flags		= ADDR_TYPE_RT
	},
};

/* l4_per -> mcbsp3_sidetone */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp3_sidetone = {
	.master		= &omap3xxx_l4_per_hwmod,
	.slave		= &omap3xxx_mcbsp3_sidetone_hwmod,
	.clk		= "mcbsp3_ick",
	.addr		= omap3xxx_mcbsp3_sidetone_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_mcbsp3_sidetone_addrs),
	.user		= OCP_USER_MPU,
};

/* mcbsp3_sidetone slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_mcbsp3_sidetone_slaves[] = {
	&omap3xxx_l4_per__mcbsp3_sidetone,
};

static struct omap_hwmod omap3xxx_mcbsp3_sidetone_hwmod = {
	.name		= "mcbsp3_sidetone",
	.class		= &omap3xxx_mcbsp_sidetone_hwmod_class,
	.mpu_irqs	= omap3xxx_mcbsp3_sidetone_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_mcbsp3_sidetone_irqs),
	.main_clk	= "mcbsp3_fck",
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_MCBSP3_SHIFT,
			.module_offs = OMAP3430_PER_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_MCBSP3_SHIFT,
		},
	},
	.slaves		= omap3xxx_mcbsp3_sidetone_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_mcbsp3_sidetone_slaves),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/*
 * SR common: SmartReflex voltage-control modules.  The OMAP34xx (rev 1)
 * and OMAP36xx (rev 2) IP blocks have different SYSCONFIG layouts, hence
 * two classes below.
 */
static struct omap_hwmod_sysc_fields omap34xx_sr_sysc_fields = {
	.clkact_shift	= 20,
};

static struct omap_hwmod_class_sysconfig omap34xx_sr_sysc = {
	.sysc_offs	= 0x24,
	.sysc_flags	= (SYSC_HAS_CLOCKACTIVITY | SYSC_NO_CACHE),
	.clockact	= CLOCKACT_TEST_ICLK,
	.sysc_fields	= &omap34xx_sr_sysc_fields,
};

static struct omap_hwmod_class omap34xx_smartreflex_hwmod_class = {
	.name		= "smartreflex",
	.sysc		= &omap34xx_sr_sysc,
	.rev		= 1,
};

static struct omap_hwmod_sysc_fields omap36xx_sr_sysc_fields = {
	.sidle_shift	= 24,
	.enwkup_shift	= 26
};

static struct omap_hwmod_class_sysconfig omap36xx_sr_sysc = {
	.sysc_offs	= 0x38,
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
			   SYSC_NO_CACHE),
	.sysc_fields	= &omap36xx_sr_sysc_fields,
};

static struct omap_hwmod_class omap36xx_smartreflex_hwmod_class = {
	.name		= "smartreflex",
	.sysc		=
&omap36xx_sr_sysc,
	.rev		= 2,
};

/* SR1 */
static struct omap_hwmod_ocp_if *omap3_sr1_slaves[] = {
	&omap3_l4_core__sr1,
};

static struct omap_hwmod omap34xx_sr1_hwmod = {
	.name		= "sr1_hwmod",
	.class		= &omap34xx_smartreflex_hwmod_class,
	.main_clk	= "sr1_fck",
	.vdd_name	= "mpu",
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_SR1_SHIFT,
			.module_offs = WKUP_MOD,
			.idlest_reg_id = 1,
			/*
			 * NOTE(review): idlest reuses the EN_SR1 shift
			 * rather than an ST_SR1 one — confirm against the
			 * 3430 PRCM register map.
			 */
			.idlest_idle_bit = OMAP3430_EN_SR1_SHIFT,
		},
	},
	.slaves		= omap3_sr1_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3_sr1_slaves),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES2 |
					CHIP_IS_OMAP3430ES3_0 |
					CHIP_IS_OMAP3430ES3_1),
	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
};

static struct omap_hwmod omap36xx_sr1_hwmod = {
	.name		= "sr1_hwmod",
	.class		= &omap36xx_smartreflex_hwmod_class,
	.main_clk	= "sr1_fck",
	.vdd_name	= "mpu",
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_SR1_SHIFT,
			.module_offs = WKUP_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_EN_SR1_SHIFT,
		},
	},
	.slaves		= omap3_sr1_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3_sr1_slaves),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3630ES1),
};

/* SR2 */
static struct omap_hwmod_ocp_if *omap3_sr2_slaves[] = {
	&omap3_l4_core__sr2,
};

static struct omap_hwmod omap34xx_sr2_hwmod = {
	.name		= "sr2_hwmod",
	.class		= &omap34xx_smartreflex_hwmod_class,
	.main_clk	= "sr2_fck",
	.vdd_name	= "core",
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_SR2_SHIFT,
			.module_offs = WKUP_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_EN_SR2_SHIFT,
		},
	},
	.slaves		= omap3_sr2_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3_sr2_slaves),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES2 |
					CHIP_IS_OMAP3430ES3_0 |
					CHIP_IS_OMAP3430ES3_1),
	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
};

static struct omap_hwmod omap36xx_sr2_hwmod = {
	.name		= "sr2_hwmod",
	.class		= &omap36xx_smartreflex_hwmod_class,
	.main_clk	= "sr2_fck",
	.vdd_name	= "core",
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_SR2_SHIFT,
			.module_offs = WKUP_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_EN_SR2_SHIFT,
		},
	},
	.slaves		= omap3_sr2_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3_sr2_slaves),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3630ES1),
};

/*
 * 'mailbox' class
 * mailbox module allowing communication between the on-chip processors
 * using a queued mailbox-interrupt mechanism.
 */

static struct omap_hwmod_class_sysconfig omap3xxx_mailbox_sysc = {
	.rev_offs	= 0x000,
	.sysc_offs	= 0x010,
	.syss_offs	= 0x014,
	.sysc_flags	= (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
				SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap3xxx_mailbox_hwmod_class = {
	.name = "mailbox",
	.sysc = &omap3xxx_mailbox_sysc,
};

/* tentative definition: the ocp_if below needs the hwmod's address */
static struct omap_hwmod omap3xxx_mailbox_hwmod;
static struct omap_hwmod_irq_info omap3xxx_mailbox_irqs[] = {
	{ .irq = 26 },
};

static struct omap_hwmod_addr_space omap3xxx_mailbox_addrs[] = {
	{
		.pa_start	= 0x48094000,
		.pa_end		= 0x480941ff,
		.flags		= ADDR_TYPE_RT,
	},
};

/* l4_core -> mailbox */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__mailbox = {
	.master		= &omap3xxx_l4_core_hwmod,
	.slave		= &omap3xxx_mailbox_hwmod,
	.addr		= omap3xxx_mailbox_addrs,
	.addr_cnt	= ARRAY_SIZE(omap3xxx_mailbox_addrs),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* mailbox slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_mailbox_slaves[] = {
	&omap3xxx_l4_core__mailbox,
};

static struct omap_hwmod omap3xxx_mailbox_hwmod = {
	.name		= "mailbox",
	.class		= &omap3xxx_mailbox_hwmod_class,
	.mpu_irqs	= omap3xxx_mailbox_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_mailbox_irqs),
	.main_clk	= "mailboxes_ick",
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_MAILBOXES_SHIFT,
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_MAILBOXES_SHIFT,
		},
	},
	.slaves		= omap3xxx_mailbox_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_mailbox_slaves),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* l4 core ->
mcspi1 interface */
static struct omap_hwmod_addr_space omap34xx_mcspi1_addr_space[] = {
	{
		.pa_start	= 0x48098000,
		.pa_end		= 0x480980ff,
		.flags		= ADDR_TYPE_RT,
	},
};

static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi1 = {
	.master		= &omap3xxx_l4_core_hwmod,
	.slave		= &omap34xx_mcspi1,
	.clk		= "mcspi1_ick",
	.addr		= omap34xx_mcspi1_addr_space,
	.addr_cnt	= ARRAY_SIZE(omap34xx_mcspi1_addr_space),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4 core -> mcspi2 interface */
static struct omap_hwmod_addr_space omap34xx_mcspi2_addr_space[] = {
	{
		.pa_start	= 0x4809a000,
		.pa_end		= 0x4809a0ff,
		.flags		= ADDR_TYPE_RT,
	},
};

static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi2 = {
	.master		= &omap3xxx_l4_core_hwmod,
	.slave		= &omap34xx_mcspi2,
	.clk		= "mcspi2_ick",
	.addr		= omap34xx_mcspi2_addr_space,
	.addr_cnt	= ARRAY_SIZE(omap34xx_mcspi2_addr_space),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4 core -> mcspi3 interface */
static struct omap_hwmod_addr_space omap34xx_mcspi3_addr_space[] = {
	{
		.pa_start	= 0x480b8000,
		.pa_end		= 0x480b80ff,
		.flags		= ADDR_TYPE_RT,
	},
};

static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi3 = {
	.master		= &omap3xxx_l4_core_hwmod,
	.slave		= &omap34xx_mcspi3,
	.clk		= "mcspi3_ick",
	.addr		= omap34xx_mcspi3_addr_space,
	.addr_cnt	= ARRAY_SIZE(omap34xx_mcspi3_addr_space),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* l4 core -> mcspi4 interface */
static struct omap_hwmod_addr_space omap34xx_mcspi4_addr_space[] = {
	{
		.pa_start	= 0x480ba000,
		.pa_end		= 0x480ba0ff,
		.flags		= ADDR_TYPE_RT,
	},
};

static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi4 = {
	.master		= &omap3xxx_l4_core_hwmod,
	.slave		= &omap34xx_mcspi4,
	.clk		= "mcspi4_ick",
	.addr		= omap34xx_mcspi4_addr_space,
	.addr_cnt	= ARRAY_SIZE(omap34xx_mcspi4_addr_space),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/*
 * 'mcspi' class
 * multichannel serial port interface (mcspi) / master/slave synchronous serial
 * bus
 */

static struct omap_hwmod_class_sysconfig omap34xx_mcspi_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
				SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
				SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap34xx_mcspi_class = {
	.name = "mcspi",
	.sysc = &omap34xx_mcspi_sysc,
	.rev = OMAP3_MCSPI_REV,
};

/* mcspi1 */
static struct omap_hwmod_irq_info omap34xx_mcspi1_mpu_irqs[] = {
	{ .name = "irq", .irq = 65 },
};

static struct omap_hwmod_dma_info omap34xx_mcspi1_sdma_reqs[] = {
	{ .name = "tx0", .dma_req = 35 },
	{ .name = "rx0", .dma_req = 36 },
	{ .name = "tx1", .dma_req = 37 },
	{ .name = "rx1", .dma_req = 38 },
	{ .name = "tx2", .dma_req = 39 },
	{ .name = "rx2", .dma_req = 40 },
	{ .name = "tx3", .dma_req = 41 },
	{ .name = "rx3", .dma_req = 42 },
};

static struct omap_hwmod_ocp_if *omap34xx_mcspi1_slaves[] = {
	&omap34xx_l4_core__mcspi1,
};

static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = {
	.num_chipselect = 4,
};

static struct omap_hwmod omap34xx_mcspi1 = {
	.name		= "mcspi1",
	.mpu_irqs	= omap34xx_mcspi1_mpu_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap34xx_mcspi1_mpu_irqs),
	.sdma_reqs	= omap34xx_mcspi1_sdma_reqs,
	.sdma_reqs_cnt	= ARRAY_SIZE(omap34xx_mcspi1_sdma_reqs),
	.main_clk	= "mcspi1_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_MCSPI1_SHIFT,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_MCSPI1_SHIFT,
		},
	},
	.slaves		= omap34xx_mcspi1_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap34xx_mcspi1_slaves),
	.class		= &omap34xx_mcspi_class,
	.dev_attr	= &omap_mcspi1_dev_attr,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* mcspi2 */
static struct omap_hwmod_irq_info omap34xx_mcspi2_mpu_irqs[] = {
	{ .name = "irq", .irq = 66 },
};

static struct omap_hwmod_dma_info omap34xx_mcspi2_sdma_reqs[] = {
	{ .name = "tx0", .dma_req = 43 },
	{ .name = "rx0", .dma_req = 44 },
	{ .name = "tx1", .dma_req = 45 },
	{ .name = "rx1", .dma_req
= 46 },
};

static struct omap_hwmod_ocp_if *omap34xx_mcspi2_slaves[] = {
	&omap34xx_l4_core__mcspi2,
};

static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = {
	.num_chipselect = 2,
};

static struct omap_hwmod omap34xx_mcspi2 = {
	.name		= "mcspi2",
	.mpu_irqs	= omap34xx_mcspi2_mpu_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap34xx_mcspi2_mpu_irqs),
	.sdma_reqs	= omap34xx_mcspi2_sdma_reqs,
	.sdma_reqs_cnt	= ARRAY_SIZE(omap34xx_mcspi2_sdma_reqs),
	.main_clk	= "mcspi2_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_MCSPI2_SHIFT,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_MCSPI2_SHIFT,
		},
	},
	.slaves		= omap34xx_mcspi2_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap34xx_mcspi2_slaves),
	.class		= &omap34xx_mcspi_class,
	.dev_attr	= &omap_mcspi2_dev_attr,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* mcspi3 */
static struct omap_hwmod_irq_info omap34xx_mcspi3_mpu_irqs[] = {
	{ .name = "irq", .irq = 91 }, /* 91 */
};

static struct omap_hwmod_dma_info omap34xx_mcspi3_sdma_reqs[] = {
	{ .name = "tx0", .dma_req = 15 },
	{ .name = "rx0", .dma_req = 16 },
	{ .name = "tx1", .dma_req = 23 },
	{ .name = "rx1", .dma_req = 24 },
};

static struct omap_hwmod_ocp_if *omap34xx_mcspi3_slaves[] = {
	&omap34xx_l4_core__mcspi3,
};

static struct omap2_mcspi_dev_attr omap_mcspi3_dev_attr = {
	.num_chipselect = 2,
};

static struct omap_hwmod omap34xx_mcspi3 = {
	.name		= "mcspi3",
	.mpu_irqs	= omap34xx_mcspi3_mpu_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap34xx_mcspi3_mpu_irqs),
	.sdma_reqs	= omap34xx_mcspi3_sdma_reqs,
	.sdma_reqs_cnt	= ARRAY_SIZE(omap34xx_mcspi3_sdma_reqs),
	.main_clk	= "mcspi3_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_MCSPI3_SHIFT,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_MCSPI3_SHIFT,
		},
	},
	.slaves		= omap34xx_mcspi3_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap34xx_mcspi3_slaves),
	.class		= &omap34xx_mcspi_class,
	.dev_attr	= &omap_mcspi3_dev_attr,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* SPI4 */
static struct omap_hwmod_irq_info omap34xx_mcspi4_mpu_irqs[] = {
	{ .name = "irq", .irq = INT_34XX_SPI4_IRQ }, /* 48 */
};

static struct omap_hwmod_dma_info omap34xx_mcspi4_sdma_reqs[] = {
	{ .name = "tx0", .dma_req = 70 }, /* DMA_SPI4_TX0 */
	{ .name = "rx0", .dma_req = 71 }, /* DMA_SPI4_RX0 */
};

static struct omap_hwmod_ocp_if *omap34xx_mcspi4_slaves[] = {
	&omap34xx_l4_core__mcspi4,
};

static struct omap2_mcspi_dev_attr omap_mcspi4_dev_attr = {
	.num_chipselect = 1,
};

static struct omap_hwmod omap34xx_mcspi4 = {
	.name		= "mcspi4",
	.mpu_irqs	= omap34xx_mcspi4_mpu_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap34xx_mcspi4_mpu_irqs),
	.sdma_reqs	= omap34xx_mcspi4_sdma_reqs,
	.sdma_reqs_cnt	= ARRAY_SIZE(omap34xx_mcspi4_sdma_reqs),
	.main_clk	= "mcspi4_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_MCSPI4_SHIFT,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_MCSPI4_SHIFT,
		},
	},
	.slaves		= omap34xx_mcspi4_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap34xx_mcspi4_slaves),
	.class		= &omap34xx_mcspi_class,
	.dev_attr	= &omap_mcspi4_dev_attr,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/*
 * usbhsotg
 */
static struct omap_hwmod_class_sysconfig omap3xxx_usbhsotg_sysc = {
	.rev_offs	= 0x0400,
	.sysc_offs	= 0x0404,
	.syss_offs	= 0x0408,
	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
			  SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
			  SYSC_HAS_AUTOIDLE),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
			  MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class usbotg_class = {
	.name = "usbotg",
	.sysc = &omap3xxx_usbhsotg_sysc,
};

/* usb_otg_hs */
static struct omap_hwmod_irq_info omap3xxx_usbhsotg_mpu_irqs[] = {
	{ .name = "mc", .irq = 92 },
	{ .name = "dma", .irq = 93 },
};

static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
	.name		= "usb_otg_hs",
	.mpu_irqs	= omap3xxx_usbhsotg_mpu_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_usbhsotg_mpu_irqs),
	.main_clk	= "hsotgusb_ick",
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT,
			.idlest_stdby_bit = OMAP3430ES2_ST_HSOTGUSB_STDBY_SHIFT
		},
	},
	.masters	= omap3xxx_usbhsotg_masters,
	.masters_cnt	= ARRAY_SIZE(omap3xxx_usbhsotg_masters),
	.slaves		= omap3xxx_usbhsotg_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_usbhsotg_slaves),
	.class		= &usbotg_class,

	/*
	 * Erratum ID: i479  idle_req / idle_ack mechanism potentially
	 * broken when autoidle is enabled
	 * workaround is to disable the autoidle bit at module level.
	 */
	.flags		= HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE
				| HWMOD_SWSUP_MSTANDBY,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
};

/* usb_otg_hs */
static struct omap_hwmod_irq_info am35xx_usbhsotg_mpu_irqs[] = {
	{ .name = "mc", .irq = 71 },
};

static struct omap_hwmod_class am35xx_usbotg_class = {
	.name = "am35xx_usbotg",
	.sysc = NULL,	/* no OCP SYSCONFIG handled through hwmod here */
};

static struct omap_hwmod am35xx_usbhsotg_hwmod = {
	.name		= "am35x_otg_hs",
	.mpu_irqs	= am35xx_usbhsotg_mpu_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(am35xx_usbhsotg_mpu_irqs),
	.main_clk	= NULL,
	.prcm = {
		.omap2 = {
		},
	},
	.masters	= am35xx_usbhsotg_masters,
	.masters_cnt	= ARRAY_SIZE(am35xx_usbhsotg_masters),
	.slaves		= am35xx_usbhsotg_slaves,
	.slaves_cnt	= ARRAY_SIZE(am35xx_usbhsotg_slaves),
	.class		= &am35xx_usbotg_class,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES3_1)
};

/* MMC/SD/SDIO common */

static struct omap_hwmod_class_sysconfig omap34xx_mmc_sysc = {
	.rev_offs	= 0x1fc,
	.sysc_offs	= 0x10,
	.syss_offs	= 0x14,
	.sysc_flags	= (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
			   SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap34xx_mmc_class = {
	.name = "mmc",
	.sysc = &omap34xx_mmc_sysc,
};

/* MMC/SD/SDIO1 */

static struct omap_hwmod_irq_info omap34xx_mmc1_mpu_irqs[] = {
	{ .irq = 83, },
};

static struct omap_hwmod_dma_info omap34xx_mmc1_sdma_reqs[] = {
	{ .name = "tx", .dma_req = 61, },
	{ .name = "rx", .dma_req = 62, },
};

/* "dbck" debounce clock is optional; sourced from the 32 kHz clock */
static struct omap_hwmod_opt_clk omap34xx_mmc1_opt_clks[] = {
	{ .role = "dbck", .clk = "omap_32k_fck", },
};

static struct omap_hwmod_ocp_if *omap3xxx_mmc1_slaves[] = {
	&omap3xxx_l4_core__mmc1,
};

static struct omap_mmc_dev_attr mmc1_dev_attr = {
	.flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
};

static struct omap_hwmod omap3xxx_mmc1_hwmod = {
	.name		= "mmc1",
	.mpu_irqs	= omap34xx_mmc1_mpu_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap34xx_mmc1_mpu_irqs),
	.sdma_reqs	= omap34xx_mmc1_sdma_reqs,
	.sdma_reqs_cnt	= ARRAY_SIZE(omap34xx_mmc1_sdma_reqs),
	.opt_clks	= omap34xx_mmc1_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(omap34xx_mmc1_opt_clks),
	.main_clk	= "mmchs1_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_MMC1_SHIFT,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_MMC1_SHIFT,
		},
	},
	.dev_attr	= &mmc1_dev_attr,
	.slaves		= omap3xxx_mmc1_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_mmc1_slaves),
	.class		= &omap34xx_mmc_class,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* MMC/SD/SDIO2 */

static struct omap_hwmod_irq_info omap34xx_mmc2_mpu_irqs[] = {
	{ .irq = INT_24XX_MMC2_IRQ, },
};

static struct omap_hwmod_dma_info omap34xx_mmc2_sdma_reqs[] = {
	{ .name = "tx", .dma_req = 47, },
	{ .name = "rx", .dma_req = 48, },
};

static struct omap_hwmod_opt_clk omap34xx_mmc2_opt_clks[] = {
	{ .role = "dbck", .clk = "omap_32k_fck", },
};

static struct omap_hwmod_ocp_if *omap3xxx_mmc2_slaves[] = {
	&omap3xxx_l4_core__mmc2,
};

static struct omap_hwmod omap3xxx_mmc2_hwmod = {
	.name		= "mmc2",
	.mpu_irqs	= omap34xx_mmc2_mpu_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap34xx_mmc2_mpu_irqs),
	.sdma_reqs	= omap34xx_mmc2_sdma_reqs,
	.sdma_reqs_cnt	= ARRAY_SIZE(omap34xx_mmc2_sdma_reqs),
	.opt_clks	= omap34xx_mmc2_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(omap34xx_mmc2_opt_clks),
	.main_clk	= "mmchs2_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_MMC2_SHIFT,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_MMC2_SHIFT,
		},
	},
	.slaves		= omap3xxx_mmc2_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_mmc2_slaves),
	.class		= &omap34xx_mmc_class,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* MMC/SD/SDIO3 */

static struct omap_hwmod_irq_info omap34xx_mmc3_mpu_irqs[] = {
	{ .irq = 94, },
};

static struct omap_hwmod_dma_info omap34xx_mmc3_sdma_reqs[] = {
	{ .name = "tx", .dma_req = 77, },
	{ .name = "rx", .dma_req = 78, },
};

static struct omap_hwmod_opt_clk omap34xx_mmc3_opt_clks[] = {
	{ .role = "dbck", .clk = "omap_32k_fck", },
};

static struct omap_hwmod_ocp_if *omap3xxx_mmc3_slaves[] = {
	&omap3xxx_l4_core__mmc3,
};

static struct omap_hwmod omap3xxx_mmc3_hwmod = {
	.name		= "mmc3",
	.mpu_irqs	= omap34xx_mmc3_mpu_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(omap34xx_mmc3_mpu_irqs),
	.sdma_reqs	= omap34xx_mmc3_sdma_reqs,
	.sdma_reqs_cnt	= ARRAY_SIZE(omap34xx_mmc3_sdma_reqs),
	.opt_clks	= omap34xx_mmc3_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(omap34xx_mmc3_opt_clks),
	.main_clk	= "mmchs3_fck",
	.prcm		= {
		.omap2 = {
			/*
			 * NOTE(review): unlike mmc1/mmc2, no .module_offs
			 * is given here (defaults to 0) — confirm that
			 * CORE_MOD is not required for MMC3.
			 */
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_MMC3_SHIFT,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP3430_ST_MMC3_SHIFT,
		},
	},
	.slaves		= omap3xxx_mmc3_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap3xxx_mmc3_slaves),
	.class		= &omap34xx_mmc_class,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

/* Master list of OMAP3 hwmods, registered by omap3xxx_hwmod_init(). */
static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
	&omap3xxx_l3_main_hwmod,
	&omap3xxx_l4_core_hwmod,
	&omap3xxx_l4_per_hwmod,
	&omap3xxx_l4_wkup_hwmod,
	&omap3xxx_mmc1_hwmod,
	&omap3xxx_mmc2_hwmod,
	&omap3xxx_mmc3_hwmod,
	&omap3xxx_mpu_hwmod,
	&omap3xxx_iva_hwmod,
	&omap3xxx_timer1_hwmod,
	&omap3xxx_timer2_hwmod,
	&omap3xxx_timer3_hwmod,
	&omap3xxx_timer4_hwmod,
	&omap3xxx_timer5_hwmod,
	&omap3xxx_timer6_hwmod,
	&omap3xxx_timer7_hwmod,
	&omap3xxx_timer8_hwmod,
	&omap3xxx_timer9_hwmod,
	&omap3xxx_timer10_hwmod,
	&omap3xxx_timer11_hwmod,
	&omap3xxx_timer12_hwmod,
	&omap3xxx_wd_timer2_hwmod,
	&omap3xxx_uart1_hwmod,
	&omap3xxx_uart2_hwmod,
	&omap3xxx_uart3_hwmod,
	&omap3xxx_uart4_hwmod,

	/* dss class */
	&omap3430es1_dss_core_hwmod,
	&omap3xxx_dss_core_hwmod,
	&omap3xxx_dss_dispc_hwmod,
	&omap3xxx_dss_dsi1_hwmod,
	&omap3xxx_dss_rfbi_hwmod,
	&omap3xxx_dss_venc_hwmod,

	/* i2c class */
	&omap3xxx_i2c1_hwmod,
	&omap3xxx_i2c2_hwmod,
	&omap3xxx_i2c3_hwmod,
	&omap34xx_sr1_hwmod,
	&omap34xx_sr2_hwmod,
	&omap36xx_sr1_hwmod,
	&omap36xx_sr2_hwmod,

	/* gpio class */
	&omap3xxx_gpio1_hwmod,
	&omap3xxx_gpio2_hwmod,
	&omap3xxx_gpio3_hwmod,
	&omap3xxx_gpio4_hwmod,
	&omap3xxx_gpio5_hwmod,
	&omap3xxx_gpio6_hwmod,

	/* dma_system class*/
	&omap3xxx_dma_system_hwmod,

	/* mcbsp class */
	&omap3xxx_mcbsp1_hwmod,
	&omap3xxx_mcbsp2_hwmod,
	&omap3xxx_mcbsp3_hwmod,
	&omap3xxx_mcbsp4_hwmod,
	&omap3xxx_mcbsp5_hwmod,
	&omap3xxx_mcbsp2_sidetone_hwmod,
	&omap3xxx_mcbsp3_sidetone_hwmod,

	/* mailbox class */
	&omap3xxx_mailbox_hwmod,

	/* mcspi class */
	&omap34xx_mcspi1,
	&omap34xx_mcspi2,
	&omap34xx_mcspi3,
	&omap34xx_mcspi4,

	/* usbotg class */
	&omap3xxx_usbhsotg_hwmod,

	/* usbotg for am35x */
	&am35xx_usbhsotg_hwmod,

	NULL,
};

/* Register every hwmod in the list above with the hwmod core. */
int __init omap3xxx_hwmod_init(void)
{
	return omap_hwmod_register(omap3xxx_hwmods);
}
gpl-2.0
izzyf/T.E.S.C.O-kernel_vivo_minimal
drivers/tty/serial/vt8500_serial.c
2380
16008
/*
 * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
 *
 * Based on msm_serial.c, which is:
 * Copyright (C) 2007 Google, Inc.
 * Author: Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#if defined(CONFIG_SERIAL_VT8500_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
# define SUPPORT_SYSRQ
#endif

#include <linux/hrtimer.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

/*
 * UART Register offsets
 */

#define VT8500_URTDR		0x0000	/* Transmit data */
#define VT8500_URRDR		0x0004	/* Receive data */
#define VT8500_URDIV		0x0008	/* Clock/Baud rate divisor */
#define VT8500_URLCR		0x000C	/* Line control */
#define VT8500_URICR		0x0010	/* IrDA control */
#define VT8500_URIER		0x0014	/* Interrupt enable */
#define VT8500_URISR		0x0018	/* Interrupt status */
#define VT8500_URUSR		0x001c	/* UART status */
#define VT8500_URFCR		0x0020	/* FIFO control */
#define VT8500_URFIDX		0x0024	/* FIFO index */
#define VT8500_URBKR		0x0028	/* Break signal count */
#define VT8500_URTOD		0x002c	/* Time out divisor */
#define VT8500_TXFIFO		0x1000	/* Transmit FIFO (16x8) */
#define VT8500_RXFIFO		0x1020	/* Receive FIFO (16x10) */

/*
 * Interrupt enable and status bits
 */

#define TXDE	(1 << 0)	/* Tx Data empty */
#define RXDF	(1 << 1)	/* Rx Data full */
#define TXFAE	(1 << 2)	/* Tx FIFO almost empty */
#define TXFE	(1 << 3)	/* Tx FIFO empty */
#define RXFAF	(1 << 4)	/* Rx FIFO almost full */
#define RXFF	(1 << 5)	/* Rx FIFO full */
#define TXUDR	(1 << 6)	/* Tx underrun */
#define RXOVER	(1 << 7)	/* Rx overrun */
#define PER	(1 << 8)	/* Parity error */
#define FER	(1 << 9)	/* Frame error */
#define TCTS	(1 << 10)	/* Toggle of CTS */
#define RXTOUT	(1 << 11)	/* Rx timeout */
#define BKDONE	(1 << 12)	/* Break signal done */
#define ERR	(1 << 13)	/* AHB error response */

#define RX_FIFO_INTS	(RXFAF | RXFF | RXOVER | PER | FER | RXTOUT)
#define TX_FIFO_INTS	(TXFAE | TXFE | TXUDR)

/* Per-port driver state wrapping the serial-core uart_port. */
struct vt8500_port {
	struct uart_port	uart;
	char			name[16];	/* used for IRQ and port-type names */
	struct clk		*clk;		/* NOTE(review): never assigned in this file */
	unsigned int		ier;		/* software copy of the URIER register */
};

/* Write @val to the register at byte offset @off. */
static inline void vt8500_write(struct uart_port *port, unsigned int val,
			     unsigned int off)
{
	writel(val, port->membase + off);
}

/* Read the register at byte offset @off. */
static inline unsigned int vt8500_read(struct uart_port *port, unsigned int off)
{
	return readl(port->membase + off);
}

/* Mask all TX FIFO interrupts. */
static void vt8500_stop_tx(struct uart_port *port)
{
	struct vt8500_port *vt8500_port = container_of(port,
						       struct vt8500_port, uart);

	vt8500_port->ier &= ~TX_FIFO_INTS;
	vt8500_write(port, vt8500_port->ier, VT8500_URIER);
}

/* Mask all RX FIFO interrupts. */
static void vt8500_stop_rx(struct uart_port *port)
{
	struct vt8500_port *vt8500_port = container_of(port,
						       struct vt8500_port, uart);

	vt8500_port->ier &= ~RX_FIFO_INTS;
	vt8500_write(port, vt8500_port->ier, VT8500_URIER);
}

/* Enable the modem-status (CTS toggle) interrupt. */
static void vt8500_enable_ms(struct uart_port *port)
{
	struct vt8500_port *vt8500_port = container_of(port,
						       struct vt8500_port, uart);

	vt8500_port->ier |= TCTS;
	vt8500_write(port, vt8500_port->ier, VT8500_URIER);
}

/* Drain the RX FIFO into the tty layer; called from the IRQ handler. */
static void handle_rx(struct uart_port *port)
{
	struct tty_struct *tty = tty_port_tty_get(&port->state->port);
	if (!tty) {
		/* Discard data: no tty available */
		/* URFIDX bits 12:8 hold the RX FIFO fill level */
		int count = (vt8500_read(port, VT8500_URFIDX) & 0x1f00) >> 8;
		u16 ch;

		while (count--)
			ch = readw(port->membase + VT8500_RXFIFO);
		return;
	}

	/*
	 * Handle overrun
	 */
	if ((vt8500_read(port, VT8500_URISR) & RXOVER)) {
		port->icount.overrun++;
		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
	}

	/* and now the main RX loop */
	while (vt8500_read(port, VT8500_URFIDX) & 0x1f00) {
		unsigned int c;
		char flag = TTY_NORMAL;

		/* low 8 bits are data, bits 9:8 carry the error flags */
		c = readw(port->membase + VT8500_RXFIFO) & 0x3ff;

		/* Mask conditions we're ignoring. */
		c &= ~port->read_status_mask;

		if (c & FER) {
			port->icount.frame++;
			flag = TTY_FRAME;
		} else if (c & PER) {
			port->icount.parity++;
			flag = TTY_PARITY;
		}
		port->icount.rx++;

		if (!uart_handle_sysrq_char(port, c))
			tty_insert_flip_char(tty, c, flag);
	}

	tty_flip_buffer_push(tty);
	tty_kref_put(tty);
}

/* Refill the TX FIFO from the circular buffer; stops TX when drained. */
static void handle_tx(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;

	if (port->x_char) {
		/* high-priority character (e.g. XON/XOFF) jumps the queue */
		writeb(port->x_char, port->membase + VT8500_TXFIFO);
		port->icount.tx++;
		port->x_char = 0;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		vt8500_stop_tx(port);
		return;
	}

	/* URFIDX bits 4:0 hold the TX FIFO fill level (16 entries) */
	while ((vt8500_read(port, VT8500_URFIDX) & 0x1f) < 16) {
		if (uart_circ_empty(xmit))
			break;

		writeb(xmit->buf[xmit->tail], port->membase + VT8500_TXFIFO);

		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit))
		vt8500_stop_tx(port);
}

/* Prime the FIFO with TX interrupts masked, then enable them. */
static void vt8500_start_tx(struct uart_port *port)
{
	struct vt8500_port *vt8500_port = container_of(port,
						       struct vt8500_port, uart);

	vt8500_port->ier &= ~TX_FIFO_INTS;
	vt8500_write(port, vt8500_port->ier, VT8500_URIER);
	handle_tx(port);
	vt8500_port->ier |= TX_FIFO_INTS;
	vt8500_write(port, vt8500_port->ier, VT8500_URIER);
}

/* Account a CTS change and wake anyone blocked in TIOCMIWAIT. */
static void handle_delta_cts(struct uart_port *port)
{
	port->icount.cts++;
	wake_up_interruptible(&port->state->port.delta_msr_wait);
}

/* Top-level IRQ handler: ack active status bits, dispatch RX/TX/CTS work. */
static irqreturn_t vt8500_irq(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	unsigned long isr;

	spin_lock(&port->lock);
	isr = vt8500_read(port, VT8500_URISR);

	/* Acknowledge active status bits */
	vt8500_write(port, isr, VT8500_URISR);

	if (isr & RX_FIFO_INTS)
		handle_rx(port);
	if (isr & TX_FIFO_INTS)
		handle_tx(port);
	if (isr & TCTS)
		handle_delta_cts(port);
	spin_unlock(&port->lock);

	return IRQ_HANDLED;
}

static unsigned int vt8500_tx_empty(struct uart_port *port)
{
	/*
	 * NOTE(review): this reports TEMT whenever the TX FIFO merely has
	 * room (fill level < 16), not when it is fully drained — confirm
	 * that this is intended.
	 */
	return (vt8500_read(port, VT8500_URFIDX) & 0x1f) < 16 ?
						TIOCSER_TEMT : 0;
}

static unsigned int vt8500_get_mctrl(struct uart_port *port)
{
	unsigned int usr;

	usr = vt8500_read(port, VT8500_URUSR);
	if (usr & (1 << 4))	/* URUSR bit 4: CTS state */
		return TIOCM_CTS;
	else
		return 0;
}

/* No software-controllable modem outputs on this UART. */
static void vt8500_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}

static void vt8500_break_ctl(struct uart_port *port, int break_ctl)
{
	/*
	 * NOTE(review): only the "start break" case sets URLCR bit 9;
	 * there is no explicit stop path here — presumably the break is
	 * hardware-timed (URBKR is the break signal count); confirm.
	 */
	if (break_ctl)
		vt8500_write(port,
			     vt8500_read(port, VT8500_URLCR) | (1 << 9),
			     VT8500_URLCR);
}

/* Program the baud divisor; returns the baud rate that was requested. */
static int vt8500_set_baud_rate(struct uart_port *port, unsigned int baud)
{
	unsigned long div;
	unsigned int loops = 1000;

	div = vt8500_read(port, VT8500_URDIV) & ~(0x3ff);

	if (unlikely((baud < 900) || (baud > 921600)))
		div |= 7;	/* out of range: fall back to divisor 7 */
	else
		div |= (921600 / baud) - 1;

	/* wait (bounded) for the divisor-busy bit (URUSR bit 5) to clear */
	while ((vt8500_read(port, VT8500_URUSR) & (1 << 5)) && --loops)
		cpu_relax();

	vt8500_write(port, div, VT8500_URDIV);

	return baud;
}

/* Request the IRQ and enable the transmitter/receiver. */
static int vt8500_startup(struct uart_port *port)
{
	struct vt8500_port *vt8500_port =
			container_of(port, struct vt8500_port, uart);
	int ret;

	snprintf(vt8500_port->name, sizeof(vt8500_port->name),
		 "vt8500_serial%d", port->line);

	ret = request_irq(port->irq, vt8500_irq, IRQF_TRIGGER_HIGH,
			  vt8500_port->name, port);
	if (unlikely(ret))
		return ret;

	vt8500_write(port, 0x03, VT8500_URLCR);	/* enable TX & RX */

	return 0;
}

static void vt8500_shutdown(struct uart_port *port)
{
	struct vt8500_port *vt8500_port =
			container_of(port, struct vt8500_port, uart);

	vt8500_port->ier = 0;

	/* disable interrupts and FIFOs */
	vt8500_write(&vt8500_port->uart, 0, VT8500_URIER);
	vt8500_write(&vt8500_port->uart, 0x880, VT8500_URFCR);
	free_irq(port->irq, port);
}

/* Apply new line settings: baud, parity, word size, stop bits, flow. */
static void vt8500_set_termios(struct uart_port *port,
			       struct ktermios *termios,
			       struct ktermios *old)
{
	struct vt8500_port *vt8500_port =
			container_of(port, struct vt8500_port, uart);
	unsigned long
	flags;
	unsigned int baud, lcr;
	unsigned int loops = 1000;

	spin_lock_irqsave(&port->lock, flags);

	/* calculate and set baud rate */
	baud = uart_get_baud_rate(port, termios, old, 900, 921600);
	baud = vt8500_set_baud_rate(port, baud);
	if (tty_termios_baud_rate(termios))
		tty_termios_encode_baud_rate(termios, baud, baud);

	/* calculate parity */
	lcr = vt8500_read(&vt8500_port->uart, VT8500_URLCR);
	lcr &= ~((1 << 5) | (1 << 4));	/* clear parity-odd and parity-enable */
	if (termios->c_cflag & PARENB) {
		lcr |= (1 << 4);
		termios->c_cflag &= ~CMSPAR;	/* mark/space parity unsupported */
		if (termios->c_cflag & PARODD)
			lcr |= (1 << 5);
	}

	/* calculate bits per char */
	lcr &= ~(1 << 2);
	switch (termios->c_cflag & CSIZE) {
	case CS7:
		break;
	case CS8:
	default:
		/* anything other than CS7 is coerced to 8 data bits */
		lcr |= (1 << 2);
		termios->c_cflag &= ~CSIZE;
		termios->c_cflag |= CS8;
		break;
	}

	/* calculate stop bits */
	lcr &= ~(1 << 3);
	if (termios->c_cflag & CSTOPB)
		lcr |= (1 << 3);

	/* set parity, bits per char, and stop bit */
	vt8500_write(&vt8500_port->uart, lcr, VT8500_URLCR);

	/* Configure status bits to ignore based on termio flags. */
	port->read_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->read_status_mask = FER | PER;

	uart_update_timeout(port, termios->c_cflag, baud);

	/* Reset FIFOs */
	vt8500_write(&vt8500_port->uart, 0x88c, VT8500_URFCR);
	while ((vt8500_read(&vt8500_port->uart, VT8500_URFCR) & 0xc)
							&& --loops)
		cpu_relax();

	/* Every possible FIFO-related interrupt */
	vt8500_port->ier = RX_FIFO_INTS | TX_FIFO_INTS;

	/*
	 * CTS flow control
	 */
	if (UART_ENABLE_MS(&vt8500_port->uart, termios->c_cflag))
		vt8500_port->ier |= TCTS;

	vt8500_write(&vt8500_port->uart, 0x881, VT8500_URFCR);
	vt8500_write(&vt8500_port->uart, vt8500_port->ier, VT8500_URIER);

	spin_unlock_irqrestore(&port->lock, flags);
}

/* Port "type" string shown in /proc/tty and TIOCGSERIAL. */
static const char *vt8500_type(struct uart_port *port)
{
	struct vt8500_port *vt8500_port =
			container_of(port, struct vt8500_port, uart);
	return vt8500_port->name;
}

/* Resources are owned by probe(); nothing to release per-port. */
static void vt8500_release_port(struct uart_port *port)
{
}

static int vt8500_request_port(struct uart_port *port)
{
	return 0;
}

static void vt8500_config_port(struct uart_port *port, int flags)
{
	port->type = PORT_VT8500;
}

static int vt8500_verify_port(struct uart_port *port,
			      struct serial_struct *ser)
{
	if (unlikely(ser->type != PORT_UNKNOWN && ser->type != PORT_VT8500))
		return -EINVAL;
	if (unlikely(port->irq != ser->irq))
		return -EINVAL;
	return 0;
}

/*
 * NOTE(review): only 4 slots here while vt8500_uart_driver.nr is 6, and
 * probe() indexes this array with pdev->id — confirm the sizes agree.
 */
static struct vt8500_port *vt8500_uart_ports[4];
static struct uart_driver vt8500_uart_driver;

#ifdef CONFIG_SERIAL_VT8500_CONSOLE

static inline void wait_for_xmitr(struct uart_port *port)
{
	unsigned int status, tmout = 10000;

	/* Wait up to 10ms for the character(s) to be sent.
*/ do { status = vt8500_read(port, VT8500_URFIDX); if (--tmout == 0) break; udelay(1); } while (status & 0x10); } static void vt8500_console_putchar(struct uart_port *port, int c) { wait_for_xmitr(port); writeb(c, port->membase + VT8500_TXFIFO); } static void vt8500_console_write(struct console *co, const char *s, unsigned int count) { struct vt8500_port *vt8500_port = vt8500_uart_ports[co->index]; unsigned long ier; BUG_ON(co->index < 0 || co->index >= vt8500_uart_driver.nr); ier = vt8500_read(&vt8500_port->uart, VT8500_URIER); vt8500_write(&vt8500_port->uart, VT8500_URIER, 0); uart_console_write(&vt8500_port->uart, s, count, vt8500_console_putchar); /* * Finally, wait for transmitter to become empty * and switch back to FIFO */ wait_for_xmitr(&vt8500_port->uart); vt8500_write(&vt8500_port->uart, VT8500_URIER, ier); } static int __init vt8500_console_setup(struct console *co, char *options) { struct vt8500_port *vt8500_port; int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n'; if (unlikely(co->index >= vt8500_uart_driver.nr || co->index < 0)) return -ENXIO; vt8500_port = vt8500_uart_ports[co->index]; if (!vt8500_port) return -ENODEV; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); return uart_set_options(&vt8500_port->uart, co, baud, parity, bits, flow); } static struct console vt8500_console = { .name = "ttyWMT", .write = vt8500_console_write, .device = uart_console_device, .setup = vt8500_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &vt8500_uart_driver, }; #define VT8500_CONSOLE (&vt8500_console) #else #define VT8500_CONSOLE NULL #endif static struct uart_ops vt8500_uart_pops = { .tx_empty = vt8500_tx_empty, .set_mctrl = vt8500_set_mctrl, .get_mctrl = vt8500_get_mctrl, .stop_tx = vt8500_stop_tx, .start_tx = vt8500_start_tx, .stop_rx = vt8500_stop_rx, .enable_ms = vt8500_enable_ms, .break_ctl = vt8500_break_ctl, .startup = vt8500_startup, .shutdown = vt8500_shutdown, .set_termios = vt8500_set_termios, .type = 
vt8500_type, .release_port = vt8500_release_port, .request_port = vt8500_request_port, .config_port = vt8500_config_port, .verify_port = vt8500_verify_port, }; static struct uart_driver vt8500_uart_driver = { .owner = THIS_MODULE, .driver_name = "vt8500_serial", .dev_name = "ttyWMT", .nr = 6, .cons = VT8500_CONSOLE, }; static int __init vt8500_serial_probe(struct platform_device *pdev) { struct vt8500_port *vt8500_port; struct resource *mmres, *irqres; int ret; mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0); irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!mmres || !irqres) return -ENODEV; vt8500_port = kzalloc(sizeof(struct vt8500_port), GFP_KERNEL); if (!vt8500_port) return -ENOMEM; vt8500_port->uart.type = PORT_VT8500; vt8500_port->uart.iotype = UPIO_MEM; vt8500_port->uart.mapbase = mmres->start; vt8500_port->uart.irq = irqres->start; vt8500_port->uart.fifosize = 16; vt8500_port->uart.ops = &vt8500_uart_pops; vt8500_port->uart.line = pdev->id; vt8500_port->uart.dev = &pdev->dev; vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF; vt8500_port->uart.uartclk = 24000000; snprintf(vt8500_port->name, sizeof(vt8500_port->name), "VT8500 UART%d", pdev->id); vt8500_port->uart.membase = ioremap(mmres->start, mmres->end - mmres->start + 1); if (!vt8500_port->uart.membase) { ret = -ENOMEM; goto err; } vt8500_uart_ports[pdev->id] = vt8500_port; uart_add_one_port(&vt8500_uart_driver, &vt8500_port->uart); platform_set_drvdata(pdev, vt8500_port); return 0; err: kfree(vt8500_port); return ret; } static int __devexit vt8500_serial_remove(struct platform_device *pdev) { struct vt8500_port *vt8500_port = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); uart_remove_one_port(&vt8500_uart_driver, &vt8500_port->uart); kfree(vt8500_port); return 0; } static struct platform_driver vt8500_platform_driver = { .probe = vt8500_serial_probe, .remove = vt8500_serial_remove, .driver = { .name = "vt8500_serial", .owner = THIS_MODULE, }, }; static int 
__init vt8500_serial_init(void) { int ret; ret = uart_register_driver(&vt8500_uart_driver); if (unlikely(ret)) return ret; ret = platform_driver_register(&vt8500_platform_driver); if (unlikely(ret)) uart_unregister_driver(&vt8500_uart_driver); return ret; } static void __exit vt8500_serial_exit(void) { #ifdef CONFIG_SERIAL_VT8500_CONSOLE unregister_console(&vt8500_console); #endif platform_driver_unregister(&vt8500_platform_driver); uart_unregister_driver(&vt8500_uart_driver); } module_init(vt8500_serial_init); module_exit(vt8500_serial_exit); MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>"); MODULE_DESCRIPTION("Driver for vt8500 serial device"); MODULE_LICENSE("GPL");
gpl-2.0
sakindia123/haxxynos_kernel_s2
drivers/staging/comedi/drivers/pcl818.c
2380
57285
/* comedi/drivers/pcl818.c Author: Michal Dobes <dobes@tesnet.cz> hardware driver for Advantech cards: card: PCL-818L, PCL-818H, PCL-818HD, PCL-818HG, PCL-818, PCL-718 driver: pcl818l, pcl818h, pcl818hd, pcl818hg, pcl818, pcl718 */ /* Driver: pcl818 Description: Advantech PCL-818 cards, PCL-718 Author: Michal Dobes <dobes@tesnet.cz> Devices: [Advantech] PCL-818L (pcl818l), PCL-818H (pcl818h), PCL-818HD (pcl818hd), PCL-818HG (pcl818hg), PCL-818 (pcl818), PCL-718 (pcl718) Status: works All cards have 16 SE/8 DIFF ADCs, one or two DACs, 16 DI and 16 DO. Differences are only at maximal sample speed, range list and FIFO support. The driver support AI mode 0, 1, 3 other subdevices (AO, DI, DO) support only mode 0. If DMA/FIFO/INT are disabled then AI support only mode 0. PCL-818HD and PCL-818HG support 1kword FIFO. Driver support this FIFO but this code is untested. A word or two about DMA. Driver support DMA operations at two ways: 1) DMA uses two buffers and after one is filled then is generated INT and DMA restart with second buffer. With this mode I'm unable run more that 80Ksamples/secs without data dropouts on K6/233. 2) DMA uses one buffer and run in autoinit mode and the data are from DMA buffer moved on the fly with 2kHz interrupts from RTC. This mode is used if the interrupt 8 is available for allocation. If not, then first DMA mode is used. With this I can run at full speed one card (100ksamples/secs) or two cards with 60ksamples/secs each (more is problem on account of ISA limitations). To use this mode you must have compiled kernel with disabled "Enhanced Real Time Clock Support". Maybe you can have problems if you use xntpd or similar. If you've data dropouts with DMA mode 2 then: a) disable IDE DMA b) switch text mode console to fb. Options for PCL-818L: [0] - IO Base [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7) [2] - DMA (0=disable, 1, 3) [3] - 0, 10=10MHz clock for 8254 1= 1MHz clock for 8254 [4] - 0, 5=A/D input -5V.. 
+5V 1, 10=A/D input -10V..+10V [5] - 0, 5=D/A output 0-5V (internal reference -5V) 1, 10=D/A output 0-10V (internal reference -10V) 2 =D/A output unknown (external reference) Options for PCL-818, PCL-818H: [0] - IO Base [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7) [2] - DMA (0=disable, 1, 3) [3] - 0, 10=10MHz clock for 8254 1= 1MHz clock for 8254 [4] - 0, 5=D/A output 0-5V (internal reference -5V) 1, 10=D/A output 0-10V (internal reference -10V) 2 =D/A output unknown (external reference) Options for PCL-818HD, PCL-818HG: [0] - IO Base [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7) [2] - DMA/FIFO (-1=use FIFO, 0=disable both FIFO and DMA, 1=use DMA ch 1, 3=use DMA ch 3) [3] - 0, 10=10MHz clock for 8254 1= 1MHz clock for 8254 [4] - 0, 5=D/A output 0-5V (internal reference -5V) 1, 10=D/A output 0-10V (internal reference -10V) 2 =D/A output unknown (external reference) Options for PCL-718: [0] - IO Base [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7) [2] - DMA (0=disable, 1, 3) [3] - 0, 10=10MHz clock for 8254 1= 1MHz clock for 8254 [4] - 0=A/D Range is +/-10V 1= +/-5V 2= +/-2.5V 3= +/-1V 4= +/-0.5V 5= user defined bipolar 6= 0-10V 7= 0-5V 8= 0-2V 9= 0-1V 10= user defined unipolar [5] - 0, 5=D/A outputs 0-5V (internal reference -5V) 1, 10=D/A outputs 0-10V (internal reference -10V) 2=D/A outputs unknown (external reference) [6] - 0, 60=max 60kHz A/D sampling 1,100=max 100kHz A/D sampling (PCL-718 with Option 001 installed) */ #include "../comedidev.h" #include <linux/ioport.h> #include <linux/mc146818rtc.h> #include <linux/gfp.h> #include <linux/delay.h> #include <asm/dma.h> #include "8253.h" /* #define PCL818_MODE13_AO 1 */ /* boards constants */ #define boardPCL818L 0 #define boardPCL818H 1 #define boardPCL818HD 2 #define boardPCL818HG 3 #define boardPCL818 4 #define boardPCL718 5 /* IO space len */ #define PCLx1x_RANGE 16 /* IO space len if we use FIFO */ #define PCLx1xFIFO_RANGE 32 /* W: clear INT request */ #define PCL818_CLRINT 8 /* R: return status byte */ #define PCL818_STATUS 8 
/* R: A/D high byte W: A/D range control */ #define PCL818_RANGE 1 /* R: next mux scan channel W: mux scan channel & range control pointer */ #define PCL818_MUX 2 /* R/W: operation control register */ #define PCL818_CONTROL 9 /* W: counter enable */ #define PCL818_CNTENABLE 10 /* R: low byte of A/D W: soft A/D trigger */ #define PCL818_AD_LO 0 /* R: high byte of A/D W: A/D range control */ #define PCL818_AD_HI 1 /* W: D/A low&high byte */ #define PCL818_DA_LO 4 #define PCL818_DA_HI 5 /* R: low&high byte of DI */ #define PCL818_DI_LO 3 #define PCL818_DI_HI 11 /* W: low&high byte of DO */ #define PCL818_DO_LO 3 #define PCL818_DO_HI 11 /* W: PCL718 second D/A */ #define PCL718_DA2_LO 6 #define PCL718_DA2_HI 7 /* counters */ #define PCL818_CTR0 12 #define PCL818_CTR1 13 #define PCL818_CTR2 14 /* W: counter control */ #define PCL818_CTRCTL 15 /* W: fifo enable/disable */ #define PCL818_FI_ENABLE 6 /* W: fifo interrupt clear */ #define PCL818_FI_INTCLR 20 /* W: fifo interrupt clear */ #define PCL818_FI_FLUSH 25 /* R: fifo status */ #define PCL818_FI_STATUS 25 /* R: one record from FIFO */ #define PCL818_FI_DATALO 23 #define PCL818_FI_DATAHI 23 /* type of interrupt handler */ #define INT_TYPE_AI1_INT 1 #define INT_TYPE_AI1_DMA 2 #define INT_TYPE_AI1_FIFO 3 #define INT_TYPE_AI3_INT 4 #define INT_TYPE_AI3_DMA 5 #define INT_TYPE_AI3_FIFO 6 #ifdef PCL818_MODE13_AO #define INT_TYPE_AO1_INT 7 #define INT_TYPE_AO3_INT 8 #endif #ifdef unused /* RTC stuff... 
*/ #define INT_TYPE_AI1_DMA_RTC 9 #define INT_TYPE_AI3_DMA_RTC 10 #define RTC_IRQ 8 #define RTC_IO_EXTENT 0x10 #endif #define MAGIC_DMA_WORD 0x5a5a static const struct comedi_lrange range_pcl818h_ai = { 9, { BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1.25), BIP_RANGE(0.625), UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2.5), UNI_RANGE(1.25), BIP_RANGE(10), } }; static const struct comedi_lrange range_pcl818hg_ai = { 10, { BIP_RANGE(5), BIP_RANGE(0.5), BIP_RANGE(0.05), BIP_RANGE(0.005), UNI_RANGE(10), UNI_RANGE(1), UNI_RANGE(0.1), UNI_RANGE(0.01), BIP_RANGE(10), BIP_RANGE(1), BIP_RANGE(0.1), BIP_RANGE(0.01), } }; static const struct comedi_lrange range_pcl818l_l_ai = { 4, { BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1.25), BIP_RANGE(0.625), } }; static const struct comedi_lrange range_pcl818l_h_ai = { 4, { BIP_RANGE(10), BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1.25), } }; static const struct comedi_lrange range718_bipolar1 = { 1, {BIP_RANGE(1),} }; static const struct comedi_lrange range718_bipolar0_5 = { 1, {BIP_RANGE(0.5),} }; static const struct comedi_lrange range718_unipolar2 = { 1, {UNI_RANGE(2),} }; static const struct comedi_lrange range718_unipolar1 = { 1, {BIP_RANGE(1),} }; static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int pcl818_detach(struct comedi_device *dev); #ifdef unused static int RTC_lock = 0; /* RTC lock */ static int RTC_timer_lock = 0; /* RTC int lock */ #endif struct pcl818_board { const char *name; /* driver name */ int n_ranges; /* len of range list */ int n_aichan_se; /* num of A/D chans in single ended mode */ int n_aichan_diff; /* num of A/D chans in diferencial mode */ unsigned int ns_min; /* minimal allowed delay between samples (in ns) */ int n_aochan; /* num of D/A chans */ int n_dichan; /* num of DI chans */ int n_dochan; /* num of DO chans */ const struct comedi_lrange *ai_range_type; /* default A/D rangelist */ const struct comedi_lrange *ao_range_type; /* default D/A rangelist */ unsigned int 
io_range; /* len of IO space */ unsigned int IRQbits; /* allowed interrupts */ unsigned int DMAbits; /* allowed DMA chans */ int ai_maxdata; /* maxdata for A/D */ int ao_maxdata; /* maxdata for D/A */ unsigned char fifo; /* 1=board has FIFO */ int is_818; }; static const struct pcl818_board boardtypes[] = { {"pcl818l", 4, 16, 8, 25000, 1, 16, 16, &range_pcl818l_l_ai, &range_unipolar5, PCLx1x_RANGE, 0x00fc, 0x0a, 0xfff, 0xfff, 0, 1}, {"pcl818h", 9, 16, 8, 10000, 1, 16, 16, &range_pcl818h_ai, &range_unipolar5, PCLx1x_RANGE, 0x00fc, 0x0a, 0xfff, 0xfff, 0, 1}, {"pcl818hd", 9, 16, 8, 10000, 1, 16, 16, &range_pcl818h_ai, &range_unipolar5, PCLx1x_RANGE, 0x00fc, 0x0a, 0xfff, 0xfff, 1, 1}, {"pcl818hg", 12, 16, 8, 10000, 1, 16, 16, &range_pcl818hg_ai, &range_unipolar5, PCLx1x_RANGE, 0x00fc, 0x0a, 0xfff, 0xfff, 1, 1}, {"pcl818", 9, 16, 8, 10000, 2, 16, 16, &range_pcl818h_ai, &range_unipolar5, PCLx1x_RANGE, 0x00fc, 0x0a, 0xfff, 0xfff, 0, 1}, {"pcl718", 1, 16, 8, 16000, 2, 16, 16, &range_unipolar5, &range_unipolar5, PCLx1x_RANGE, 0x00fc, 0x0a, 0xfff, 0xfff, 0, 0}, /* pcm3718 */ {"pcm3718", 9, 16, 8, 10000, 0, 16, 16, &range_pcl818h_ai, &range_unipolar5, PCLx1x_RANGE, 0x00fc, 0x0a, 0xfff, 0xfff, 0, 1 /* XXX ? 
*/ }, }; #define n_boardtypes (sizeof(boardtypes)/sizeof(struct pcl818_board)) static struct comedi_driver driver_pcl818 = { .driver_name = "pcl818", .module = THIS_MODULE, .attach = pcl818_attach, .detach = pcl818_detach, .board_name = &boardtypes[0].name, .num_names = n_boardtypes, .offset = sizeof(struct pcl818_board), }; static int __init driver_pcl818_init_module(void) { return comedi_driver_register(&driver_pcl818); } static void __exit driver_pcl818_cleanup_module(void) { comedi_driver_unregister(&driver_pcl818); } module_init(driver_pcl818_init_module); module_exit(driver_pcl818_cleanup_module); struct pcl818_private { unsigned int dma; /* used DMA, 0=don't use DMA */ int dma_rtc; /* 1=RTC used with DMA, 0=no RTC alloc */ unsigned int io_range; #ifdef unused unsigned long rtc_iobase; /* RTC port region */ unsigned int rtc_iosize; unsigned int rtc_irq; struct timer_list rtc_irq_timer; /* timer for RTC sanity check */ unsigned long rtc_freq; /* RTC int freq */ int rtc_irq_blocked; /* 1=we now do AI with DMA&RTC */ #endif unsigned long dmabuf[2]; /* pointers to begin of DMA buffers */ unsigned int dmapages[2]; /* len of DMA buffers in PAGE_SIZEs */ unsigned int hwdmaptr[2]; /* hardware address of DMA buffers */ unsigned int hwdmasize[2]; /* len of DMA buffers in Bytes */ unsigned int dmasamplsize; /* size in samples hwdmasize[0]/2 */ unsigned int last_top_dma; /* DMA pointer in last RTC int */ int next_dma_buf; /* which DMA buffer will be used next round */ long dma_runs_to_end; /* how many we must permorm DMA transfer to end of record */ unsigned long last_dma_run; /* how many bytes we must transfer on last DMA page */ unsigned char neverending_ai; /* if=1, then we do neverending record (you must use cancel()) */ unsigned int ns_min; /* manimal allowed delay between samples (in us) for actual card */ int i8253_osc_base; /* 1/frequency of on board oscilator in ns */ int irq_free; /* 1=have allocated IRQ */ int irq_blocked; /* 1=IRQ now uses any subdev */ int 
irq_was_now_closed; /* when IRQ finish, there's stored int818_mode for last interrupt */ int ai_mode; /* who now uses IRQ - 1=AI1 int, 2=AI1 dma, 3=AI3 int, 4AI3 dma */ struct comedi_subdevice *last_int_sub; /* ptr to subdevice which now finish */ int ai_act_scan; /* how many scans we finished */ int ai_act_chan; /* actual position in actual scan */ unsigned int act_chanlist[16]; /* MUX setting for actual AI operations */ unsigned int act_chanlist_len; /* how long is actual MUX list */ unsigned int act_chanlist_pos; /* actual position in MUX list */ unsigned int ai_scans; /* len of scanlist */ unsigned int ai_n_chan; /* how many channels is measured */ unsigned int *ai_chanlist; /* actaul chanlist */ unsigned int ai_flags; /* flaglist */ unsigned int ai_data_len; /* len of data buffer */ short *ai_data; /* data buffer */ unsigned int ai_timer1; /* timers */ unsigned int ai_timer2; struct comedi_subdevice *sub_ai; /* ptr to AI subdevice */ unsigned char usefifo; /* 1=use fifo */ unsigned int ao_readback[2]; }; static const unsigned int muxonechan[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, /* used for gain list programming */ 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff }; #define devpriv ((struct pcl818_private *)dev->private) #define this_board ((const struct pcl818_board *)dev->board_ptr) /* ============================================================================== */ static void setup_channel_list(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int *chanlist, unsigned int n_chan, unsigned int seglen); static int check_channel_list(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int *chanlist, unsigned int n_chan); static int pcl818_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static void start_pacer(struct comedi_device *dev, int mode, unsigned int divisor1, unsigned int divisor2); #ifdef unused static int set_rtc_irq_bit(unsigned char bit); static void rtc_dropped_irq(unsigned long data); 
static int rtc_setfreq_irq(int freq); #endif /* ============================================================================== ANALOG INPUT MODE0, 818 cards, slow version */ static int pcl818_ai_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int n; int timeout; /* software trigger, DMA and INT off */ outb(0, dev->iobase + PCL818_CONTROL); /* select channel */ outb(muxonechan[CR_CHAN(insn->chanspec)], dev->iobase + PCL818_MUX); /* select gain */ outb(CR_RANGE(insn->chanspec), dev->iobase + PCL818_RANGE); for (n = 0; n < insn->n; n++) { /* clear INT (conversion end) flag */ outb(0, dev->iobase + PCL818_CLRINT); /* start conversion */ outb(0, dev->iobase + PCL818_AD_LO); timeout = 100; while (timeout--) { if (inb(dev->iobase + PCL818_STATUS) & 0x10) goto conv_finish; udelay(1); } comedi_error(dev, "A/D insn timeout"); /* clear INT (conversion end) flag */ outb(0, dev->iobase + PCL818_CLRINT); return -EIO; conv_finish: data[n] = ((inb(dev->iobase + PCL818_AD_HI) << 4) | (inb(dev->iobase + PCL818_AD_LO) >> 4)); } return n; } /* ============================================================================== ANALOG OUTPUT MODE0, 818 cards only one sample per call is supported */ static int pcl818_ao_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int n; int chan = CR_CHAN(insn->chanspec); for (n = 0; n < insn->n; n++) { data[n] = devpriv->ao_readback[chan]; } return n; } static int pcl818_ao_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int n; int chan = CR_CHAN(insn->chanspec); for (n = 0; n < insn->n; n++) { devpriv->ao_readback[chan] = data[n]; outb((data[n] & 0x000f) << 4, dev->iobase + (chan ? PCL718_DA2_LO : PCL818_DA_LO)); outb((data[n] & 0x0ff0) >> 4, dev->iobase + (chan ? 
PCL718_DA2_HI : PCL818_DA_HI)); } return n; } /* ============================================================================== DIGITAL INPUT MODE0, 818 cards only one sample per call is supported */ static int pcl818_di_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (insn->n != 2) return -EINVAL; data[1] = inb(dev->iobase + PCL818_DI_LO) | (inb(dev->iobase + PCL818_DI_HI) << 8); return 2; } /* ============================================================================== DIGITAL OUTPUT MODE0, 818 cards only one sample per call is supported */ static int pcl818_do_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (insn->n != 2) return -EINVAL; s->state &= ~data[0]; s->state |= (data[0] & data[1]); outb(s->state & 0xff, dev->iobase + PCL818_DO_LO); outb((s->state >> 8), dev->iobase + PCL818_DO_HI); data[1] = s->state; return 2; } /* ============================================================================== analog input interrupt mode 1 & 3, 818 cards one sample per interrupt version */ static irqreturn_t interrupt_pcl818_ai_mode13_int(int irq, void *d) { struct comedi_device *dev = d; struct comedi_subdevice *s = dev->subdevices + 0; int low; int timeout = 50; /* wait max 50us */ while (timeout--) { if (inb(dev->iobase + PCL818_STATUS) & 0x10) goto conv_finish; udelay(1); } outb(0, dev->iobase + PCL818_STATUS); /* clear INT request */ comedi_error(dev, "A/D mode1/3 IRQ without DRDY!"); pcl818_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; comedi_event(dev, s); return IRQ_HANDLED; conv_finish: low = inb(dev->iobase + PCL818_AD_LO); comedi_buf_put(s->async, ((inb(dev->iobase + PCL818_AD_HI) << 4) | (low >> 4))); /* get one sample */ outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */ if ((low & 0xf) != devpriv->act_chanlist[devpriv->act_chanlist_pos]) { /* dropout! 
*/ printk ("comedi: A/D mode1/3 IRQ - channel dropout %x!=%x !\n", (low & 0xf), devpriv->act_chanlist[devpriv->act_chanlist_pos]); pcl818_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; comedi_event(dev, s); return IRQ_HANDLED; } devpriv->act_chanlist_pos++; if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len) { devpriv->act_chanlist_pos = 0; } s->async->cur_chan++; if (s->async->cur_chan >= devpriv->ai_n_chan) { /* printk("E"); */ s->async->cur_chan = 0; devpriv->ai_act_scan--; } if (!devpriv->neverending_ai) { if (devpriv->ai_act_scan == 0) { /* all data sampled */ pcl818_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA; } } comedi_event(dev, s); return IRQ_HANDLED; } /* ============================================================================== analog input dma mode 1 & 3, 818 cards */ static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d) { struct comedi_device *dev = d; struct comedi_subdevice *s = dev->subdevices + 0; int i, len, bufptr; unsigned long flags; short *ptr; disable_dma(devpriv->dma); devpriv->next_dma_buf = 1 - devpriv->next_dma_buf; if ((devpriv->dma_runs_to_end) > -1 || devpriv->neverending_ai) { /* switch dma bufs */ set_dma_mode(devpriv->dma, DMA_MODE_READ); flags = claim_dma_lock(); set_dma_addr(devpriv->dma, devpriv->hwdmaptr[devpriv->next_dma_buf]); if (devpriv->dma_runs_to_end || devpriv->neverending_ai) { set_dma_count(devpriv->dma, devpriv->hwdmasize[devpriv-> next_dma_buf]); } else { set_dma_count(devpriv->dma, devpriv->last_dma_run); } release_dma_lock(flags); enable_dma(devpriv->dma); } printk("comedi: A/D mode1/3 IRQ \n"); devpriv->dma_runs_to_end--; outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */ ptr = (short *)devpriv->dmabuf[1 - devpriv->next_dma_buf]; len = devpriv->hwdmasize[0] >> 1; bufptr = 0; for (i = 0; i < len; i++) { if ((ptr[bufptr] & 0xf) != devpriv->act_chanlist[devpriv->act_chanlist_pos]) { /* dropout! 
*/ printk ("comedi: A/D mode1/3 DMA - channel dropout %d(card)!=%d(chanlist) at %d !\n", (ptr[bufptr] & 0xf), devpriv->act_chanlist[devpriv->act_chanlist_pos], devpriv->act_chanlist_pos); pcl818_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; comedi_event(dev, s); return IRQ_HANDLED; } comedi_buf_put(s->async, ptr[bufptr++] >> 4); /* get one sample */ devpriv->act_chanlist_pos++; if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len) { devpriv->act_chanlist_pos = 0; } s->async->cur_chan++; if (s->async->cur_chan >= devpriv->ai_n_chan) { s->async->cur_chan = 0; devpriv->ai_act_scan--; } if (!devpriv->neverending_ai) if (devpriv->ai_act_scan == 0) { /* all data sampled */ pcl818_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA; comedi_event(dev, s); /* printk("done int ai13 dma\n"); */ return IRQ_HANDLED; } } if (len > 0) comedi_event(dev, s); return IRQ_HANDLED; } #ifdef unused /* ============================================================================== analog input dma mode 1 & 3 over RTC, 818 cards */ static irqreturn_t interrupt_pcl818_ai_mode13_dma_rtc(int irq, void *d) { struct comedi_device *dev = d; struct comedi_subdevice *s = dev->subdevices + 0; unsigned long tmp; unsigned int top1, top2, i, bufptr; long ofs_dats; short *dmabuf = (short *)devpriv->dmabuf[0]; /* outb(2,0x378); */ switch (devpriv->ai_mode) { case INT_TYPE_AI1_DMA_RTC: case INT_TYPE_AI3_DMA_RTC: tmp = (CMOS_READ(RTC_INTR_FLAGS) & 0xF0); mod_timer(&devpriv->rtc_irq_timer, jiffies + HZ / devpriv->rtc_freq + 2 * HZ / 100); for (i = 0; i < 10; i++) { top1 = get_dma_residue(devpriv->dma); top2 = get_dma_residue(devpriv->dma); if (top1 == top2) break; } if (top1 != top2) return IRQ_HANDLED; top1 = devpriv->hwdmasize[0] - top1; /* where is now DMA in buffer */ top1 >>= 1; ofs_dats = top1 - devpriv->last_top_dma; /* new samples from last call */ if (ofs_dats < 0) ofs_dats = (devpriv->dmasamplsize) + ofs_dats; if (!ofs_dats) return IRQ_HANDLED; /* exit=no new 
samples from last call */ /* obsluz data */ i = devpriv->last_top_dma - 1; i &= (devpriv->dmasamplsize - 1); if (dmabuf[i] != MAGIC_DMA_WORD) { /* DMA overflow! */ comedi_error(dev, "A/D mode1/3 DMA buffer overflow!"); /* printk("I %d dmabuf[i] %d %d\n",i,dmabuf[i],devpriv->dmasamplsize); */ pcl818_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; comedi_event(dev, s); return IRQ_HANDLED; } /* printk("r %ld ",ofs_dats); */ bufptr = devpriv->last_top_dma; for (i = 0; i < ofs_dats; i++) { if ((dmabuf[bufptr] & 0xf) != devpriv->act_chanlist[devpriv->act_chanlist_pos]) { /* dropout! */ printk ("comedi: A/D mode1/3 DMA - channel dropout %d!=%d !\n", (dmabuf[bufptr] & 0xf), devpriv-> act_chanlist[devpriv->act_chanlist_pos]); pcl818_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; comedi_event(dev, s); return IRQ_HANDLED; } comedi_buf_put(s->async, dmabuf[bufptr++] >> 4); /* get one sample */ bufptr &= (devpriv->dmasamplsize - 1); devpriv->act_chanlist_pos++; if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len) { devpriv->act_chanlist_pos = 0; } s->async->cur_chan++; if (s->async->cur_chan >= devpriv->ai_n_chan) { s->async->cur_chan = 0; devpriv->ai_act_scan--; } if (!devpriv->neverending_ai) if (devpriv->ai_act_scan == 0) { /* all data sampled */ pcl818_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA; comedi_event(dev, s); /* printk("done int ai13 dma\n"); */ return IRQ_HANDLED; } } devpriv->last_top_dma = bufptr; bufptr--; bufptr &= (devpriv->dmasamplsize - 1); dmabuf[bufptr] = MAGIC_DMA_WORD; comedi_event(dev, s); /* outb(0,0x378); */ return IRQ_HANDLED; } /* outb(0,0x378); */ return IRQ_HANDLED; } #endif /* ============================================================================== analog input interrupt mode 1 & 3, 818HD/HG cards */ static irqreturn_t interrupt_pcl818_ai_mode13_fifo(int irq, void *d) { struct comedi_device *dev = d; struct comedi_subdevice *s = dev->subdevices + 0; int i, len, lo; outb(0, 
dev->iobase + PCL818_FI_INTCLR); /* clear fifo int request */ lo = inb(dev->iobase + PCL818_FI_STATUS); if (lo & 4) { comedi_error(dev, "A/D mode1/3 FIFO overflow!"); pcl818_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; comedi_event(dev, s); return IRQ_HANDLED; } if (lo & 1) { comedi_error(dev, "A/D mode1/3 FIFO interrupt without data!"); pcl818_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; comedi_event(dev, s); return IRQ_HANDLED; } if (lo & 2) { len = 512; } else { len = 0; } for (i = 0; i < len; i++) { lo = inb(dev->iobase + PCL818_FI_DATALO); if ((lo & 0xf) != devpriv->act_chanlist[devpriv->act_chanlist_pos]) { /* dropout! */ printk ("comedi: A/D mode1/3 FIFO - channel dropout %d!=%d !\n", (lo & 0xf), devpriv->act_chanlist[devpriv->act_chanlist_pos]); pcl818_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; comedi_event(dev, s); return IRQ_HANDLED; } comedi_buf_put(s->async, (lo >> 4) | (inb(dev->iobase + PCL818_FI_DATAHI) << 4)); /* get one sample */ devpriv->act_chanlist_pos++; if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len) { devpriv->act_chanlist_pos = 0; } s->async->cur_chan++; if (s->async->cur_chan >= devpriv->ai_n_chan) { s->async->cur_chan = 0; devpriv->ai_act_scan--; } if (!devpriv->neverending_ai) if (devpriv->ai_act_scan == 0) { /* all data sampled */ pcl818_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA; comedi_event(dev, s); return IRQ_HANDLED; } } if (len > 0) comedi_event(dev, s); return IRQ_HANDLED; } /* ============================================================================== INT procedure */ static irqreturn_t interrupt_pcl818(int irq, void *d) { struct comedi_device *dev = d; if (!dev->attached) { comedi_error(dev, "premature interrupt"); return IRQ_HANDLED; } /* printk("I\n"); */ if (devpriv->irq_blocked && devpriv->irq_was_now_closed) { if ((devpriv->neverending_ai || (!devpriv->neverending_ai && devpriv->ai_act_scan > 0)) && (devpriv->ai_mode 
== INT_TYPE_AI1_DMA || devpriv->ai_mode == INT_TYPE_AI3_DMA)) { /* The cleanup from ai_cancel() has been delayed until now because the card doesn't seem to like being reprogrammed while a DMA transfer is in progress. */ struct comedi_subdevice *s = dev->subdevices + 0; devpriv->ai_act_scan = 0; devpriv->neverending_ai = 0; pcl818_ai_cancel(dev, s); } outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */ return IRQ_HANDLED; } switch (devpriv->ai_mode) { case INT_TYPE_AI1_DMA: case INT_TYPE_AI3_DMA: return interrupt_pcl818_ai_mode13_dma(irq, d); case INT_TYPE_AI1_INT: case INT_TYPE_AI3_INT: return interrupt_pcl818_ai_mode13_int(irq, d); case INT_TYPE_AI1_FIFO: case INT_TYPE_AI3_FIFO: return interrupt_pcl818_ai_mode13_fifo(irq, d); #ifdef PCL818_MODE13_AO case INT_TYPE_AO1_INT: case INT_TYPE_AO3_INT: return interrupt_pcl818_ao_mode13_int(irq, d); #endif default: break; } outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */ if ((!dev->irq) || (!devpriv->irq_free) || (!devpriv->irq_blocked) || (!devpriv->ai_mode)) { comedi_error(dev, "bad IRQ!"); return IRQ_NONE; } comedi_error(dev, "IRQ from unknown source!"); return IRQ_NONE; } /* ============================================================================== ANALOG INPUT MODE 1 or 3 DMA , 818 cards */ static void pcl818_ai_mode13dma_int(int mode, struct comedi_device *dev, struct comedi_subdevice *s) { unsigned int flags; unsigned int bytes; printk("mode13dma_int, mode: %d\n", mode); disable_dma(devpriv->dma); /* disable dma */ bytes = devpriv->hwdmasize[0]; if (!devpriv->neverending_ai) { bytes = devpriv->ai_n_chan * devpriv->ai_scans * sizeof(short); /* how many */ devpriv->dma_runs_to_end = bytes / devpriv->hwdmasize[0]; /* how many DMA pages we must fiil */ devpriv->last_dma_run = bytes % devpriv->hwdmasize[0]; /* on last dma transfer must be moved */ devpriv->dma_runs_to_end--; if (devpriv->dma_runs_to_end >= 0) bytes = devpriv->hwdmasize[0]; } devpriv->next_dma_buf = 0; 
set_dma_mode(devpriv->dma, DMA_MODE_READ); flags = claim_dma_lock(); clear_dma_ff(devpriv->dma); set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]); set_dma_count(devpriv->dma, bytes); release_dma_lock(flags); enable_dma(devpriv->dma); if (mode == 1) { devpriv->ai_mode = INT_TYPE_AI1_DMA; outb(0x87 | (dev->irq << 4), dev->iobase + PCL818_CONTROL); /* Pacer+IRQ+DMA */ } else { devpriv->ai_mode = INT_TYPE_AI3_DMA; outb(0x86 | (dev->irq << 4), dev->iobase + PCL818_CONTROL); /* Ext trig+IRQ+DMA */ }; } #ifdef unused /* ============================================================================== ANALOG INPUT MODE 1 or 3 DMA rtc, 818 cards */ static void pcl818_ai_mode13dma_rtc(int mode, struct comedi_device *dev, struct comedi_subdevice *s) { unsigned int flags; short *pole; set_dma_mode(devpriv->dma, DMA_MODE_READ | DMA_AUTOINIT); flags = claim_dma_lock(); clear_dma_ff(devpriv->dma); set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]); set_dma_count(devpriv->dma, devpriv->hwdmasize[0]); release_dma_lock(flags); enable_dma(devpriv->dma); devpriv->last_top_dma = 0; /* devpriv->hwdmasize[0]; */ pole = (short *)devpriv->dmabuf[0]; devpriv->dmasamplsize = devpriv->hwdmasize[0] / 2; pole[devpriv->dmasamplsize - 1] = MAGIC_DMA_WORD; #ifdef unused devpriv->rtc_freq = rtc_setfreq_irq(2048); devpriv->rtc_irq_timer.expires = jiffies + HZ / devpriv->rtc_freq + 2 * HZ / 100; devpriv->rtc_irq_timer.data = (unsigned long)dev; devpriv->rtc_irq_timer.function = rtc_dropped_irq; add_timer(&devpriv->rtc_irq_timer); #endif if (mode == 1) { devpriv->int818_mode = INT_TYPE_AI1_DMA_RTC; outb(0x07 | (dev->irq << 4), dev->iobase + PCL818_CONTROL); /* Pacer+DMA */ } else { devpriv->int818_mode = INT_TYPE_AI3_DMA_RTC; outb(0x06 | (dev->irq << 4), dev->iobase + PCL818_CONTROL); /* Ext trig+DMA */ }; } #endif /* ============================================================================== ANALOG INPUT MODE 1 or 3, 818 cards */ static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev, struct 
comedi_subdevice *s) { struct comedi_cmd *cmd = &s->async->cmd; int divisor1 = 0, divisor2 = 0; unsigned int seglen; printk("pcl818_ai_cmd_mode()\n"); if ((!dev->irq) && (!devpriv->dma_rtc)) { comedi_error(dev, "IRQ not defined!"); return -EINVAL; } if (devpriv->irq_blocked) return -EBUSY; start_pacer(dev, -1, 0, 0); /* stop pacer */ seglen = check_channel_list(dev, s, devpriv->ai_chanlist, devpriv->ai_n_chan); if (seglen < 1) return -EINVAL; setup_channel_list(dev, s, devpriv->ai_chanlist, devpriv->ai_n_chan, seglen); udelay(1); devpriv->ai_act_scan = devpriv->ai_scans; devpriv->ai_act_chan = 0; devpriv->irq_blocked = 1; devpriv->irq_was_now_closed = 0; devpriv->neverending_ai = 0; devpriv->act_chanlist_pos = 0; devpriv->dma_runs_to_end = 0; if ((devpriv->ai_scans == 0) || (devpriv->ai_scans == -1)) devpriv->neverending_ai = 1; /* well, user want neverending */ if (mode == 1) { i8253_cascade_ns_to_timer(devpriv->i8253_osc_base, &divisor1, &divisor2, &cmd->convert_arg, TRIG_ROUND_NEAREST); if (divisor1 == 1) { /* PCL718/818 crash if any divisor is set to 1 */ divisor1 = 2; divisor2 /= 2; } if (divisor2 == 1) { divisor2 = 2; divisor1 /= 2; } } outb(0, dev->iobase + PCL818_CNTENABLE); /* enable pacer */ switch (devpriv->dma) { case 1: /* DMA */ case 3: if (devpriv->dma_rtc == 0) { pcl818_ai_mode13dma_int(mode, dev, s); } #ifdef unused else { pcl818_ai_mode13dma_rtc(mode, dev, s); } #else else { return -EINVAL; } #endif break; case 0: if (!devpriv->usefifo) { /* IRQ */ /* printk("IRQ\n"); */ if (mode == 1) { devpriv->ai_mode = INT_TYPE_AI1_INT; /* Pacer+IRQ */ outb(0x83 | (dev->irq << 4), dev->iobase + PCL818_CONTROL); } else { devpriv->ai_mode = INT_TYPE_AI3_INT; /* Ext trig+IRQ */ outb(0x82 | (dev->irq << 4), dev->iobase + PCL818_CONTROL); } } else { /* FIFO */ /* enable FIFO */ outb(1, dev->iobase + PCL818_FI_ENABLE); if (mode == 1) { devpriv->ai_mode = INT_TYPE_AI1_FIFO; /* Pacer */ outb(0x03, dev->iobase + PCL818_CONTROL); } else { devpriv->ai_mode = 
INT_TYPE_AI3_FIFO; outb(0x02, dev->iobase + PCL818_CONTROL); } } } start_pacer(dev, mode, divisor1, divisor2); #ifdef unused switch (devpriv->ai_mode) { case INT_TYPE_AI1_DMA_RTC: case INT_TYPE_AI3_DMA_RTC: set_rtc_irq_bit(1); /* start RTC */ break; } #endif printk("pcl818_ai_cmd_mode() end\n"); return 0; } #ifdef unused /* ============================================================================== ANALOG OUTPUT MODE 1 or 3, 818 cards */ #ifdef PCL818_MODE13_AO static int pcl818_ao_mode13(int mode, struct comedi_device *dev, struct comedi_subdevice *s, comedi_trig * it) { int divisor1 = 0, divisor2 = 0; if (!dev->irq) { comedi_error(dev, "IRQ not defined!"); return -EINVAL; } if (devpriv->irq_blocked) return -EBUSY; start_pacer(dev, -1, 0, 0); /* stop pacer */ devpriv->int13_act_scan = it->n; devpriv->int13_act_chan = 0; devpriv->irq_blocked = 1; devpriv->irq_was_now_closed = 0; devpriv->neverending_ai = 0; devpriv->act_chanlist_pos = 0; if (mode == 1) { i8253_cascade_ns_to_timer(devpriv->i8253_osc_base, &divisor1, &divisor2, &it->trigvar, TRIG_ROUND_NEAREST); if (divisor1 == 1) { /* PCL818 crash if any divisor is set to 1 */ divisor1 = 2; divisor2 /= 2; } if (divisor2 == 1) { divisor2 = 2; divisor1 /= 2; } } outb(0, dev->iobase + PCL818_CNTENABLE); /* enable pacer */ if (mode == 1) { devpriv->int818_mode = INT_TYPE_AO1_INT; outb(0x83 | (dev->irq << 4), dev->iobase + PCL818_CONTROL); /* Pacer+IRQ */ } else { devpriv->int818_mode = INT_TYPE_AO3_INT; outb(0x82 | (dev->irq << 4), dev->iobase + PCL818_CONTROL); /* Ext trig+IRQ */ }; start_pacer(dev, mode, divisor1, divisor2); return 0; } /* ============================================================================== ANALOG OUTPUT MODE 1, 818 cards */ static int pcl818_ao_mode1(struct comedi_device *dev, struct comedi_subdevice *s, comedi_trig * it) { return pcl818_ao_mode13(1, dev, s, it); } /* ============================================================================== ANALOG OUTPUT MODE 3, 818 cards */ static 
int pcl818_ao_mode3(struct comedi_device *dev, struct comedi_subdevice *s, comedi_trig * it) { return pcl818_ao_mode13(3, dev, s, it); } #endif #endif /* ============================================================================== Start/stop pacer onboard pacer */ static void start_pacer(struct comedi_device *dev, int mode, unsigned int divisor1, unsigned int divisor2) { outb(0xb4, dev->iobase + PCL818_CTRCTL); outb(0x74, dev->iobase + PCL818_CTRCTL); udelay(1); if (mode == 1) { outb(divisor2 & 0xff, dev->iobase + PCL818_CTR2); outb((divisor2 >> 8) & 0xff, dev->iobase + PCL818_CTR2); outb(divisor1 & 0xff, dev->iobase + PCL818_CTR1); outb((divisor1 >> 8) & 0xff, dev->iobase + PCL818_CTR1); } } /* ============================================================================== Check if channel list from user is builded correctly If it's ok, then program scan/gain logic */ static int check_channel_list(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int *chanlist, unsigned int n_chan) { unsigned int chansegment[16]; unsigned int i, nowmustbechan, seglen, segpos; /* correct channel and range number check itself comedi/range.c */ if (n_chan < 1) { comedi_error(dev, "range/channel list is empty!"); return 0; } if (n_chan > 1) { /* first channel is every time ok */ chansegment[0] = chanlist[0]; /* build part of chanlist */ for (i = 1, seglen = 1; i < n_chan; i++, seglen++) { /* printk("%d. %d * %d\n",i, * CR_CHAN(it->chanlist[i]),CR_RANGE(it->chanlist[i]));*/ /* we detect loop, this must by finish */ if (chanlist[0] == chanlist[i]) break; nowmustbechan = (CR_CHAN(chansegment[i - 1]) + 1) % s->n_chan; if (nowmustbechan != CR_CHAN(chanlist[i])) { /* channel list isn't continuous :-( */ printk ("comedi%d: pcl818: channel list must be continuous! 
chanlist[%i]=%d but must be %d or %d!\n", dev->minor, i, CR_CHAN(chanlist[i]), nowmustbechan, CR_CHAN(chanlist[0])); return 0; } /* well, this is next correct channel in list */ chansegment[i] = chanlist[i]; } /* check whole chanlist */ for (i = 0, segpos = 0; i < n_chan; i++) { /* printk("%d %d=%d %d\n",CR_CHAN(chansegment[i%seglen]),CR_RANGE(chansegment[i%seglen]),CR_CHAN(it->chanlist[i]),CR_RANGE(it->chanlist[i])); */ if (chanlist[i] != chansegment[i % seglen]) { printk ("comedi%d: pcl818: bad channel or range number! chanlist[%i]=%d,%d,%d and not %d,%d,%d!\n", dev->minor, i, CR_CHAN(chansegment[i]), CR_RANGE(chansegment[i]), CR_AREF(chansegment[i]), CR_CHAN(chanlist[i % seglen]), CR_RANGE(chanlist[i % seglen]), CR_AREF(chansegment[i % seglen])); return 0; /* chan/gain list is strange */ } } } else { seglen = 1; } printk("check_channel_list: seglen %d\n", seglen); return seglen; } static void setup_channel_list(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int *chanlist, unsigned int n_chan, unsigned int seglen) { int i; devpriv->act_chanlist_len = seglen; devpriv->act_chanlist_pos = 0; for (i = 0; i < seglen; i++) { /* store range list to card */ devpriv->act_chanlist[i] = CR_CHAN(chanlist[i]); outb(muxonechan[CR_CHAN(chanlist[i])], dev->iobase + PCL818_MUX); /* select channel */ outb(CR_RANGE(chanlist[i]), dev->iobase + PCL818_RANGE); /* select gain */ } udelay(1); /* select channel interval to scan */ outb(devpriv->act_chanlist[0] | (devpriv->act_chanlist[seglen - 1] << 4), dev->iobase + PCL818_MUX); } /* ============================================================================== Check if board is switched to SE (1) or DIFF(0) mode */ static int check_single_ended(unsigned int port) { if (inb(port + PCL818_STATUS) & 0x20) { return 1; } else { return 0; } } /* ============================================================================== */ static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) 
{ int err = 0; int tmp, divisor1 = 0, divisor2 = 0; /* step 1: make sure trigger sources are trivially valid */ tmp = cmd->start_src; cmd->start_src &= TRIG_NOW; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_FOLLOW; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_TIMER | TRIG_EXT; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_COUNT | TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) { return 1; } /* step 2: make sure trigger sources are unique and mutually compatible */ if (cmd->start_src != TRIG_NOW) { cmd->start_src = TRIG_NOW; err++; } if (cmd->scan_begin_src != TRIG_FOLLOW) { cmd->scan_begin_src = TRIG_FOLLOW; err++; } if (cmd->convert_src != TRIG_TIMER && cmd->convert_src != TRIG_EXT) err++; if (cmd->scan_end_src != TRIG_COUNT) { cmd->scan_end_src = TRIG_COUNT; err++; } if (cmd->stop_src != TRIG_NONE && cmd->stop_src != TRIG_COUNT) err++; if (err) { return 2; } /* step 3: make sure arguments are trivially compatible */ if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } if (cmd->scan_begin_arg != 0) { cmd->scan_begin_arg = 0; err++; } if (cmd->convert_src == TRIG_TIMER) { if (cmd->convert_arg < this_board->ns_min) { cmd->convert_arg = this_board->ns_min; err++; } } else { /* TRIG_EXT */ if (cmd->convert_arg != 0) { cmd->convert_arg = 0; err++; } } if (cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } if (cmd->stop_src == TRIG_COUNT) { if (!cmd->stop_arg) { cmd->stop_arg = 1; err++; } } else { /* TRIG_NONE */ if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } } if (err) { return 3; } /* step 4: fix up any arguments */ if (cmd->convert_src == TRIG_TIMER) { tmp = cmd->convert_arg; 
i8253_cascade_ns_to_timer(devpriv->i8253_osc_base, &divisor1, &divisor2, &cmd->convert_arg, cmd->flags & TRIG_ROUND_MASK); if (cmd->convert_arg < this_board->ns_min) cmd->convert_arg = this_board->ns_min; if (tmp != cmd->convert_arg) err++; } if (err) { return 4; } /* step 5: complain about special chanlist considerations */ if (cmd->chanlist) { if (!check_channel_list(dev, s, cmd->chanlist, cmd->chanlist_len)) return 5; /* incorrect channels list */ } return 0; } /* ============================================================================== */ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_cmd *cmd = &s->async->cmd; int retval; printk("pcl818_ai_cmd()\n"); devpriv->ai_n_chan = cmd->chanlist_len; devpriv->ai_chanlist = cmd->chanlist; devpriv->ai_flags = cmd->flags; devpriv->ai_data_len = s->async->prealloc_bufsz; devpriv->ai_data = s->async->prealloc_buf; devpriv->ai_timer1 = 0; devpriv->ai_timer2 = 0; if (cmd->stop_src == TRIG_COUNT) { devpriv->ai_scans = cmd->stop_arg; } else { devpriv->ai_scans = 0; } if (cmd->scan_begin_src == TRIG_FOLLOW) { /* mode 1, 3 */ if (cmd->convert_src == TRIG_TIMER) { /* mode 1 */ devpriv->ai_timer1 = cmd->convert_arg; retval = pcl818_ai_cmd_mode(1, dev, s); printk("pcl818_ai_cmd() end\n"); return retval; } if (cmd->convert_src == TRIG_EXT) { /* mode 3 */ return pcl818_ai_cmd_mode(3, dev, s); } } return -1; } /* ============================================================================== cancel any mode 1-4 AI */ static int pcl818_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { if (devpriv->irq_blocked > 0) { printk("pcl818_ai_cancel()\n"); devpriv->irq_was_now_closed = 1; switch (devpriv->ai_mode) { #ifdef unused case INT_TYPE_AI1_DMA_RTC: case INT_TYPE_AI3_DMA_RTC: set_rtc_irq_bit(0); /* stop RTC */ del_timer(&devpriv->rtc_irq_timer); #endif case INT_TYPE_AI1_DMA: case INT_TYPE_AI3_DMA: if (devpriv->neverending_ai || (!devpriv->neverending_ai && devpriv->ai_act_scan > 
0)) { /* wait for running dma transfer to end, do cleanup in interrupt */ goto end; } disable_dma(devpriv->dma); case INT_TYPE_AI1_INT: case INT_TYPE_AI3_INT: case INT_TYPE_AI1_FIFO: case INT_TYPE_AI3_FIFO: #ifdef PCL818_MODE13_AO case INT_TYPE_AO1_INT: case INT_TYPE_AO3_INT: #endif outb(inb(dev->iobase + PCL818_CONTROL) & 0x73, dev->iobase + PCL818_CONTROL); /* Stop A/D */ udelay(1); start_pacer(dev, -1, 0, 0); outb(0, dev->iobase + PCL818_AD_LO); inb(dev->iobase + PCL818_AD_LO); inb(dev->iobase + PCL818_AD_HI); outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */ outb(0, dev->iobase + PCL818_CONTROL); /* Stop A/D */ if (devpriv->usefifo) { /* FIFO shutdown */ outb(0, dev->iobase + PCL818_FI_INTCLR); outb(0, dev->iobase + PCL818_FI_FLUSH); outb(0, dev->iobase + PCL818_FI_ENABLE); } devpriv->irq_blocked = 0; devpriv->last_int_sub = s; devpriv->neverending_ai = 0; devpriv->ai_mode = 0; devpriv->irq_was_now_closed = 0; break; } } end: printk("pcl818_ai_cancel() end\n"); return 0; } /* ============================================================================== chech for PCL818 */ static int pcl818_check(unsigned long iobase) { outb(0x00, iobase + PCL818_MUX); udelay(1); if (inb(iobase + PCL818_MUX) != 0x00) return 1; /* there isn't card */ outb(0x55, iobase + PCL818_MUX); udelay(1); if (inb(iobase + PCL818_MUX) != 0x55) return 1; /* there isn't card */ outb(0x00, iobase + PCL818_MUX); udelay(1); outb(0x18, iobase + PCL818_CONTROL); udelay(1); if (inb(iobase + PCL818_CONTROL) != 0x18) return 1; /* there isn't card */ return 0; /* ok, card exist */ } /* ============================================================================== reset whole PCL-818 cards */ static void pcl818_reset(struct comedi_device *dev) { if (devpriv->usefifo) { /* FIFO shutdown */ outb(0, dev->iobase + PCL818_FI_INTCLR); outb(0, dev->iobase + PCL818_FI_FLUSH); outb(0, dev->iobase + PCL818_FI_ENABLE); } outb(0, dev->iobase + PCL818_DA_LO); /* DAC=0V */ outb(0, dev->iobase + 
PCL818_DA_HI); udelay(1); outb(0, dev->iobase + PCL818_DO_HI); /* DO=$0000 */ outb(0, dev->iobase + PCL818_DO_LO); udelay(1); outb(0, dev->iobase + PCL818_CONTROL); outb(0, dev->iobase + PCL818_CNTENABLE); outb(0, dev->iobase + PCL818_MUX); outb(0, dev->iobase + PCL818_CLRINT); outb(0xb0, dev->iobase + PCL818_CTRCTL); /* Stop pacer */ outb(0x70, dev->iobase + PCL818_CTRCTL); outb(0x30, dev->iobase + PCL818_CTRCTL); if (this_board->is_818) { outb(0, dev->iobase + PCL818_RANGE); } else { outb(0, dev->iobase + PCL718_DA2_LO); outb(0, dev->iobase + PCL718_DA2_HI); } } #ifdef unused /* ============================================================================== Enable(1)/disable(0) periodic interrupts from RTC */ static int set_rtc_irq_bit(unsigned char bit) { unsigned char val; unsigned long flags; if (bit == 1) { RTC_timer_lock++; if (RTC_timer_lock > 1) return 0; } else { RTC_timer_lock--; if (RTC_timer_lock < 0) RTC_timer_lock = 0; if (RTC_timer_lock > 0) return 0; } save_flags(flags); cli(); val = CMOS_READ(RTC_CONTROL); if (bit) { val |= RTC_PIE; } else { val &= ~RTC_PIE; } CMOS_WRITE(val, RTC_CONTROL); CMOS_READ(RTC_INTR_FLAGS); restore_flags(flags); return 0; } /* ============================================================================== Restart RTC if something stop it (xntpd every 11 mins or large IDE transfers) */ static void rtc_dropped_irq(unsigned long data) { struct comedi_device *dev = (void *)data; unsigned long flags, tmp; switch (devpriv->int818_mode) { case INT_TYPE_AI1_DMA_RTC: case INT_TYPE_AI3_DMA_RTC: mod_timer(&devpriv->rtc_irq_timer, jiffies + HZ / devpriv->rtc_freq + 2 * HZ / 100); save_flags(flags); cli(); tmp = (CMOS_READ(RTC_INTR_FLAGS) & 0xF0); /* restart */ restore_flags(flags); break; } } /* ============================================================================== Set frequency of interrupts from RTC */ static int rtc_setfreq_irq(int freq) { int tmp = 0; int rtc_freq; unsigned char val; unsigned long flags; if (freq < 2) freq 
= 2; if (freq > 8192) freq = 8192; while (freq > (1 << tmp)) tmp++; rtc_freq = 1 << tmp; save_flags(flags); cli(); val = CMOS_READ(RTC_FREQ_SELECT) & 0xf0; val |= (16 - tmp); CMOS_WRITE(val, RTC_FREQ_SELECT); restore_flags(flags); return rtc_freq; } #endif /* ============================================================================== Free any resources that we have claimed */ static void free_resources(struct comedi_device *dev) { /* printk("free_resource()\n"); */ if (dev->private) { pcl818_ai_cancel(dev, devpriv->sub_ai); pcl818_reset(dev); if (devpriv->dma) free_dma(devpriv->dma); if (devpriv->dmabuf[0]) free_pages(devpriv->dmabuf[0], devpriv->dmapages[0]); if (devpriv->dmabuf[1]) free_pages(devpriv->dmabuf[1], devpriv->dmapages[1]); #ifdef unused if (devpriv->rtc_irq) free_irq(devpriv->rtc_irq, dev); if ((devpriv->dma_rtc) && (RTC_lock == 1)) { if (devpriv->rtc_iobase) release_region(devpriv->rtc_iobase, devpriv->rtc_iosize); } if (devpriv->dma_rtc) RTC_lock--; #endif } if (dev->irq) free_irq(dev->irq, dev); if (dev->iobase) release_region(dev->iobase, devpriv->io_range); /* printk("free_resource() end\n"); */ } /* ============================================================================== Initialization */ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it) { int ret; unsigned long iobase; unsigned int irq; int dma; unsigned long pages; struct comedi_subdevice *s; ret = alloc_private(dev, sizeof(struct pcl818_private)); if (ret < 0) return ret; /* Can't alloc mem */ /* claim our I/O space */ iobase = it->options[0]; printk("comedi%d: pcl818: board=%s, ioport=0x%03lx", dev->minor, this_board->name, iobase); devpriv->io_range = this_board->io_range; if ((this_board->fifo) && (it->options[2] == -1)) { /* we've board with FIFO and we want to use FIFO */ devpriv->io_range = PCLx1xFIFO_RANGE; devpriv->usefifo = 1; } if (!request_region(iobase, devpriv->io_range, "pcl818")) { printk("I/O port conflict\n"); return -EIO; } dev->iobase 
= iobase; if (pcl818_check(iobase)) { printk(", I can't detect board. FAIL!\n"); return -EIO; } /* set up some name stuff */ dev->board_name = this_board->name; /* grab our IRQ */ irq = 0; if (this_board->IRQbits != 0) { /* board support IRQ */ irq = it->options[1]; if (irq) { /* we want to use IRQ */ if (((1 << irq) & this_board->IRQbits) == 0) { printk (", IRQ %u is out of allowed range, DISABLING IT", irq); irq = 0; /* Bad IRQ */ } else { if (request_irq (irq, interrupt_pcl818, 0, "pcl818", dev)) { printk (", unable to allocate IRQ %u, DISABLING IT", irq); irq = 0; /* Can't use IRQ */ } else { printk(", irq=%u", irq); } } } } dev->irq = irq; if (irq) { devpriv->irq_free = 1; } /* 1=we have allocated irq */ else { devpriv->irq_free = 0; } devpriv->irq_blocked = 0; /* number of subdevice which use IRQ */ devpriv->ai_mode = 0; /* mode of irq */ #ifdef unused /* grab RTC for DMA operations */ devpriv->dma_rtc = 0; if (it->options[2] > 0) { /* we want to use DMA */ if (RTC_lock == 0) { if (!request_region(RTC_PORT(0), RTC_IO_EXTENT, "pcl818 (RTC)")) goto no_rtc; } devpriv->rtc_iobase = RTC_PORT(0); devpriv->rtc_iosize = RTC_IO_EXTENT; RTC_lock++; if (!request_irq(RTC_IRQ, interrupt_pcl818_ai_mode13_dma_rtc, 0, "pcl818 DMA (RTC)", dev)) { devpriv->dma_rtc = 1; devpriv->rtc_irq = RTC_IRQ; printk(", dma_irq=%u", devpriv->rtc_irq); } else { RTC_lock--; if (RTC_lock == 0) { if (devpriv->rtc_iobase) release_region(devpriv->rtc_iobase, devpriv->rtc_iosize); } devpriv->rtc_iobase = 0; devpriv->rtc_iosize = 0; } } no_rtc: #endif /* grab our DMA */ dma = 0; devpriv->dma = dma; if ((devpriv->irq_free == 0) && (devpriv->dma_rtc == 0)) goto no_dma; /* if we haven't IRQ, we can't use DMA */ if (this_board->DMAbits != 0) { /* board support DMA */ dma = it->options[2]; if (dma < 1) goto no_dma; /* DMA disabled */ if (((1 << dma) & this_board->DMAbits) == 0) { printk(", DMA is out of allowed range, FAIL!\n"); return -EINVAL; /* Bad DMA */ } ret = request_dma(dma, "pcl818"); if (ret) 
{ printk(", unable to allocate DMA %u, FAIL!\n", dma); return -EBUSY; /* DMA isn't free */ } devpriv->dma = dma; printk(", dma=%u", dma); pages = 2; /* we need 16KB */ devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages); if (!devpriv->dmabuf[0]) { printk(", unable to allocate DMA buffer, FAIL!\n"); /* maybe experiment with try_to_free_pages() will help .... */ return -EBUSY; /* no buffer :-( */ } devpriv->dmapages[0] = pages; devpriv->hwdmaptr[0] = virt_to_bus((void *)devpriv->dmabuf[0]); devpriv->hwdmasize[0] = (1 << pages) * PAGE_SIZE; /* printk("%d %d %ld, ",devpriv->dmapages[0],devpriv->hwdmasize[0],PAGE_SIZE); */ if (devpriv->dma_rtc == 0) { /* we must do duble buff :-( */ devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages); if (!devpriv->dmabuf[1]) { printk (", unable to allocate DMA buffer, FAIL!\n"); return -EBUSY; } devpriv->dmapages[1] = pages; devpriv->hwdmaptr[1] = virt_to_bus((void *)devpriv->dmabuf[1]); devpriv->hwdmasize[1] = (1 << pages) * PAGE_SIZE; } } no_dma: ret = alloc_subdevices(dev, 4); if (ret < 0) return ret; s = dev->subdevices + 0; if (!this_board->n_aichan_se) { s->type = COMEDI_SUBD_UNUSED; } else { s->type = COMEDI_SUBD_AI; devpriv->sub_ai = s; s->subdev_flags = SDF_READABLE; if (check_single_ended(dev->iobase)) { s->n_chan = this_board->n_aichan_se; s->subdev_flags |= SDF_COMMON | SDF_GROUND; printk(", %dchans S.E. 
DAC", s->n_chan); } else { s->n_chan = this_board->n_aichan_diff; s->subdev_flags |= SDF_DIFF; printk(", %dchans DIFF DAC", s->n_chan); } s->maxdata = this_board->ai_maxdata; s->len_chanlist = s->n_chan; s->range_table = this_board->ai_range_type; s->cancel = pcl818_ai_cancel; s->insn_read = pcl818_ai_insn_read; if ((irq) || (devpriv->dma_rtc)) { dev->read_subdev = s; s->subdev_flags |= SDF_CMD_READ; s->do_cmdtest = ai_cmdtest; s->do_cmd = ai_cmd; } if (this_board->is_818) { if ((it->options[4] == 1) || (it->options[4] == 10)) s->range_table = &range_pcl818l_h_ai; /* secondary range list jumper selectable */ } else { switch (it->options[4]) { case 0: s->range_table = &range_bipolar10; break; case 1: s->range_table = &range_bipolar5; break; case 2: s->range_table = &range_bipolar2_5; break; case 3: s->range_table = &range718_bipolar1; break; case 4: s->range_table = &range718_bipolar0_5; break; case 6: s->range_table = &range_unipolar10; break; case 7: s->range_table = &range_unipolar5; break; case 8: s->range_table = &range718_unipolar2; break; case 9: s->range_table = &range718_unipolar1; break; default: s->range_table = &range_unknown; break; } } } s = dev->subdevices + 1; if (!this_board->n_aochan) { s->type = COMEDI_SUBD_UNUSED; } else { s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE | SDF_GROUND; s->n_chan = this_board->n_aochan; s->maxdata = this_board->ao_maxdata; s->len_chanlist = this_board->n_aochan; s->range_table = this_board->ao_range_type; s->insn_read = pcl818_ao_insn_read; s->insn_write = pcl818_ao_insn_write; #ifdef unused #ifdef PCL818_MODE13_AO if (irq) { s->trig[1] = pcl818_ao_mode1; s->trig[3] = pcl818_ao_mode3; } #endif #endif if (this_board->is_818) { if ((it->options[4] == 1) || (it->options[4] == 10)) s->range_table = &range_unipolar10; if (it->options[4] == 2) s->range_table = &range_unknown; } else { if ((it->options[5] == 1) || (it->options[5] == 10)) s->range_table = &range_unipolar10; if (it->options[5] == 2) s->range_table = 
&range_unknown; } } s = dev->subdevices + 2; if (!this_board->n_dichan) { s->type = COMEDI_SUBD_UNUSED; } else { s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE; s->n_chan = this_board->n_dichan; s->maxdata = 1; s->len_chanlist = this_board->n_dichan; s->range_table = &range_digital; s->insn_bits = pcl818_di_insn_bits; } s = dev->subdevices + 3; if (!this_board->n_dochan) { s->type = COMEDI_SUBD_UNUSED; } else { s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_WRITABLE; s->n_chan = this_board->n_dochan; s->maxdata = 1; s->len_chanlist = this_board->n_dochan; s->range_table = &range_digital; s->insn_bits = pcl818_do_insn_bits; } /* select 1/10MHz oscilator */ if ((it->options[3] == 0) || (it->options[3] == 10)) { devpriv->i8253_osc_base = 100; } else { devpriv->i8253_osc_base = 1000; } /* max sampling speed */ devpriv->ns_min = this_board->ns_min; if (!this_board->is_818) { if ((it->options[6] == 1) || (it->options[6] == 100)) devpriv->ns_min = 10000; /* extended PCL718 to 100kHz DAC */ } pcl818_reset(dev); printk("\n"); return 0; } /* ============================================================================== Removes device */ static int pcl818_detach(struct comedi_device *dev) { /* printk("comedi%d: pcl818: remove\n", dev->minor); */ free_resources(dev); return 0; } MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
blackwing182/htc-kernel-msm7x30-3.0
drivers/target/target_core_device.c
2380
46603
/******************************************************************************* * Filename: target_core_device.c (based on iscsi_target_device.c) * * This file contains the iSCSI Virtual Device and Disk Transport * agnostic related functions. * * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved. * Copyright (c) 2007-2010 Rising Tide Systems * Copyright (c) 2008-2010 Linux-iSCSI.org * * Nicholas A. Bellinger <nab@kernel.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
* ******************************************************************************/ #include <linux/net.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/in.h> #include <net/sock.h> #include <net/tcp.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <target/target_core_base.h> #include <target/target_core_device.h> #include <target/target_core_tpg.h> #include <target/target_core_transport.h> #include <target/target_core_fabric_ops.h> #include "target_core_alua.h" #include "target_core_hba.h" #include "target_core_pr.h" #include "target_core_ua.h" static void se_dev_start(struct se_device *dev); static void se_dev_stop(struct se_device *dev); int transport_get_lun_for_cmd( struct se_cmd *se_cmd, unsigned char *cdb, u32 unpacked_lun) { struct se_dev_entry *deve; struct se_lun *se_lun = NULL; struct se_session *se_sess = SE_SESS(se_cmd); unsigned long flags; int read_only = 0; spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); deve = se_cmd->se_deve = &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { if (se_cmd) { deve->total_cmds++; deve->total_bytes += se_cmd->data_length; if (se_cmd->data_direction == DMA_TO_DEVICE) { if (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) { read_only = 1; goto out; } deve->write_bytes += se_cmd->data_length; } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { deve->read_bytes += se_cmd->data_length; } } deve->deve_cmds++; se_lun = se_cmd->se_lun = deve->se_lun; se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; } out: spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); if (!se_lun) { if (read_only) { se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 
printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" " Access for 0x%08x\n", CMD_TFO(se_cmd)->get_fabric_name(), unpacked_lun); return -1; } else { /* * Use the se_portal_group->tpg_virt_lun0 to allow for * REPORT_LUNS, et al to be returned when no active * MappedLUN=0 exists for this Initiator Port. */ if (unpacked_lun != 0) { se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" " Access for 0x%08x\n", CMD_TFO(se_cmd)->get_fabric_name(), unpacked_lun); return -1; } /* * Force WRITE PROTECT for virtual LUN 0 */ if ((se_cmd->data_direction != DMA_FROM_DEVICE) && (se_cmd->data_direction != DMA_NONE)) { se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; return -1; } #if 0 printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n", CMD_TFO(se_cmd)->get_fabric_name()); #endif se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; se_cmd->orig_fe_lun = 0; se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; } } /* * Determine if the struct se_lun is online. */ /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ if (se_dev_check_online(se_lun->lun_se_dev) != 0) { se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; return -1; } { struct se_device *dev = se_lun->lun_se_dev; spin_lock_irq(&dev->stats_lock); dev->num_cmds++; if (se_cmd->data_direction == DMA_TO_DEVICE) dev->write_bytes += se_cmd->data_length; else if (se_cmd->data_direction == DMA_FROM_DEVICE) dev->read_bytes += se_cmd->data_length; spin_unlock_irq(&dev->stats_lock); } /* * Add the iscsi_cmd_t to the struct se_lun's cmd list. This list is used * for tracking state of struct se_cmds during LUN shutdown events. 
*/ spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list); atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1); #if 0 printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n", CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun); #endif spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); return 0; } EXPORT_SYMBOL(transport_get_lun_for_cmd); int transport_get_lun_for_tmr( struct se_cmd *se_cmd, u32 unpacked_lun) { struct se_device *dev = NULL; struct se_dev_entry *deve; struct se_lun *se_lun = NULL; struct se_session *se_sess = SE_SESS(se_cmd); struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); deve = se_cmd->se_deve = &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; dev = se_lun->lun_se_dev; se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; /* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */ } spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); if (!se_lun) { printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" " Access for 0x%08x\n", CMD_TFO(se_cmd)->get_fabric_name(), unpacked_lun); se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; return -1; } /* * Determine if the struct se_lun is online. */ /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ if (se_dev_check_online(se_lun->lun_se_dev) != 0) { se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; return -1; } se_tmr->tmr_dev = dev; spin_lock(&dev->se_tmr_lock); list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list); spin_unlock(&dev->se_tmr_lock); return 0; } EXPORT_SYMBOL(transport_get_lun_for_tmr); /* * This function is called from core_scsi3_emulate_pro_register_and_move() * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count * when a matching rtpi is found. 
*/ struct se_dev_entry *core_get_se_deve_from_rtpi( struct se_node_acl *nacl, u16 rtpi) { struct se_dev_entry *deve; struct se_lun *lun; struct se_port *port; struct se_portal_group *tpg = nacl->se_tpg; u32 i; spin_lock_irq(&nacl->device_list_lock); for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { deve = &nacl->device_list[i]; if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) continue; lun = deve->se_lun; if (!(lun)) { printk(KERN_ERR "%s device entries device pointer is" " NULL, but Initiator has access.\n", TPG_TFO(tpg)->get_fabric_name()); continue; } port = lun->lun_sep; if (!(port)) { printk(KERN_ERR "%s device entries device pointer is" " NULL, but Initiator has access.\n", TPG_TFO(tpg)->get_fabric_name()); continue; } if (port->sep_rtpi != rtpi) continue; atomic_inc(&deve->pr_ref_count); smp_mb__after_atomic_inc(); spin_unlock_irq(&nacl->device_list_lock); return deve; } spin_unlock_irq(&nacl->device_list_lock); return NULL; } int core_free_device_list_for_node( struct se_node_acl *nacl, struct se_portal_group *tpg) { struct se_dev_entry *deve; struct se_lun *lun; u32 i; if (!nacl->device_list) return 0; spin_lock_irq(&nacl->device_list_lock); for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { deve = &nacl->device_list[i]; if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) continue; if (!deve->se_lun) { printk(KERN_ERR "%s device entries device pointer is" " NULL, but Initiator has access.\n", TPG_TFO(tpg)->get_fabric_name()); continue; } lun = deve->se_lun; spin_unlock_irq(&nacl->device_list_lock); core_update_device_list_for_node(lun, NULL, deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); spin_lock_irq(&nacl->device_list_lock); } spin_unlock_irq(&nacl->device_list_lock); kfree(nacl->device_list); nacl->device_list = NULL; return 0; } void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) { struct se_dev_entry *deve; spin_lock_irq(&se_nacl->device_list_lock); deve = 
&se_nacl->device_list[se_cmd->orig_fe_lun]; deve->deve_cmds--; spin_unlock_irq(&se_nacl->device_list_lock); return; } void core_update_device_list_access( u32 mapped_lun, u32 lun_access, struct se_node_acl *nacl) { struct se_dev_entry *deve; spin_lock_irq(&nacl->device_list_lock); deve = &nacl->device_list[mapped_lun]; if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; } else { deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; } spin_unlock_irq(&nacl->device_list_lock); return; } /* core_update_device_list_for_node(): * * */ int core_update_device_list_for_node( struct se_lun *lun, struct se_lun_acl *lun_acl, u32 mapped_lun, u32 lun_access, struct se_node_acl *nacl, struct se_portal_group *tpg, int enable) { struct se_port *port = lun->lun_sep; struct se_dev_entry *deve = &nacl->device_list[mapped_lun]; int trans = 0; /* * If the MappedLUN entry is being disabled, the entry in * port->sep_alua_list must be removed now before clearing the * struct se_dev_entry pointers below as logic in * core_alua_do_transition_tg_pt() depends on these being present. */ if (!(enable)) { /* * deve->se_lun_acl will be NULL for demo-mode created LUNs * that have not been explicitly concerted to MappedLUNs -> * struct se_lun_acl, but we remove deve->alua_port_list from * port->sep_alua_list. This also means that active UAs and * NodeACL context specific PR metadata for demo-mode * MappedLUN *deve will be released below.. */ spin_lock_bh(&port->sep_alua_lock); list_del(&deve->alua_port_list); spin_unlock_bh(&port->sep_alua_lock); } spin_lock_irq(&nacl->device_list_lock); if (enable) { /* * Check if the call is handling demo mode -> explict LUN ACL * transition. This transition must be for the same struct se_lun * + mapped_lun that was setup in demo mode.. 
*/ if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { if (deve->se_lun_acl != NULL) { printk(KERN_ERR "struct se_dev_entry->se_lun_acl" " already set for demo mode -> explict" " LUN ACL transition\n"); spin_unlock_irq(&nacl->device_list_lock); return -1; } if (deve->se_lun != lun) { printk(KERN_ERR "struct se_dev_entry->se_lun does" " match passed struct se_lun for demo mode" " -> explict LUN ACL transition\n"); spin_unlock_irq(&nacl->device_list_lock); return -1; } deve->se_lun_acl = lun_acl; trans = 1; } else { deve->se_lun = lun; deve->se_lun_acl = lun_acl; deve->mapped_lun = mapped_lun; deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS; } if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; } else { deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; } if (trans) { spin_unlock_irq(&nacl->device_list_lock); return 0; } deve->creation_time = get_jiffies_64(); deve->attach_count++; spin_unlock_irq(&nacl->device_list_lock); spin_lock_bh(&port->sep_alua_lock); list_add_tail(&deve->alua_port_list, &port->sep_alua_list); spin_unlock_bh(&port->sep_alua_lock); return 0; } /* * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE * PR operation to complete. 
*/ spin_unlock_irq(&nacl->device_list_lock); while (atomic_read(&deve->pr_ref_count) != 0) cpu_relax(); spin_lock_irq(&nacl->device_list_lock); /* * Disable struct se_dev_entry LUN ACL mapping */ core_scsi3_ua_release_all(deve); deve->se_lun = NULL; deve->se_lun_acl = NULL; deve->lun_flags = 0; deve->creation_time = 0; deve->attach_count--; spin_unlock_irq(&nacl->device_list_lock); core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl); return 0; } /* core_clear_lun_from_tpg(): * * */ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) { struct se_node_acl *nacl; struct se_dev_entry *deve; u32 i; spin_lock_bh(&tpg->acl_node_lock); list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { spin_unlock_bh(&tpg->acl_node_lock); spin_lock_irq(&nacl->device_list_lock); for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { deve = &nacl->device_list[i]; if (lun != deve->se_lun) continue; spin_unlock_irq(&nacl->device_list_lock); core_update_device_list_for_node(lun, NULL, deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); spin_lock_irq(&nacl->device_list_lock); } spin_unlock_irq(&nacl->device_list_lock); spin_lock_bh(&tpg->acl_node_lock); } spin_unlock_bh(&tpg->acl_node_lock); return; } static struct se_port *core_alloc_port(struct se_device *dev) { struct se_port *port, *port_tmp; port = kzalloc(sizeof(struct se_port), GFP_KERNEL); if (!(port)) { printk(KERN_ERR "Unable to allocate struct se_port\n"); return NULL; } INIT_LIST_HEAD(&port->sep_alua_list); INIT_LIST_HEAD(&port->sep_list); atomic_set(&port->sep_tg_pt_secondary_offline, 0); spin_lock_init(&port->sep_alua_lock); mutex_init(&port->sep_tg_pt_md_mutex); spin_lock(&dev->se_port_lock); if (dev->dev_port_count == 0x0000ffff) { printk(KERN_WARNING "Reached dev->dev_port_count ==" " 0x0000ffff\n"); spin_unlock(&dev->se_port_lock); return NULL; } again: /* * Allocate the next RELATIVE TARGET PORT IDENTIFER for this struct se_device * Here is the table from spc4r17 section 7.7.3.8. 
 *
 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
 *
 * Code Description
 * 0h Reserved
 * 1h Relative port 1, historically known as port A
 * 2h Relative port 2, historically known as port B
 * 3h to FFFFh Relative port 3 through 65 535
 */
	/* 0 is reserved; a 16-bit wrap to 0 retries via the goto above. */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!(port->sep_rtpi))
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}

/*
 * Link @port to @tpg/@lun, add it to dev->dev_sep_list, and (for SPC3
 * ALUA emulation) attach the port to the default target port group.
 * sep_index is set to the relative target port identifier.
 */
static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct se_subsystem_dev *su_dev = SU_DEV(dev);
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			T10_ALUA(su_dev)->default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFER */
}

/*
 * Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock)
	__acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	/*
	 * NOTE(review): this is a single check, not a loop — compare the
	 * `while (...) cpu_relax()` wait on pr_ref_count elsewhere in this
	 * file.  Confirm against upstream whether `if` is intentional.
	 */
	if (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);

	return;
}

/*
 * Export @dev as @lun within @tpg: allocate a port, start the device and
 * bump dev_export_obj.  Returns 0 on success, -1 on port allocation
 * failure.
 */
int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port;

	port = core_alloc_port(dev);
	if (!(port))
		return -1;

	lun->lun_se_dev = dev;
	se_dev_start(dev);

	atomic_inc(&dev->dev_export_obj.obj_access_count);
	core_export_port(dev, tpg, port, lun);
	return 0;
}

/*
 * Undo core_dev_export(): release the port, drop the export reference and
 * stop the device.  No-op if the LUN was never bound to a device.
 */
void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	atomic_dec(&dev->dev_export_obj.obj_access_count);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	se_dev_stop(dev);
	lun->lun_se_dev = NULL;
}

/*
 * Build the REPORT LUNS response payload in the command's task buffer.
 * Without a session (internal passthrough) only LUN 0 is reported;
 * otherwise every MappedLUN with INITIATOR_ACCESS in the session's node
 * ACL is emitted, and the LUN LIST LENGTH header reflects the full count
 * even when the allocation length truncates the list (SPC2-R20 7.19).
 */
int transport_core_report_lun_response(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun;
	struct se_session *se_sess = SE_SESS(se_cmd);
	struct se_task *se_task;
	unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
	u32 cdb_offset = 0, lun_count = 0, offset = 8, i;

	/* Grab the first task on the list (loop body breaks immediately). */
	list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
		break;

	/*
	 * NOTE(review): after list_for_each_entry() the iterator is never
	 * NULL, even for an empty list — it points at the head's container,
	 * so this check cannot fire as written; verify against upstream.
	 */
	if (!(se_task)) {
		printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD. In that case, report LUN=0 only.
	 */
	if (!(se_sess)) {
		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
		lun_count = 1;
		goto done;
	}

	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &SE_NODE_ACL(se_sess)->device_list[i];
		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;
		se_lun = deve->se_lun;
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		if ((cdb_offset + 8) >= se_cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
		offset += 8;
		cdb_offset += 8;
	}
	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	/* LUN LIST LENGTH header = number of LUNs * 8 bytes, big-endian. */
	lun_count *= 8;
	buf[0] = ((lun_count >> 24) & 0xff);
	buf[1] = ((lun_count >> 16) & 0xff);
	buf[2] = ((lun_count >> 8) & 0xff);
	buf[3] = (lun_count & 0xff);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

/* se_release_device_for_hba():
 *
 * Tear down @dev: stop it if it is in any active/deactivated state, stop
 * its processing thread, let the backend free its private data, unhook it
 * from the HBA's device list, release PR registrations and VPD pages, and
 * finally free the device and its queue objects.
 */
void se_release_device_for_hba(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
		se_dev_stop(dev);

	if (dev->dev_ptr) {
		kthread_stop(dev->process_thread);
		if (dev->transport->free_device)
			dev->transport->free_device(dev->dev_ptr);
	}

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	kfree(dev->dev_status_queue_obj);
	kfree(dev->dev_queue_obj);
	kfree(dev);

	return;
}

/* Free every cached T10 VPD page attached to @dev's WWN. */
void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock);

	return;
}

/* se_free_virtual_device():
 *
 * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
 */
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
	/* A still-exported device (non-empty sep_list) is a bug; warn loudly. */
	if (!list_empty(&dev->dev_sep_list))
		dump_stack();

	core_alua_free_lu_gp_mem(dev);
	se_release_device_for_hba(dev);

	return 0;
}

/*
 * Take a dev_obj access reference; on the 0 -> 1 transition flip the
 * device status from DEACTIVATED to ACTIVATED (or the OFFLINE variants).
 */
static void se_dev_start(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_inc(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
			dev->dev_status &=
				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}

/*
 * Drop a dev_obj access reference; on the 1 -> 0 transition flip the
 * device status back from ACTIVATED to DEACTIVATED (or OFFLINE variants).
 */
static void se_dev_stop(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_dec(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
			dev->dev_status |=
				TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}

/*
 * Return 0 when the device is in an ACTIVATED or DEACTIVATED (online)
 * state, 1 otherwise.  Note the inverted 0/1 convention.
 */
int se_dev_check_online(struct se_device *dev)
{
	int ret;

	spin_lock_irq(&dev->dev_status_lock);
	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ?
		0 : 1;
	spin_unlock_irq(&dev->dev_status_lock);

	return ret;
}

/* Return non-zero when the device is in SHUTDOWN state. */
int se_dev_check_shutdown(struct se_device *dev)
{
	int ret;

	spin_lock_irq(&dev->dev_status_lock);
	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
	spin_unlock_irq(&dev->dev_status_lock);

	return ret;
}

/*
 * Initialise DEV_ATTRIB(dev) with the DA_* compile-time defaults plus the
 * block-size / max-sectors / queue-depth limits reported by the backend
 * via @dev_limits.
 */
void se_dev_set_default_attribs(
	struct se_device *dev,
	struct se_dev_limits *dev_limits)
{
	struct queue_limits *limits = &dev_limits->limits;

	DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO;
	DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE;
	DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ;
	DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS;
	DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU;
	DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS;
	DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS;
	DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA;
	DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	/*
	 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
	 * iblock_create_virtdevice() from struct queue_limits values
	 * if blk_queue_discard()==1
	 */
	DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	DEV_ATTRIB(dev)->max_unmap_block_desc_count =
				DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	DEV_ATTRIB(dev)->unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	/*
	 * block_size is based on subsystem plugin dependent requirements.
	 */
	DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size;
	DEV_ATTRIB(dev)->block_size = limits->logical_block_size;
	/*
	 * max_sectors is based on subsystem plugin dependent requirements.
	 */
	DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors;
	DEV_ATTRIB(dev)->max_sectors = limits->max_sectors;
	/*
	 * Set optimal_sectors from max_sectors, which can be lowered via
	 * configfs.
	 */
	DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors;
	/*
	 * queue_depth is based on subsystem plugin dependent requirements.
	 */
	DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth;
	DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth;
}

/*
 * Set the per-device task timeout, rejecting values above
 * DA_TASK_TIMEOUT_MAX.  Returns 0 on success, -1 on out-of-range input.
 */
int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
{
	if (task_timeout > DA_TASK_TIMEOUT_MAX) {
		printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then"
			" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
		return -1;
	} else {
		DEV_ATTRIB(dev)->task_timeout = task_timeout;
		printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
			dev, task_timeout);
	}

	return 0;
}

/* Unvalidated setter for the UNMAP max-LBA-count attribute. */
int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count;
	printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
			dev, DEV_ATTRIB(dev)->max_unmap_lba_count);
	return 0;
}

/* Unvalidated setter for the UNMAP max-block-descriptor-count attribute. */
int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count;
	printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
			dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count);
	return 0;
}

/* Unvalidated setter for the UNMAP granularity attribute. */
int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity;
	printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
			dev, DEV_ATTRIB(dev)->unmap_granularity);
	return 0;
}

/* Unvalidated setter for the UNMAP granularity alignment attribute. */
int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment;
	printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
			dev, DEV_ATTRIB(dev)->unmap_granularity_alignment);
	return 0;
}

/*
 * Toggle Disable Page Out emulation (0/1 only); requires the backend to
 * implement and support dpo_emulated().
 */
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	if (TRANSPORT(dev)->dpo_emulated == NULL) {
		printk(KERN_ERR
			"TRANSPORT(dev)->dpo_emulated is NULL\n");
		return -1;
	}
	if (TRANSPORT(dev)->dpo_emulated(dev) == 0) {
		printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n");
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_dpo = flag;
	printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
			" bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo);
	return 0;
}

/*
 * Toggle Forced Unit Access WRITE emulation (0/1 only); requires backend
 * fua_write_emulated() support.
 */
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	if (TRANSPORT(dev)->fua_write_emulated == NULL) {
		printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n");
		return -1;
	}
	if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) {
		printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n");
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_fua_write = flag;
	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
			dev, DEV_ATTRIB(dev)->emulate_fua_write);
	return 0;
}

/*
 * Toggle Forced Unit Access READ emulation (0/1 only); requires backend
 * fua_read_emulated() support.
 */
int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	if (TRANSPORT(dev)->fua_read_emulated == NULL) {
		printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n");
		return -1;
	}
	if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) {
		printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n");
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_fua_read = flag;
	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
			dev, DEV_ATTRIB(dev)->emulate_fua_read);
	return 0;
}

/*
 * Toggle write-cache emulation (0/1 only); requires backend
 * write_cache_emulated() support.
 */
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	if (TRANSPORT(dev)->write_cache_emulated == NULL) {
		printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n");
		return -1;
	}
	if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) {
		printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n");
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_write_cache = flag;
	printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			dev, DEV_ATTRIB(dev)->emulate_write_cache);
	return 0;
}

/*
 * Set UA_INTLCK_CTRL (0, 1 or 2); refused while the device is exported
 * (dev_export_obj count non-zero).
 */
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
			" exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag;
	printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl);

	return 0;
}

/*
 * Toggle TASK_ABORTED status emulation (0/1 only); refused while the
 * device is exported.
 */
int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_tas = flag;
	printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}

/*
 * Toggle Thin Provisioning UNMAP (0/1 only); requires block-layer discard
 * support to have been detected (max_unmap_block_desc_count non-zero).
 */
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard supported is detected iblock_create_virtdevice().
	 */
	if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
		printk(KERN_ERR "Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	DEV_ATTRIB(dev)->emulate_tpu = flag;
	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
				dev, flag);
	return 0;
}

/*
 * Toggle Thin Provisioning WRITE_SAME (0/1 only); same discard-support
 * requirement as emulate_tpu.
 */
int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard supported is detected iblock_create_virtdevice().
	 */
	if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
		printk(KERN_ERR "Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	DEV_ATTRIB(dev)->emulate_tpws = flag;
	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
				dev, flag);
	return 0;
}

/* Toggle persistent-reservation ISID enforcement (0/1 only). */
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	DEV_ATTRIB(dev)->enforce_pr_isids = flag;
	printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}

/*
 * Note, this can only be called on unexported SE Device Object.
*/ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) { u32 orig_queue_depth = dev->queue_depth; if (atomic_read(&dev->dev_export_obj.obj_access_count)) { printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while" " dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); return -1; } if (!(queue_depth)) { printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue" "_depth\n", dev); return -1; } if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { printk(KERN_ERR "dev[%p]: Passed queue_depth: %u" " exceeds TCM/SE_Device TCQ: %u\n", dev, queue_depth, DEV_ATTRIB(dev)->hw_queue_depth); return -1; } } else { if (queue_depth > DEV_ATTRIB(dev)->queue_depth) { if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { printk(KERN_ERR "dev[%p]: Passed queue_depth:" " %u exceeds TCM/SE_Device MAX" " TCQ: %u\n", dev, queue_depth, DEV_ATTRIB(dev)->hw_queue_depth); return -1; } } } DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth; if (queue_depth > orig_queue_depth) atomic_add(queue_depth - orig_queue_depth, &dev->depth_left); else if (queue_depth < orig_queue_depth) atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left); printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, queue_depth); return 0; } int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) { int force = 0; /* Force setting for VDEVS */ if (atomic_read(&dev->dev_export_obj.obj_access_count)) { printk(KERN_ERR "dev[%p]: Unable to change SE Device" " max_sectors while dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); return -1; } if (!(max_sectors)) { printk(KERN_ERR "dev[%p]: Illegal ZERO value for" " max_sectors\n", dev); return -1; } if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than" " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, 
DA_STATUS_MAX_SECTORS_MIN); return -1; } if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) { printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" " greater than TCM/SE_Device max_sectors:" " %u\n", dev, max_sectors, DEV_ATTRIB(dev)->hw_max_sectors); return -1; } } else { if (!(force) && (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors)) { printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" " greater than TCM/SE_Device max_sectors" ": %u, use force=1 to override.\n", dev, max_sectors, DEV_ATTRIB(dev)->hw_max_sectors); return -1; } if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" " greater than DA_STATUS_MAX_SECTORS_MAX:" " %u\n", dev, max_sectors, DA_STATUS_MAX_SECTORS_MAX); return -1; } } DEV_ATTRIB(dev)->max_sectors = max_sectors; printk("dev[%p]: SE Device max_sectors changed to %u\n", dev, max_sectors); return 0; } int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) { if (atomic_read(&dev->dev_export_obj.obj_access_count)) { printk(KERN_ERR "dev[%p]: Unable to change SE Device" " optimal_sectors while dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); return -EINVAL; } if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be" " changed for TCM/pSCSI\n", dev); return -EINVAL; } if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) { printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be" " greater than max_sectors: %u\n", dev, optimal_sectors, DEV_ATTRIB(dev)->max_sectors); return -EINVAL; } DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors; printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n", dev, optimal_sectors); return 0; } int se_dev_set_block_size(struct se_device *dev, u32 block_size) { if (atomic_read(&dev->dev_export_obj.obj_access_count)) { printk(KERN_ERR "dev[%p]: Unable to change SE 
Device block_size" " while dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); return -1; } if ((block_size != 512) && (block_size != 1024) && (block_size != 2048) && (block_size != 4096)) { printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u" " for SE device, must be 512, 1024, 2048 or 4096\n", dev, block_size); return -1; } if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { printk(KERN_ERR "dev[%p]: Not allowed to change block_size for" " Physical Device, use for Linux/SCSI to change" " block_size for underlying hardware\n", dev); return -1; } DEV_ATTRIB(dev)->block_size = block_size; printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n", dev, block_size); return 0; } struct se_lun *core_dev_add_lun( struct se_portal_group *tpg, struct se_hba *hba, struct se_device *dev, u32 lun) { struct se_lun *lun_p; u32 lun_access = 0; if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n", atomic_read(&dev->dev_access_obj.obj_access_count)); return NULL; } lun_p = core_tpg_pre_addlun(tpg, lun); if ((IS_ERR(lun_p)) || !(lun_p)) return NULL; if (dev->dev_flags & DF_READ_ONLY) lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; else lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0) return NULL; printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(), TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun, TPG_TFO(tpg)->get_fabric_name(), hba->hba_id); /* * Update LUN maps for dynamically added initiators when * generate_node_acl is enabled. 
	 */
	if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_bh(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl) {
				/* Drop the node lock around the per-ACL LUN
				 * mapping update. */
				spin_unlock_bh(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_bh(&tpg->acl_node_lock);
			}
		}
		spin_unlock_bh(&tpg->acl_node_lock);
	}

	return lun_p;
}

/* core_dev_del_lun():
 *
 * Deactivate LUN @unpacked_lun in @tpg.  Returns 0 on success or the
 * error propagated from core_tpg_pre_dellun().
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;
	int ret = 0;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
	if (!(lun))
		return ret;

	core_tpg_post_dellun(tpg, lun);

	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun,
		TPG_TFO(tpg)->get_fabric_name());

	return 0;
}

/*
 * Look up LUN @unpacked_lun in @tpg and return it only if it is currently
 * FREE (not yet activated); NULL otherwise.
 */
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

/* core_dev_get_lun():
 *
 * Like core_get_lun_from_tpg() but requires the LUN to be ACTIVE.
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

/*
 * Allocate a struct se_lun_acl for @initiatorname/@mapped_lun.  Returns
 * NULL with *ret set (-EOVERFLOW / -EINVAL / -ENOMEM) on failure.
 */
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	u32 mapped_lun,
	char *initiatorname,
	int *ret)
{
	struct se_lun_acl *lacl;
	struct se_node_acl *nacl;

	if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
		printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
			TPG_TFO(tpg)->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!(nacl)) {
		*ret = -EINVAL;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!(lacl)) {
		printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

	return lacl;
}

/*
 * Attach @lacl to active LUN @unpacked_lun, registering the MappedLUN in
 * the node ACL's device list.  A read-only LUN downgrades a requested
 * READ_WRITE access to READ_ONLY.
 */
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!(lun)) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!(nacl))
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access =
			TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg, 1) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
		" InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
	return 0;
}

/* core_dev_del_initiator_node_lun_acl():
 *
 * Detach @lacl from @lun: unlink it from the LUN's ACL list and disable
 * its MappedLUN entry in the node ACL's device list.
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!(nacl))
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

	lacl->se_lun = NULL;

	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

/* Log and free a previously allocated LUN ACL. */
void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg),
		TPG_TFO(tpg)->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

/*
 * Create the global virtual LUN 0 backed by an internal ramdisk HBA
 * ("rd_dr", 8 pages): allocate the HBA and subsystem dev, initialise all
 * embedded lists/locks, and create the backend virtual device.  Populates
 * se_global->g_lun0_{hba,su_dev,dev}.  Returns 0 or a negative errno.
 */
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	struct se_subsystem_dev *se_dev = NULL;
	struct se_subsystem_api *t;
	char buf[16];
	int ret;

	hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	se_global->g_lun0_hba = hba;
	t = hba->transport;

	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
	if (!(se_dev)) {
		printk(KERN_ERR "Unable to allocate memory for"
				" struct se_subsystem_dev\n");
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&se_dev->g_se_dev_list);
	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
	INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
	spin_lock_init(&se_dev->t10_reservation.registration_lock);
	spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
	spin_lock_init(&se_dev->se_dev_lock);
	se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
	se_dev->t10_wwn.t10_sub_dev = se_dev;
	se_dev->t10_alua.t10_sub_dev = se_dev;
	se_dev->se_dev_attrib.da_sub_dev = se_dev;
	se_dev->se_dev_hba = hba;

	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
	if (!(se_dev->se_dev_su_ptr)) {
		printk(KERN_ERR "Unable to locate subsystem dependent pointer"
			" from allocate_virtdevice()\n");
		ret = -ENOMEM;
		goto out;
	}
	se_global->g_lun0_su_dev = se_dev;

	/* Configure the internal ramdisk backend with 8 pages. */
	memset(buf, 0, 16);
	sprintf(buf, "rd_pages=8");
	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));

	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
	if (!(dev) || IS_ERR(dev)) {
		ret = -ENOMEM;
		goto out;
	}
	se_dev->se_dev_ptr = dev;
	se_global->g_lun0_dev = dev;

	return 0;
out:
	se_global->g_lun0_su_dev = NULL;
	kfree(se_dev);
	if (se_global->g_lun0_hba) {
		core_delete_hba(se_global->g_lun0_hba);
		se_global->g_lun0_hba = NULL;
	}
	return ret;
}

/* Tear down the global virtual LUN 0 created above. */
void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = se_global->g_lun0_hba;
	struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev;

	if (!(hba))
		return;

	if (se_global->g_lun0_dev)
		se_free_virtual_device(se_global->g_lun0_dev, hba);

	kfree(su_dev);
	core_delete_hba(hba);
}
gpl-2.0
javifo/android_kernel_samsung_smdk4412
drivers/net/enc28j60.c
2636
45529
/* * Microchip ENC28J60 ethernet driver (MAC + PHY) * * Copyright (C) 2007 Eurek srl * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com> * based on enc28j60.c written by David Anders for 2.4 kernel version * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * $Id: enc28j60.c,v 1.22 2007/12/20 10:47:01 claudio Exp $ */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/tcp.h> #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/spi/spi.h> #include "enc28j60_hw.h" #define DRV_NAME "enc28j60" #define DRV_VERSION "1.01" #define SPI_OPLEN 1 #define ENC28J60_MSG_DEFAULT \ (NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK) /* Buffer size required for the largest SPI transfer (i.e., reading a * frame). 
*/ #define SPI_TRANSFER_BUF_LEN (4 + MAX_FRAMELEN) #define TX_TIMEOUT (4 * HZ) /* Max TX retries in case of collision as suggested by errata datasheet */ #define MAX_TX_RETRYCOUNT 16 enum { RXFILTER_NORMAL, RXFILTER_MULTI, RXFILTER_PROMISC }; /* Driver local data */ struct enc28j60_net { struct net_device *netdev; struct spi_device *spi; struct mutex lock; struct sk_buff *tx_skb; struct work_struct tx_work; struct work_struct irq_work; struct work_struct setrx_work; struct work_struct restart_work; u8 bank; /* current register bank selected */ u16 next_pk_ptr; /* next packet pointer within FIFO */ u16 max_pk_counter; /* statistics: max packet counter */ u16 tx_retry_count; bool hw_enable; bool full_duplex; int rxfilter; u32 msg_enable; u8 spi_transfer_buf[SPI_TRANSFER_BUF_LEN]; }; /* use ethtool to change the level for any given device */ static struct { u32 msg_enable; } debug = { -1 }; /* * SPI read buffer * wait for the SPI transfer and copy received data to destination */ static int spi_read_buf(struct enc28j60_net *priv, int len, u8 *data) { u8 *rx_buf = priv->spi_transfer_buf + 4; u8 *tx_buf = priv->spi_transfer_buf; struct spi_transfer t = { .tx_buf = tx_buf, .rx_buf = rx_buf, .len = SPI_OPLEN + len, }; struct spi_message msg; int ret; tx_buf[0] = ENC28J60_READ_BUF_MEM; tx_buf[1] = tx_buf[2] = tx_buf[3] = 0; /* don't care */ spi_message_init(&msg); spi_message_add_tail(&t, &msg); ret = spi_sync(priv->spi, &msg); if (ret == 0) { memcpy(data, &rx_buf[SPI_OPLEN], len); ret = msg.status; } if (ret && netif_msg_drv(priv)) printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", __func__, ret); return ret; } /* * SPI write buffer */ static int spi_write_buf(struct enc28j60_net *priv, int len, const u8 *data) { int ret; if (len > SPI_TRANSFER_BUF_LEN - 1 || len <= 0) ret = -EINVAL; else { priv->spi_transfer_buf[0] = ENC28J60_WRITE_BUF_MEM; memcpy(&priv->spi_transfer_buf[1], data, len); ret = spi_write(priv->spi, priv->spi_transfer_buf, len + 1); if (ret && 
netif_msg_drv(priv)) printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", __func__, ret); } return ret; } /* * basic SPI read operation */ static u8 spi_read_op(struct enc28j60_net *priv, u8 op, u8 addr) { u8 tx_buf[2]; u8 rx_buf[4]; u8 val = 0; int ret; int slen = SPI_OPLEN; /* do dummy read if needed */ if (addr & SPRD_MASK) slen++; tx_buf[0] = op | (addr & ADDR_MASK); ret = spi_write_then_read(priv->spi, tx_buf, 1, rx_buf, slen); if (ret) printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", __func__, ret); else val = rx_buf[slen - 1]; return val; } /* * basic SPI write operation */ static int spi_write_op(struct enc28j60_net *priv, u8 op, u8 addr, u8 val) { int ret; priv->spi_transfer_buf[0] = op | (addr & ADDR_MASK); priv->spi_transfer_buf[1] = val; ret = spi_write(priv->spi, priv->spi_transfer_buf, 2); if (ret && netif_msg_drv(priv)) printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", __func__, ret); return ret; } static void enc28j60_soft_reset(struct enc28j60_net *priv) { if (netif_msg_hw(priv)) printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); spi_write_op(priv, ENC28J60_SOFT_RESET, 0, ENC28J60_SOFT_RESET); /* Errata workaround #1, CLKRDY check is unreliable, * delay at least 1 mS instead */ udelay(2000); } /* * select the current register bank if necessary */ static void enc28j60_set_bank(struct enc28j60_net *priv, u8 addr) { u8 b = (addr & BANK_MASK) >> 5; /* These registers (EIE, EIR, ESTAT, ECON2, ECON1) * are present in all banks, no need to switch bank */ if (addr >= EIE && addr <= ECON1) return; /* Clear or set each bank selection bit as needed */ if ((b & ECON1_BSEL0) != (priv->bank & ECON1_BSEL0)) { if (b & ECON1_BSEL0) spi_write_op(priv, ENC28J60_BIT_FIELD_SET, ECON1, ECON1_BSEL0); else spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, ECON1, ECON1_BSEL0); } if ((b & ECON1_BSEL1) != (priv->bank & ECON1_BSEL1)) { if (b & ECON1_BSEL1) spi_write_op(priv, ENC28J60_BIT_FIELD_SET, ECON1, ECON1_BSEL1); else spi_write_op(priv, 
ENC28J60_BIT_FIELD_CLR, ECON1, ECON1_BSEL1); } priv->bank = b; } /* * Register access routines through the SPI bus. * Every register access comes in two flavours: * - nolock_xxx: caller needs to invoke mutex_lock, usually to access * atomically more than one register * - locked_xxx: caller doesn't need to invoke mutex_lock, single access * * Some registers can be accessed through the bit field clear and * bit field set to avoid a read modify write cycle. */ /* * Register bit field Set */ static void nolock_reg_bfset(struct enc28j60_net *priv, u8 addr, u8 mask) { enc28j60_set_bank(priv, addr); spi_write_op(priv, ENC28J60_BIT_FIELD_SET, addr, mask); } static void locked_reg_bfset(struct enc28j60_net *priv, u8 addr, u8 mask) { mutex_lock(&priv->lock); nolock_reg_bfset(priv, addr, mask); mutex_unlock(&priv->lock); } /* * Register bit field Clear */ static void nolock_reg_bfclr(struct enc28j60_net *priv, u8 addr, u8 mask) { enc28j60_set_bank(priv, addr); spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, addr, mask); } static void locked_reg_bfclr(struct enc28j60_net *priv, u8 addr, u8 mask) { mutex_lock(&priv->lock); nolock_reg_bfclr(priv, addr, mask); mutex_unlock(&priv->lock); } /* * Register byte read */ static int nolock_regb_read(struct enc28j60_net *priv, u8 address) { enc28j60_set_bank(priv, address); return spi_read_op(priv, ENC28J60_READ_CTRL_REG, address); } static int locked_regb_read(struct enc28j60_net *priv, u8 address) { int ret; mutex_lock(&priv->lock); ret = nolock_regb_read(priv, address); mutex_unlock(&priv->lock); return ret; } /* * Register word read */ static int nolock_regw_read(struct enc28j60_net *priv, u8 address) { int rl, rh; enc28j60_set_bank(priv, address); rl = spi_read_op(priv, ENC28J60_READ_CTRL_REG, address); rh = spi_read_op(priv, ENC28J60_READ_CTRL_REG, address + 1); return (rh << 8) | rl; } static int locked_regw_read(struct enc28j60_net *priv, u8 address) { int ret; mutex_lock(&priv->lock); ret = nolock_regw_read(priv, address); 
mutex_unlock(&priv->lock); return ret; } /* * Register byte write */ static void nolock_regb_write(struct enc28j60_net *priv, u8 address, u8 data) { enc28j60_set_bank(priv, address); spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, data); } static void locked_regb_write(struct enc28j60_net *priv, u8 address, u8 data) { mutex_lock(&priv->lock); nolock_regb_write(priv, address, data); mutex_unlock(&priv->lock); } /* * Register word write */ static void nolock_regw_write(struct enc28j60_net *priv, u8 address, u16 data) { enc28j60_set_bank(priv, address); spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, (u8) data); spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address + 1, (u8) (data >> 8)); } static void locked_regw_write(struct enc28j60_net *priv, u8 address, u16 data) { mutex_lock(&priv->lock); nolock_regw_write(priv, address, data); mutex_unlock(&priv->lock); } /* * Buffer memory read * Select the starting address and execute a SPI buffer read */ static void enc28j60_mem_read(struct enc28j60_net *priv, u16 addr, int len, u8 *data) { mutex_lock(&priv->lock); nolock_regw_write(priv, ERDPTL, addr); #ifdef CONFIG_ENC28J60_WRITEVERIFY if (netif_msg_drv(priv)) { u16 reg; reg = nolock_regw_read(priv, ERDPTL); if (reg != addr) printk(KERN_DEBUG DRV_NAME ": %s() error writing ERDPT " "(0x%04x - 0x%04x)\n", __func__, reg, addr); } #endif spi_read_buf(priv, len, data); mutex_unlock(&priv->lock); } /* * Write packet to enc28j60 TX buffer memory */ static void enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data) { mutex_lock(&priv->lock); /* Set the write pointer to start of transmit buffer area */ nolock_regw_write(priv, EWRPTL, TXSTART_INIT); #ifdef CONFIG_ENC28J60_WRITEVERIFY if (netif_msg_drv(priv)) { u16 reg; reg = nolock_regw_read(priv, EWRPTL); if (reg != TXSTART_INIT) printk(KERN_DEBUG DRV_NAME ": %s() ERWPT:0x%04x != 0x%04x\n", __func__, reg, TXSTART_INIT); } #endif /* Set the TXND pointer to correspond to the packet size given */ 
nolock_regw_write(priv, ETXNDL, TXSTART_INIT + len); /* write per-packet control byte */ spi_write_op(priv, ENC28J60_WRITE_BUF_MEM, 0, 0x00); if (netif_msg_hw(priv)) printk(KERN_DEBUG DRV_NAME ": %s() after control byte ERWPT:0x%04x\n", __func__, nolock_regw_read(priv, EWRPTL)); /* copy the packet into the transmit buffer */ spi_write_buf(priv, len, data); if (netif_msg_hw(priv)) printk(KERN_DEBUG DRV_NAME ": %s() after write packet ERWPT:0x%04x, len=%d\n", __func__, nolock_regw_read(priv, EWRPTL), len); mutex_unlock(&priv->lock); } static unsigned long msec20_to_jiffies; static int poll_ready(struct enc28j60_net *priv, u8 reg, u8 mask, u8 val) { unsigned long timeout = jiffies + msec20_to_jiffies; /* 20 msec timeout read */ while ((nolock_regb_read(priv, reg) & mask) != val) { if (time_after(jiffies, timeout)) { if (netif_msg_drv(priv)) dev_dbg(&priv->spi->dev, "reg %02x ready timeout!\n", reg); return -ETIMEDOUT; } cpu_relax(); } return 0; } /* * Wait until the PHY operation is complete. */ static int wait_phy_ready(struct enc28j60_net *priv) { return poll_ready(priv, MISTAT, MISTAT_BUSY, 0) ? 
0 : 1; } /* * PHY register read * PHY registers are not accessed directly, but through the MII */ static u16 enc28j60_phy_read(struct enc28j60_net *priv, u8 address) { u16 ret; mutex_lock(&priv->lock); /* set the PHY register address */ nolock_regb_write(priv, MIREGADR, address); /* start the register read operation */ nolock_regb_write(priv, MICMD, MICMD_MIIRD); /* wait until the PHY read completes */ wait_phy_ready(priv); /* quit reading */ nolock_regb_write(priv, MICMD, 0x00); /* return the data */ ret = nolock_regw_read(priv, MIRDL); mutex_unlock(&priv->lock); return ret; } static int enc28j60_phy_write(struct enc28j60_net *priv, u8 address, u16 data) { int ret; mutex_lock(&priv->lock); /* set the PHY register address */ nolock_regb_write(priv, MIREGADR, address); /* write the PHY data */ nolock_regw_write(priv, MIWRL, data); /* wait until the PHY write completes and return */ ret = wait_phy_ready(priv); mutex_unlock(&priv->lock); return ret; } /* * Program the hardware MAC address from dev->dev_addr. */ static int enc28j60_set_hw_macaddr(struct net_device *ndev) { int ret; struct enc28j60_net *priv = netdev_priv(ndev); mutex_lock(&priv->lock); if (!priv->hw_enable) { if (netif_msg_drv(priv)) printk(KERN_INFO DRV_NAME ": %s: Setting MAC address to %pM\n", ndev->name, ndev->dev_addr); /* NOTE: MAC address in ENC28J60 is byte-backward */ nolock_regb_write(priv, MAADR5, ndev->dev_addr[0]); nolock_regb_write(priv, MAADR4, ndev->dev_addr[1]); nolock_regb_write(priv, MAADR3, ndev->dev_addr[2]); nolock_regb_write(priv, MAADR2, ndev->dev_addr[3]); nolock_regb_write(priv, MAADR1, ndev->dev_addr[4]); nolock_regb_write(priv, MAADR0, ndev->dev_addr[5]); ret = 0; } else { if (netif_msg_drv(priv)) printk(KERN_DEBUG DRV_NAME ": %s() Hardware must be disabled to set " "Mac address\n", __func__); ret = -EBUSY; } mutex_unlock(&priv->lock); return ret; } /* * Store the new hardware address in dev->dev_addr, and update the MAC. 
*/ static int enc28j60_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *address = addr; if (netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(address->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, address->sa_data, dev->addr_len); return enc28j60_set_hw_macaddr(dev); } /* * Debug routine to dump useful register contents */ static void enc28j60_dump_regs(struct enc28j60_net *priv, const char *msg) { mutex_lock(&priv->lock); printk(KERN_DEBUG DRV_NAME " %s\n" "HwRevID: 0x%02x\n" "Cntrl: ECON1 ECON2 ESTAT EIR EIE\n" " 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n" "MAC : MACON1 MACON3 MACON4\n" " 0x%02x 0x%02x 0x%02x\n" "Rx : ERXST ERXND ERXWRPT ERXRDPT ERXFCON EPKTCNT MAMXFL\n" " 0x%04x 0x%04x 0x%04x 0x%04x " "0x%02x 0x%02x 0x%04x\n" "Tx : ETXST ETXND MACLCON1 MACLCON2 MAPHSUP\n" " 0x%04x 0x%04x 0x%02x 0x%02x 0x%02x\n", msg, nolock_regb_read(priv, EREVID), nolock_regb_read(priv, ECON1), nolock_regb_read(priv, ECON2), nolock_regb_read(priv, ESTAT), nolock_regb_read(priv, EIR), nolock_regb_read(priv, EIE), nolock_regb_read(priv, MACON1), nolock_regb_read(priv, MACON3), nolock_regb_read(priv, MACON4), nolock_regw_read(priv, ERXSTL), nolock_regw_read(priv, ERXNDL), nolock_regw_read(priv, ERXWRPTL), nolock_regw_read(priv, ERXRDPTL), nolock_regb_read(priv, ERXFCON), nolock_regb_read(priv, EPKTCNT), nolock_regw_read(priv, MAMXFLL), nolock_regw_read(priv, ETXSTL), nolock_regw_read(priv, ETXNDL), nolock_regb_read(priv, MACLCON1), nolock_regb_read(priv, MACLCON2), nolock_regb_read(priv, MAPHSUP)); mutex_unlock(&priv->lock); } /* * ERXRDPT need to be set always at odd addresses, refer to errata datasheet */ static u16 erxrdpt_workaround(u16 next_packet_ptr, u16 start, u16 end) { u16 erxrdpt; if ((next_packet_ptr - 1 < start) || (next_packet_ptr - 1 > end)) erxrdpt = end; else erxrdpt = next_packet_ptr - 1; return erxrdpt; } /* * Calculate wrap around when reading beyond the end of the RX buffer */ static u16 rx_packet_start(u16 ptr) { if (ptr + 
RSV_SIZE > RXEND_INIT) return (ptr + RSV_SIZE) - (RXEND_INIT - RXSTART_INIT + 1); else return ptr + RSV_SIZE; } static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end) { u16 erxrdpt; if (start > 0x1FFF || end > 0x1FFF || start > end) { if (netif_msg_drv(priv)) printk(KERN_ERR DRV_NAME ": %s(%d, %d) RXFIFO " "bad parameters!\n", __func__, start, end); return; } /* set receive buffer start + end */ priv->next_pk_ptr = start; nolock_regw_write(priv, ERXSTL, start); erxrdpt = erxrdpt_workaround(priv->next_pk_ptr, start, end); nolock_regw_write(priv, ERXRDPTL, erxrdpt); nolock_regw_write(priv, ERXNDL, end); } static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end) { if (start > 0x1FFF || end > 0x1FFF || start > end) { if (netif_msg_drv(priv)) printk(KERN_ERR DRV_NAME ": %s(%d, %d) TXFIFO " "bad parameters!\n", __func__, start, end); return; } /* set transmit buffer start + end */ nolock_regw_write(priv, ETXSTL, start); nolock_regw_write(priv, ETXNDL, end); } /* * Low power mode shrinks power consumption about 100x, so we'd like * the chip to be in that mode whenever it's inactive. (However, we * can't stay in lowpower mode during suspend with WOL active.) */ static void enc28j60_lowpower(struct enc28j60_net *priv, bool is_low) { if (netif_msg_drv(priv)) dev_dbg(&priv->spi->dev, "%s power...\n", is_low ? "low" : "high"); mutex_lock(&priv->lock); if (is_low) { nolock_reg_bfclr(priv, ECON1, ECON1_RXEN); poll_ready(priv, ESTAT, ESTAT_RXBUSY, 0); poll_ready(priv, ECON1, ECON1_TXRTS, 0); /* ECON2_VRPS was set during initialization */ nolock_reg_bfset(priv, ECON2, ECON2_PWRSV); } else { nolock_reg_bfclr(priv, ECON2, ECON2_PWRSV); poll_ready(priv, ESTAT, ESTAT_CLKRDY, ESTAT_CLKRDY); /* caller sets ECON1_RXEN */ } mutex_unlock(&priv->lock); } static int enc28j60_hw_init(struct enc28j60_net *priv) { u8 reg; if (netif_msg_drv(priv)) printk(KERN_DEBUG DRV_NAME ": %s() - %s\n", __func__, priv->full_duplex ? 
"FullDuplex" : "HalfDuplex"); mutex_lock(&priv->lock); /* first reset the chip */ enc28j60_soft_reset(priv); /* Clear ECON1 */ spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, ECON1, 0x00); priv->bank = 0; priv->hw_enable = false; priv->tx_retry_count = 0; priv->max_pk_counter = 0; priv->rxfilter = RXFILTER_NORMAL; /* enable address auto increment and voltage regulator powersave */ nolock_regb_write(priv, ECON2, ECON2_AUTOINC | ECON2_VRPS); nolock_rxfifo_init(priv, RXSTART_INIT, RXEND_INIT); nolock_txfifo_init(priv, TXSTART_INIT, TXEND_INIT); mutex_unlock(&priv->lock); /* * Check the RevID. * If it's 0x00 or 0xFF probably the enc28j60 is not mounted or * damaged */ reg = locked_regb_read(priv, EREVID); if (netif_msg_drv(priv)) printk(KERN_INFO DRV_NAME ": chip RevID: 0x%02x\n", reg); if (reg == 0x00 || reg == 0xff) { if (netif_msg_drv(priv)) printk(KERN_DEBUG DRV_NAME ": %s() Invalid RevId %d\n", __func__, reg); return 0; } /* default filter mode: (unicast OR broadcast) AND crc valid */ locked_regb_write(priv, ERXFCON, ERXFCON_UCEN | ERXFCON_CRCEN | ERXFCON_BCEN); /* enable MAC receive */ locked_regb_write(priv, MACON1, MACON1_MARXEN | MACON1_TXPAUS | MACON1_RXPAUS); /* enable automatic padding and CRC operations */ if (priv->full_duplex) { locked_regb_write(priv, MACON3, MACON3_PADCFG0 | MACON3_TXCRCEN | MACON3_FRMLNEN | MACON3_FULDPX); /* set inter-frame gap (non-back-to-back) */ locked_regb_write(priv, MAIPGL, 0x12); /* set inter-frame gap (back-to-back) */ locked_regb_write(priv, MABBIPG, 0x15); } else { locked_regb_write(priv, MACON3, MACON3_PADCFG0 | MACON3_TXCRCEN | MACON3_FRMLNEN); locked_regb_write(priv, MACON4, 1 << 6); /* DEFER bit */ /* set inter-frame gap (non-back-to-back) */ locked_regw_write(priv, MAIPGL, 0x0C12); /* set inter-frame gap (back-to-back) */ locked_regb_write(priv, MABBIPG, 0x12); } /* * MACLCON1 (default) * MACLCON2 (default) * Set the maximum packet size which the controller will accept */ locked_regw_write(priv, MAMXFLL, MAX_FRAMELEN); /* 
Configure LEDs */ if (!enc28j60_phy_write(priv, PHLCON, ENC28J60_LAMPS_MODE)) return 0; if (priv->full_duplex) { if (!enc28j60_phy_write(priv, PHCON1, PHCON1_PDPXMD)) return 0; if (!enc28j60_phy_write(priv, PHCON2, 0x00)) return 0; } else { if (!enc28j60_phy_write(priv, PHCON1, 0x00)) return 0; if (!enc28j60_phy_write(priv, PHCON2, PHCON2_HDLDIS)) return 0; } if (netif_msg_hw(priv)) enc28j60_dump_regs(priv, "Hw initialized."); return 1; } static void enc28j60_hw_enable(struct enc28j60_net *priv) { /* enable interrupts */ if (netif_msg_hw(priv)) printk(KERN_DEBUG DRV_NAME ": %s() enabling interrupts.\n", __func__); enc28j60_phy_write(priv, PHIE, PHIE_PGEIE | PHIE_PLNKIE); mutex_lock(&priv->lock); nolock_reg_bfclr(priv, EIR, EIR_DMAIF | EIR_LINKIF | EIR_TXIF | EIR_TXERIF | EIR_RXERIF | EIR_PKTIF); nolock_regb_write(priv, EIE, EIE_INTIE | EIE_PKTIE | EIE_LINKIE | EIE_TXIE | EIE_TXERIE | EIE_RXERIE); /* enable receive logic */ nolock_reg_bfset(priv, ECON1, ECON1_RXEN); priv->hw_enable = true; mutex_unlock(&priv->lock); } static void enc28j60_hw_disable(struct enc28j60_net *priv) { mutex_lock(&priv->lock); /* disable interrutps and packet reception */ nolock_regb_write(priv, EIE, 0x00); nolock_reg_bfclr(priv, ECON1, ECON1_RXEN); priv->hw_enable = false; mutex_unlock(&priv->lock); } static int enc28j60_setlink(struct net_device *ndev, u8 autoneg, u16 speed, u8 duplex) { struct enc28j60_net *priv = netdev_priv(ndev); int ret = 0; if (!priv->hw_enable) { /* link is in low power mode now; duplex setting * will take effect on next enc28j60_hw_init(). 
*/ if (autoneg == AUTONEG_DISABLE && speed == SPEED_10) priv->full_duplex = (duplex == DUPLEX_FULL); else { if (netif_msg_link(priv)) dev_warn(&ndev->dev, "unsupported link setting\n"); ret = -EOPNOTSUPP; } } else { if (netif_msg_link(priv)) dev_warn(&ndev->dev, "Warning: hw must be disabled " "to set link mode\n"); ret = -EBUSY; } return ret; } /* * Read the Transmit Status Vector */ static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE]) { int endptr; endptr = locked_regw_read(priv, ETXNDL); if (netif_msg_hw(priv)) printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n", endptr + 1); enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv); } static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg, u8 tsv[TSV_SIZE]) { u16 tmp1, tmp2; printk(KERN_DEBUG DRV_NAME ": %s - TSV:\n", msg); tmp1 = tsv[1]; tmp1 <<= 8; tmp1 |= tsv[0]; tmp2 = tsv[5]; tmp2 <<= 8; tmp2 |= tsv[4]; printk(KERN_DEBUG DRV_NAME ": ByteCount: %d, CollisionCount: %d," " TotByteOnWire: %d\n", tmp1, tsv[2] & 0x0f, tmp2); printk(KERN_DEBUG DRV_NAME ": TxDone: %d, CRCErr:%d, LenChkErr: %d," " LenOutOfRange: %d\n", TSV_GETBIT(tsv, TSV_TXDONE), TSV_GETBIT(tsv, TSV_TXCRCERROR), TSV_GETBIT(tsv, TSV_TXLENCHKERROR), TSV_GETBIT(tsv, TSV_TXLENOUTOFRANGE)); printk(KERN_DEBUG DRV_NAME ": Multicast: %d, Broadcast: %d, " "PacketDefer: %d, ExDefer: %d\n", TSV_GETBIT(tsv, TSV_TXMULTICAST), TSV_GETBIT(tsv, TSV_TXBROADCAST), TSV_GETBIT(tsv, TSV_TXPACKETDEFER), TSV_GETBIT(tsv, TSV_TXEXDEFER)); printk(KERN_DEBUG DRV_NAME ": ExCollision: %d, LateCollision: %d, " "Giant: %d, Underrun: %d\n", TSV_GETBIT(tsv, TSV_TXEXCOLLISION), TSV_GETBIT(tsv, TSV_TXLATECOLLISION), TSV_GETBIT(tsv, TSV_TXGIANT), TSV_GETBIT(tsv, TSV_TXUNDERRUN)); printk(KERN_DEBUG DRV_NAME ": ControlFrame: %d, PauseFrame: %d, " "BackPressApp: %d, VLanTagFrame: %d\n", TSV_GETBIT(tsv, TSV_TXCONTROLFRAME), TSV_GETBIT(tsv, TSV_TXPAUSEFRAME), TSV_GETBIT(tsv, TSV_BACKPRESSUREAPP), TSV_GETBIT(tsv, TSV_TXVLANTAGFRAME)); } /* * 
Receive Status vector */ static void enc28j60_dump_rsv(struct enc28j60_net *priv, const char *msg, u16 pk_ptr, int len, u16 sts) { printk(KERN_DEBUG DRV_NAME ": %s - NextPk: 0x%04x - RSV:\n", msg, pk_ptr); printk(KERN_DEBUG DRV_NAME ": ByteCount: %d, DribbleNibble: %d\n", len, RSV_GETBIT(sts, RSV_DRIBBLENIBBLE)); printk(KERN_DEBUG DRV_NAME ": RxOK: %d, CRCErr:%d, LenChkErr: %d," " LenOutOfRange: %d\n", RSV_GETBIT(sts, RSV_RXOK), RSV_GETBIT(sts, RSV_CRCERROR), RSV_GETBIT(sts, RSV_LENCHECKERR), RSV_GETBIT(sts, RSV_LENOUTOFRANGE)); printk(KERN_DEBUG DRV_NAME ": Multicast: %d, Broadcast: %d, " "LongDropEvent: %d, CarrierEvent: %d\n", RSV_GETBIT(sts, RSV_RXMULTICAST), RSV_GETBIT(sts, RSV_RXBROADCAST), RSV_GETBIT(sts, RSV_RXLONGEVDROPEV), RSV_GETBIT(sts, RSV_CARRIEREV)); printk(KERN_DEBUG DRV_NAME ": ControlFrame: %d, PauseFrame: %d," " UnknownOp: %d, VLanTagFrame: %d\n", RSV_GETBIT(sts, RSV_RXCONTROLFRAME), RSV_GETBIT(sts, RSV_RXPAUSEFRAME), RSV_GETBIT(sts, RSV_RXUNKNOWNOPCODE), RSV_GETBIT(sts, RSV_RXTYPEVLAN)); } static void dump_packet(const char *msg, int len, const char *data) { printk(KERN_DEBUG DRV_NAME ": %s - packet len:%d\n", msg, len); print_hex_dump(KERN_DEBUG, "pk data: ", DUMP_PREFIX_OFFSET, 16, 1, data, len, true); } /* * Hardware receive function. * Read the buffer memory, update the FIFO pointer to free the buffer, * check the status vector and decrement the packet counter. */ static void enc28j60_hw_rx(struct net_device *ndev) { struct enc28j60_net *priv = netdev_priv(ndev); struct sk_buff *skb = NULL; u16 erxrdpt, next_packet, rxstat; u8 rsv[RSV_SIZE]; int len; if (netif_msg_rx_status(priv)) printk(KERN_DEBUG DRV_NAME ": RX pk_addr:0x%04x\n", priv->next_pk_ptr); if (unlikely(priv->next_pk_ptr > RXEND_INIT)) { if (netif_msg_rx_err(priv)) dev_err(&ndev->dev, "%s() Invalid packet address!! 
0x%04x\n", __func__, priv->next_pk_ptr); /* packet address corrupted: reset RX logic */ mutex_lock(&priv->lock); nolock_reg_bfclr(priv, ECON1, ECON1_RXEN); nolock_reg_bfset(priv, ECON1, ECON1_RXRST); nolock_reg_bfclr(priv, ECON1, ECON1_RXRST); nolock_rxfifo_init(priv, RXSTART_INIT, RXEND_INIT); nolock_reg_bfclr(priv, EIR, EIR_RXERIF); nolock_reg_bfset(priv, ECON1, ECON1_RXEN); mutex_unlock(&priv->lock); ndev->stats.rx_errors++; return; } /* Read next packet pointer and rx status vector */ enc28j60_mem_read(priv, priv->next_pk_ptr, sizeof(rsv), rsv); next_packet = rsv[1]; next_packet <<= 8; next_packet |= rsv[0]; len = rsv[3]; len <<= 8; len |= rsv[2]; rxstat = rsv[5]; rxstat <<= 8; rxstat |= rsv[4]; if (netif_msg_rx_status(priv)) enc28j60_dump_rsv(priv, __func__, next_packet, len, rxstat); if (!RSV_GETBIT(rxstat, RSV_RXOK) || len > MAX_FRAMELEN) { if (netif_msg_rx_err(priv)) dev_err(&ndev->dev, "Rx Error (%04x)\n", rxstat); ndev->stats.rx_errors++; if (RSV_GETBIT(rxstat, RSV_CRCERROR)) ndev->stats.rx_crc_errors++; if (RSV_GETBIT(rxstat, RSV_LENCHECKERR)) ndev->stats.rx_frame_errors++; if (len > MAX_FRAMELEN) ndev->stats.rx_over_errors++; } else { skb = dev_alloc_skb(len + NET_IP_ALIGN); if (!skb) { if (netif_msg_rx_err(priv)) dev_err(&ndev->dev, "out of memory for Rx'd frame\n"); ndev->stats.rx_dropped++; } else { skb->dev = ndev; skb_reserve(skb, NET_IP_ALIGN); /* copy the packet from the receive buffer */ enc28j60_mem_read(priv, rx_packet_start(priv->next_pk_ptr), len, skb_put(skb, len)); if (netif_msg_pktdata(priv)) dump_packet(__func__, skb->len, skb->data); skb->protocol = eth_type_trans(skb, ndev); /* update statistics */ ndev->stats.rx_packets++; ndev->stats.rx_bytes += len; netif_rx_ni(skb); } } /* * Move the RX read pointer to the start of the next * received packet. 
* This frees the memory we just read out */ erxrdpt = erxrdpt_workaround(next_packet, RXSTART_INIT, RXEND_INIT); if (netif_msg_hw(priv)) printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT:0x%04x\n", __func__, erxrdpt); mutex_lock(&priv->lock); nolock_regw_write(priv, ERXRDPTL, erxrdpt); #ifdef CONFIG_ENC28J60_WRITEVERIFY if (netif_msg_drv(priv)) { u16 reg; reg = nolock_regw_read(priv, ERXRDPTL); if (reg != erxrdpt) printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT verify " "error (0x%04x - 0x%04x)\n", __func__, reg, erxrdpt); } #endif priv->next_pk_ptr = next_packet; /* we are done with this packet, decrement the packet counter */ nolock_reg_bfset(priv, ECON2, ECON2_PKTDEC); mutex_unlock(&priv->lock); } /* * Calculate free space in RxFIFO */ static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv) { int epkcnt, erxst, erxnd, erxwr, erxrd; int free_space; mutex_lock(&priv->lock); epkcnt = nolock_regb_read(priv, EPKTCNT); if (epkcnt >= 255) free_space = -1; else { erxst = nolock_regw_read(priv, ERXSTL); erxnd = nolock_regw_read(priv, ERXNDL); erxwr = nolock_regw_read(priv, ERXWRPTL); erxrd = nolock_regw_read(priv, ERXRDPTL); if (erxwr > erxrd) free_space = (erxnd - erxst) - (erxwr - erxrd); else if (erxwr == erxrd) free_space = (erxnd - erxst); else free_space = erxrd - erxwr - 1; } mutex_unlock(&priv->lock); if (netif_msg_rx_status(priv)) printk(KERN_DEBUG DRV_NAME ": %s() free_space = %d\n", __func__, free_space); return free_space; } /* * Access the PHY to determine link status */ static void enc28j60_check_link_status(struct net_device *ndev) { struct enc28j60_net *priv = netdev_priv(ndev); u16 reg; int duplex; reg = enc28j60_phy_read(priv, PHSTAT2); if (netif_msg_hw(priv)) printk(KERN_DEBUG DRV_NAME ": %s() PHSTAT1: %04x, " "PHSTAT2: %04x\n", __func__, enc28j60_phy_read(priv, PHSTAT1), reg); duplex = reg & PHSTAT2_DPXSTAT; if (reg & PHSTAT2_LSTAT) { netif_carrier_on(ndev); if (netif_msg_ifup(priv)) dev_info(&ndev->dev, "link up - %s\n", duplex ? 
"Full duplex" : "Half duplex"); } else { if (netif_msg_ifdown(priv)) dev_info(&ndev->dev, "link down\n"); netif_carrier_off(ndev); } } static void enc28j60_tx_clear(struct net_device *ndev, bool err) { struct enc28j60_net *priv = netdev_priv(ndev); if (err) ndev->stats.tx_errors++; else ndev->stats.tx_packets++; if (priv->tx_skb) { if (!err) ndev->stats.tx_bytes += priv->tx_skb->len; dev_kfree_skb(priv->tx_skb); priv->tx_skb = NULL; } locked_reg_bfclr(priv, ECON1, ECON1_TXRTS); netif_wake_queue(ndev); } /* * RX handler * ignore PKTIF because is unreliable! (look at the errata datasheet) * check EPKTCNT is the suggested workaround. * We don't need to clear interrupt flag, automatically done when * enc28j60_hw_rx() decrements the packet counter. * Returns how many packet processed. */ static int enc28j60_rx_interrupt(struct net_device *ndev) { struct enc28j60_net *priv = netdev_priv(ndev); int pk_counter, ret; pk_counter = locked_regb_read(priv, EPKTCNT); if (pk_counter && netif_msg_intr(priv)) printk(KERN_DEBUG DRV_NAME ": intRX, pk_cnt: %d\n", pk_counter); if (pk_counter > priv->max_pk_counter) { /* update statistics */ priv->max_pk_counter = pk_counter; if (netif_msg_rx_status(priv) && priv->max_pk_counter > 1) printk(KERN_DEBUG DRV_NAME ": RX max_pk_cnt: %d\n", priv->max_pk_counter); } ret = pk_counter; while (pk_counter-- > 0) enc28j60_hw_rx(ndev); return ret; } static void enc28j60_irq_work_handler(struct work_struct *work) { struct enc28j60_net *priv = container_of(work, struct enc28j60_net, irq_work); struct net_device *ndev = priv->netdev; int intflags, loop; if (netif_msg_intr(priv)) printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); /* disable further interrupts */ locked_reg_bfclr(priv, EIE, EIE_INTIE); do { loop = 0; intflags = locked_regb_read(priv, EIR); /* DMA interrupt handler (not currently used) */ if ((intflags & EIR_DMAIF) != 0) { loop++; if (netif_msg_intr(priv)) printk(KERN_DEBUG DRV_NAME ": intDMA(%d)\n", loop); locked_reg_bfclr(priv, 
EIR, EIR_DMAIF); } /* LINK changed handler */ if ((intflags & EIR_LINKIF) != 0) { loop++; if (netif_msg_intr(priv)) printk(KERN_DEBUG DRV_NAME ": intLINK(%d)\n", loop); enc28j60_check_link_status(ndev); /* read PHIR to clear the flag */ enc28j60_phy_read(priv, PHIR); } /* TX complete handler */ if ((intflags & EIR_TXIF) != 0) { bool err = false; loop++; if (netif_msg_intr(priv)) printk(KERN_DEBUG DRV_NAME ": intTX(%d)\n", loop); priv->tx_retry_count = 0; if (locked_regb_read(priv, ESTAT) & ESTAT_TXABRT) { if (netif_msg_tx_err(priv)) dev_err(&ndev->dev, "Tx Error (aborted)\n"); err = true; } if (netif_msg_tx_done(priv)) { u8 tsv[TSV_SIZE]; enc28j60_read_tsv(priv, tsv); enc28j60_dump_tsv(priv, "Tx Done", tsv); } enc28j60_tx_clear(ndev, err); locked_reg_bfclr(priv, EIR, EIR_TXIF); } /* TX Error handler */ if ((intflags & EIR_TXERIF) != 0) { u8 tsv[TSV_SIZE]; loop++; if (netif_msg_intr(priv)) printk(KERN_DEBUG DRV_NAME ": intTXErr(%d)\n", loop); locked_reg_bfclr(priv, ECON1, ECON1_TXRTS); enc28j60_read_tsv(priv, tsv); if (netif_msg_tx_err(priv)) enc28j60_dump_tsv(priv, "Tx Error", tsv); /* Reset TX logic */ mutex_lock(&priv->lock); nolock_reg_bfset(priv, ECON1, ECON1_TXRST); nolock_reg_bfclr(priv, ECON1, ECON1_TXRST); nolock_txfifo_init(priv, TXSTART_INIT, TXEND_INIT); mutex_unlock(&priv->lock); /* Transmit Late collision check for retransmit */ if (TSV_GETBIT(tsv, TSV_TXLATECOLLISION)) { if (netif_msg_tx_err(priv)) printk(KERN_DEBUG DRV_NAME ": LateCollision TXErr (%d)\n", priv->tx_retry_count); if (priv->tx_retry_count++ < MAX_TX_RETRYCOUNT) locked_reg_bfset(priv, ECON1, ECON1_TXRTS); else enc28j60_tx_clear(ndev, true); } else enc28j60_tx_clear(ndev, true); locked_reg_bfclr(priv, EIR, EIR_TXERIF); } /* RX Error handler */ if ((intflags & EIR_RXERIF) != 0) { loop++; if (netif_msg_intr(priv)) printk(KERN_DEBUG DRV_NAME ": intRXErr(%d)\n", loop); /* Check free FIFO space to flag RX overrun */ if (enc28j60_get_free_rxfifo(priv) <= 0) { if (netif_msg_rx_err(priv)) 
printk(KERN_DEBUG DRV_NAME ": RX Overrun\n"); ndev->stats.rx_dropped++; } locked_reg_bfclr(priv, EIR, EIR_RXERIF); } /* RX handler */ if (enc28j60_rx_interrupt(ndev)) loop++; } while (loop); /* re-enable interrupts */ locked_reg_bfset(priv, EIE, EIE_INTIE); if (netif_msg_intr(priv)) printk(KERN_DEBUG DRV_NAME ": %s() exit\n", __func__); } /* * Hardware transmit function. * Fill the buffer memory and send the contents of the transmit buffer * onto the network */ static void enc28j60_hw_tx(struct enc28j60_net *priv) { if (netif_msg_tx_queued(priv)) printk(KERN_DEBUG DRV_NAME ": Tx Packet Len:%d\n", priv->tx_skb->len); if (netif_msg_pktdata(priv)) dump_packet(__func__, priv->tx_skb->len, priv->tx_skb->data); enc28j60_packet_write(priv, priv->tx_skb->len, priv->tx_skb->data); #ifdef CONFIG_ENC28J60_WRITEVERIFY /* readback and verify written data */ if (netif_msg_drv(priv)) { int test_len, k; u8 test_buf[64]; /* limit the test to the first 64 bytes */ int okflag; test_len = priv->tx_skb->len; if (test_len > sizeof(test_buf)) test_len = sizeof(test_buf); /* + 1 to skip control byte */ enc28j60_mem_read(priv, TXSTART_INIT + 1, test_len, test_buf); okflag = 1; for (k = 0; k < test_len; k++) { if (priv->tx_skb->data[k] != test_buf[k]) { printk(KERN_DEBUG DRV_NAME ": Error, %d location differ: " "0x%02x-0x%02x\n", k, priv->tx_skb->data[k], test_buf[k]); okflag = 0; } } if (!okflag) printk(KERN_DEBUG DRV_NAME ": Tx write buffer, " "verify ERROR!\n"); } #endif /* set TX request flag */ locked_reg_bfset(priv, ECON1, ECON1_TXRTS); } static netdev_tx_t enc28j60_send_packet(struct sk_buff *skb, struct net_device *dev) { struct enc28j60_net *priv = netdev_priv(dev); if (netif_msg_tx_queued(priv)) printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); /* If some error occurs while trying to transmit this * packet, you should return '1' from this function. 
* In such a case you _may not_ do anything to the * SKB, it is still owned by the network queueing * layer when an error is returned. This means you * may not modify any SKB fields, you may not free * the SKB, etc. */ netif_stop_queue(dev); /* Remember the skb for deferred processing */ priv->tx_skb = skb; schedule_work(&priv->tx_work); return NETDEV_TX_OK; } static void enc28j60_tx_work_handler(struct work_struct *work) { struct enc28j60_net *priv = container_of(work, struct enc28j60_net, tx_work); /* actual delivery of data */ enc28j60_hw_tx(priv); } static irqreturn_t enc28j60_irq(int irq, void *dev_id) { struct enc28j60_net *priv = dev_id; /* * Can't do anything in interrupt context because we need to * block (spi_sync() is blocking) so fire of the interrupt * handling workqueue. * Remember that we access enc28j60 registers through SPI bus * via spi_sync() call. */ schedule_work(&priv->irq_work); return IRQ_HANDLED; } static void enc28j60_tx_timeout(struct net_device *ndev) { struct enc28j60_net *priv = netdev_priv(ndev); if (netif_msg_timer(priv)) dev_err(&ndev->dev, DRV_NAME " tx timeout\n"); ndev->stats.tx_errors++; /* can't restart safely under softirq */ schedule_work(&priv->restart_work); } /* * Open/initialize the board. This is called (in the current kernel) * sometime after booting when the 'ifconfig' program is run. * * This routine should set everything up anew at each open, even * registers that "should" only need to be set once at boot, so that * there is non-reboot way to recover if something goes wrong. 
*/ static int enc28j60_net_open(struct net_device *dev) { struct enc28j60_net *priv = netdev_priv(dev); if (netif_msg_drv(priv)) printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); if (!is_valid_ether_addr(dev->dev_addr)) { if (netif_msg_ifup(priv)) dev_err(&dev->dev, "invalid MAC address %pM\n", dev->dev_addr); return -EADDRNOTAVAIL; } /* Reset the hardware here (and take it out of low power mode) */ enc28j60_lowpower(priv, false); enc28j60_hw_disable(priv); if (!enc28j60_hw_init(priv)) { if (netif_msg_ifup(priv)) dev_err(&dev->dev, "hw_reset() failed\n"); return -EINVAL; } /* Update the MAC address (in case user has changed it) */ enc28j60_set_hw_macaddr(dev); /* Enable interrupts */ enc28j60_hw_enable(priv); /* check link status */ enc28j60_check_link_status(dev); /* We are now ready to accept transmit requests from * the queueing layer of the networking. */ netif_start_queue(dev); return 0; } /* The inverse routine to net_open(). */ static int enc28j60_net_close(struct net_device *dev) { struct enc28j60_net *priv = netdev_priv(dev); if (netif_msg_drv(priv)) printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); enc28j60_hw_disable(priv); enc28j60_lowpower(priv, true); netif_stop_queue(dev); return 0; } /* * Set or clear the multicast filter for this adapter * num_addrs == -1 Promiscuous mode, receive all packets * num_addrs == 0 Normal mode, filter out multicast packets * num_addrs > 0 Multicast mode, receive normal and MC packets */ static void enc28j60_set_multicast_list(struct net_device *dev) { struct enc28j60_net *priv = netdev_priv(dev); int oldfilter = priv->rxfilter; if (dev->flags & IFF_PROMISC) { if (netif_msg_link(priv)) dev_info(&dev->dev, "promiscuous mode\n"); priv->rxfilter = RXFILTER_PROMISC; } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) { if (netif_msg_link(priv)) dev_info(&dev->dev, "%smulticast mode\n", (dev->flags & IFF_ALLMULTI) ? 
"all-" : ""); priv->rxfilter = RXFILTER_MULTI; } else { if (netif_msg_link(priv)) dev_info(&dev->dev, "normal mode\n"); priv->rxfilter = RXFILTER_NORMAL; } if (oldfilter != priv->rxfilter) schedule_work(&priv->setrx_work); } static void enc28j60_setrx_work_handler(struct work_struct *work) { struct enc28j60_net *priv = container_of(work, struct enc28j60_net, setrx_work); if (priv->rxfilter == RXFILTER_PROMISC) { if (netif_msg_drv(priv)) printk(KERN_DEBUG DRV_NAME ": promiscuous mode\n"); locked_regb_write(priv, ERXFCON, 0x00); } else if (priv->rxfilter == RXFILTER_MULTI) { if (netif_msg_drv(priv)) printk(KERN_DEBUG DRV_NAME ": multicast mode\n"); locked_regb_write(priv, ERXFCON, ERXFCON_UCEN | ERXFCON_CRCEN | ERXFCON_BCEN | ERXFCON_MCEN); } else { if (netif_msg_drv(priv)) printk(KERN_DEBUG DRV_NAME ": normal mode\n"); locked_regb_write(priv, ERXFCON, ERXFCON_UCEN | ERXFCON_CRCEN | ERXFCON_BCEN); } } static void enc28j60_restart_work_handler(struct work_struct *work) { struct enc28j60_net *priv = container_of(work, struct enc28j60_net, restart_work); struct net_device *ndev = priv->netdev; int ret; rtnl_lock(); if (netif_running(ndev)) { enc28j60_net_close(ndev); ret = enc28j60_net_open(ndev); if (unlikely(ret)) { dev_info(&ndev->dev, " could not restart %d\n", ret); dev_close(ndev); } } rtnl_unlock(); } /* ......................... ETHTOOL SUPPORT ........................... */ static void enc28j60_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); strlcpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info)); } static int enc28j60_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct enc28j60_net *priv = netdev_priv(dev); cmd->transceiver = XCVR_INTERNAL; cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_TP; ethtool_cmd_speed_set(cmd, SPEED_10); cmd->duplex = priv->full_duplex ? 
DUPLEX_FULL : DUPLEX_HALF; cmd->port = PORT_TP; cmd->autoneg = AUTONEG_DISABLE; return 0; } static int enc28j60_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { return enc28j60_setlink(dev, cmd->autoneg, ethtool_cmd_speed(cmd), cmd->duplex); } static u32 enc28j60_get_msglevel(struct net_device *dev) { struct enc28j60_net *priv = netdev_priv(dev); return priv->msg_enable; } static void enc28j60_set_msglevel(struct net_device *dev, u32 val) { struct enc28j60_net *priv = netdev_priv(dev); priv->msg_enable = val; } static const struct ethtool_ops enc28j60_ethtool_ops = { .get_settings = enc28j60_get_settings, .set_settings = enc28j60_set_settings, .get_drvinfo = enc28j60_get_drvinfo, .get_msglevel = enc28j60_get_msglevel, .set_msglevel = enc28j60_set_msglevel, }; static int enc28j60_chipset_init(struct net_device *dev) { struct enc28j60_net *priv = netdev_priv(dev); return enc28j60_hw_init(priv); } static const struct net_device_ops enc28j60_netdev_ops = { .ndo_open = enc28j60_net_open, .ndo_stop = enc28j60_net_close, .ndo_start_xmit = enc28j60_send_packet, .ndo_set_multicast_list = enc28j60_set_multicast_list, .ndo_set_mac_address = enc28j60_set_mac_address, .ndo_tx_timeout = enc28j60_tx_timeout, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, }; static int __devinit enc28j60_probe(struct spi_device *spi) { struct net_device *dev; struct enc28j60_net *priv; int ret = 0; if (netif_msg_drv(&debug)) dev_info(&spi->dev, DRV_NAME " Ethernet driver %s loaded\n", DRV_VERSION); dev = alloc_etherdev(sizeof(struct enc28j60_net)); if (!dev) { if (netif_msg_drv(&debug)) dev_err(&spi->dev, DRV_NAME ": unable to alloc new ethernet\n"); ret = -ENOMEM; goto error_alloc; } priv = netdev_priv(dev); priv->netdev = dev; /* priv to netdev reference */ priv->spi = spi; /* priv to spi reference */ priv->msg_enable = netif_msg_init(debug.msg_enable, ENC28J60_MSG_DEFAULT); mutex_init(&priv->lock); INIT_WORK(&priv->tx_work, enc28j60_tx_work_handler); 
INIT_WORK(&priv->setrx_work, enc28j60_setrx_work_handler); INIT_WORK(&priv->irq_work, enc28j60_irq_work_handler); INIT_WORK(&priv->restart_work, enc28j60_restart_work_handler); dev_set_drvdata(&spi->dev, priv); /* spi to priv reference */ SET_NETDEV_DEV(dev, &spi->dev); if (!enc28j60_chipset_init(dev)) { if (netif_msg_probe(priv)) dev_info(&spi->dev, DRV_NAME " chip not found\n"); ret = -EIO; goto error_irq; } random_ether_addr(dev->dev_addr); enc28j60_set_hw_macaddr(dev); /* Board setup must set the relevant edge trigger type; * level triggers won't currently work. */ ret = request_irq(spi->irq, enc28j60_irq, 0, DRV_NAME, priv); if (ret < 0) { if (netif_msg_probe(priv)) dev_err(&spi->dev, DRV_NAME ": request irq %d failed " "(ret = %d)\n", spi->irq, ret); goto error_irq; } dev->if_port = IF_PORT_10BASET; dev->irq = spi->irq; dev->netdev_ops = &enc28j60_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; SET_ETHTOOL_OPS(dev, &enc28j60_ethtool_ops); enc28j60_lowpower(priv, true); ret = register_netdev(dev); if (ret) { if (netif_msg_probe(priv)) dev_err(&spi->dev, "register netdev " DRV_NAME " failed (ret = %d)\n", ret); goto error_register; } dev_info(&dev->dev, DRV_NAME " driver registered\n"); return 0; error_register: free_irq(spi->irq, priv); error_irq: free_netdev(dev); error_alloc: return ret; } static int __devexit enc28j60_remove(struct spi_device *spi) { struct enc28j60_net *priv = dev_get_drvdata(&spi->dev); if (netif_msg_drv(priv)) printk(KERN_DEBUG DRV_NAME ": remove\n"); unregister_netdev(priv->netdev); free_irq(spi->irq, priv); free_netdev(priv->netdev); return 0; } static struct spi_driver enc28j60_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, .probe = enc28j60_probe, .remove = __devexit_p(enc28j60_remove), }; static int __init enc28j60_init(void) { msec20_to_jiffies = msecs_to_jiffies(20); return spi_register_driver(&enc28j60_driver); } module_init(enc28j60_init); static void __exit enc28j60_exit(void) { 
spi_unregister_driver(&enc28j60_driver); } module_exit(enc28j60_exit); MODULE_DESCRIPTION(DRV_NAME " ethernet driver"); MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>"); MODULE_LICENSE("GPL"); module_param_named(debug, debug.msg_enable, int, 0); MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., ffff=all)"); MODULE_ALIAS("spi:" DRV_NAME);
gpl-2.0
fredvj/kernel_huawei_u8860
arch/m68k/kernel/signal_mm.c
2892
27701
/* * linux/arch/m68k/kernel/signal.c * * Copyright (C) 1991, 1992 Linus Torvalds * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ /* * Linux/m68k support by Hamish Macdonald * * 68060 fixes by Jesper Skov * * 1997-12-01 Modified for POSIX.1b signals by Andreas Schwab * * mathemu support by Roman Zippel * (Note: fpstate in the signal context is completely ignored for the emulator * and the internal floating point format is put on stack) */ /* * ++roman (07/09/96): implemented signal stacks (specially for tosemu on * Atari :-) Current limitation: Only one sigstack can be active at one time. * If a second signal with SA_ONSTACK set arrives while working on a sigstack, * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested * signal handlers! */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/syscalls.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/highuid.h> #include <linux/personality.h> #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/module.h> #include <asm/setup.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/traps.h> #include <asm/ucontext.h> #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) static const int frame_extra_sizes[16] = { [1] = -1, /* sizeof(((struct frame *)0)->un.fmt1), */ [2] = sizeof(((struct frame *)0)->un.fmt2), [3] = sizeof(((struct frame *)0)->un.fmt3), [4] = sizeof(((struct frame *)0)->un.fmt4), [5] = -1, /* sizeof(((struct frame *)0)->un.fmt5), */ [6] = -1, /* sizeof(((struct frame *)0)->un.fmt6), */ [7] = sizeof(((struct frame *)0)->un.fmt7), [8] = -1, /* sizeof(((struct frame *)0)->un.fmt8), */ [9] = sizeof(((struct frame *)0)->un.fmt9), [10] = sizeof(((struct frame *)0)->un.fmta), [11] = 
sizeof(((struct frame *)0)->un.fmtb), [12] = -1, /* sizeof(((struct frame *)0)->un.fmtc), */ [13] = -1, /* sizeof(((struct frame *)0)->un.fmtd), */ [14] = -1, /* sizeof(((struct frame *)0)->un.fmte), */ [15] = -1, /* sizeof(((struct frame *)0)->un.fmtf), */ }; int handle_kernel_fault(struct pt_regs *regs) { const struct exception_table_entry *fixup; struct pt_regs *tregs; /* Are we prepared to handle this kernel fault? */ fixup = search_exception_tables(regs->pc); if (!fixup) return 0; /* Create a new four word stack frame, discarding the old one. */ regs->stkadj = frame_extra_sizes[regs->format]; tregs = (struct pt_regs *)((long)regs + regs->stkadj); tregs->vector = regs->vector; tregs->format = 0; tregs->pc = fixup->fixup; tregs->sr = regs->sr; return 1; } /* * Atomically swap in the new signal mask, and wait for a signal. */ asmlinkage int sys_sigsuspend(int unused0, int unused1, old_sigset_t mask) { mask &= _BLOCKABLE; spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; siginitset(&current->blocked, mask); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); current->state = TASK_INTERRUPTIBLE; schedule(); set_restore_sigmask(); return -ERESTARTNOHAND; } asmlinkage int sys_sigaction(int sig, const struct old_sigaction __user *act, struct old_sigaction __user *oact) { struct k_sigaction new_ka, old_ka; int ret; if (act) { old_sigset_t mask; if (!access_ok(VERIFY_READ, act, sizeof(*act)) || __get_user(new_ka.sa.sa_handler, &act->sa_handler) || __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || __get_user(new_ka.sa.sa_flags, &act->sa_flags) || __get_user(mask, &act->sa_mask)) return -EFAULT; siginitset(&new_ka.sa.sa_mask, mask); } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); if (!ret && oact) { if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) return -EFAULT; } return ret; } asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss) { return do_sigaltstack(uss, uoss, rdusp()); } /* * Do a signal return; undo the signal stack. * * Keep the return code on the stack quadword aligned! * That makes the cache flush below easier. */ struct sigframe { char __user *pretcode; int sig; int code; struct sigcontext __user *psc; char retcode[8]; unsigned long extramask[_NSIG_WORDS-1]; struct sigcontext sc; }; struct rt_sigframe { char __user *pretcode; int sig; struct siginfo __user *pinfo; void __user *puc; char retcode[8]; struct siginfo info; struct ucontext uc; }; static unsigned char fpu_version; /* version number of fpu, set by setup_frame */ static inline int restore_fpu_state(struct sigcontext *sc) { int err = 1; if (FPU_IS_EMU) { /* restore registers */ memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12); memcpy(current->thread.fp, sc->sc_fpregs, 24); return 0; } if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) { /* Verify the frame format. 
*/ if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version)) goto out; if (CPU_IS_020_OR_030) { if (m68k_fputype & FPU_68881 && !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4)) goto out; if (m68k_fputype & FPU_68882 && !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4)) goto out; } else if (CPU_IS_040) { if (!(sc->sc_fpstate[1] == 0x00 || sc->sc_fpstate[1] == 0x28 || sc->sc_fpstate[1] == 0x60)) goto out; } else if (CPU_IS_060) { if (!(sc->sc_fpstate[3] == 0x00 || sc->sc_fpstate[3] == 0x60 || sc->sc_fpstate[3] == 0xe0)) goto out; } else goto out; __asm__ volatile (".chip 68k/68881\n\t" "fmovemx %0,%%fp0-%%fp1\n\t" "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t" ".chip 68k" : /* no outputs */ : "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl)); } __asm__ volatile (".chip 68k/68881\n\t" "frestore %0\n\t" ".chip 68k" : : "m" (*sc->sc_fpstate)); err = 0; out: return err; } #define FPCONTEXT_SIZE 216 #define uc_fpstate uc_filler[0] #define uc_formatvec uc_filler[FPCONTEXT_SIZE/4] #define uc_extra uc_filler[FPCONTEXT_SIZE/4+1] static inline int rt_restore_fpu_state(struct ucontext __user *uc) { unsigned char fpstate[FPCONTEXT_SIZE]; int context_size = CPU_IS_060 ? 8 : 0; fpregset_t fpregs; int err = 1; if (FPU_IS_EMU) { /* restore fpu control register */ if (__copy_from_user(current->thread.fpcntl, uc->uc_mcontext.fpregs.f_fpcntl, 12)) goto out; /* restore all other fpu register */ if (__copy_from_user(current->thread.fp, uc->uc_mcontext.fpregs.f_fpregs, 96)) goto out; return 0; } if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate)) goto out; if (CPU_IS_060 ? fpstate[2] : fpstate[0]) { if (!CPU_IS_060) context_size = fpstate[1]; /* Verify the frame format. 
*/ if (!CPU_IS_060 && (fpstate[0] != fpu_version)) goto out; if (CPU_IS_020_OR_030) { if (m68k_fputype & FPU_68881 && !(context_size == 0x18 || context_size == 0xb4)) goto out; if (m68k_fputype & FPU_68882 && !(context_size == 0x38 || context_size == 0xd4)) goto out; } else if (CPU_IS_040) { if (!(context_size == 0x00 || context_size == 0x28 || context_size == 0x60)) goto out; } else if (CPU_IS_060) { if (!(fpstate[3] == 0x00 || fpstate[3] == 0x60 || fpstate[3] == 0xe0)) goto out; } else goto out; if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs, sizeof(fpregs))) goto out; __asm__ volatile (".chip 68k/68881\n\t" "fmovemx %0,%%fp0-%%fp7\n\t" "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t" ".chip 68k" : /* no outputs */ : "m" (*fpregs.f_fpregs), "m" (*fpregs.f_fpcntl)); } if (context_size && __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1, context_size)) goto out; __asm__ volatile (".chip 68k/68881\n\t" "frestore %0\n\t" ".chip 68k" : : "m" (*fpstate)); err = 0; out: return err; } static int mangle_kernel_stack(struct pt_regs *regs, int formatvec, void __user *fp) { int fsize = frame_extra_sizes[formatvec >> 12]; if (fsize < 0) { /* * user process trying to return with weird frame format */ #ifdef DEBUG printk("user process returning with weird frame format\n"); #endif return 1; } if (!fsize) { regs->format = formatvec >> 12; regs->vector = formatvec & 0xfff; } else { struct switch_stack *sw = (struct switch_stack *)regs - 1; unsigned long buf[fsize / 2]; /* yes, twice as much */ /* that'll make sure that expansion won't crap over data */ if (copy_from_user(buf + fsize / 4, fp, fsize)) return 1; /* point of no return */ regs->format = formatvec >> 12; regs->vector = formatvec & 0xfff; #define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack)) __asm__ __volatile__ (" movel %0,%/a0\n\t" " subl %1,%/a0\n\t" /* make room on stack */ " movel %/a0,%/sp\n\t" /* set stack pointer */ /* move switch_stack and pt_regs */ "1: movel %0@+,%/a0@+\n\t" " 
dbra %2,1b\n\t" " lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */ " lsrl #2,%1\n\t" " subql #1,%1\n\t" /* copy to the gap we'd made */ "2: movel %4@+,%/a0@+\n\t" " dbra %1,2b\n\t" " bral ret_from_signal\n" : /* no outputs, it doesn't ever return */ : "a" (sw), "d" (fsize), "d" (frame_offset/4-1), "n" (frame_offset), "a" (buf + fsize/4) : "a0"); #undef frame_offset } return 0; } static inline int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp) { int formatvec; struct sigcontext context; int err; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; /* get previous context */ if (copy_from_user(&context, usc, sizeof(context))) goto badframe; /* restore passed registers */ regs->d0 = context.sc_d0; regs->d1 = context.sc_d1; regs->a0 = context.sc_a0; regs->a1 = context.sc_a1; regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff); regs->pc = context.sc_pc; regs->orig_d0 = -1; /* disable syscall checks */ wrusp(context.sc_usp); formatvec = context.sc_formatvec; err = restore_fpu_state(&context); if (err || mangle_kernel_stack(regs, formatvec, fp)) goto badframe; return 0; badframe: return 1; } static inline int rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw, struct ucontext __user *uc) { int temp; greg_t __user *gregs = uc->uc_mcontext.gregs; unsigned long usp; int err; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; err = __get_user(temp, &uc->uc_mcontext.version); if (temp != MCONTEXT_VERSION) goto badframe; /* restore passed registers */ err |= __get_user(regs->d0, &gregs[0]); err |= __get_user(regs->d1, &gregs[1]); err |= __get_user(regs->d2, &gregs[2]); err |= __get_user(regs->d3, &gregs[3]); err |= __get_user(regs->d4, &gregs[4]); err |= __get_user(regs->d5, &gregs[5]); err |= __get_user(sw->d6, &gregs[6]); err |= __get_user(sw->d7, &gregs[7]); 
err |= __get_user(regs->a0, &gregs[8]); err |= __get_user(regs->a1, &gregs[9]); err |= __get_user(regs->a2, &gregs[10]); err |= __get_user(sw->a3, &gregs[11]); err |= __get_user(sw->a4, &gregs[12]); err |= __get_user(sw->a5, &gregs[13]); err |= __get_user(sw->a6, &gregs[14]); err |= __get_user(usp, &gregs[15]); wrusp(usp); err |= __get_user(regs->pc, &gregs[16]); err |= __get_user(temp, &gregs[17]); regs->sr = (regs->sr & 0xff00) | (temp & 0xff); regs->orig_d0 = -1; /* disable syscall checks */ err |= __get_user(temp, &uc->uc_formatvec); err |= rt_restore_fpu_state(uc); if (err || do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT) goto badframe; if (mangle_kernel_stack(regs, temp, &uc->uc_extra)) goto badframe; return 0; badframe: return 1; } asmlinkage int do_sigreturn(unsigned long __unused) { struct switch_stack *sw = (struct switch_stack *) &__unused; struct pt_regs *regs = (struct pt_regs *) (sw + 1); unsigned long usp = rdusp(); struct sigframe __user *frame = (struct sigframe __user *)(usp - 4); sigset_t set; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__get_user(set.sig[0], &frame->sc.sc_mask) || (_NSIG_WORDS > 1 && __copy_from_user(&set.sig[1], &frame->extramask, sizeof(frame->extramask)))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); current->blocked = set; recalc_sigpending(); if (restore_sigcontext(regs, &frame->sc, frame + 1)) goto badframe; return regs->d0; badframe: force_sig(SIGSEGV, current); return 0; } asmlinkage int do_rt_sigreturn(unsigned long __unused) { struct switch_stack *sw = (struct switch_stack *) &__unused; struct pt_regs *regs = (struct pt_regs *) (sw + 1); unsigned long usp = rdusp(); struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4); sigset_t set; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); current->blocked = set; recalc_sigpending(); if 
(rt_restore_ucontext(regs, sw, &frame->uc)) goto badframe; return regs->d0; badframe: force_sig(SIGSEGV, current); return 0; } /* * Set up a signal frame. */ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs) { if (FPU_IS_EMU) { /* save registers */ memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12); memcpy(sc->sc_fpregs, current->thread.fp, 24); return; } __asm__ volatile (".chip 68k/68881\n\t" "fsave %0\n\t" ".chip 68k" : : "m" (*sc->sc_fpstate) : "memory"); if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) { fpu_version = sc->sc_fpstate[0]; if (CPU_IS_020_OR_030 && regs->vector >= (VEC_FPBRUC * 4) && regs->vector <= (VEC_FPNAN * 4)) { /* Clear pending exception in 68882 idle frame */ if (*(unsigned short *) sc->sc_fpstate == 0x1f38) sc->sc_fpstate[0x38] |= 1 << 3; } __asm__ volatile (".chip 68k/68881\n\t" "fmovemx %%fp0-%%fp1,%0\n\t" "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t" ".chip 68k" : "=m" (*sc->sc_fpregs), "=m" (*sc->sc_fpcntl) : /* no inputs */ : "memory"); } } static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs) { unsigned char fpstate[FPCONTEXT_SIZE]; int context_size = CPU_IS_060 ? 8 : 0; int err = 0; if (FPU_IS_EMU) { /* save fpu control register */ err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl, current->thread.fpcntl, 12); /* save all other fpu register */ err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs, current->thread.fp, 96); return err; } __asm__ volatile (".chip 68k/68881\n\t" "fsave %0\n\t" ".chip 68k" : : "m" (*fpstate) : "memory"); err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate); if (CPU_IS_060 ? 
fpstate[2] : fpstate[0]) { fpregset_t fpregs; if (!CPU_IS_060) context_size = fpstate[1]; fpu_version = fpstate[0]; if (CPU_IS_020_OR_030 && regs->vector >= (VEC_FPBRUC * 4) && regs->vector <= (VEC_FPNAN * 4)) { /* Clear pending exception in 68882 idle frame */ if (*(unsigned short *) fpstate == 0x1f38) fpstate[0x38] |= 1 << 3; } __asm__ volatile (".chip 68k/68881\n\t" "fmovemx %%fp0-%%fp7,%0\n\t" "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t" ".chip 68k" : "=m" (*fpregs.f_fpregs), "=m" (*fpregs.f_fpcntl) : /* no inputs */ : "memory"); err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs, sizeof(fpregs)); } if (context_size) err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4, context_size); return err; } static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, unsigned long mask) { sc->sc_mask = mask; sc->sc_usp = rdusp(); sc->sc_d0 = regs->d0; sc->sc_d1 = regs->d1; sc->sc_a0 = regs->a0; sc->sc_a1 = regs->a1; sc->sc_sr = regs->sr; sc->sc_pc = regs->pc; sc->sc_formatvec = regs->format << 12 | regs->vector; save_fpu_state(sc, regs); } static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs) { struct switch_stack *sw = (struct switch_stack *)regs - 1; greg_t __user *gregs = uc->uc_mcontext.gregs; int err = 0; err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version); err |= __put_user(regs->d0, &gregs[0]); err |= __put_user(regs->d1, &gregs[1]); err |= __put_user(regs->d2, &gregs[2]); err |= __put_user(regs->d3, &gregs[3]); err |= __put_user(regs->d4, &gregs[4]); err |= __put_user(regs->d5, &gregs[5]); err |= __put_user(sw->d6, &gregs[6]); err |= __put_user(sw->d7, &gregs[7]); err |= __put_user(regs->a0, &gregs[8]); err |= __put_user(regs->a1, &gregs[9]); err |= __put_user(regs->a2, &gregs[10]); err |= __put_user(sw->a3, &gregs[11]); err |= __put_user(sw->a4, &gregs[12]); err |= __put_user(sw->a5, &gregs[13]); err |= __put_user(sw->a6, &gregs[14]); err |= __put_user(rdusp(), &gregs[15]); err |= 
__put_user(regs->pc, &gregs[16]); err |= __put_user(regs->sr, &gregs[17]); err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec); err |= rt_save_fpu_state(uc, regs); return err; } static inline void push_cache (unsigned long vaddr) { /* * Using the old cache_push_v() was really a big waste. * * What we are trying to do is to flush 8 bytes to ram. * Flushing 2 cache lines of 16 bytes is much cheaper than * flushing 1 or 2 pages, as previously done in * cache_push_v(). * Jes */ if (CPU_IS_040) { unsigned long temp; __asm__ __volatile__ (".chip 68040\n\t" "nop\n\t" "ptestr (%1)\n\t" "movec %%mmusr,%0\n\t" ".chip 68k" : "=r" (temp) : "a" (vaddr)); temp &= PAGE_MASK; temp |= vaddr & ~PAGE_MASK; __asm__ __volatile__ (".chip 68040\n\t" "nop\n\t" "cpushl %%bc,(%0)\n\t" ".chip 68k" : : "a" (temp)); } else if (CPU_IS_060) { unsigned long temp; __asm__ __volatile__ (".chip 68060\n\t" "plpar (%0)\n\t" ".chip 68k" : "=a" (temp) : "0" (vaddr)); __asm__ __volatile__ (".chip 68060\n\t" "cpushl %%bc,(%0)\n\t" ".chip 68k" : : "a" (temp)); } else { /* * 68030/68020 have no writeback cache; * still need to clear icache. * Note that vaddr is guaranteed to be long word aligned. */ unsigned long temp; asm volatile ("movec %%cacr,%0" : "=r" (temp)); temp += 4; asm volatile ("movec %0,%%caar\n\t" "movec %1,%%cacr" : : "r" (vaddr), "r" (temp)); asm volatile ("movec %0,%%caar\n\t" "movec %1,%%cacr" : : "r" (vaddr + 4), "r" (temp)); } } static inline void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) { unsigned long usp; /* Default to using normal stack. */ usp = rdusp(); /* This is the X/Open sanctioned signal stack switching. 
*/ if (ka->sa.sa_flags & SA_ONSTACK) { if (!sas_ss_flags(usp)) usp = current->sas_ss_sp + current->sas_ss_size; } return (void __user *)((usp - frame_size) & -8UL); } static int setup_frame (int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs) { struct sigframe __user *frame; int fsize = frame_extra_sizes[regs->format]; struct sigcontext context; int err = 0; if (fsize < 0) { #ifdef DEBUG printk ("setup_frame: Unknown frame format %#x\n", regs->format); #endif goto give_sigsegv; } frame = get_sigframe(ka, regs, sizeof(*frame) + fsize); if (fsize) err |= copy_to_user (frame + 1, regs + 1, fsize); err |= __put_user((current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig), &frame->sig); err |= __put_user(regs->vector, &frame->code); err |= __put_user(&frame->sc, &frame->psc); if (_NSIG_WORDS > 1) err |= copy_to_user(frame->extramask, &set->sig[1], sizeof(frame->extramask)); setup_sigcontext(&context, regs, set->sig[0]); err |= copy_to_user (&frame->sc, &context, sizeof(context)); /* Set up to return from userspace. */ err |= __put_user(frame->retcode, &frame->pretcode); /* moveq #,d0; trap #0 */ err |= __put_user(0x70004e40 + (__NR_sigreturn << 16), (long __user *)(frame->retcode)); if (err) goto give_sigsegv; push_cache ((unsigned long) &frame->retcode); /* * Set up registers for signal handler. All the state we are about * to destroy is successfully copied to sigframe. */ wrusp ((unsigned long) frame); regs->pc = (unsigned long) ka->sa.sa_handler; /* * This is subtle; if we build more than one sigframe, all but the * first one will see frame format 0 and have fsize == 0, so we won't * screw stkadj. */ if (fsize) regs->stkadj = fsize; /* Prepare to skip over the extra stuff in the exception frame. 
*/ if (regs->stkadj) { struct pt_regs *tregs = (struct pt_regs *)((ulong)regs + regs->stkadj); #ifdef DEBUG printk("Performing stackadjust=%04x\n", regs->stkadj); #endif /* This must be copied with decreasing addresses to handle overlaps. */ tregs->vector = 0; tregs->format = 0; tregs->pc = regs->pc; tregs->sr = regs->sr; } return 0; give_sigsegv: force_sigsegv(sig, current); return err; } static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; int fsize = frame_extra_sizes[regs->format]; int err = 0; if (fsize < 0) { #ifdef DEBUG printk ("setup_frame: Unknown frame format %#x\n", regs->format); #endif goto give_sigsegv; } frame = get_sigframe(ka, regs, sizeof(*frame)); if (fsize) err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize); err |= __put_user((current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig), &frame->sig); err |= __put_user(&frame->info, &frame->pinfo); err |= __put_user(&frame->uc, &frame->puc); err |= copy_siginfo_to_user(&frame->info, info); /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(NULL, &frame->uc.uc_link); err |= __put_user((void __user *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(rdusp()), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= rt_setup_ucontext(&frame->uc, regs); err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set)); /* Set up to return from userspace. 
*/ err |= __put_user(frame->retcode, &frame->pretcode); #ifdef __mcoldfire__ /* movel #__NR_rt_sigreturn,d0; trap #0 */ err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0)); err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16), (long __user *)(frame->retcode + 4)); #else /* moveq #,d0; notb d0; trap #0 */ err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16), (long __user *)(frame->retcode + 0)); err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4)); #endif if (err) goto give_sigsegv; push_cache ((unsigned long) &frame->retcode); /* * Set up registers for signal handler. All the state we are about * to destroy is successfully copied to sigframe. */ wrusp ((unsigned long) frame); regs->pc = (unsigned long) ka->sa.sa_handler; /* * This is subtle; if we build more than one sigframe, all but the * first one will see frame format 0 and have fsize == 0, so we won't * screw stkadj. */ if (fsize) regs->stkadj = fsize; /* Prepare to skip over the extra stuff in the exception frame. */ if (regs->stkadj) { struct pt_regs *tregs = (struct pt_regs *)((ulong)regs + regs->stkadj); #ifdef DEBUG printk("Performing stackadjust=%04x\n", regs->stkadj); #endif /* This must be copied with decreasing addresses to handle overlaps. 
*/ tregs->vector = 0; tregs->format = 0; tregs->pc = regs->pc; tregs->sr = regs->sr; } return 0; give_sigsegv: force_sigsegv(sig, current); return err; } static inline void handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) { switch (regs->d0) { case -ERESTARTNOHAND: if (!has_handler) goto do_restart; regs->d0 = -EINTR; break; case -ERESTART_RESTARTBLOCK: if (!has_handler) { regs->d0 = __NR_restart_syscall; regs->pc -= 2; break; } regs->d0 = -EINTR; break; case -ERESTARTSYS: if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) { regs->d0 = -EINTR; break; } /* fallthrough */ case -ERESTARTNOINTR: do_restart: regs->d0 = regs->orig_d0; regs->pc -= 2; break; } } void ptrace_signal_deliver(struct pt_regs *regs, void *cookie) { if (regs->orig_d0 < 0) return; switch (regs->d0) { case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: regs->d0 = regs->orig_d0; regs->orig_d0 = -1; regs->pc -= 2; break; } } /* * OK, we're invoking a handler */ static void handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { int err; /* are we from a system call? */ if (regs->orig_d0 >= 0) /* If so, check system call restarting.. */ handle_restart(regs, ka, 1); /* set up the stack frame */ if (ka->sa.sa_flags & SA_SIGINFO) err = setup_rt_frame(sig, ka, info, oldset, regs); else err = setup_frame(sig, ka, oldset, regs); if (err) return; sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NODEFER)) sigaddset(&current->blocked,sig); recalc_sigpending(); if (test_thread_flag(TIF_DELAYED_TRACE)) { regs->sr &= ~0x8000; send_sig(SIGTRAP, current, 1); } clear_thread_flag(TIF_RESTORE_SIGMASK); } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. 
*/ asmlinkage void do_signal(struct pt_regs *regs) { siginfo_t info; struct k_sigaction ka; int signr; sigset_t *oldset; current->thread.esp0 = (unsigned long) regs; if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = &current->saved_sigmask; else oldset = &current->blocked; signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Whee! Actually deliver the signal. */ handle_signal(signr, &ka, &info, oldset, regs); return; } /* Did we come from a system call? */ if (regs->orig_d0 >= 0) /* Restart the system call - no handlers present */ handle_restart(regs, NULL, 0); /* If there's no signal to deliver, we just restore the saved mask. */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) { clear_thread_flag(TIF_RESTORE_SIGMASK); sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); } }
gpl-2.0
AOKP/kernel_samsung_manta
drivers/mtd/ubi/wl.c
3916
46168
/* * @ubi: UBI device description object * Copyright (c) International Business Machines Corp., 2006 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner */ /* * UBI wear-leveling sub-system. * * This sub-system is responsible for wear-leveling. It works in terms of * physical eraseblocks and erase counters and knows nothing about logical * eraseblocks, volumes, etc. From this sub-system's perspective all physical * eraseblocks are of two types - used and free. Used physical eraseblocks are * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function. * * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only erase counter * header. The rest of the physical eraseblock contains only %0xFF bytes. * * When physical eraseblocks are returned to the WL sub-system by means of the * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is * done asynchronously in context of the per-UBI device background thread, * which is also managed by the WL sub-system. * * The wear-leveling is ensured by means of moving the contents of used * physical eraseblocks with low erase counter to free physical eraseblocks * with high erase counter. 
* * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick * an "optimal" physical eraseblock. For example, when it is known that the * physical eraseblock will be "put" soon because it contains short-term data, * the WL sub-system may pick a free physical eraseblock with low erase * counter, and so forth. * * If the WL sub-system fails to erase a physical eraseblock, it marks it as * bad. * * This sub-system is also responsible for scrubbing. If a bit-flip is detected * in a physical eraseblock, it has to be moved. Technically this is the same * as moving it for wear-leveling reasons. * * As it was said, for the UBI sub-system all physical eraseblocks are either * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub * RB-trees, as well as (temporarily) in the @wl->pq queue. * * When the WL sub-system returns a physical eraseblock, the physical * eraseblock is protected from being moved for some "time". For this reason, * the physical eraseblock is not directly moved from the @wl->free tree to the * @wl->used tree. There is a protection queue in between where this * physical eraseblock is temporarily stored (@wl->pq). * * All this protection stuff is needed because: * o we don't want to move physical eraseblocks just after we have given them * to the user; instead, we first want to let users fill them up with data; * * o there is a chance that the user will put the physical eraseblock very * soon, so it makes sense not to move it for some time, but wait; this is * especially important in case of "short term" physical eraseblocks. * * Physical eraseblocks stay protected only for limited time. But the "time" is * measured in erase cycles in this case. This is implemented with help of the * protection queue. 
Eraseblocks are put to the tail of this queue when they * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the * head of the queue on each erase operation (for any eraseblock). So the * length of the queue defines how may (global) erase cycles PEBs are protected. * * To put it differently, each physical eraseblock has 2 main states: free and * used. The former state corresponds to the @wl->free tree. The latter state * is split up on several sub-states: * o the WL movement is allowed (@wl->used tree); * o the WL movement is disallowed (@wl->erroneous) because the PEB is * erroneous - e.g., there was a read error; * o the WL movement is temporarily prohibited (@wl->pq queue); * o scrubbing is needed (@wl->scrub tree). * * Depending on the sub-state, wear-leveling entries of the used physical * eraseblocks may be kept in one of those structures. * * Note, in this implementation, we keep a small in-RAM object for each physical * eraseblock. This is surely not a scalable solution. But it appears to be good * enough for moderately large flashes and it is simple. In future, one may * re-work this sub-system and make it more scalable. * * At the moment this sub-system does not utilize the sequence number, which * was introduced relatively recently. But it would be wise to do this because * the sequence number of a logical eraseblock characterizes how old is it. For * example, when we move a PEB with low erase counter, and we need to pick the * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we * pick target PEB with an average EC if our PEB is not very "old". This is a * room for future re-works of the WL sub-system. */ #include <linux/slab.h> #include <linux/crc32.h> #include <linux/freezer.h> #include <linux/kthread.h> #include "ubi.h" /* Number of physical eraseblocks reserved for wear-leveling purposes */ #define WL_RESERVED_PEBS 1 /* * Maximum difference between two erase counters. 
If this threshold is * exceeded, the WL sub-system starts moving data from used physical * eraseblocks with low erase counter to free physical eraseblocks with high * erase counter. */ #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD /* * When a physical eraseblock is moved, the WL sub-system has to pick the target * physical eraseblock to move to. The simplest way would be just to pick the * one with the highest erase counter. But in certain workloads this could lead * to an unlimited wear of one or few physical eraseblock. Indeed, imagine a * situation when the picked physical eraseblock is constantly erased after the * data is written to it. So, we have a constant which limits the highest erase * counter of the free physical eraseblock to pick. Namely, the WL sub-system * does not pick eraseblocks with erase counter greater than the lowest erase * counter plus %WL_FREE_MAX_DIFF. */ #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD) /* * Maximum number of consecutive background thread failures which is enough to * switch to read-only mode. */ #define WL_MAX_FAILURES 32 /** * struct ubi_work - UBI work description data structure. * @list: a link in the list of pending works * @func: worker function * @e: physical eraseblock to erase * @torture: if the physical eraseblock has to be tortured * * The @func pointer points to the worker function. If the @cancel argument is * not zero, the worker has to free the resources and exit immediately. The * worker has to return zero in case of success and a negative error code in * case of failure. 
*/ struct ubi_work { struct list_head list; int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel); /* The below fields are only relevant to erasure works */ struct ubi_wl_entry *e; int torture; }; #ifdef CONFIG_MTD_UBI_DEBUG static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec); static int paranoid_check_in_wl_tree(const struct ubi_device *ubi, struct ubi_wl_entry *e, struct rb_root *root); static int paranoid_check_in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e); #else #define paranoid_check_ec(ubi, pnum, ec) 0 #define paranoid_check_in_wl_tree(ubi, e, root) #define paranoid_check_in_pq(ubi, e) 0 #endif /** * wl_tree_add - add a wear-leveling entry to a WL RB-tree. * @e: the wear-leveling entry to add * @root: the root of the tree * * Note, we use (erase counter, physical eraseblock number) pairs as keys in * the @ubi->used and @ubi->free RB-trees. */ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root) { struct rb_node **p, *parent = NULL; p = &root->rb_node; while (*p) { struct ubi_wl_entry *e1; parent = *p; e1 = rb_entry(parent, struct ubi_wl_entry, u.rb); if (e->ec < e1->ec) p = &(*p)->rb_left; else if (e->ec > e1->ec) p = &(*p)->rb_right; else { ubi_assert(e->pnum != e1->pnum); if (e->pnum < e1->pnum) p = &(*p)->rb_left; else p = &(*p)->rb_right; } } rb_link_node(&e->u.rb, parent, p); rb_insert_color(&e->u.rb, root); } /** * do_work - do one pending work. * @ubi: UBI device description object * * This function returns zero in case of success and a negative error code in * case of failure. */ static int do_work(struct ubi_device *ubi) { int err; struct ubi_work *wrk; cond_resched(); /* * @ubi->work_sem is used to synchronize with the workers. Workers take * it in read mode, so many of them may be doing works at a time. But * the queue flush code has to be sure the whole queue of works is * done, and it takes the mutex in write mode. 
*/ down_read(&ubi->work_sem); spin_lock(&ubi->wl_lock); if (list_empty(&ubi->works)) { spin_unlock(&ubi->wl_lock); up_read(&ubi->work_sem); return 0; } wrk = list_entry(ubi->works.next, struct ubi_work, list); list_del(&wrk->list); ubi->works_count -= 1; ubi_assert(ubi->works_count >= 0); spin_unlock(&ubi->wl_lock); /* * Call the worker function. Do not touch the work structure * after this call as it will have been freed or reused by that * time by the worker function. */ err = wrk->func(ubi, wrk, 0); if (err) ubi_err("work failed with error code %d", err); up_read(&ubi->work_sem); return err; } /** * produce_free_peb - produce a free physical eraseblock. * @ubi: UBI device description object * * This function tries to make a free PEB by means of synchronous execution of * pending works. This may be needed if, for example the background thread is * disabled. Returns zero in case of success and a negative error code in case * of failure. */ static int produce_free_peb(struct ubi_device *ubi) { int err; spin_lock(&ubi->wl_lock); while (!ubi->free.rb_node) { spin_unlock(&ubi->wl_lock); dbg_wl("do one work synchronously"); err = do_work(ubi); if (err) return err; spin_lock(&ubi->wl_lock); } spin_unlock(&ubi->wl_lock); return 0; } /** * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree. * @e: the wear-leveling entry to check * @root: the root of the tree * * This function returns non-zero if @e is in the @root RB-tree and zero if it * is not. 
*/ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root) { struct rb_node *p; p = root->rb_node; while (p) { struct ubi_wl_entry *e1; e1 = rb_entry(p, struct ubi_wl_entry, u.rb); if (e->pnum == e1->pnum) { ubi_assert(e == e1); return 1; } if (e->ec < e1->ec) p = p->rb_left; else if (e->ec > e1->ec) p = p->rb_right; else { ubi_assert(e->pnum != e1->pnum); if (e->pnum < e1->pnum) p = p->rb_left; else p = p->rb_right; } } return 0; } /** * prot_queue_add - add physical eraseblock to the protection queue. * @ubi: UBI device description object * @e: the physical eraseblock to add * * This function adds @e to the tail of the protection queue @ubi->pq, where * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be * temporarily protected from the wear-leveling worker. Note, @wl->lock has to * be locked. */ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e) { int pq_tail = ubi->pq_head - 1; if (pq_tail < 0) pq_tail = UBI_PROT_QUEUE_LEN - 1; ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN); list_add_tail(&e->u.list, &ubi->pq[pq_tail]); dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec); } /** * find_wl_entry - find wear-leveling entry closest to certain erase counter. * @root: the RB-tree where to look for * @diff: maximum possible difference from the smallest erase counter * * This function looks for a wear leveling entry with erase counter closest to * min + @diff, where min is the smallest erase counter. */ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff) { struct rb_node *p; struct ubi_wl_entry *e; int max; e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); max = e->ec + diff; p = root->rb_node; while (p) { struct ubi_wl_entry *e1; e1 = rb_entry(p, struct ubi_wl_entry, u.rb); if (e1->ec >= max) p = p->rb_left; else { p = p->rb_right; e = e1; } } return e; } /** * ubi_wl_get_peb - get a physical eraseblock. 
* @ubi: UBI device description object * @dtype: type of data which will be stored in this physical eraseblock * * This function returns a physical eraseblock in case of success and a * negative error code in case of failure. Might sleep. */ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype) { int err; struct ubi_wl_entry *e, *first, *last; ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM || dtype == UBI_UNKNOWN); retry: spin_lock(&ubi->wl_lock); if (!ubi->free.rb_node) { if (ubi->works_count == 0) { ubi_assert(list_empty(&ubi->works)); ubi_err("no free eraseblocks"); spin_unlock(&ubi->wl_lock); return -ENOSPC; } spin_unlock(&ubi->wl_lock); err = produce_free_peb(ubi); if (err < 0) return err; goto retry; } switch (dtype) { case UBI_LONGTERM: /* * For long term data we pick a physical eraseblock with high * erase counter. But the highest erase counter we can pick is * bounded by the the lowest erase counter plus * %WL_FREE_MAX_DIFF. */ e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); break; case UBI_UNKNOWN: /* * For unknown data we pick a physical eraseblock with medium * erase counter. But we by no means can pick a physical * eraseblock with erase counter greater or equivalent than the * lowest erase counter plus %WL_FREE_MAX_DIFF/2. */ first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb); last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb); if (last->ec - first->ec < WL_FREE_MAX_DIFF) e = rb_entry(ubi->free.rb_node, struct ubi_wl_entry, u.rb); else e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2); break; case UBI_SHORTTERM: /* * For short term data we pick a physical eraseblock with the * lowest erase counter as we expect it will be erased soon. */ e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb); break; default: BUG(); } paranoid_check_in_wl_tree(ubi, e, &ubi->free); /* * Move the physical eraseblock to the protection queue where it will * be protected from being moved for some time. 
*/ rb_erase(&e->u.rb, &ubi->free); dbg_wl("PEB %d EC %d", e->pnum, e->ec); prot_queue_add(ubi, e); spin_unlock(&ubi->wl_lock); err = ubi_dbg_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset, ubi->peb_size - ubi->vid_hdr_aloffset); if (err) { ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum); return err; } return e->pnum; } /** * prot_queue_del - remove a physical eraseblock from the protection queue. * @ubi: UBI device description object * @pnum: the physical eraseblock to remove * * This function deletes PEB @pnum from the protection queue and returns zero * in case of success and %-ENODEV if the PEB was not found. */ static int prot_queue_del(struct ubi_device *ubi, int pnum) { struct ubi_wl_entry *e; e = ubi->lookuptbl[pnum]; if (!e) return -ENODEV; if (paranoid_check_in_pq(ubi, e)) return -ENODEV; list_del(&e->u.list); dbg_wl("deleted PEB %d from the protection queue", e->pnum); return 0; } /** * sync_erase - synchronously erase a physical eraseblock. * @ubi: UBI device description object * @e: the the physical eraseblock to erase * @torture: if the physical eraseblock has to be tortured * * This function returns zero in case of success and a negative error code in * case of failure. */ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture) { int err; struct ubi_ec_hdr *ec_hdr; unsigned long long ec = e->ec; dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec); err = paranoid_check_ec(ubi, e->pnum, e->ec); if (err) return -EINVAL; ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); if (!ec_hdr) return -ENOMEM; err = ubi_io_sync_erase(ubi, e->pnum, torture); if (err < 0) goto out_free; ec += err; if (ec > UBI_MAX_ERASECOUNTER) { /* * Erase counter overflow. Upgrade UBI and use 64-bit * erase counters internally. 
*/ ubi_err("erase counter overflow at PEB %d, EC %llu", e->pnum, ec); err = -EINVAL; goto out_free; } dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec); ec_hdr->ec = cpu_to_be64(ec); err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr); if (err) goto out_free; e->ec = ec; spin_lock(&ubi->wl_lock); if (e->ec > ubi->max_ec) ubi->max_ec = e->ec; spin_unlock(&ubi->wl_lock); out_free: kfree(ec_hdr); return err; } /** * serve_prot_queue - check if it is time to stop protecting PEBs. * @ubi: UBI device description object * * This function is called after each erase operation and removes PEBs from the * tail of the protection queue. These PEBs have been protected for long enough * and should be moved to the used tree. */ static void serve_prot_queue(struct ubi_device *ubi) { struct ubi_wl_entry *e, *tmp; int count; /* * There may be several protected physical eraseblock to remove, * process them all. */ repeat: count = 0; spin_lock(&ubi->wl_lock); list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) { dbg_wl("PEB %d EC %d protection over, move to used tree", e->pnum, e->ec); list_del(&e->u.list); wl_tree_add(e, &ubi->used); if (count++ > 32) { /* * Let's be nice and avoid holding the spinlock for * too long. */ spin_unlock(&ubi->wl_lock); cond_resched(); goto repeat; } } ubi->pq_head += 1; if (ubi->pq_head == UBI_PROT_QUEUE_LEN) ubi->pq_head = 0; ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN); spin_unlock(&ubi->wl_lock); } /** * schedule_ubi_work - schedule a work. * @ubi: UBI device description object * @wrk: the work to schedule * * This function adds a work defined by @wrk to the tail of the pending works * list. 
*/ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) { spin_lock(&ubi->wl_lock); list_add_tail(&wrk->list, &ubi->works); ubi_assert(ubi->works_count >= 0); ubi->works_count += 1; if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi)) wake_up_process(ubi->bgt_thread); spin_unlock(&ubi->wl_lock); } static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, int cancel); /** * schedule_erase - schedule an erase work. * @ubi: UBI device description object * @e: the WL entry of the physical eraseblock to erase * @torture: if the physical eraseblock has to be tortured * * This function returns zero in case of success and a %-ENOMEM in case of * failure. */ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture) { struct ubi_work *wl_wrk; dbg_wl("schedule erasure of PEB %d, EC %d, torture %d", e->pnum, e->ec, torture); wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); if (!wl_wrk) return -ENOMEM; wl_wrk->func = &erase_worker; wl_wrk->e = e; wl_wrk->torture = torture; schedule_ubi_work(ubi, wl_wrk); return 0; } /** * wear_leveling_worker - wear-leveling worker function. * @ubi: UBI device description object * @wrk: the work object * @cancel: non-zero if the worker has to free memory and exit * * This function copies a more worn out physical eraseblock to a less worn out * one. Returns zero in case of success and a negative error code in case of * failure. 
*/ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, int cancel) { int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; int vol_id = -1, uninitialized_var(lnum); struct ubi_wl_entry *e1, *e2; struct ubi_vid_hdr *vid_hdr; kfree(wrk); if (cancel) return 0; vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); if (!vid_hdr) return -ENOMEM; mutex_lock(&ubi->move_mutex); spin_lock(&ubi->wl_lock); ubi_assert(!ubi->move_from && !ubi->move_to); ubi_assert(!ubi->move_to_put); if (!ubi->free.rb_node || (!ubi->used.rb_node && !ubi->scrub.rb_node)) { /* * No free physical eraseblocks? Well, they must be waiting in * the queue to be erased. Cancel movement - it will be * triggered again when a free physical eraseblock appears. * * No used physical eraseblocks? They must be temporarily * protected from being moved. They will be moved to the * @ubi->used tree later and the wear-leveling will be * triggered again. */ dbg_wl("cancel WL, a list is empty: free %d, used %d", !ubi->free.rb_node, !ubi->used.rb_node); goto out_cancel; } if (!ubi->scrub.rb_node) { /* * Now pick the least worn-out used physical eraseblock and a * highly worn-out free physical eraseblock. If the erase * counters differ much enough, start wear-leveling. 
*/ e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { dbg_wl("no WL needed: min used EC %d, max free EC %d", e1->ec, e2->ec); goto out_cancel; } paranoid_check_in_wl_tree(ubi, e1, &ubi->used); rb_erase(&e1->u.rb, &ubi->used); dbg_wl("move PEB %d EC %d to PEB %d EC %d", e1->pnum, e1->ec, e2->pnum, e2->ec); } else { /* Perform scrubbing */ scrubbing = 1; e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb); e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); paranoid_check_in_wl_tree(ubi, e1, &ubi->scrub); rb_erase(&e1->u.rb, &ubi->scrub); dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); } paranoid_check_in_wl_tree(ubi, e2, &ubi->free); rb_erase(&e2->u.rb, &ubi->free); ubi->move_from = e1; ubi->move_to = e2; spin_unlock(&ubi->wl_lock); /* * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum. * We so far do not know which logical eraseblock our physical * eraseblock (@e1) belongs to. We have to read the volume identifier * header first. * * Note, we are protected from this PEB being unmapped and erased. The * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB * which is being moved was unmapped. */ err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0); if (err && err != UBI_IO_BITFLIPS) { if (err == UBI_IO_FF) { /* * We are trying to move PEB without a VID header. UBI * always write VID headers shortly after the PEB was * given, so we have a situation when it has not yet * had a chance to write it, because it was preempted. * So add this PEB to the protection queue so far, * because presumably more data will be written there * (including the missing VID header), and then we'll * move it. */ dbg_wl("PEB %d has no VID header", e1->pnum); protect = 1; goto out_not_moved; } else if (err == UBI_IO_FF_BITFLIPS) { /* * The same situation as %UBI_IO_FF, but bit-flips were * detected. 
It is better to schedule this PEB for * scrubbing. */ dbg_wl("PEB %d has no VID header but has bit-flips", e1->pnum); scrubbing = 1; goto out_not_moved; } ubi_err("error %d while reading VID header from PEB %d", err, e1->pnum); goto out_error; } vol_id = be32_to_cpu(vid_hdr->vol_id); lnum = be32_to_cpu(vid_hdr->lnum); err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); if (err) { if (err == MOVE_CANCEL_RACE) { /* * The LEB has not been moved because the volume is * being deleted or the PEB has been put meanwhile. We * should prevent this PEB from being selected for * wear-leveling movement again, so put it to the * protection queue. */ protect = 1; goto out_not_moved; } if (err == MOVE_RETRY) { scrubbing = 1; goto out_not_moved; } if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR || err == MOVE_TARGET_RD_ERR) { /* * Target PEB had bit-flips or write error - torture it. */ torture = 1; goto out_not_moved; } if (err == MOVE_SOURCE_RD_ERR) { /* * An error happened while reading the source PEB. Do * not switch to R/O mode in this case, and give the * upper layers a possibility to recover from this, * e.g. by unmapping corresponding LEB. Instead, just * put this PEB to the @ubi->erroneous list to prevent * UBI from trying to move it over and over again. 
*/ if (ubi->erroneous_peb_count > ubi->max_erroneous) { ubi_err("too many erroneous eraseblocks (%d)", ubi->erroneous_peb_count); goto out_error; } erroneous = 1; goto out_not_moved; } if (err < 0) goto out_error; ubi_assert(0); } /* The PEB has been successfully moved */ if (scrubbing) ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d", e1->pnum, vol_id, lnum, e2->pnum); ubi_free_vid_hdr(ubi, vid_hdr); spin_lock(&ubi->wl_lock); if (!ubi->move_to_put) { wl_tree_add(e2, &ubi->used); e2 = NULL; } ubi->move_from = ubi->move_to = NULL; ubi->move_to_put = ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); err = schedule_erase(ubi, e1, 0); if (err) { kmem_cache_free(ubi_wl_entry_slab, e1); if (e2) kmem_cache_free(ubi_wl_entry_slab, e2); goto out_ro; } if (e2) { /* * Well, the target PEB was put meanwhile, schedule it for * erasure. */ dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase", e2->pnum, vol_id, lnum); err = schedule_erase(ubi, e2, 0); if (err) { kmem_cache_free(ubi_wl_entry_slab, e2); goto out_ro; } } dbg_wl("done"); mutex_unlock(&ubi->move_mutex); return 0; /* * For some reasons the LEB was not moved, might be an error, might be * something else. @e1 was not changed, so return it back. @e2 might * have been changed, schedule it for erasure. 
*/ out_not_moved: if (vol_id != -1) dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)", e1->pnum, vol_id, lnum, e2->pnum, err); else dbg_wl("cancel moving PEB %d to PEB %d (%d)", e1->pnum, e2->pnum, err); spin_lock(&ubi->wl_lock); if (protect) prot_queue_add(ubi, e1); else if (erroneous) { wl_tree_add(e1, &ubi->erroneous); ubi->erroneous_peb_count += 1; } else if (scrubbing) wl_tree_add(e1, &ubi->scrub); else wl_tree_add(e1, &ubi->used); ubi_assert(!ubi->move_to_put); ubi->move_from = ubi->move_to = NULL; ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); ubi_free_vid_hdr(ubi, vid_hdr); err = schedule_erase(ubi, e2, torture); if (err) { kmem_cache_free(ubi_wl_entry_slab, e2); goto out_ro; } mutex_unlock(&ubi->move_mutex); return 0; out_error: if (vol_id != -1) ubi_err("error %d while moving PEB %d to PEB %d", err, e1->pnum, e2->pnum); else ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d", err, e1->pnum, vol_id, lnum, e2->pnum); spin_lock(&ubi->wl_lock); ubi->move_from = ubi->move_to = NULL; ubi->move_to_put = ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); ubi_free_vid_hdr(ubi, vid_hdr); kmem_cache_free(ubi_wl_entry_slab, e1); kmem_cache_free(ubi_wl_entry_slab, e2); out_ro: ubi_ro_mode(ubi); mutex_unlock(&ubi->move_mutex); ubi_assert(err != 0); return err < 0 ? err : -EIO; out_cancel: ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); mutex_unlock(&ubi->move_mutex); ubi_free_vid_hdr(ubi, vid_hdr); return 0; } /** * ensure_wear_leveling - schedule wear-leveling if it is needed. * @ubi: UBI device description object * * This function checks if it is time to start wear-leveling and schedules it * if yes. This function returns zero in case of success and a negative error * code in case of failure. 
*/ static int ensure_wear_leveling(struct ubi_device *ubi) { int err = 0; struct ubi_wl_entry *e1; struct ubi_wl_entry *e2; struct ubi_work *wrk; spin_lock(&ubi->wl_lock); if (ubi->wl_scheduled) /* Wear-leveling is already in the work queue */ goto out_unlock; /* * If the ubi->scrub tree is not empty, scrubbing is needed, and the * the WL worker has to be scheduled anyway. */ if (!ubi->scrub.rb_node) { if (!ubi->used.rb_node || !ubi->free.rb_node) /* No physical eraseblocks - no deal */ goto out_unlock; /* * We schedule wear-leveling only if the difference between the * lowest erase counter of used physical eraseblocks and a high * erase counter of free physical eraseblocks is greater than * %UBI_WL_THRESHOLD. */ e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) goto out_unlock; dbg_wl("schedule wear-leveling"); } else dbg_wl("schedule scrubbing"); ubi->wl_scheduled = 1; spin_unlock(&ubi->wl_lock); wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); if (!wrk) { err = -ENOMEM; goto out_cancel; } wrk->func = &wear_leveling_worker; schedule_ubi_work(ubi, wrk); return err; out_cancel: spin_lock(&ubi->wl_lock); ubi->wl_scheduled = 0; out_unlock: spin_unlock(&ubi->wl_lock); return err; } /** * erase_worker - physical eraseblock erase worker function. * @ubi: UBI device description object * @wl_wrk: the work object * @cancel: non-zero if the worker has to free memory and exit * * This function erases a physical eraseblock and perform torture testing if * needed. It also takes care about marking the physical eraseblock bad if * needed. Returns zero in case of success and a negative error code in case of * failure. 
*/ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, int cancel) { struct ubi_wl_entry *e = wl_wrk->e; int pnum = e->pnum, err, need; if (cancel) { dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); kfree(wl_wrk); kmem_cache_free(ubi_wl_entry_slab, e); return 0; } dbg_wl("erase PEB %d EC %d", pnum, e->ec); err = sync_erase(ubi, e, wl_wrk->torture); if (!err) { /* Fine, we've erased it successfully */ kfree(wl_wrk); spin_lock(&ubi->wl_lock); wl_tree_add(e, &ubi->free); spin_unlock(&ubi->wl_lock); /* * One more erase operation has happened, take care about * protected physical eraseblocks. */ serve_prot_queue(ubi); /* And take care about wear-leveling */ err = ensure_wear_leveling(ubi); return err; } ubi_err("failed to erase PEB %d, error %d", pnum, err); kfree(wl_wrk); if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || err == -EBUSY) { int err1; /* Re-schedule the LEB for erasure */ err1 = schedule_erase(ubi, e, 0); if (err1) { err = err1; goto out_ro; } return err; } kmem_cache_free(ubi_wl_entry_slab, e); if (err != -EIO) /* * If this is not %-EIO, we have no idea what to do. Scheduling * this physical eraseblock for erasure again would cause * errors again and again. Well, lets switch to R/O mode. */ goto out_ro; /* It is %-EIO, the PEB went bad */ if (!ubi->bad_allowed) { ubi_err("bad physical eraseblock %d detected", pnum); goto out_ro; } spin_lock(&ubi->volumes_lock); need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1; if (need > 0) { need = ubi->avail_pebs >= need ? 
need : ubi->avail_pebs; ubi->avail_pebs -= need; ubi->rsvd_pebs += need; ubi->beb_rsvd_pebs += need; if (need > 0) ubi_msg("reserve more %d PEBs", need); } if (ubi->beb_rsvd_pebs == 0) { spin_unlock(&ubi->volumes_lock); ubi_err("no reserved physical eraseblocks"); goto out_ro; } spin_unlock(&ubi->volumes_lock); ubi_msg("mark PEB %d as bad", pnum); err = ubi_io_mark_bad(ubi, pnum); if (err) goto out_ro; spin_lock(&ubi->volumes_lock); ubi->beb_rsvd_pebs -= 1; ubi->bad_peb_count += 1; ubi->good_peb_count -= 1; ubi_calculate_reserved(ubi); if (ubi->beb_rsvd_pebs) ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs); else ubi_warn("last PEB from the reserved pool was used"); spin_unlock(&ubi->volumes_lock); return err; out_ro: ubi_ro_mode(ubi); return err; } /** * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system. * @ubi: UBI device description object * @pnum: physical eraseblock to return * @torture: if this physical eraseblock has to be tortured * * This function is called to return physical eraseblock @pnum to the pool of * free physical eraseblocks. The @torture flag has to be set if an I/O error * occurred to this @pnum and it has to be tested. This function returns zero * in case of success, and a negative error code in case of failure. */ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture) { int err; struct ubi_wl_entry *e; dbg_wl("PEB %d", pnum); ubi_assert(pnum >= 0); ubi_assert(pnum < ubi->peb_count); retry: spin_lock(&ubi->wl_lock); e = ubi->lookuptbl[pnum]; if (e == ubi->move_from) { /* * User is putting the physical eraseblock which was selected to * be moved. It will be scheduled for erasure in the * wear-leveling worker. 
*/ dbg_wl("PEB %d is being moved, wait", pnum); spin_unlock(&ubi->wl_lock); /* Wait for the WL worker by taking the @ubi->move_mutex */ mutex_lock(&ubi->move_mutex); mutex_unlock(&ubi->move_mutex); goto retry; } else if (e == ubi->move_to) { /* * User is putting the physical eraseblock which was selected * as the target the data is moved to. It may happen if the EBA * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()' * but the WL sub-system has not put the PEB to the "used" tree * yet, but it is about to do this. So we just set a flag which * will tell the WL worker that the PEB is not needed anymore * and should be scheduled for erasure. */ dbg_wl("PEB %d is the target of data moving", pnum); ubi_assert(!ubi->move_to_put); ubi->move_to_put = 1; spin_unlock(&ubi->wl_lock); return 0; } else { if (in_wl_tree(e, &ubi->used)) { paranoid_check_in_wl_tree(ubi, e, &ubi->used); rb_erase(&e->u.rb, &ubi->used); } else if (in_wl_tree(e, &ubi->scrub)) { paranoid_check_in_wl_tree(ubi, e, &ubi->scrub); rb_erase(&e->u.rb, &ubi->scrub); } else if (in_wl_tree(e, &ubi->erroneous)) { paranoid_check_in_wl_tree(ubi, e, &ubi->erroneous); rb_erase(&e->u.rb, &ubi->erroneous); ubi->erroneous_peb_count -= 1; ubi_assert(ubi->erroneous_peb_count >= 0); /* Erroneous PEBs should be tortured */ torture = 1; } else { err = prot_queue_del(ubi, e->pnum); if (err) { ubi_err("PEB %d not found", pnum); ubi_ro_mode(ubi); spin_unlock(&ubi->wl_lock); return err; } } } spin_unlock(&ubi->wl_lock); err = schedule_erase(ubi, e, torture); if (err) { spin_lock(&ubi->wl_lock); wl_tree_add(e, &ubi->used); spin_unlock(&ubi->wl_lock); } return err; } /** * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing. * @ubi: UBI device description object * @pnum: the physical eraseblock to schedule * * If a bit-flip in a physical eraseblock is detected, this physical eraseblock * needs scrubbing. This function schedules a physical eraseblock for * scrubbing which is done in background. 
This function returns zero in case of * success and a negative error code in case of failure. */ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum) { struct ubi_wl_entry *e; dbg_msg("schedule PEB %d for scrubbing", pnum); retry: spin_lock(&ubi->wl_lock); e = ubi->lookuptbl[pnum]; if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) || in_wl_tree(e, &ubi->erroneous)) { spin_unlock(&ubi->wl_lock); return 0; } if (e == ubi->move_to) { /* * This physical eraseblock was used to move data to. The data * was moved but the PEB was not yet inserted to the proper * tree. We should just wait a little and let the WL worker * proceed. */ spin_unlock(&ubi->wl_lock); dbg_wl("the PEB %d is not in proper tree, retry", pnum); yield(); goto retry; } if (in_wl_tree(e, &ubi->used)) { paranoid_check_in_wl_tree(ubi, e, &ubi->used); rb_erase(&e->u.rb, &ubi->used); } else { int err; err = prot_queue_del(ubi, e->pnum); if (err) { ubi_err("PEB %d not found", pnum); ubi_ro_mode(ubi); spin_unlock(&ubi->wl_lock); return err; } } wl_tree_add(e, &ubi->scrub); spin_unlock(&ubi->wl_lock); /* * Technically scrubbing is the same as wear-leveling, so it is done * by the WL worker. */ return ensure_wear_leveling(ubi); } /** * ubi_wl_flush - flush all pending works. * @ubi: UBI device description object * * This function returns zero in case of success and a negative error code in * case of failure. */ int ubi_wl_flush(struct ubi_device *ubi) { int err; /* * Erase while the pending works queue is not empty, but not more than * the number of currently pending works. */ dbg_wl("flush (%d pending works)", ubi->works_count); while (ubi->works_count) { err = do_work(ubi); if (err) return err; } /* * Make sure all the works which have been done in parallel are * finished. */ down_write(&ubi->work_sem); up_write(&ubi->work_sem); /* * And in case last was the WL worker and it canceled the LEB * movement, flush again. 
*/ while (ubi->works_count) { dbg_wl("flush more (%d pending works)", ubi->works_count); err = do_work(ubi); if (err) return err; } return 0; } /** * tree_destroy - destroy an RB-tree. * @root: the root of the tree to destroy */ static void tree_destroy(struct rb_root *root) { struct rb_node *rb; struct ubi_wl_entry *e; rb = root->rb_node; while (rb) { if (rb->rb_left) rb = rb->rb_left; else if (rb->rb_right) rb = rb->rb_right; else { e = rb_entry(rb, struct ubi_wl_entry, u.rb); rb = rb_parent(rb); if (rb) { if (rb->rb_left == &e->u.rb) rb->rb_left = NULL; else rb->rb_right = NULL; } kmem_cache_free(ubi_wl_entry_slab, e); } } } /** * ubi_thread - UBI background thread. * @u: the UBI device description object pointer */ int ubi_thread(void *u) { int failures = 0; struct ubi_device *ubi = u; ubi_msg("background thread \"%s\" started, PID %d", ubi->bgt_name, task_pid_nr(current)); set_freezable(); for (;;) { int err; if (kthread_should_stop()) break; if (try_to_freeze()) continue; spin_lock(&ubi->wl_lock); if (list_empty(&ubi->works) || ubi->ro_mode || !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) { set_current_state(TASK_INTERRUPTIBLE); spin_unlock(&ubi->wl_lock); schedule(); continue; } spin_unlock(&ubi->wl_lock); err = do_work(ubi); if (err) { ubi_err("%s: work failed with error code %d", ubi->bgt_name, err); if (failures++ > WL_MAX_FAILURES) { /* * Too many failures, disable the thread and * switch to read-only mode. */ ubi_msg("%s: %d consecutive failures", ubi->bgt_name, WL_MAX_FAILURES); ubi_ro_mode(ubi); ubi->thread_enabled = 0; continue; } } else failures = 0; cond_resched(); } dbg_wl("background thread \"%s\" is killed", ubi->bgt_name); return 0; } /** * cancel_pending - cancel all pending works. 
* @ubi: UBI device description object */ static void cancel_pending(struct ubi_device *ubi) { while (!list_empty(&ubi->works)) { struct ubi_work *wrk; wrk = list_entry(ubi->works.next, struct ubi_work, list); list_del(&wrk->list); wrk->func(ubi, wrk, 1); ubi->works_count -= 1; ubi_assert(ubi->works_count >= 0); } } /** * ubi_wl_init_scan - initialize the WL sub-system using scanning information. * @ubi: UBI device description object * @si: scanning information * * This function returns zero in case of success, and a negative error code in * case of failure. */ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) { int err, i; struct rb_node *rb1, *rb2; struct ubi_scan_volume *sv; struct ubi_scan_leb *seb, *tmp; struct ubi_wl_entry *e; ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT; spin_lock_init(&ubi->wl_lock); mutex_init(&ubi->move_mutex); init_rwsem(&ubi->work_sem); ubi->max_ec = si->max_ec; INIT_LIST_HEAD(&ubi->works); sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); err = -ENOMEM; ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL); if (!ubi->lookuptbl) return err; for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) INIT_LIST_HEAD(&ubi->pq[i]); ubi->pq_head = 0; list_for_each_entry_safe(seb, tmp, &si->erase, u.list) { cond_resched(); e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); if (!e) goto out_free; e->pnum = seb->pnum; e->ec = seb->ec; ubi->lookuptbl[e->pnum] = e; if (schedule_erase(ubi, e, 0)) { kmem_cache_free(ubi_wl_entry_slab, e); goto out_free; } } list_for_each_entry(seb, &si->free, u.list) { cond_resched(); e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); if (!e) goto out_free; e->pnum = seb->pnum; e->ec = seb->ec; ubi_assert(e->ec >= 0); wl_tree_add(e, &ubi->free); ubi->lookuptbl[e->pnum] = e; } ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) { ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) { cond_resched(); e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); if (!e) goto out_free; 
e->pnum = seb->pnum; e->ec = seb->ec; ubi->lookuptbl[e->pnum] = e; if (!seb->scrub) { dbg_wl("add PEB %d EC %d to the used tree", e->pnum, e->ec); wl_tree_add(e, &ubi->used); } else { dbg_wl("add PEB %d EC %d to the scrub tree", e->pnum, e->ec); wl_tree_add(e, &ubi->scrub); } } } if (ubi->avail_pebs < WL_RESERVED_PEBS) { ubi_err("no enough physical eraseblocks (%d, need %d)", ubi->avail_pebs, WL_RESERVED_PEBS); if (ubi->corr_peb_count) ubi_err("%d PEBs are corrupted and not used", ubi->corr_peb_count); goto out_free; } ubi->avail_pebs -= WL_RESERVED_PEBS; ubi->rsvd_pebs += WL_RESERVED_PEBS; /* Schedule wear-leveling if needed */ err = ensure_wear_leveling(ubi); if (err) goto out_free; return 0; out_free: cancel_pending(ubi); tree_destroy(&ubi->used); tree_destroy(&ubi->free); tree_destroy(&ubi->scrub); kfree(ubi->lookuptbl); return err; } /** * protection_queue_destroy - destroy the protection queue. * @ubi: UBI device description object */ static void protection_queue_destroy(struct ubi_device *ubi) { int i; struct ubi_wl_entry *e, *tmp; for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) { list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) { list_del(&e->u.list); kmem_cache_free(ubi_wl_entry_slab, e); } } } /** * ubi_wl_close - close the wear-leveling sub-system. * @ubi: UBI device description object */ void ubi_wl_close(struct ubi_device *ubi) { dbg_wl("close the WL sub-system"); cancel_pending(ubi); protection_queue_destroy(ubi); tree_destroy(&ubi->used); tree_destroy(&ubi->erroneous); tree_destroy(&ubi->free); tree_destroy(&ubi->scrub); kfree(ubi->lookuptbl); } #ifdef CONFIG_MTD_UBI_DEBUG /** * paranoid_check_ec - make sure that the erase counter of a PEB is correct. * @ubi: UBI device description object * @pnum: the physical eraseblock number to check * @ec: the erase counter to check * * This function returns zero if the erase counter of physical eraseblock @pnum * is equivalent to @ec, and a negative error code if not or if an error * occurred. 
*/ static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec) { int err; long long read_ec; struct ubi_ec_hdr *ec_hdr; if (!ubi->dbg->chk_gen) return 0; ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); if (!ec_hdr) return -ENOMEM; err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0); if (err && err != UBI_IO_BITFLIPS) { /* The header does not have to exist */ err = 0; goto out_free; } read_ec = be64_to_cpu(ec_hdr->ec); if (ec != read_ec) { ubi_err("paranoid check failed for PEB %d", pnum); ubi_err("read EC is %lld, should be %d", read_ec, ec); ubi_dbg_dump_stack(); err = 1; } else err = 0; out_free: kfree(ec_hdr); return err; } /** * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree. * @ubi: UBI device description object * @e: the wear-leveling entry to check * @root: the root of the tree * * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it * is not. */ static int paranoid_check_in_wl_tree(const struct ubi_device *ubi, struct ubi_wl_entry *e, struct rb_root *root) { if (!ubi->dbg->chk_gen) return 0; if (in_wl_tree(e, root)) return 0; ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ", e->pnum, e->ec, root); ubi_dbg_dump_stack(); return -EINVAL; } /** * paranoid_check_in_pq - check if wear-leveling entry is in the protection * queue. * @ubi: UBI device description object * @e: the wear-leveling entry to check * * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not. */ static int paranoid_check_in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e) { struct ubi_wl_entry *p; int i; if (!ubi->dbg->chk_gen) return 0; for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) list_for_each_entry(p, &ubi->pq[i], u.list) if (p == e) return 0; ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue", e->pnum, e->ec); ubi_dbg_dump_stack(); return -EINVAL; } #endif /* CONFIG_MTD_UBI_DEBUG */
gpl-2.0
pocketbook-free/kernel_622
drivers/ssb/sdio.c
4172
16198
/* * Sonics Silicon Backplane * SDIO-Hostbus related functions * * Copyright 2009 Albert Herranz <albert_herranz@yahoo.es> * * Based on drivers/ssb/pcmcia.c * Copyright 2006 Johannes Berg <johannes@sipsolutions.net> * Copyright 2007-2008 Michael Buesch <mb@bu3sch.de> * * Licensed under the GNU/GPL. See COPYING for details. * */ #include <linux/ssb/ssb.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/etherdevice.h> #include <linux/mmc/sdio_func.h> #include "ssb_private.h" /* Define the following to 1 to enable a printk on each coreswitch. */ #define SSB_VERBOSE_SDIOCORESWITCH_DEBUG 0 /* Hardware invariants CIS tuples */ #define SSB_SDIO_CIS 0x80 #define SSB_SDIO_CIS_SROMREV 0x00 #define SSB_SDIO_CIS_ID 0x01 #define SSB_SDIO_CIS_BOARDREV 0x02 #define SSB_SDIO_CIS_PA 0x03 #define SSB_SDIO_CIS_PA_PA0B0_LO 0 #define SSB_SDIO_CIS_PA_PA0B0_HI 1 #define SSB_SDIO_CIS_PA_PA0B1_LO 2 #define SSB_SDIO_CIS_PA_PA0B1_HI 3 #define SSB_SDIO_CIS_PA_PA0B2_LO 4 #define SSB_SDIO_CIS_PA_PA0B2_HI 5 #define SSB_SDIO_CIS_PA_ITSSI 6 #define SSB_SDIO_CIS_PA_MAXPOW 7 #define SSB_SDIO_CIS_OEMNAME 0x04 #define SSB_SDIO_CIS_CCODE 0x05 #define SSB_SDIO_CIS_ANTENNA 0x06 #define SSB_SDIO_CIS_ANTGAIN 0x07 #define SSB_SDIO_CIS_BFLAGS 0x08 #define SSB_SDIO_CIS_LEDS 0x09 #define CISTPL_FUNCE_LAN_NODE_ID 0x04 /* same as in PCMCIA */ /* * Function 1 miscellaneous registers. 
* * Definitions match src/include/sbsdio.h from the * Android Open Source Project * http://android.git.kernel.org/?p=platform/system/wlan/broadcom.git * */ #define SBSDIO_FUNC1_SBADDRLOW 0x1000a /* SB Address window Low (b15) */ #define SBSDIO_FUNC1_SBADDRMID 0x1000b /* SB Address window Mid (b23-b16) */ #define SBSDIO_FUNC1_SBADDRHIGH 0x1000c /* SB Address window High (b24-b31) */ /* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */ #define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid address bits in SBADDRLOW */ #define SBSDIO_SBADDRMID_MASK 0xff /* Valid address bits in SBADDRMID */ #define SBSDIO_SBADDRHIGH_MASK 0xff /* Valid address bits in SBADDRHIGH */ #define SBSDIO_SB_OFT_ADDR_MASK 0x7FFF /* sb offset addr is <= 15 bits, 32k */ /* REVISIT: this flag doesn't seem to matter */ #define SBSDIO_SB_ACCESS_2_4B_FLAG 0x8000 /* forces 32-bit SB access */ /* * Address map within the SDIO function address space (128K). * * Start End Description * ------- ------- ------------------------------------------ * 0x00000 0x0ffff selected backplane address window (64K) * 0x10000 0x1ffff backplane control registers (max 64K) * * The current address window is configured by writing to registers * SBADDRLOW, SBADDRMID and SBADDRHIGH. * * In order to access the contents of a 32-bit Silicon Backplane address * the backplane address window must be first loaded with the highest * 16 bits of the target address. Then, an access must be done to the * SDIO function address space using the lower 15 bits of the address. * Bit 15 of the address must be set when doing 32 bit accesses. * * 10987654321098765432109876543210 * WWWWWWWWWWWWWWWWW SB Address Window * OOOOOOOOOOOOOOOO Offset within SB Address Window * a 32-bit access flag */ /* * SSB I/O via SDIO. * * NOTE: SDIO address @addr is 17 bits long (SDIO address space is 128K). 
*/ static inline struct device *ssb_sdio_dev(struct ssb_bus *bus) { return &bus->host_sdio->dev; } /* host claimed */ static int ssb_sdio_writeb(struct ssb_bus *bus, unsigned int addr, u8 val) { int error = 0; sdio_writeb(bus->host_sdio, val, addr, &error); if (unlikely(error)) { dev_dbg(ssb_sdio_dev(bus), "%08X <- %02x, error %d\n", addr, val, error); } return error; } #if 0 static u8 ssb_sdio_readb(struct ssb_bus *bus, unsigned int addr) { u8 val; int error = 0; val = sdio_readb(bus->host_sdio, addr, &error); if (unlikely(error)) { dev_dbg(ssb_sdio_dev(bus), "%08X -> %02x, error %d\n", addr, val, error); } return val; } #endif /* host claimed */ static int ssb_sdio_set_sbaddr_window(struct ssb_bus *bus, u32 address) { int error; error = ssb_sdio_writeb(bus, SBSDIO_FUNC1_SBADDRLOW, (address >> 8) & SBSDIO_SBADDRLOW_MASK); if (error) goto out; error = ssb_sdio_writeb(bus, SBSDIO_FUNC1_SBADDRMID, (address >> 16) & SBSDIO_SBADDRMID_MASK); if (error) goto out; error = ssb_sdio_writeb(bus, SBSDIO_FUNC1_SBADDRHIGH, (address >> 24) & SBSDIO_SBADDRHIGH_MASK); if (error) goto out; bus->sdio_sbaddr = address; out: if (error) { dev_dbg(ssb_sdio_dev(bus), "failed to set address window" " to 0x%08x, error %d\n", address, error); } return error; } /* for enumeration use only */ u32 ssb_sdio_scan_read32(struct ssb_bus *bus, u16 offset) { u32 val; int error; sdio_claim_host(bus->host_sdio); val = sdio_readl(bus->host_sdio, offset, &error); sdio_release_host(bus->host_sdio); if (unlikely(error)) { dev_dbg(ssb_sdio_dev(bus), "%04X:%04X > %08x, error %d\n", bus->sdio_sbaddr >> 16, offset, val, error); } return val; } /* for enumeration use only */ int ssb_sdio_scan_switch_coreidx(struct ssb_bus *bus, u8 coreidx) { u32 sbaddr; int error; sbaddr = (coreidx * SSB_CORE_SIZE) + SSB_ENUM_BASE; sdio_claim_host(bus->host_sdio); error = ssb_sdio_set_sbaddr_window(bus, sbaddr); sdio_release_host(bus->host_sdio); if (error) { dev_err(ssb_sdio_dev(bus), "failed to switch to core %u," " error 
%d\n", coreidx, error); goto out; } out: return error; } /* host must be already claimed */ int ssb_sdio_switch_core(struct ssb_bus *bus, struct ssb_device *dev) { u8 coreidx = dev->core_index; u32 sbaddr; int error = 0; sbaddr = (coreidx * SSB_CORE_SIZE) + SSB_ENUM_BASE; if (unlikely(bus->sdio_sbaddr != sbaddr)) { #if SSB_VERBOSE_SDIOCORESWITCH_DEBUG dev_info(ssb_sdio_dev(bus), "switching to %s core, index %d\n", ssb_core_name(dev->id.coreid), coreidx); #endif error = ssb_sdio_set_sbaddr_window(bus, sbaddr); if (error) { dev_dbg(ssb_sdio_dev(bus), "failed to switch to" " core %u, error %d\n", coreidx, error); goto out; } bus->mapped_device = dev; } out: return error; } static u8 ssb_sdio_read8(struct ssb_device *dev, u16 offset) { struct ssb_bus *bus = dev->bus; u8 val = 0xff; int error = 0; sdio_claim_host(bus->host_sdio); if (unlikely(ssb_sdio_switch_core(bus, dev))) goto out; offset |= bus->sdio_sbaddr & 0xffff; offset &= SBSDIO_SB_OFT_ADDR_MASK; val = sdio_readb(bus->host_sdio, offset, &error); if (error) { dev_dbg(ssb_sdio_dev(bus), "%04X:%04X > %02x, error %d\n", bus->sdio_sbaddr >> 16, offset, val, error); } out: sdio_release_host(bus->host_sdio); return val; } static u16 ssb_sdio_read16(struct ssb_device *dev, u16 offset) { struct ssb_bus *bus = dev->bus; u16 val = 0xffff; int error = 0; sdio_claim_host(bus->host_sdio); if (unlikely(ssb_sdio_switch_core(bus, dev))) goto out; offset |= bus->sdio_sbaddr & 0xffff; offset &= SBSDIO_SB_OFT_ADDR_MASK; val = sdio_readw(bus->host_sdio, offset, &error); if (error) { dev_dbg(ssb_sdio_dev(bus), "%04X:%04X > %04x, error %d\n", bus->sdio_sbaddr >> 16, offset, val, error); } out: sdio_release_host(bus->host_sdio); return val; } static u32 ssb_sdio_read32(struct ssb_device *dev, u16 offset) { struct ssb_bus *bus = dev->bus; u32 val = 0xffffffff; int error = 0; sdio_claim_host(bus->host_sdio); if (unlikely(ssb_sdio_switch_core(bus, dev))) goto out; offset |= bus->sdio_sbaddr & 0xffff; offset &= SBSDIO_SB_OFT_ADDR_MASK; 
offset |= SBSDIO_SB_ACCESS_2_4B_FLAG; /* 32 bit data access */ val = sdio_readl(bus->host_sdio, offset, &error); if (error) { dev_dbg(ssb_sdio_dev(bus), "%04X:%04X > %08x, error %d\n", bus->sdio_sbaddr >> 16, offset, val, error); } out: sdio_release_host(bus->host_sdio); return val; } #ifdef CONFIG_SSB_BLOCKIO static void ssb_sdio_block_read(struct ssb_device *dev, void *buffer, size_t count, u16 offset, u8 reg_width) { size_t saved_count = count; struct ssb_bus *bus = dev->bus; int error = 0; sdio_claim_host(bus->host_sdio); if (unlikely(ssb_sdio_switch_core(bus, dev))) { error = -EIO; memset(buffer, 0xff, count); goto err_out; } offset |= bus->sdio_sbaddr & 0xffff; offset &= SBSDIO_SB_OFT_ADDR_MASK; switch (reg_width) { case sizeof(u8): { error = sdio_readsb(bus->host_sdio, buffer, offset, count); break; } case sizeof(u16): { SSB_WARN_ON(count & 1); error = sdio_readsb(bus->host_sdio, buffer, offset, count); break; } case sizeof(u32): { SSB_WARN_ON(count & 3); offset |= SBSDIO_SB_ACCESS_2_4B_FLAG; /* 32 bit data access */ error = sdio_readsb(bus->host_sdio, buffer, offset, count); break; } default: SSB_WARN_ON(1); } if (!error) goto out; err_out: dev_dbg(ssb_sdio_dev(bus), "%04X:%04X (width=%u, len=%zu), error %d\n", bus->sdio_sbaddr >> 16, offset, reg_width, saved_count, error); out: sdio_release_host(bus->host_sdio); } #endif /* CONFIG_SSB_BLOCKIO */ static void ssb_sdio_write8(struct ssb_device *dev, u16 offset, u8 val) { struct ssb_bus *bus = dev->bus; int error = 0; sdio_claim_host(bus->host_sdio); if (unlikely(ssb_sdio_switch_core(bus, dev))) goto out; offset |= bus->sdio_sbaddr & 0xffff; offset &= SBSDIO_SB_OFT_ADDR_MASK; sdio_writeb(bus->host_sdio, val, offset, &error); if (error) { dev_dbg(ssb_sdio_dev(bus), "%04X:%04X < %02x, error %d\n", bus->sdio_sbaddr >> 16, offset, val, error); } out: sdio_release_host(bus->host_sdio); } static void ssb_sdio_write16(struct ssb_device *dev, u16 offset, u16 val) { struct ssb_bus *bus = dev->bus; int error = 0; 
sdio_claim_host(bus->host_sdio); if (unlikely(ssb_sdio_switch_core(bus, dev))) goto out; offset |= bus->sdio_sbaddr & 0xffff; offset &= SBSDIO_SB_OFT_ADDR_MASK; sdio_writew(bus->host_sdio, val, offset, &error); if (error) { dev_dbg(ssb_sdio_dev(bus), "%04X:%04X < %04x, error %d\n", bus->sdio_sbaddr >> 16, offset, val, error); } out: sdio_release_host(bus->host_sdio); } static void ssb_sdio_write32(struct ssb_device *dev, u16 offset, u32 val) { struct ssb_bus *bus = dev->bus; int error = 0; sdio_claim_host(bus->host_sdio); if (unlikely(ssb_sdio_switch_core(bus, dev))) goto out; offset |= bus->sdio_sbaddr & 0xffff; offset &= SBSDIO_SB_OFT_ADDR_MASK; offset |= SBSDIO_SB_ACCESS_2_4B_FLAG; /* 32 bit data access */ sdio_writel(bus->host_sdio, val, offset, &error); if (error) { dev_dbg(ssb_sdio_dev(bus), "%04X:%04X < %08x, error %d\n", bus->sdio_sbaddr >> 16, offset, val, error); } if (bus->quirks & SSB_QUIRK_SDIO_READ_AFTER_WRITE32) sdio_readl(bus->host_sdio, 0, &error); out: sdio_release_host(bus->host_sdio); } #ifdef CONFIG_SSB_BLOCKIO static void ssb_sdio_block_write(struct ssb_device *dev, const void *buffer, size_t count, u16 offset, u8 reg_width) { size_t saved_count = count; struct ssb_bus *bus = dev->bus; int error = 0; sdio_claim_host(bus->host_sdio); if (unlikely(ssb_sdio_switch_core(bus, dev))) { error = -EIO; memset((void *)buffer, 0xff, count); goto err_out; } offset |= bus->sdio_sbaddr & 0xffff; offset &= SBSDIO_SB_OFT_ADDR_MASK; switch (reg_width) { case sizeof(u8): error = sdio_writesb(bus->host_sdio, offset, (void *)buffer, count); break; case sizeof(u16): SSB_WARN_ON(count & 1); error = sdio_writesb(bus->host_sdio, offset, (void *)buffer, count); break; case sizeof(u32): SSB_WARN_ON(count & 3); offset |= SBSDIO_SB_ACCESS_2_4B_FLAG; /* 32 bit data access */ error = sdio_writesb(bus->host_sdio, offset, (void *)buffer, count); break; default: SSB_WARN_ON(1); } if (!error) goto out; err_out: dev_dbg(ssb_sdio_dev(bus), "%04X:%04X (width=%u, len=%zu), error 
%d\n", bus->sdio_sbaddr >> 16, offset, reg_width, saved_count, error); out: sdio_release_host(bus->host_sdio); } #endif /* CONFIG_SSB_BLOCKIO */ /* Not "static", as it's used in main.c */ const struct ssb_bus_ops ssb_sdio_ops = { .read8 = ssb_sdio_read8, .read16 = ssb_sdio_read16, .read32 = ssb_sdio_read32, .write8 = ssb_sdio_write8, .write16 = ssb_sdio_write16, .write32 = ssb_sdio_write32, #ifdef CONFIG_SSB_BLOCKIO .block_read = ssb_sdio_block_read, .block_write = ssb_sdio_block_write, #endif }; #define GOTO_ERROR_ON(condition, description) do { \ if (unlikely(condition)) { \ error_description = description; \ goto error; \ } \ } while (0) int ssb_sdio_get_invariants(struct ssb_bus *bus, struct ssb_init_invariants *iv) { struct ssb_sprom *sprom = &iv->sprom; struct ssb_boardinfo *bi = &iv->boardinfo; const char *error_description = "none"; struct sdio_func_tuple *tuple; void *mac; memset(sprom, 0xFF, sizeof(*sprom)); sprom->boardflags_lo = 0; sprom->boardflags_hi = 0; tuple = bus->host_sdio->tuples; while (tuple) { switch (tuple->code) { case 0x22: /* extended function */ switch (tuple->data[0]) { case CISTPL_FUNCE_LAN_NODE_ID: GOTO_ERROR_ON((tuple->size != 7) && (tuple->data[1] != 6), "mac tpl size"); /* fetch the MAC address. 
*/ mac = tuple->data + 2; memcpy(sprom->il0mac, mac, ETH_ALEN); memcpy(sprom->et1mac, mac, ETH_ALEN); break; default: break; } break; case 0x80: /* vendor specific tuple */ switch (tuple->data[0]) { case SSB_SDIO_CIS_SROMREV: GOTO_ERROR_ON(tuple->size != 2, "sromrev tpl size"); sprom->revision = tuple->data[1]; break; case SSB_SDIO_CIS_ID: GOTO_ERROR_ON((tuple->size != 5) && (tuple->size != 7), "id tpl size"); bi->vendor = tuple->data[1] | (tuple->data[2]<<8); break; case SSB_SDIO_CIS_BOARDREV: GOTO_ERROR_ON(tuple->size != 2, "boardrev tpl size"); sprom->board_rev = tuple->data[1]; break; case SSB_SDIO_CIS_PA: GOTO_ERROR_ON((tuple->size != 9) && (tuple->size != 10), "pa tpl size"); sprom->pa0b0 = tuple->data[1] | ((u16)tuple->data[2] << 8); sprom->pa0b1 = tuple->data[3] | ((u16)tuple->data[4] << 8); sprom->pa0b2 = tuple->data[5] | ((u16)tuple->data[6] << 8); sprom->itssi_a = tuple->data[7]; sprom->itssi_bg = tuple->data[7]; sprom->maxpwr_a = tuple->data[8]; sprom->maxpwr_bg = tuple->data[8]; break; case SSB_SDIO_CIS_OEMNAME: /* Not present */ break; case SSB_SDIO_CIS_CCODE: GOTO_ERROR_ON(tuple->size != 2, "ccode tpl size"); sprom->country_code = tuple->data[1]; break; case SSB_SDIO_CIS_ANTENNA: GOTO_ERROR_ON(tuple->size != 2, "ant tpl size"); sprom->ant_available_a = tuple->data[1]; sprom->ant_available_bg = tuple->data[1]; break; case SSB_SDIO_CIS_ANTGAIN: GOTO_ERROR_ON(tuple->size != 2, "antg tpl size"); sprom->antenna_gain.ghz24.a0 = tuple->data[1]; sprom->antenna_gain.ghz24.a1 = tuple->data[1]; sprom->antenna_gain.ghz24.a2 = tuple->data[1]; sprom->antenna_gain.ghz24.a3 = tuple->data[1]; sprom->antenna_gain.ghz5.a0 = tuple->data[1]; sprom->antenna_gain.ghz5.a1 = tuple->data[1]; sprom->antenna_gain.ghz5.a2 = tuple->data[1]; sprom->antenna_gain.ghz5.a3 = tuple->data[1]; break; case SSB_SDIO_CIS_BFLAGS: GOTO_ERROR_ON((tuple->size != 3) && (tuple->size != 5), "bfl tpl size"); sprom->boardflags_lo = tuple->data[1] | ((u16)tuple->data[2] << 8); break; case 
SSB_SDIO_CIS_LEDS: GOTO_ERROR_ON(tuple->size != 5, "leds tpl size"); sprom->gpio0 = tuple->data[1]; sprom->gpio1 = tuple->data[2]; sprom->gpio2 = tuple->data[3]; sprom->gpio3 = tuple->data[4]; break; default: break; } break; default: break; } tuple = tuple->next; } return 0; error: dev_err(ssb_sdio_dev(bus), "failed to fetch device invariants: %s\n", error_description); return -ENODEV; } void ssb_sdio_exit(struct ssb_bus *bus) { if (bus->bustype != SSB_BUSTYPE_SDIO) return; /* Nothing to do here. */ } int ssb_sdio_init(struct ssb_bus *bus) { if (bus->bustype != SSB_BUSTYPE_SDIO) return 0; bus->sdio_sbaddr = ~0; return 0; }
gpl-2.0
sensarliar/zfcs_linux
net/wireless/ibss.c
4428
12187
/* * Some IBSS support code for cfg80211. * * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> */ #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/slab.h> #include <linux/export.h> #include <net/cfg80211.h> #include "wext-compat.h" #include "nl80211.h" void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_bss *bss; #ifdef CONFIG_CFG80211_WEXT union iwreq_data wrqu; #endif if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return; if (!wdev->ssid_len) return; bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, wdev->ssid, wdev->ssid_len, WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS); if (WARN_ON(!bss)) return; if (wdev->current_bss) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(&wdev->current_bss->pub); } cfg80211_hold_bss(bss_from_pub(bss)); wdev->current_bss = bss_from_pub(bss); cfg80211_upload_connect_keys(wdev); nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, GFP_KERNEL); #ifdef CONFIG_CFG80211_WEXT memset(&wrqu, 0, sizeof(wrqu)); memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); #endif } void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; CFG80211_DEV_WARN_ON(!wdev->ssid_len); ev = kzalloc(sizeof(*ev), gfp); if (!ev) return; ev->type = EVENT_IBSS_JOINED; memcpy(ev->cr.bssid, bssid, ETH_ALEN); spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } EXPORT_SYMBOL(cfg80211_ibss_joined); int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_ibss_params *params, struct cfg80211_cached_keys *connkeys) { struct 
wireless_dev *wdev = dev->ieee80211_ptr; int err; ASSERT_WDEV_LOCK(wdev); if (wdev->ssid_len) return -EALREADY; if (!params->basic_rates) { /* * If no rates were explicitly configured, * use the mandatory rate set for 11b or * 11a for maximum compatibility. */ struct ieee80211_supported_band *sband = rdev->wiphy.bands[params->channel->band]; int j; u32 flag = params->channel->band == IEEE80211_BAND_5GHZ ? IEEE80211_RATE_MANDATORY_A : IEEE80211_RATE_MANDATORY_B; for (j = 0; j < sband->n_bitrates; j++) { if (sband->bitrates[j].flags & flag) params->basic_rates |= BIT(j); } } if (WARN_ON(wdev->connect_keys)) kfree(wdev->connect_keys); wdev->connect_keys = connkeys; #ifdef CONFIG_CFG80211_WEXT wdev->wext.ibss.channel = params->channel; #endif err = rdev->ops->join_ibss(&rdev->wiphy, dev, params); if (err) { wdev->connect_keys = NULL; return err; } memcpy(wdev->ssid, params->ssid, params->ssid_len); wdev->ssid_len = params->ssid_len; return 0; } int cfg80211_join_ibss(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_ibss_params *params, struct cfg80211_cached_keys *connkeys) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; mutex_lock(&rdev->devlist_mtx); wdev_lock(wdev); err = __cfg80211_join_ibss(rdev, dev, params, connkeys); wdev_unlock(wdev); mutex_unlock(&rdev->devlist_mtx); return err; } static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); int i; ASSERT_WDEV_LOCK(wdev); kfree(wdev->connect_keys); wdev->connect_keys = NULL; /* * Delete all the keys ... pairwise keys can't really * exist any more anyway, but default keys might. 
*/ if (rdev->ops->del_key) for (i = 0; i < 6; i++) rdev->ops->del_key(wdev->wiphy, dev, i, false, NULL); if (wdev->current_bss) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(&wdev->current_bss->pub); } wdev->current_bss = NULL; wdev->ssid_len = 0; #ifdef CONFIG_CFG80211_WEXT if (!nowext) wdev->wext.ibss.ssid_len = 0; #endif } void cfg80211_clear_ibss(struct net_device *dev, bool nowext) { struct wireless_dev *wdev = dev->ieee80211_ptr; wdev_lock(wdev); __cfg80211_clear_ibss(dev, nowext); wdev_unlock(wdev); } int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, struct net_device *dev, bool nowext) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; ASSERT_WDEV_LOCK(wdev); if (!wdev->ssid_len) return -ENOLINK; err = rdev->ops->leave_ibss(&rdev->wiphy, dev); if (err) return err; __cfg80211_clear_ibss(dev, nowext); return 0; } int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, struct net_device *dev, bool nowext) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; wdev_lock(wdev); err = __cfg80211_leave_ibss(rdev, dev, nowext); wdev_unlock(wdev); return err; } #ifdef CONFIG_CFG80211_WEXT int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { struct cfg80211_cached_keys *ck = NULL; enum ieee80211_band band; int i, err; ASSERT_WDEV_LOCK(wdev); if (!wdev->wext.ibss.beacon_interval) wdev->wext.ibss.beacon_interval = 100; /* try to find an IBSS channel if none requested ... 
*/ if (!wdev->wext.ibss.channel) { for (band = 0; band < IEEE80211_NUM_BANDS; band++) { struct ieee80211_supported_band *sband; struct ieee80211_channel *chan; sband = rdev->wiphy.bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) { chan = &sband->channels[i]; if (chan->flags & IEEE80211_CHAN_NO_IBSS) continue; if (chan->flags & IEEE80211_CHAN_DISABLED) continue; wdev->wext.ibss.channel = chan; break; } if (wdev->wext.ibss.channel) break; } if (!wdev->wext.ibss.channel) return -EINVAL; } /* don't join -- SSID is not there */ if (!wdev->wext.ibss.ssid_len) return 0; if (!netif_running(wdev->netdev)) return 0; if (wdev->wext.keys) { wdev->wext.keys->def = wdev->wext.default_key; wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key; } wdev->wext.ibss.privacy = wdev->wext.default_key != -1; if (wdev->wext.keys) { ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL); if (!ck) return -ENOMEM; for (i = 0; i < 6; i++) ck->params[i].key = ck->data[i]; } err = __cfg80211_join_ibss(rdev, wdev->netdev, &wdev->wext.ibss, ck); if (err) kfree(ck); return err; } int cfg80211_ibss_wext_siwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *wextfreq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct ieee80211_channel *chan = NULL; int err, freq; /* call only for ibss! 
*/ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return -EINVAL; if (!rdev->ops->join_ibss) return -EOPNOTSUPP; freq = cfg80211_wext_freq(wdev->wiphy, wextfreq); if (freq < 0) return freq; if (freq) { chan = ieee80211_get_channel(wdev->wiphy, freq); if (!chan) return -EINVAL; if (chan->flags & IEEE80211_CHAN_NO_IBSS || chan->flags & IEEE80211_CHAN_DISABLED) return -EINVAL; } if (wdev->wext.ibss.channel == chan) return 0; wdev_lock(wdev); err = 0; if (wdev->ssid_len) err = __cfg80211_leave_ibss(rdev, dev, true); wdev_unlock(wdev); if (err) return err; if (chan) { wdev->wext.ibss.channel = chan; wdev->wext.ibss.channel_fixed = true; } else { /* cfg80211_ibss_wext_join will pick one if needed */ wdev->wext.ibss.channel_fixed = false; } mutex_lock(&rdev->devlist_mtx); wdev_lock(wdev); err = cfg80211_ibss_wext_join(rdev, wdev); wdev_unlock(wdev); mutex_unlock(&rdev->devlist_mtx); return err; } int cfg80211_ibss_wext_giwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *freq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct ieee80211_channel *chan = NULL; /* call only for ibss! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return -EINVAL; wdev_lock(wdev); if (wdev->current_bss) chan = wdev->current_bss->pub.channel; else if (wdev->wext.ibss.channel) chan = wdev->wext.ibss.channel; wdev_unlock(wdev); if (chan) { freq->m = chan->center_freq; freq->e = 6; return 0; } /* no channel if not joining */ return -EINVAL; } int cfg80211_ibss_wext_siwessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *ssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); size_t len = data->length; int err; /* call only for ibss! 
*/ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return -EINVAL; if (!rdev->ops->join_ibss) return -EOPNOTSUPP; wdev_lock(wdev); err = 0; if (wdev->ssid_len) err = __cfg80211_leave_ibss(rdev, dev, true); wdev_unlock(wdev); if (err) return err; /* iwconfig uses nul termination in SSID.. */ if (len > 0 && ssid[len - 1] == '\0') len--; wdev->wext.ibss.ssid = wdev->ssid; memcpy(wdev->wext.ibss.ssid, ssid, len); wdev->wext.ibss.ssid_len = len; mutex_lock(&rdev->devlist_mtx); wdev_lock(wdev); err = cfg80211_ibss_wext_join(rdev, wdev); wdev_unlock(wdev); mutex_unlock(&rdev->devlist_mtx); return err; } int cfg80211_ibss_wext_giwessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *ssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; /* call only for ibss! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return -EINVAL; data->flags = 0; wdev_lock(wdev); if (wdev->ssid_len) { data->flags = 1; data->length = wdev->ssid_len; memcpy(ssid, wdev->ssid, data->length); } else if (wdev->wext.ibss.ssid && wdev->wext.ibss.ssid_len) { data->flags = 1; data->length = wdev->wext.ibss.ssid_len; memcpy(ssid, wdev->wext.ibss.ssid, data->length); } wdev_unlock(wdev); return 0; } int cfg80211_ibss_wext_siwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); u8 *bssid = ap_addr->sa_data; int err; /* call only for ibss! 
*/ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return -EINVAL; if (!rdev->ops->join_ibss) return -EOPNOTSUPP; if (ap_addr->sa_family != ARPHRD_ETHER) return -EINVAL; /* automatic mode */ if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid)) bssid = NULL; /* both automatic */ if (!bssid && !wdev->wext.ibss.bssid) return 0; /* fixed already - and no change */ if (wdev->wext.ibss.bssid && bssid && compare_ether_addr(bssid, wdev->wext.ibss.bssid) == 0) return 0; wdev_lock(wdev); err = 0; if (wdev->ssid_len) err = __cfg80211_leave_ibss(rdev, dev, true); wdev_unlock(wdev); if (err) return err; if (bssid) { memcpy(wdev->wext.bssid, bssid, ETH_ALEN); wdev->wext.ibss.bssid = wdev->wext.bssid; } else wdev->wext.ibss.bssid = NULL; mutex_lock(&rdev->devlist_mtx); wdev_lock(wdev); err = cfg80211_ibss_wext_join(rdev, wdev); wdev_unlock(wdev); mutex_unlock(&rdev->devlist_mtx); return err; } int cfg80211_ibss_wext_giwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; /* call only for ibss! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) return -EINVAL; ap_addr->sa_family = ARPHRD_ETHER; wdev_lock(wdev); if (wdev->current_bss) memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN); else if (wdev->wext.ibss.bssid) memcpy(ap_addr->sa_data, wdev->wext.ibss.bssid, ETH_ALEN); else memset(ap_addr->sa_data, 0, ETH_ALEN); wdev_unlock(wdev); return 0; } #endif
gpl-2.0
Validus-Lollipop/android_kernel_motorola_msm8960dt-common
arch/unicore32/kernel/traps.c
4684
7709
/* * linux/arch/unicore32/kernel/traps.c * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * 'traps.c' handles hardware exceptions after we have saved some state. * Mostly a debugging aid, but will probably kill the offending process. */ #include <linux/module.h> #include <linux/signal.h> #include <linux/spinlock.h> #include <linux/personality.h> #include <linux/kallsyms.h> #include <linux/kdebug.h> #include <linux/uaccess.h> #include <linux/delay.h> #include <linux/hardirq.h> #include <linux/init.h> #include <linux/atomic.h> #include <linux/unistd.h> #include <asm/cacheflush.h> #include <asm/traps.h> #include "setup.h" static void dump_mem(const char *, const char *, unsigned long, unsigned long); void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) { #ifdef CONFIG_KALLSYMS printk(KERN_DEFAULT "[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from); #else printk(KERN_DEFAULT "Function entered at [<%08lx>] from [<%08lx>]\n", where, from); #endif } /* * Stack pointers should always be within the kernels view of * physical memory. If it is not there, then we can't dump * out any information relating to the stack. */ static int verify_stack(unsigned long sp) { if (sp < PAGE_OFFSET || (sp > (unsigned long)high_memory && high_memory != NULL)) return -EFAULT; return 0; } /* * Dump out the contents of some memory nicely... */ static void dump_mem(const char *lvl, const char *str, unsigned long bottom, unsigned long top) { unsigned long first; mm_segment_t fs; int i; /* * We need to switch to kernel mode so that we can use __get_user * to safely read from kernel space. Note that we now dump the * code first, just in case the backtrace kills us. 
*/ fs = get_fs(); set_fs(KERNEL_DS); printk(KERN_DEFAULT "%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top); for (first = bottom & ~31; first < top; first += 32) { unsigned long p; char str[sizeof(" 12345678") * 8 + 1]; memset(str, ' ', sizeof(str)); str[sizeof(str) - 1] = '\0'; for (p = first, i = 0; i < 8 && p < top; i++, p += 4) { if (p >= bottom && p < top) { unsigned long val; if (__get_user(val, (unsigned long *)p) == 0) sprintf(str + i * 9, " %08lx", val); else sprintf(str + i * 9, " ????????"); } } printk(KERN_DEFAULT "%s%04lx:%s\n", lvl, first & 0xffff, str); } set_fs(fs); } static void dump_instr(const char *lvl, struct pt_regs *regs) { unsigned long addr = instruction_pointer(regs); const int width = 8; mm_segment_t fs; char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; int i; /* * We need to switch to kernel mode so that we can use __get_user * to safely read from kernel space. Note that we now dump the * code first, just in case the backtrace kills us. */ fs = get_fs(); set_fs(KERNEL_DS); for (i = -4; i < 1; i++) { unsigned int val, bad; bad = __get_user(val, &((u32 *)addr)[i]); if (!bad) p += sprintf(p, i == 0 ? 
"(%0*x) " : "%0*x ", width, val); else { p += sprintf(p, "bad PC value"); break; } } printk(KERN_DEFAULT "%sCode: %s\n", lvl, str); set_fs(fs); } static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) { unsigned int fp, mode; int ok = 1; printk(KERN_DEFAULT "Backtrace: "); if (!tsk) tsk = current; if (regs) { fp = regs->UCreg_fp; mode = processor_mode(regs); } else if (tsk != current) { fp = thread_saved_fp(tsk); mode = 0x10; } else { asm("mov %0, fp" : "=r" (fp) : : "cc"); mode = 0x10; } if (!fp) { printk("no frame pointer"); ok = 0; } else if (verify_stack(fp)) { printk("invalid frame pointer 0x%08x", fp); ok = 0; } else if (fp < (unsigned long)end_of_stack(tsk)) printk("frame pointer underflow"); printk("\n"); if (ok) c_backtrace(fp, mode); } void dump_stack(void) { dump_backtrace(NULL, NULL); } EXPORT_SYMBOL(dump_stack); void show_stack(struct task_struct *tsk, unsigned long *sp) { dump_backtrace(NULL, tsk); barrier(); } static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) { struct task_struct *tsk = thread->task; static int die_counter; int ret; printk(KERN_EMERG "Internal error: %s: %x [#%d]\n", str, err, ++die_counter); /* trap and error numbers are mostly meaningless on UniCore */ ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, \ SIGSEGV); if (ret == NOTIFY_STOP) return ret; print_modules(); __show_regs(regs); printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n", TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1); if (!user_mode(regs) || in_interrupt()) { dump_mem(KERN_EMERG, "Stack: ", regs->UCreg_sp, THREAD_SIZE + (unsigned long)task_stack_page(tsk)); dump_backtrace(regs, tsk); dump_instr(KERN_EMERG, regs); } return ret; } DEFINE_SPINLOCK(die_lock); /* * This function is protected against re-entrancy. 
*/ void die(const char *str, struct pt_regs *regs, int err) { struct thread_info *thread = current_thread_info(); int ret; oops_enter(); spin_lock_irq(&die_lock); console_verbose(); bust_spinlocks(1); ret = __die(str, err, thread, regs); bust_spinlocks(0); add_taint(TAINT_DIE); spin_unlock_irq(&die_lock); oops_exit(); if (in_interrupt()) panic("Fatal exception in interrupt"); if (panic_on_oops) panic("Fatal exception"); if (ret != NOTIFY_STOP) do_exit(SIGSEGV); } void uc32_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, unsigned long err, unsigned long trap) { if (user_mode(regs)) { current->thread.error_code = err; current->thread.trap_no = trap; force_sig_info(info->si_signo, info, current); } else die(str, regs, err); } /* * bad_mode handles the impossible case in the vectors. If you see one of * these, then it's extremely serious, and could mean you have buggy hardware. * It never returns, and never tries to sync. We hope that we can at least * dump out some state information... 
*/ asmlinkage void bad_mode(struct pt_regs *regs, unsigned int reason) { console_verbose(); printk(KERN_CRIT "Bad mode detected with reason 0x%x\n", reason); die("Oops - bad mode", regs, 0); local_irq_disable(); panic("bad mode"); } void __pte_error(const char *file, int line, unsigned long val) { printk(KERN_DEFAULT "%s:%d: bad pte %08lx.\n", file, line, val); } void __pmd_error(const char *file, int line, unsigned long val) { printk(KERN_DEFAULT "%s:%d: bad pmd %08lx.\n", file, line, val); } void __pgd_error(const char *file, int line, unsigned long val) { printk(KERN_DEFAULT "%s:%d: bad pgd %08lx.\n", file, line, val); } asmlinkage void __div0(void) { printk(KERN_DEFAULT "Division by zero in kernel.\n"); dump_stack(); } EXPORT_SYMBOL(__div0); void abort(void) { BUG(); /* if that doesn't kill us, halt */ panic("Oops failed to kill thread"); } EXPORT_SYMBOL(abort); void __init trap_init(void) { return; } void __init early_trap_init(void) { unsigned long vectors = VECTORS_BASE; /* * Copy the vectors, stubs (in entry-unicore.S) * into the vector page, mapped at 0xffff0000, and ensure these * are visible to the instruction stream. */ memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start); early_signal_init(); flush_icache_range(vectors, vectors + PAGE_SIZE); }
gpl-2.0
weevergh/android_kernel_lenovo_kiton
drivers/staging/vt6656/card.c
4940
30278
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * File: card.c * Purpose: Provide functions to setup NIC operation mode * Functions: * s_vSafeResetTx - Rest Tx * CARDvSetRSPINF - Set RSPINF * vUpdateIFS - Update slotTime,SIFS,DIFS, and EIFS * CARDvUpdateBasicTopRate - Update BasicTopRate * CARDbAddBasicRate - Add to BasicRateSet * CARDbSetBasicRate - Set Basic Tx Rate * CARDbIsOFDMinBasicRate - Check if any OFDM rate is in BasicRateSet * CARDvSetLoopbackMode - Set Loopback mode * CARDbSoftwareReset - Sortware reset NIC * CARDqGetTSFOffset - Caculate TSFOffset * CARDbGetCurrentTSF - Read Current NIC TSF counter * CARDqGetNextTBTT - Caculate Next Beacon TSF counter * CARDvSetFirstNextTBTT - Set NIC Beacon time * CARDvUpdateNextTBTT - Sync. NIC Beacon time * CARDbRadioPowerOff - Turn Off NIC Radio Power * CARDbRadioPowerOn - Turn On NIC Radio Power * CARDbSetWEPMode - Set NIC Wep mode * CARDbSetTxPower - Set NIC tx power * * Revision History: * 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec. * 08-26-2003 Kyle Hsu: Modify the defination type of dwIoBase. * 09-01-2003 Bryan YC Fan: Add vUpdateIFS(). 
* */ #include "tmacro.h" #include "card.h" #include "baseband.h" #include "mac.h" #include "desc.h" #include "rf.h" #include "power.h" #include "key.h" #include "rc4.h" #include "country.h" #include "datarate.h" #include "rndis.h" #include "control.h" /*--------------------- Static Definitions -------------------------*/ //static int msglevel =MSG_LEVEL_DEBUG; static int msglevel =MSG_LEVEL_INFO; /*--------------------- Static Definitions -------------------------*/ #define CB_TXPOWER_LEVEL 6 /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static Variables --------------------------*/ //const WORD cwRXBCNTSFOff[MAX_RATE] = //{17, 34, 96, 192, 34, 23, 17, 11, 8, 5, 4, 3}; const WORD cwRXBCNTSFOff[MAX_RATE] = {192, 96, 34, 17, 34, 23, 17, 11, 8, 5, 4, 3}; /*--------------------- Static Functions --------------------------*/ /*--------------------- Export Variables --------------------------*/ /*--------------------- Export Functions --------------------------*/ /* * Description: Set NIC media channel * * Parameters: * In: * pDevice - The adapter to be set * uConnectionChannel - Channel to be set * Out: * none */ void CARDbSetMediaChannel(void *pDeviceHandler, unsigned int uConnectionChannel) { PSDevice pDevice = (PSDevice) pDeviceHandler; if (pDevice->byBBType == BB_TYPE_11A) { // 15 ~ 38 if ((uConnectionChannel < (CB_MAX_CHANNEL_24G+1)) || (uConnectionChannel > CB_MAX_CHANNEL)) uConnectionChannel = (CB_MAX_CHANNEL_24G+1); } else { if ((uConnectionChannel > CB_MAX_CHANNEL_24G) || (uConnectionChannel == 0)) // 1 ~ 14 uConnectionChannel = 1; } // clear NAV MACvRegBitsOn(pDevice, MAC_REG_MACCR, MACCR_CLRNAV); // Set Channel[7] = 0 to tell H/W channel is changing now. 
MACvRegBitsOff(pDevice, MAC_REG_CHANNEL, 0x80); //if (pMgmt->uCurrChannel == uConnectionChannel) // return bResult; CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_SELECT_CHANNLE, (WORD) uConnectionChannel, 0, 0, NULL ); //{{ RobertYu: 20041202 //// TX_PE will reserve 3 us for MAX2829 A mode only, it is for better TX throughput if (pDevice->byBBType == BB_TYPE_11A) { pDevice->byCurPwr = 0xFF; RFbRawSetPower(pDevice, pDevice->abyOFDMAPwrTbl[uConnectionChannel-15], RATE_54M); } else if (pDevice->byBBType == BB_TYPE_11G) { pDevice->byCurPwr = 0xFF; RFbRawSetPower(pDevice, pDevice->abyOFDMPwrTbl[uConnectionChannel-1], RATE_54M); } else { pDevice->byCurPwr = 0xFF; RFbRawSetPower(pDevice, pDevice->abyCCKPwrTbl[uConnectionChannel-1], RATE_1M); } ControlvWriteByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_CHANNEL,(BYTE)(uConnectionChannel|0x80)); } /* * Description: Get CCK mode basic rate * * Parameters: * In: * pDevice - The adapter to be set * wRateIdx - Receiving data rate * Out: * none * * Return Value: response Control frame rate * */ static WORD swGetCCKControlRate(void *pDeviceHandler, WORD wRateIdx) { PSDevice pDevice = (PSDevice) pDeviceHandler; unsigned int ui = (unsigned int)wRateIdx; while (ui > RATE_1M) { if (pDevice->wBasicRate & ((WORD)1 << ui)) { return (WORD)ui; } ui --; } return (WORD)RATE_1M; } /* * Description: Get OFDM mode basic rate * * Parameters: * In: * pDevice - The adapter to be set * wRateIdx - Receiving data rate * Out: * none * * Return Value: response Control frame rate * */ static WORD swGetOFDMControlRate(void *pDeviceHandler, WORD wRateIdx) { PSDevice pDevice = (PSDevice) pDeviceHandler; unsigned int ui = (unsigned int)wRateIdx; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BASIC RATE: %X\n", pDevice->wBasicRate); if (!CARDbIsOFDMinBasicRate(pDevice)) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"swGetOFDMControlRate:(NO OFDM) %d\n", wRateIdx); if (wRateIdx > RATE_24M) wRateIdx = RATE_24M; return wRateIdx; } while (ui > RATE_11M) { if (pDevice->wBasicRate & ((WORD)1 
<< ui)) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"swGetOFDMControlRate : %d\n", ui); return (WORD)ui; } ui --; } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"swGetOFDMControlRate: 6M\n"); return (WORD)RATE_24M; } /* * Description: Caculate TxRate and RsvTime fields for RSPINF in OFDM mode. * * Parameters: * In: * wRate - Tx Rate * byPktType - Tx Packet type * Out: * pbyTxRate - pointer to RSPINF TxRate field * pbyRsvTime - pointer to RSPINF RsvTime field * * Return Value: none * */ void CARDvCaculateOFDMRParameter ( WORD wRate, BYTE byBBType, PBYTE pbyTxRate, PBYTE pbyRsvTime ) { switch (wRate) { case RATE_6M : if (byBBType == BB_TYPE_11A) {//5GHZ *pbyTxRate = 0x9B; *pbyRsvTime = 24; } else { *pbyTxRate = 0x8B; *pbyRsvTime = 30; } break; case RATE_9M : if (byBBType == BB_TYPE_11A) {//5GHZ *pbyTxRate = 0x9F; *pbyRsvTime = 16; } else { *pbyTxRate = 0x8F; *pbyRsvTime = 22; } break; case RATE_12M : if (byBBType == BB_TYPE_11A) {//5GHZ *pbyTxRate = 0x9A; *pbyRsvTime = 12; } else { *pbyTxRate = 0x8A; *pbyRsvTime = 18; } break; case RATE_18M : if (byBBType == BB_TYPE_11A) {//5GHZ *pbyTxRate = 0x9E; *pbyRsvTime = 8; } else { *pbyTxRate = 0x8E; *pbyRsvTime = 14; } break; case RATE_36M : if (byBBType == BB_TYPE_11A) {//5GHZ *pbyTxRate = 0x9D; *pbyRsvTime = 4; } else { *pbyTxRate = 0x8D; *pbyRsvTime = 10; } break; case RATE_48M : if (byBBType == BB_TYPE_11A) {//5GHZ *pbyTxRate = 0x98; *pbyRsvTime = 4; } else { *pbyTxRate = 0x88; *pbyRsvTime = 10; } break; case RATE_54M : if (byBBType == BB_TYPE_11A) {//5GHZ *pbyTxRate = 0x9C; *pbyRsvTime = 4; } else { *pbyTxRate = 0x8C; *pbyRsvTime = 10; } break; case RATE_24M : default : if (byBBType == BB_TYPE_11A) {//5GHZ *pbyTxRate = 0x99; *pbyRsvTime = 8; } else { *pbyTxRate = 0x89; *pbyRsvTime = 14; } break; } } /* * Description: Set RSPINF * * Parameters: * In: * pDevice - The adapter to be set * Out: * none * * Return Value: None. 
* */ void CARDvSetRSPINF(void *pDeviceHandler, BYTE byBBType) { PSDevice pDevice = (PSDevice) pDeviceHandler; BYTE abyServ[4] = {0,0,0,0}; // For CCK BYTE abySignal[4] = {0,0,0,0}; WORD awLen[4] = {0,0,0,0}; BYTE abyTxRate[9] = {0,0,0,0,0,0,0,0,0}; // For OFDM BYTE abyRsvTime[9] = {0,0,0,0,0,0,0,0,0}; BYTE abyData[34]; int i; //RSPINF_b_1 BBvCaculateParameter(pDevice, 14, swGetCCKControlRate(pDevice, RATE_1M), PK_TYPE_11B, &awLen[0], &abyServ[0], &abySignal[0] ); ///RSPINF_b_2 BBvCaculateParameter(pDevice, 14, swGetCCKControlRate(pDevice, RATE_2M), PK_TYPE_11B, &awLen[1], &abyServ[1], &abySignal[1] ); //RSPINF_b_5 BBvCaculateParameter(pDevice, 14, swGetCCKControlRate(pDevice, RATE_5M), PK_TYPE_11B, &awLen[2], &abyServ[2], &abySignal[2] ); //RSPINF_b_11 BBvCaculateParameter(pDevice, 14, swGetCCKControlRate(pDevice, RATE_11M), PK_TYPE_11B, &awLen[3], &abyServ[3], &abySignal[3] ); //RSPINF_a_6 CARDvCaculateOFDMRParameter (RATE_6M, byBBType, &abyTxRate[0], &abyRsvTime[0]); //RSPINF_a_9 CARDvCaculateOFDMRParameter (RATE_9M, byBBType, &abyTxRate[1], &abyRsvTime[1]); //RSPINF_a_12 CARDvCaculateOFDMRParameter (RATE_12M, byBBType, &abyTxRate[2], &abyRsvTime[2]); //RSPINF_a_18 CARDvCaculateOFDMRParameter (RATE_18M, byBBType, &abyTxRate[3], &abyRsvTime[3]); //RSPINF_a_24 CARDvCaculateOFDMRParameter (RATE_24M, byBBType, &abyTxRate[4], &abyRsvTime[4]); //RSPINF_a_36 CARDvCaculateOFDMRParameter (swGetOFDMControlRate(pDevice, RATE_36M), byBBType, &abyTxRate[5], &abyRsvTime[5]); //RSPINF_a_48 CARDvCaculateOFDMRParameter (swGetOFDMControlRate(pDevice, RATE_48M), byBBType, &abyTxRate[6], &abyRsvTime[6]); //RSPINF_a_54 CARDvCaculateOFDMRParameter (swGetOFDMControlRate(pDevice, RATE_54M), byBBType, &abyTxRate[7], &abyRsvTime[7]); //RSPINF_a_72 CARDvCaculateOFDMRParameter (swGetOFDMControlRate(pDevice, RATE_54M), byBBType, &abyTxRate[8], &abyRsvTime[8]); abyData[0] = (BYTE)(awLen[0]&0xFF); abyData[1] = (BYTE)(awLen[0]>>8); abyData[2] = abySignal[0]; abyData[3] = abyServ[0]; abyData[4] 
= (BYTE)(awLen[1]&0xFF); abyData[5] = (BYTE)(awLen[1]>>8); abyData[6] = abySignal[1]; abyData[7] = abyServ[1]; abyData[8] = (BYTE)(awLen[2]&0xFF); abyData[9] = (BYTE)(awLen[2]>>8); abyData[10] = abySignal[2]; abyData[11] = abyServ[2]; abyData[12] = (BYTE)(awLen[3]&0xFF); abyData[13] = (BYTE)(awLen[3]>>8); abyData[14] = abySignal[3]; abyData[15] = abyServ[3]; for (i = 0; i < 9; i++) { abyData[16+i*2] = abyTxRate[i]; abyData[16+i*2+1] = abyRsvTime[i]; } CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, MAC_REG_RSPINF_B_1, MESSAGE_REQUEST_MACREG, 34, &abyData[0]); } /* * Description: Update IFS * * Parameters: * In: * pDevice - The adapter to be set * Out: * none * * Return Value: None. * */ void vUpdateIFS(void *pDeviceHandler) { PSDevice pDevice = (PSDevice) pDeviceHandler; //Set SIFS, DIFS, EIFS, SlotTime, CwMin BYTE byMaxMin = 0; BYTE byData[4]; if (pDevice->byPacketType==PK_TYPE_11A) {//0000 0000 0000 0000,11a pDevice->uSlot = C_SLOT_SHORT; pDevice->uSIFS = C_SIFS_A; pDevice->uDIFS = C_SIFS_A + 2*C_SLOT_SHORT; pDevice->uCwMin = C_CWMIN_A; byMaxMin = 4; } else if (pDevice->byPacketType==PK_TYPE_11B) {//0000 0001 0000 0000,11b pDevice->uSlot = C_SLOT_LONG; pDevice->uSIFS = C_SIFS_BG; pDevice->uDIFS = C_SIFS_BG + 2*C_SLOT_LONG; pDevice->uCwMin = C_CWMIN_B; byMaxMin = 5; } else {// PK_TYPE_11GA & PK_TYPE_11GB BYTE byRate = 0; BOOL bOFDMRate = FALSE; unsigned int ii = 0; PWLAN_IE_SUPP_RATES pItemRates = NULL; pDevice->uSIFS = C_SIFS_BG; if (pDevice->bShortSlotTime) { pDevice->uSlot = C_SLOT_SHORT; } else { pDevice->uSlot = C_SLOT_LONG; } pDevice->uDIFS = C_SIFS_BG + 2*pDevice->uSlot; pItemRates = (PWLAN_IE_SUPP_RATES)pDevice->sMgmtObj.abyCurrSuppRates; for (ii = 0; ii < pItemRates->len; ii++) { byRate = (BYTE)(pItemRates->abyRates[ii]&0x7F); if (RATEwGetRateIdx(byRate) > RATE_11M) { bOFDMRate = TRUE; break; } } if (bOFDMRate == FALSE) { pItemRates = (PWLAN_IE_SUPP_RATES)pDevice->sMgmtObj.abyCurrExtSuppRates; for (ii = 0; ii < pItemRates->len; ii++) { byRate = 
(BYTE)(pItemRates->abyRates[ii]&0x7F); if (RATEwGetRateIdx(byRate) > RATE_11M) { bOFDMRate = TRUE; break; } } } if (bOFDMRate == TRUE) { pDevice->uCwMin = C_CWMIN_A; byMaxMin = 4; } else { pDevice->uCwMin = C_CWMIN_B; byMaxMin = 5; } } pDevice->uCwMax = C_CWMAX; pDevice->uEIFS = C_EIFS; byData[0] = (BYTE)pDevice->uSIFS; byData[1] = (BYTE)pDevice->uDIFS; byData[2] = (BYTE)pDevice->uEIFS; byData[3] = (BYTE)pDevice->uSlot; CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, MAC_REG_SIFS, MESSAGE_REQUEST_MACREG, 4, &byData[0]); byMaxMin |= 0xA0;//1010 1111,C_CWMAX = 1023 CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, MAC_REG_CWMAXMIN0, MESSAGE_REQUEST_MACREG, 1, &byMaxMin); } void CARDvUpdateBasicTopRate(void *pDeviceHandler) { PSDevice pDevice = (PSDevice) pDeviceHandler; BYTE byTopOFDM = RATE_24M, byTopCCK = RATE_1M; BYTE ii; //Determines the highest basic rate. for (ii = RATE_54M; ii >= RATE_6M; ii --) { if ( (pDevice->wBasicRate) & ((WORD)(1<<ii)) ) { byTopOFDM = ii; break; } } pDevice->byTopOFDMBasicRate = byTopOFDM; for (ii = RATE_11M;; ii --) { if ( (pDevice->wBasicRate) & ((WORD)(1<<ii)) ) { byTopCCK = ii; break; } if (ii == RATE_1M) break; } pDevice->byTopCCKBasicRate = byTopCCK; } /* * Description: Set NIC Tx Basic Rate * * Parameters: * In: * pDevice - The adapter to be set * wBasicRate - Basic Rate to be set * Out: * none * * Return Value: TRUE if succeeded; FALSE if failed. * */ void CARDbAddBasicRate(void *pDeviceHandler, WORD wRateIdx) { PSDevice pDevice = (PSDevice) pDeviceHandler; WORD wRate = (WORD)(1<<wRateIdx); pDevice->wBasicRate |= wRate; //Determines the highest basic rate. 
CARDvUpdateBasicTopRate(pDevice); } BOOL CARDbIsOFDMinBasicRate(void *pDeviceHandler) { PSDevice pDevice = (PSDevice) pDeviceHandler; int ii; for (ii = RATE_54M; ii >= RATE_6M; ii --) { if ((pDevice->wBasicRate) & ((WORD)(1<<ii))) return TRUE; } return FALSE; } BYTE CARDbyGetPktType(void *pDeviceHandler) { PSDevice pDevice = (PSDevice) pDeviceHandler; if (pDevice->byBBType == BB_TYPE_11A || pDevice->byBBType == BB_TYPE_11B) { return (BYTE)pDevice->byBBType; } else if (CARDbIsOFDMinBasicRate(pDevice)) { return PK_TYPE_11GA; } else { return PK_TYPE_11GB; } } /* * Description: Caculate TSF offset of two TSF input * Get TSF Offset from RxBCN's TSF and local TSF * * Parameters: * In: * pDevice - The adapter to be sync. * qwTSF1 - Rx BCN's TSF * qwTSF2 - Local TSF * Out: * none * * Return Value: TSF Offset value * */ QWORD CARDqGetTSFOffset (BYTE byRxRate, QWORD qwTSF1, QWORD qwTSF2) { QWORD qwTSFOffset; WORD wRxBcnTSFOffst = 0; HIDWORD(qwTSFOffset) = 0; LODWORD(qwTSFOffset) = 0; wRxBcnTSFOffst = cwRXBCNTSFOff[byRxRate%MAX_RATE]; (qwTSF2).u.dwLowDword += (DWORD)(wRxBcnTSFOffst); if ((qwTSF2).u.dwLowDword < (DWORD)(wRxBcnTSFOffst)) { (qwTSF2).u.dwHighDword++; } LODWORD(qwTSFOffset) = LODWORD(qwTSF1) - LODWORD(qwTSF2); if (LODWORD(qwTSF1) < LODWORD(qwTSF2)) { // if borrow needed HIDWORD(qwTSFOffset) = HIDWORD(qwTSF1) - HIDWORD(qwTSF2) - 1 ; } else { HIDWORD(qwTSFOffset) = HIDWORD(qwTSF1) - HIDWORD(qwTSF2); }; return (qwTSFOffset); } /* * Description: Sync. TSF counter to BSS * Get TSF offset and write to HW * * Parameters: * In: * pDevice - The adapter to be sync. 
* qwBSSTimestamp - Rx BCN's TSF * qwLocalTSF - Local TSF * Out: * none * * Return Value: none * */ void CARDvAdjustTSF(void *pDeviceHandler, BYTE byRxRate, QWORD qwBSSTimestamp, QWORD qwLocalTSF) { PSDevice pDevice = (PSDevice) pDeviceHandler; QWORD qwTSFOffset; DWORD dwTSFOffset1,dwTSFOffset2; BYTE pbyData[8]; HIDWORD(qwTSFOffset) = 0; LODWORD(qwTSFOffset) = 0; qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp, qwLocalTSF); // adjust TSF // HW's TSF add TSF Offset reg dwTSFOffset1 = LODWORD(qwTSFOffset); dwTSFOffset2 = HIDWORD(qwTSFOffset); pbyData[0] = (BYTE)dwTSFOffset1; pbyData[1] = (BYTE)(dwTSFOffset1>>8); pbyData[2] = (BYTE)(dwTSFOffset1>>16); pbyData[3] = (BYTE)(dwTSFOffset1>>24); pbyData[4] = (BYTE)dwTSFOffset2; pbyData[5] = (BYTE)(dwTSFOffset2>>8); pbyData[6] = (BYTE)(dwTSFOffset2>>16); pbyData[7] = (BYTE)(dwTSFOffset2>>24); CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_SET_TSFTBTT, MESSAGE_REQUEST_TSF, 0, 8, pbyData ); } /* * Description: Read NIC TSF counter * Get local TSF counter * * Parameters: * In: * pDevice - The adapter to be read * Out: * qwCurrTSF - Current TSF counter * * Return Value: TRUE if success; otherwise FALSE * */ BOOL CARDbGetCurrentTSF(void *pDeviceHandler, PQWORD pqwCurrTSF) { PSDevice pDevice = (PSDevice) pDeviceHandler; LODWORD(*pqwCurrTSF) = LODWORD(pDevice->qwCurrTSF); HIDWORD(*pqwCurrTSF) = HIDWORD(pDevice->qwCurrTSF); return(TRUE); } /* * Description: Clear NIC TSF counter * Clear local TSF counter * * Parameters: * In: * pDevice - The adapter to be read * * Return Value: TRUE if success; otherwise FALSE * */ BOOL CARDbClearCurrentTSF(void *pDeviceHandler) { PSDevice pDevice = (PSDevice) pDeviceHandler; MACvRegBitsOn(pDevice,MAC_REG_TFTCTL,TFTCTL_TSFCNTRST); LODWORD(pDevice->qwCurrTSF) = 0; HIDWORD(pDevice->qwCurrTSF) = 0; return(TRUE); } /* * Description: Read NIC TSF counter * Get NEXTTBTT from adjusted TSF and Beacon Interval * * Parameters: * In: * qwTSF - Current TSF counter * wbeaconInterval - Beacon Interval * Out: * 
qwCurrTSF - Current TSF counter * * Return Value: TSF value of next Beacon * */ QWORD CARDqGetNextTBTT (QWORD qwTSF, WORD wBeaconInterval) { unsigned int uLowNextTBTT; unsigned int uHighRemain, uLowRemain; unsigned int uBeaconInterval; uBeaconInterval = wBeaconInterval * 1024; // Next TBTT = ((local_current_TSF / beacon_interval) + 1 ) * beacon_interval uLowNextTBTT = (LODWORD(qwTSF) >> 10) << 10; uLowRemain = (uLowNextTBTT) % uBeaconInterval; uHighRemain = ((0x80000000 % uBeaconInterval)* 2 * HIDWORD(qwTSF)) % uBeaconInterval; uLowRemain = (uHighRemain + uLowRemain) % uBeaconInterval; uLowRemain = uBeaconInterval - uLowRemain; // check if carry when add one beacon interval if ((~uLowNextTBTT) < uLowRemain) HIDWORD(qwTSF) ++ ; LODWORD(qwTSF) = uLowNextTBTT + uLowRemain; return (qwTSF); } /* * Description: Set NIC TSF counter for first Beacon time * Get NEXTTBTT from adjusted TSF and Beacon Interval * * Parameters: * In: * dwIoBase - IO Base * wBeaconInterval - Beacon Interval * Out: * none * * Return Value: none * */ void CARDvSetFirstNextTBTT(void *pDeviceHandler, WORD wBeaconInterval) { PSDevice pDevice = (PSDevice) pDeviceHandler; QWORD qwNextTBTT; DWORD dwLoTBTT,dwHiTBTT; BYTE pbyData[8]; HIDWORD(qwNextTBTT) = 0; LODWORD(qwNextTBTT) = 0; CARDbClearCurrentTSF(pDevice); //CARDbGetCurrentTSF(pDevice, &qwNextTBTT); //Get Local TSF counter qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval); // Set NextTBTT dwLoTBTT = LODWORD(qwNextTBTT); dwHiTBTT = HIDWORD(qwNextTBTT); pbyData[0] = (BYTE)dwLoTBTT; pbyData[1] = (BYTE)(dwLoTBTT>>8); pbyData[2] = (BYTE)(dwLoTBTT>>16); pbyData[3] = (BYTE)(dwLoTBTT>>24); pbyData[4] = (BYTE)dwHiTBTT; pbyData[5] = (BYTE)(dwHiTBTT>>8); pbyData[6] = (BYTE)(dwHiTBTT>>16); pbyData[7] = (BYTE)(dwHiTBTT>>24); CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_SET_TSFTBTT, MESSAGE_REQUEST_TBTT, 0, 8, pbyData ); return; } /* * Description: Sync NIC TSF counter for Beacon time * Get NEXTTBTT and write to HW * * Parameters: * In: * pDevice - The 
adapter to be set * qwTSF - Current TSF counter * wBeaconInterval - Beacon Interval * Out: * none * * Return Value: none * */ void CARDvUpdateNextTBTT(void *pDeviceHandler, QWORD qwTSF, WORD wBeaconInterval) { PSDevice pDevice = (PSDevice) pDeviceHandler; DWORD dwLoTBTT,dwHiTBTT; BYTE pbyData[8]; qwTSF = CARDqGetNextTBTT(qwTSF, wBeaconInterval); // Set NextTBTT dwLoTBTT = LODWORD(qwTSF); dwHiTBTT = HIDWORD(qwTSF); pbyData[0] = (BYTE)dwLoTBTT; pbyData[1] = (BYTE)(dwLoTBTT>>8); pbyData[2] = (BYTE)(dwLoTBTT>>16); pbyData[3] = (BYTE)(dwLoTBTT>>24); pbyData[4] = (BYTE)dwHiTBTT; pbyData[5] = (BYTE)(dwHiTBTT>>8); pbyData[6] = (BYTE)(dwHiTBTT>>16); pbyData[7] = (BYTE)(dwHiTBTT>>24); CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_SET_TSFTBTT, MESSAGE_REQUEST_TBTT, 0, 8, pbyData ); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Card:Update Next TBTT[%8xh:%8xh] \n",(int)HIDWORD(qwTSF), (int)LODWORD(qwTSF)); return; } /* * Description: Turn off Radio power * * Parameters: * In: * pDevice - The adapter to be turned off * Out: * none * * Return Value: TRUE if success; otherwise FALSE * */ BOOL CARDbRadioPowerOff(void *pDeviceHandler) { PSDevice pDevice = (PSDevice) pDeviceHandler; BOOL bResult = TRUE; //if (pDevice->bRadioOff == TRUE) // return TRUE; pDevice->bRadioOff = TRUE; switch (pDevice->byRFType) { case RF_AL2230: case RF_AL2230S: case RF_AIROHA7230: case RF_VT3226: //RobertYu:20051111 case RF_VT3226D0: case RF_VT3342A0: //RobertYu:20060609 MACvRegBitsOff(pDevice, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPE3)); break; } MACvRegBitsOff(pDevice, MAC_REG_HOSTCR, HOSTCR_RXON); BBvSetDeepSleep(pDevice); return bResult; } /* * Description: Turn on Radio power * * Parameters: * In: * pDevice - The adapter to be turned on * Out: * none * * Return Value: TRUE if success; otherwise FALSE * */ BOOL CARDbRadioPowerOn(void *pDeviceHandler) { PSDevice pDevice = (PSDevice) pDeviceHandler; BOOL bResult = TRUE; if ((pDevice->bHWRadioOff == TRUE) || (pDevice->bRadioControlOff == TRUE)) { return 
FALSE; } //if (pDevice->bRadioOff == FALSE) // return TRUE; pDevice->bRadioOff = FALSE; BBvExitDeepSleep(pDevice); MACvRegBitsOn(pDevice, MAC_REG_HOSTCR, HOSTCR_RXON); switch (pDevice->byRFType) { case RF_AL2230: case RF_AL2230S: case RF_AIROHA7230: case RF_VT3226: //RobertYu:20051111 case RF_VT3226D0: case RF_VT3342A0: //RobertYu:20060609 MACvRegBitsOn(pDevice, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPE3)); break; } return bResult; } void CARDvSetBSSMode(void *pDeviceHandler) { PSDevice pDevice = (PSDevice) pDeviceHandler; // Set BB and packet type at the same time.//{{RobertYu:20050222, AL7230 have two TX PA output, only connet to b/g now // so in 11a mode need to set the MAC Reg0x4C to 11b/g mode to turn on PA if( (pDevice->byRFType == RF_AIROHA7230 ) && (pDevice->byBBType == BB_TYPE_11A) ) { MACvSetBBType(pDevice, BB_TYPE_11G); } else { MACvSetBBType(pDevice, pDevice->byBBType); } pDevice->byPacketType = CARDbyGetPktType(pDevice); if (pDevice->byBBType == BB_TYPE_11A) { ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x88, 0x03); } else if (pDevice->byBBType == BB_TYPE_11B) { ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x88, 0x02); } else if (pDevice->byBBType == BB_TYPE_11G) { ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x88, 0x08); } vUpdateIFS(pDevice); CARDvSetRSPINF(pDevice, (BYTE)pDevice->byBBType); if ( pDevice->byBBType == BB_TYPE_11A ) { //request by Jack 2005-04-26 if (pDevice->byRFType == RF_AIROHA7230) { pDevice->abyBBVGA[0] = 0x20; ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xE7, pDevice->abyBBVGA[0]); } pDevice->abyBBVGA[2] = 0x10; pDevice->abyBBVGA[3] = 0x10; } else { //request by Jack 2005-04-26 if (pDevice->byRFType == RF_AIROHA7230) { pDevice->abyBBVGA[0] = 0x1C; ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xE7, pDevice->abyBBVGA[0]); } pDevice->abyBBVGA[2] = 0x0; pDevice->abyBBVGA[3] = 0x0; } } /* * * Description: * Do Channel Switch defined in 802.11h * * Parameters: * In: * hDeviceContext - device 
structure point * Out: * none * * Return Value: none. * -*/ BOOL CARDbChannelSwitch ( void *pDeviceHandler, BYTE byMode, BYTE byNewChannel, BYTE byCount ) { PSDevice pDevice = (PSDevice) pDeviceHandler; BOOL bResult = TRUE; if (byCount == 0) { pDevice->sMgmtObj.uCurrChannel = byNewChannel; CARDbSetMediaChannel(pDevice, byNewChannel); return bResult; } pDevice->byChannelSwitchCount = byCount; pDevice->byNewChannel = byNewChannel; pDevice->bChannelSwitch = TRUE; if (byMode == 1) { //bResult=CARDbStopTxPacket(pDevice, PKT_TYPE_802_11_ALL); pDevice->bStopDataPkt = TRUE; } return bResult; }
gpl-2.0
nychitman1/android_kernel_lge_hammerhead
drivers/hwmon/sht21.c
4940
7557
/* Sensirion SHT21 humidity and temperature sensor driver * * Copyright (C) 2010 Urs Fleisch <urs.fleisch@sensirion.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA * * Data sheet available (5/2010) at * http://www.sensirion.com/en/pdf/product_information/Datasheet-humidity-sensor-SHT21.pdf */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/device.h> /* I2C command bytes */ #define SHT21_TRIG_T_MEASUREMENT_HM 0xe3 #define SHT21_TRIG_RH_MEASUREMENT_HM 0xe5 /** * struct sht21 - SHT21 device specific data * @hwmon_dev: device registered with hwmon * @lock: mutex to protect measurement values * @valid: only 0 before first measurement is taken * @last_update: time of last update (jiffies) * @temperature: cached temperature measurement value * @humidity: cached humidity measurement value */ struct sht21 { struct device *hwmon_dev; struct mutex lock; char valid; unsigned long last_update; int temperature; int humidity; }; /** * sht21_temp_ticks_to_millicelsius() - convert raw temperature ticks to * milli celsius * @ticks: temperature ticks value received from sensor */ static inline int sht21_temp_ticks_to_millicelsius(int ticks) { ticks &= ~0x0003; /* clear 
status bits */ /* * Formula T = -46.85 + 175.72 * ST / 2^16 from data sheet 6.2, * optimized for integer fixed point (3 digits) arithmetic */ return ((21965 * ticks) >> 13) - 46850; } /** * sht21_rh_ticks_to_per_cent_mille() - convert raw humidity ticks to * one-thousandths of a percent relative humidity * @ticks: humidity ticks value received from sensor */ static inline int sht21_rh_ticks_to_per_cent_mille(int ticks) { ticks &= ~0x0003; /* clear status bits */ /* * Formula RH = -6 + 125 * SRH / 2^16 from data sheet 6.1, * optimized for integer fixed point (3 digits) arithmetic */ return ((15625 * ticks) >> 13) - 6000; } /** * sht21_update_measurements() - get updated measurements from device * @client: I2C client device * * Returns 0 on success, else negative errno. */ static int sht21_update_measurements(struct i2c_client *client) { int ret = 0; struct sht21 *sht21 = i2c_get_clientdata(client); mutex_lock(&sht21->lock); /* * Data sheet 2.4: * SHT2x should not be active for more than 10% of the time - e.g. * maximum two measurements per second at 12bit accuracy shall be made. */ if (time_after(jiffies, sht21->last_update + HZ / 2) || !sht21->valid) { ret = i2c_smbus_read_word_swapped(client, SHT21_TRIG_T_MEASUREMENT_HM); if (ret < 0) goto out; sht21->temperature = sht21_temp_ticks_to_millicelsius(ret); ret = i2c_smbus_read_word_swapped(client, SHT21_TRIG_RH_MEASUREMENT_HM); if (ret < 0) goto out; sht21->humidity = sht21_rh_ticks_to_per_cent_mille(ret); sht21->last_update = jiffies; sht21->valid = 1; } out: mutex_unlock(&sht21->lock); return ret >= 0 ? 0 : ret; } /** * sht21_show_temperature() - show temperature measurement value in sysfs * @dev: device * @attr: device attribute * @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to * * Will be called on read access to temp1_input sysfs attribute. * Returns number of bytes written into buffer, negative errno on error. 
*/ static ssize_t sht21_show_temperature(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct sht21 *sht21 = i2c_get_clientdata(client); int ret = sht21_update_measurements(client); if (ret < 0) return ret; return sprintf(buf, "%d\n", sht21->temperature); } /** * sht21_show_humidity() - show humidity measurement value in sysfs * @dev: device * @attr: device attribute * @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to * * Will be called on read access to humidity1_input sysfs attribute. * Returns number of bytes written into buffer, negative errno on error. */ static ssize_t sht21_show_humidity(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct sht21 *sht21 = i2c_get_clientdata(client); int ret = sht21_update_measurements(client); if (ret < 0) return ret; return sprintf(buf, "%d\n", sht21->humidity); } /* sysfs attributes */ static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, sht21_show_temperature, NULL, 0); static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO, sht21_show_humidity, NULL, 0); static struct attribute *sht21_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_humidity1_input.dev_attr.attr, NULL }; static const struct attribute_group sht21_attr_group = { .attrs = sht21_attributes, }; /** * sht21_probe() - probe device * @client: I2C client device * @id: device ID * * Called by the I2C core when an entry in the ID table matches a * device's name. * Returns 0 on success. 
*/ static int __devinit sht21_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct sht21 *sht21; int err; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) { dev_err(&client->dev, "adapter does not support SMBus word transactions\n"); return -ENODEV; } sht21 = kzalloc(sizeof(*sht21), GFP_KERNEL); if (!sht21) { dev_dbg(&client->dev, "kzalloc failed\n"); return -ENOMEM; } i2c_set_clientdata(client, sht21); mutex_init(&sht21->lock); err = sysfs_create_group(&client->dev.kobj, &sht21_attr_group); if (err) { dev_dbg(&client->dev, "could not create sysfs files\n"); goto fail_free; } sht21->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(sht21->hwmon_dev)) { dev_dbg(&client->dev, "unable to register hwmon device\n"); err = PTR_ERR(sht21->hwmon_dev); goto fail_remove_sysfs; } dev_info(&client->dev, "initialized\n"); return 0; fail_remove_sysfs: sysfs_remove_group(&client->dev.kobj, &sht21_attr_group); fail_free: kfree(sht21); return err; } /** * sht21_remove() - remove device * @client: I2C client device */ static int __devexit sht21_remove(struct i2c_client *client) { struct sht21 *sht21 = i2c_get_clientdata(client); hwmon_device_unregister(sht21->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &sht21_attr_group); kfree(sht21); return 0; } /* Device ID table */ static const struct i2c_device_id sht21_id[] = { { "sht21", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, sht21_id); static struct i2c_driver sht21_driver = { .driver.name = "sht21", .probe = sht21_probe, .remove = __devexit_p(sht21_remove), .id_table = sht21_id, }; module_i2c_driver(sht21_driver); MODULE_AUTHOR("Urs Fleisch <urs.fleisch@sensirion.com>"); MODULE_DESCRIPTION("Sensirion SHT21 humidity and temperature sensor driver"); MODULE_LICENSE("GPL");
gpl-2.0
BENETNATH/kernel_hp_maya
drivers/video/pxafb.c
4940
62683
/* * linux/drivers/video/pxafb.c * * Copyright (C) 1999 Eric A. Thomas. * Copyright (C) 2004 Jean-Frederic Clere. * Copyright (C) 2004 Ian Campbell. * Copyright (C) 2004 Jeff Lackey. * Based on sa1100fb.c Copyright (C) 1999 Eric A. Thomas * which in turn is * Based on acornfb.c Copyright (C) Russell King. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * Intel PXA250/210 LCD Controller Frame Buffer Driver * * Please direct your questions and comments on this driver to the following * email address: * * linux-arm-kernel@lists.arm.linux.org.uk * * Add support for overlay1 and overlay2 based on pxafb_overlay.c: * * Copyright (C) 2004, Intel Corporation * * 2003/08/27: <yu.tang@intel.com> * 2004/03/10: <stanley.cai@intel.com> * 2004/10/28: <yan.yin@intel.com> * * Copyright (C) 2006-2008 Marvell International Ltd. * All Rights Reserved */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/fb.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/cpufreq.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/completion.h> #include <linux/mutex.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/console.h> #include <mach/hardware.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/div64.h> #include <mach/bitfield.h> #include <mach/pxafb.h> /* * Complain if VAR is out of range. 
*/ #define DEBUG_VAR 1 #include "pxafb.h" /* Bits which should not be set in machine configuration structures */ #define LCCR0_INVALID_CONFIG_MASK (LCCR0_OUM | LCCR0_BM | LCCR0_QDM |\ LCCR0_DIS | LCCR0_EFM | LCCR0_IUM |\ LCCR0_SFM | LCCR0_LDM | LCCR0_ENB) #define LCCR3_INVALID_CONFIG_MASK (LCCR3_HSP | LCCR3_VSP |\ LCCR3_PCD | LCCR3_BPP(0xf)) static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *); static void set_ctrlr_state(struct pxafb_info *fbi, u_int state); static void setup_base_frame(struct pxafb_info *fbi, struct fb_var_screeninfo *var, int branch); static int setup_frame_dma(struct pxafb_info *fbi, int dma, int pal, unsigned long offset, size_t size); static unsigned long video_mem_size = 0; static inline unsigned long lcd_readl(struct pxafb_info *fbi, unsigned int off) { return __raw_readl(fbi->mmio_base + off); } static inline void lcd_writel(struct pxafb_info *fbi, unsigned int off, unsigned long val) { __raw_writel(val, fbi->mmio_base + off); } static inline void pxafb_schedule_work(struct pxafb_info *fbi, u_int state) { unsigned long flags; local_irq_save(flags); /* * We need to handle two requests being made at the same time. * There are two important cases: * 1. When we are changing VT (C_REENABLE) while unblanking * (C_ENABLE) We must perform the unblanking, which will * do our REENABLE for us. * 2. When we are blanking, but immediately unblank before * we have blanked. We do the "REENABLE" thing here as * well, just to be sure. 
*/ if (fbi->task_state == C_ENABLE && state == C_REENABLE) state = (u_int) -1; if (fbi->task_state == C_DISABLE && state == C_ENABLE) state = C_REENABLE; if (state != (u_int)-1) { fbi->task_state = state; schedule_work(&fbi->task); } local_irq_restore(flags); } static inline u_int chan_to_field(u_int chan, struct fb_bitfield *bf) { chan &= 0xffff; chan >>= 16 - bf->length; return chan << bf->offset; } static int pxafb_setpalettereg(u_int regno, u_int red, u_int green, u_int blue, u_int trans, struct fb_info *info) { struct pxafb_info *fbi = (struct pxafb_info *)info; u_int val; if (regno >= fbi->palette_size) return 1; if (fbi->fb.var.grayscale) { fbi->palette_cpu[regno] = ((blue >> 8) & 0x00ff); return 0; } switch (fbi->lccr4 & LCCR4_PAL_FOR_MASK) { case LCCR4_PAL_FOR_0: val = ((red >> 0) & 0xf800); val |= ((green >> 5) & 0x07e0); val |= ((blue >> 11) & 0x001f); fbi->palette_cpu[regno] = val; break; case LCCR4_PAL_FOR_1: val = ((red << 8) & 0x00f80000); val |= ((green >> 0) & 0x0000fc00); val |= ((blue >> 8) & 0x000000f8); ((u32 *)(fbi->palette_cpu))[regno] = val; break; case LCCR4_PAL_FOR_2: val = ((red << 8) & 0x00fc0000); val |= ((green >> 0) & 0x0000fc00); val |= ((blue >> 8) & 0x000000fc); ((u32 *)(fbi->palette_cpu))[regno] = val; break; case LCCR4_PAL_FOR_3: val = ((red << 8) & 0x00ff0000); val |= ((green >> 0) & 0x0000ff00); val |= ((blue >> 8) & 0x000000ff); ((u32 *)(fbi->palette_cpu))[regno] = val; break; } return 0; } static int pxafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int trans, struct fb_info *info) { struct pxafb_info *fbi = (struct pxafb_info *)info; unsigned int val; int ret = 1; /* * If inverse mode was selected, invert all the colours * rather than the register number. The register number * is what you poke into the framebuffer to produce the * colour you requested. 
*/ if (fbi->cmap_inverse) { red = 0xffff - red; green = 0xffff - green; blue = 0xffff - blue; } /* * If greyscale is true, then we convert the RGB value * to greyscale no matter what visual we are using. */ if (fbi->fb.var.grayscale) red = green = blue = (19595 * red + 38470 * green + 7471 * blue) >> 16; switch (fbi->fb.fix.visual) { case FB_VISUAL_TRUECOLOR: /* * 16-bit True Colour. We encode the RGB value * according to the RGB bitfield information. */ if (regno < 16) { u32 *pal = fbi->fb.pseudo_palette; val = chan_to_field(red, &fbi->fb.var.red); val |= chan_to_field(green, &fbi->fb.var.green); val |= chan_to_field(blue, &fbi->fb.var.blue); pal[regno] = val; ret = 0; } break; case FB_VISUAL_STATIC_PSEUDOCOLOR: case FB_VISUAL_PSEUDOCOLOR: ret = pxafb_setpalettereg(regno, red, green, blue, trans, info); break; } return ret; } /* calculate pixel depth, transparency bit included, >=16bpp formats _only_ */ static inline int var_to_depth(struct fb_var_screeninfo *var) { return var->red.length + var->green.length + var->blue.length + var->transp.length; } /* calculate 4-bit BPP value for LCCR3 and OVLxC1 */ static int pxafb_var_to_bpp(struct fb_var_screeninfo *var) { int bpp = -EINVAL; switch (var->bits_per_pixel) { case 1: bpp = 0; break; case 2: bpp = 1; break; case 4: bpp = 2; break; case 8: bpp = 3; break; case 16: bpp = 4; break; case 24: switch (var_to_depth(var)) { case 18: bpp = 6; break; /* 18-bits/pixel packed */ case 19: bpp = 8; break; /* 19-bits/pixel packed */ case 24: bpp = 9; break; } break; case 32: switch (var_to_depth(var)) { case 18: bpp = 5; break; /* 18-bits/pixel unpacked */ case 19: bpp = 7; break; /* 19-bits/pixel unpacked */ case 25: bpp = 10; break; } break; } return bpp; } /* * pxafb_var_to_lccr3(): * Convert a bits per pixel value to the correct bit pattern for LCCR3 * * NOTE: for PXA27x with overlays support, the LCCR3_PDFOR_x bits have an * implication of the acutal use of transparency bit, which we handle it * here separatedly. 
See PXA27x Developer's Manual, Section <<7.4.6 Pixel * Formats>> for the valid combination of PDFOR, PAL_FOR for various BPP. * * Transparency for palette pixel formats is not supported at the moment. */ static uint32_t pxafb_var_to_lccr3(struct fb_var_screeninfo *var) { int bpp = pxafb_var_to_bpp(var); uint32_t lccr3; if (bpp < 0) return 0; lccr3 = LCCR3_BPP(bpp); switch (var_to_depth(var)) { case 16: lccr3 |= var->transp.length ? LCCR3_PDFOR_3 : 0; break; case 18: lccr3 |= LCCR3_PDFOR_3; break; case 24: lccr3 |= var->transp.length ? LCCR3_PDFOR_2 : LCCR3_PDFOR_3; break; case 19: case 25: lccr3 |= LCCR3_PDFOR_0; break; } return lccr3; } #define SET_PIXFMT(v, r, g, b, t) \ ({ \ (v)->transp.offset = (t) ? (r) + (g) + (b) : 0; \ (v)->transp.length = (t) ? (t) : 0; \ (v)->blue.length = (b); (v)->blue.offset = 0; \ (v)->green.length = (g); (v)->green.offset = (b); \ (v)->red.length = (r); (v)->red.offset = (b) + (g); \ }) /* set the RGBT bitfields of fb_var_screeninf according to * var->bits_per_pixel and given depth */ static void pxafb_set_pixfmt(struct fb_var_screeninfo *var, int depth) { if (depth == 0) depth = var->bits_per_pixel; if (var->bits_per_pixel < 16) { /* indexed pixel formats */ var->red.offset = 0; var->red.length = 8; var->green.offset = 0; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 8; } switch (depth) { case 16: var->transp.length ? SET_PIXFMT(var, 5, 5, 5, 1) : /* RGBT555 */ SET_PIXFMT(var, 5, 6, 5, 0); break; /* RGB565 */ case 18: SET_PIXFMT(var, 6, 6, 6, 0); break; /* RGB666 */ case 19: SET_PIXFMT(var, 6, 6, 6, 1); break; /* RGBT666 */ case 24: var->transp.length ? SET_PIXFMT(var, 8, 8, 7, 1) : /* RGBT887 */ SET_PIXFMT(var, 8, 8, 8, 0); break; /* RGB888 */ case 25: SET_PIXFMT(var, 8, 8, 8, 1); break; /* RGBT888 */ } } #ifdef CONFIG_CPU_FREQ /* * pxafb_display_dma_period() * Calculate the minimum period (in picoseconds) between two DMA * requests for the LCD controller. 
If we hit this, it means we're * doing nothing but LCD DMA. */ static unsigned int pxafb_display_dma_period(struct fb_var_screeninfo *var) { /* * Period = pixclock * bits_per_byte * bytes_per_transfer * / memory_bits_per_pixel; */ return var->pixclock * 8 * 16 / var->bits_per_pixel; } #endif /* * Select the smallest mode that allows the desired resolution to be * displayed. If desired parameters can be rounded up. */ static struct pxafb_mode_info *pxafb_getmode(struct pxafb_mach_info *mach, struct fb_var_screeninfo *var) { struct pxafb_mode_info *mode = NULL; struct pxafb_mode_info *modelist = mach->modes; unsigned int best_x = 0xffffffff, best_y = 0xffffffff; unsigned int i; for (i = 0; i < mach->num_modes; i++) { if (modelist[i].xres >= var->xres && modelist[i].yres >= var->yres && modelist[i].xres < best_x && modelist[i].yres < best_y && modelist[i].bpp >= var->bits_per_pixel) { best_x = modelist[i].xres; best_y = modelist[i].yres; mode = &modelist[i]; } } return mode; } static void pxafb_setmode(struct fb_var_screeninfo *var, struct pxafb_mode_info *mode) { var->xres = mode->xres; var->yres = mode->yres; var->bits_per_pixel = mode->bpp; var->pixclock = mode->pixclock; var->hsync_len = mode->hsync_len; var->left_margin = mode->left_margin; var->right_margin = mode->right_margin; var->vsync_len = mode->vsync_len; var->upper_margin = mode->upper_margin; var->lower_margin = mode->lower_margin; var->sync = mode->sync; var->grayscale = mode->cmap_greyscale; var->transp.length = mode->transparency; /* set the initial RGBA bitfields */ pxafb_set_pixfmt(var, mode->depth); } static int pxafb_adjust_timing(struct pxafb_info *fbi, struct fb_var_screeninfo *var) { int line_length; var->xres = max_t(int, var->xres, MIN_XRES); var->yres = max_t(int, var->yres, MIN_YRES); if (!(fbi->lccr0 & LCCR0_LCDT)) { clamp_val(var->hsync_len, 1, 64); clamp_val(var->vsync_len, 1, 64); clamp_val(var->left_margin, 1, 255); clamp_val(var->right_margin, 1, 255); clamp_val(var->upper_margin, 1, 
255); clamp_val(var->lower_margin, 1, 255); } /* make sure each line is aligned on word boundary */ line_length = var->xres * var->bits_per_pixel / 8; line_length = ALIGN(line_length, 4); var->xres = line_length * 8 / var->bits_per_pixel; /* we don't support xpan, force xres_virtual to be equal to xres */ var->xres_virtual = var->xres; if (var->accel_flags & FB_ACCELF_TEXT) var->yres_virtual = fbi->fb.fix.smem_len / line_length; else var->yres_virtual = max(var->yres_virtual, var->yres); /* check for limits */ if (var->xres > MAX_XRES || var->yres > MAX_YRES) return -EINVAL; if (var->yres > var->yres_virtual) return -EINVAL; return 0; } /* * pxafb_check_var(): * Get the video params out of 'var'. If a value doesn't fit, round it up, * if it's too big, return -EINVAL. * * Round up in the following order: bits_per_pixel, xres, * yres, xres_virtual, yres_virtual, xoffset, yoffset, grayscale, * bitfields, horizontal timing, vertical timing. */ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct pxafb_info *fbi = (struct pxafb_info *)info; struct pxafb_mach_info *inf = fbi->dev->platform_data; int err; if (inf->fixed_modes) { struct pxafb_mode_info *mode; mode = pxafb_getmode(inf, var); if (!mode) return -EINVAL; pxafb_setmode(var, mode); } /* do a test conversion to BPP fields to check the color formats */ err = pxafb_var_to_bpp(var); if (err < 0) return err; pxafb_set_pixfmt(var, var_to_depth(var)); err = pxafb_adjust_timing(fbi, var); if (err) return err; #ifdef CONFIG_CPU_FREQ pr_debug("pxafb: dma period = %d ps\n", pxafb_display_dma_period(var)); #endif return 0; } /* * pxafb_set_par(): * Set the user defined part of the display for the specified console */ static int pxafb_set_par(struct fb_info *info) { struct pxafb_info *fbi = (struct pxafb_info *)info; struct fb_var_screeninfo *var = &info->var; if (var->bits_per_pixel >= 16) fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR; else if (!fbi->cmap_static) fbi->fb.fix.visual = 
FB_VISUAL_PSEUDOCOLOR; else { /* * Some people have weird ideas about wanting static * pseudocolor maps. I suspect their user space * applications are broken. */ fbi->fb.fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR; } fbi->fb.fix.line_length = var->xres_virtual * var->bits_per_pixel / 8; if (var->bits_per_pixel >= 16) fbi->palette_size = 0; else fbi->palette_size = var->bits_per_pixel == 1 ? 4 : 1 << var->bits_per_pixel; fbi->palette_cpu = (u16 *)&fbi->dma_buff->palette[0]; if (fbi->fb.var.bits_per_pixel >= 16) fb_dealloc_cmap(&fbi->fb.cmap); else fb_alloc_cmap(&fbi->fb.cmap, 1<<fbi->fb.var.bits_per_pixel, 0); pxafb_activate_var(var, fbi); return 0; } static int pxafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct pxafb_info *fbi = (struct pxafb_info *)info; struct fb_var_screeninfo newvar; int dma = DMA_MAX + DMA_BASE; if (fbi->state != C_ENABLE) return 0; /* Only take .xoffset, .yoffset and .vmode & FB_VMODE_YWRAP from what * was passed in and copy the rest from the old screeninfo. */ memcpy(&newvar, &fbi->fb.var, sizeof(newvar)); newvar.xoffset = var->xoffset; newvar.yoffset = var->yoffset; newvar.vmode &= ~FB_VMODE_YWRAP; newvar.vmode |= var->vmode & FB_VMODE_YWRAP; setup_base_frame(fbi, &newvar, 1); if (fbi->lccr0 & LCCR0_SDS) lcd_writel(fbi, FBR1, fbi->fdadr[dma + 1] | 0x1); lcd_writel(fbi, FBR0, fbi->fdadr[dma] | 0x1); return 0; } /* * pxafb_blank(): * Blank the display by setting all palette values to zero. Note, the * 16 bpp mode does not really use the palette, so this will not * blank the display in all modes. 
*/ static int pxafb_blank(int blank, struct fb_info *info) { struct pxafb_info *fbi = (struct pxafb_info *)info; int i; switch (blank) { case FB_BLANK_POWERDOWN: case FB_BLANK_VSYNC_SUSPEND: case FB_BLANK_HSYNC_SUSPEND: case FB_BLANK_NORMAL: if (fbi->fb.fix.visual == FB_VISUAL_PSEUDOCOLOR || fbi->fb.fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) for (i = 0; i < fbi->palette_size; i++) pxafb_setpalettereg(i, 0, 0, 0, 0, info); pxafb_schedule_work(fbi, C_DISABLE); /* TODO if (pxafb_blank_helper) pxafb_blank_helper(blank); */ break; case FB_BLANK_UNBLANK: /* TODO if (pxafb_blank_helper) pxafb_blank_helper(blank); */ if (fbi->fb.fix.visual == FB_VISUAL_PSEUDOCOLOR || fbi->fb.fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) fb_set_cmap(&fbi->fb.cmap, info); pxafb_schedule_work(fbi, C_ENABLE); } return 0; } static struct fb_ops pxafb_ops = { .owner = THIS_MODULE, .fb_check_var = pxafb_check_var, .fb_set_par = pxafb_set_par, .fb_pan_display = pxafb_pan_display, .fb_setcolreg = pxafb_setcolreg, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_blank = pxafb_blank, }; #ifdef CONFIG_FB_PXA_OVERLAY static void overlay1fb_setup(struct pxafb_layer *ofb) { int size = ofb->fb.fix.line_length * ofb->fb.var.yres_virtual; unsigned long start = ofb->video_mem_phys; setup_frame_dma(ofb->fbi, DMA_OV1, PAL_NONE, start, size); } /* Depending on the enable status of overlay1/2, the DMA should be * updated from FDADRx (when disabled) or FBRx (when enabled). */ static void overlay1fb_enable(struct pxafb_layer *ofb) { int enabled = lcd_readl(ofb->fbi, OVL1C1) & OVLxC1_OEN; uint32_t fdadr1 = ofb->fbi->fdadr[DMA_OV1] | (enabled ? 0x1 : 0); lcd_writel(ofb->fbi, enabled ? 
FBR1 : FDADR1, fdadr1); lcd_writel(ofb->fbi, OVL1C2, ofb->control[1]); lcd_writel(ofb->fbi, OVL1C1, ofb->control[0] | OVLxC1_OEN); } static void overlay1fb_disable(struct pxafb_layer *ofb) { uint32_t lccr5; if (!(lcd_readl(ofb->fbi, OVL1C1) & OVLxC1_OEN)) return; lccr5 = lcd_readl(ofb->fbi, LCCR5); lcd_writel(ofb->fbi, OVL1C1, ofb->control[0] & ~OVLxC1_OEN); lcd_writel(ofb->fbi, LCSR1, LCSR1_BS(1)); lcd_writel(ofb->fbi, LCCR5, lccr5 & ~LCSR1_BS(1)); lcd_writel(ofb->fbi, FBR1, ofb->fbi->fdadr[DMA_OV1] | 0x3); if (wait_for_completion_timeout(&ofb->branch_done, 1 * HZ) == 0) pr_warning("%s: timeout disabling overlay1\n", __func__); lcd_writel(ofb->fbi, LCCR5, lccr5); } static void overlay2fb_setup(struct pxafb_layer *ofb) { int size, div = 1, pfor = NONSTD_TO_PFOR(ofb->fb.var.nonstd); unsigned long start[3] = { ofb->video_mem_phys, 0, 0 }; if (pfor == OVERLAY_FORMAT_RGB || pfor == OVERLAY_FORMAT_YUV444_PACKED) { size = ofb->fb.fix.line_length * ofb->fb.var.yres_virtual; setup_frame_dma(ofb->fbi, DMA_OV2_Y, -1, start[0], size); } else { size = ofb->fb.var.xres_virtual * ofb->fb.var.yres_virtual; switch (pfor) { case OVERLAY_FORMAT_YUV444_PLANAR: div = 1; break; case OVERLAY_FORMAT_YUV422_PLANAR: div = 2; break; case OVERLAY_FORMAT_YUV420_PLANAR: div = 4; break; } start[1] = start[0] + size; start[2] = start[1] + size / div; setup_frame_dma(ofb->fbi, DMA_OV2_Y, -1, start[0], size); setup_frame_dma(ofb->fbi, DMA_OV2_Cb, -1, start[1], size / div); setup_frame_dma(ofb->fbi, DMA_OV2_Cr, -1, start[2], size / div); } } static void overlay2fb_enable(struct pxafb_layer *ofb) { int pfor = NONSTD_TO_PFOR(ofb->fb.var.nonstd); int enabled = lcd_readl(ofb->fbi, OVL2C1) & OVLxC1_OEN; uint32_t fdadr2 = ofb->fbi->fdadr[DMA_OV2_Y] | (enabled ? 0x1 : 0); uint32_t fdadr3 = ofb->fbi->fdadr[DMA_OV2_Cb] | (enabled ? 0x1 : 0); uint32_t fdadr4 = ofb->fbi->fdadr[DMA_OV2_Cr] | (enabled ? 
0x1 : 0); if (pfor == OVERLAY_FORMAT_RGB || pfor == OVERLAY_FORMAT_YUV444_PACKED) lcd_writel(ofb->fbi, enabled ? FBR2 : FDADR2, fdadr2); else { lcd_writel(ofb->fbi, enabled ? FBR2 : FDADR2, fdadr2); lcd_writel(ofb->fbi, enabled ? FBR3 : FDADR3, fdadr3); lcd_writel(ofb->fbi, enabled ? FBR4 : FDADR4, fdadr4); } lcd_writel(ofb->fbi, OVL2C2, ofb->control[1]); lcd_writel(ofb->fbi, OVL2C1, ofb->control[0] | OVLxC1_OEN); } static void overlay2fb_disable(struct pxafb_layer *ofb) { uint32_t lccr5; if (!(lcd_readl(ofb->fbi, OVL2C1) & OVLxC1_OEN)) return; lccr5 = lcd_readl(ofb->fbi, LCCR5); lcd_writel(ofb->fbi, OVL2C1, ofb->control[0] & ~OVLxC1_OEN); lcd_writel(ofb->fbi, LCSR1, LCSR1_BS(2)); lcd_writel(ofb->fbi, LCCR5, lccr5 & ~LCSR1_BS(2)); lcd_writel(ofb->fbi, FBR2, ofb->fbi->fdadr[DMA_OV2_Y] | 0x3); lcd_writel(ofb->fbi, FBR3, ofb->fbi->fdadr[DMA_OV2_Cb] | 0x3); lcd_writel(ofb->fbi, FBR4, ofb->fbi->fdadr[DMA_OV2_Cr] | 0x3); if (wait_for_completion_timeout(&ofb->branch_done, 1 * HZ) == 0) pr_warning("%s: timeout disabling overlay2\n", __func__); } static struct pxafb_layer_ops ofb_ops[] = { [0] = { .enable = overlay1fb_enable, .disable = overlay1fb_disable, .setup = overlay1fb_setup, }, [1] = { .enable = overlay2fb_enable, .disable = overlay2fb_disable, .setup = overlay2fb_setup, }, }; static int overlayfb_open(struct fb_info *info, int user) { struct pxafb_layer *ofb = (struct pxafb_layer *)info; /* no support for framebuffer console on overlay */ if (user == 0) return -ENODEV; if (ofb->usage++ == 0) { /* unblank the base framebuffer */ console_lock(); fb_blank(&ofb->fbi->fb, FB_BLANK_UNBLANK); console_unlock(); } return 0; } static int overlayfb_release(struct fb_info *info, int user) { struct pxafb_layer *ofb = (struct pxafb_layer*) info; if (ofb->usage == 1) { ofb->ops->disable(ofb); ofb->fb.var.height = -1; ofb->fb.var.width = -1; ofb->fb.var.xres = ofb->fb.var.xres_virtual = 0; ofb->fb.var.yres = ofb->fb.var.yres_virtual = 0; ofb->usage--; } return 0; } static int 
overlayfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct pxafb_layer *ofb = (struct pxafb_layer *)info;
	struct fb_var_screeninfo *base_var = &ofb->fbi->fb.var;
	int xpos, ypos, pfor, bpp;

	/* overlay position and pixel format are packed into var->nonstd */
	xpos = NONSTD_TO_XPOS(var->nonstd);
	ypos = NONSTD_TO_YPOS(var->nonstd);
	pfor = NONSTD_TO_PFOR(var->nonstd);

	bpp = pxafb_var_to_bpp(var);
	if (bpp < 0)
		return -EINVAL;

	/* no support for YUV format on overlay1 */
	if (ofb->id == OVERLAY1 && pfor != 0)
		return -EINVAL;

	/* for YUV packed formats, bpp = 'minimum bpp of YUV components' */
	switch (pfor) {
	case OVERLAY_FORMAT_RGB:
		/* NOTE(review): bpp was already computed and range-checked
		 * above; this recomputation looks redundant but is harmless */
		bpp = pxafb_var_to_bpp(var);
		if (bpp < 0)
			return -EINVAL;
		pxafb_set_pixfmt(var, var_to_depth(var));
		break;
	case OVERLAY_FORMAT_YUV444_PACKED: bpp = 24; break;
	case OVERLAY_FORMAT_YUV444_PLANAR: bpp = 8; break;
	case OVERLAY_FORMAT_YUV422_PLANAR: bpp = 4; break;
	case OVERLAY_FORMAT_YUV420_PLANAR: bpp = 2; break;
	default:
		return -EINVAL;
	}

	/* each line must start at a 32-bit word boundary */
	if ((xpos * bpp) % 32)
		return -EINVAL;

	/* xres must align on 32-bit word boundary */
	var->xres = roundup(var->xres * bpp, 32) / bpp;

	/* the overlay must fit entirely within the base plane */
	if ((xpos + var->xres > base_var->xres) ||
	    (ypos + var->yres > base_var->yres))
		return -EINVAL;

	var->xres_virtual = var->xres;
	var->yres_virtual = max(var->yres, var->yres_virtual);
	return 0;
}

/*
 * Verify that the overlay's pre-allocated video memory is large enough
 * for the mode currently described by ofb->fb.var, and derive the line
 * length as a side effect.  Returns 0 if the buffer fits, -EINVAL if not.
 *
 * Here bpp is the *total* bits per pixel of the format (sum of the YUV
 * planes), unlike the per-component bpp used in overlayfb_check_var().
 */
static int overlayfb_check_video_memory(struct pxafb_layer *ofb)
{
	struct fb_var_screeninfo *var = &ofb->fb.var;
	int pfor = NONSTD_TO_PFOR(var->nonstd);
	int size, bpp = 0;

	switch (pfor) {
	case OVERLAY_FORMAT_RGB: bpp = var->bits_per_pixel; break;
	case OVERLAY_FORMAT_YUV444_PACKED: bpp = 24; break;
	case OVERLAY_FORMAT_YUV444_PLANAR: bpp = 24; break;
	case OVERLAY_FORMAT_YUV422_PLANAR: bpp = 16; break;
	case OVERLAY_FORMAT_YUV420_PLANAR: bpp = 12; break;
	}

	ofb->fb.fix.line_length = var->xres_virtual * bpp / 8;

	size = PAGE_ALIGN(ofb->fb.fix.line_length * var->yres_virtual);

	if (ofb->video_mem) {
		if (ofb->video_mem_size >= size)
			return 0;
	}
	return -EINVAL;
}

static int
overlayfb_set_par(struct fb_info *info) { struct pxafb_layer *ofb = (struct pxafb_layer *)info; struct fb_var_screeninfo *var = &info->var; int xpos, ypos, pfor, bpp, ret; ret = overlayfb_check_video_memory(ofb); if (ret) return ret; bpp = pxafb_var_to_bpp(var); xpos = NONSTD_TO_XPOS(var->nonstd); ypos = NONSTD_TO_YPOS(var->nonstd); pfor = NONSTD_TO_PFOR(var->nonstd); ofb->control[0] = OVLxC1_PPL(var->xres) | OVLxC1_LPO(var->yres) | OVLxC1_BPP(bpp); ofb->control[1] = OVLxC2_XPOS(xpos) | OVLxC2_YPOS(ypos); if (ofb->id == OVERLAY2) ofb->control[1] |= OVL2C2_PFOR(pfor); ofb->ops->setup(ofb); ofb->ops->enable(ofb); return 0; } static struct fb_ops overlay_fb_ops = { .owner = THIS_MODULE, .fb_open = overlayfb_open, .fb_release = overlayfb_release, .fb_check_var = overlayfb_check_var, .fb_set_par = overlayfb_set_par, }; static void __devinit init_pxafb_overlay(struct pxafb_info *fbi, struct pxafb_layer *ofb, int id) { sprintf(ofb->fb.fix.id, "overlay%d", id + 1); ofb->fb.fix.type = FB_TYPE_PACKED_PIXELS; ofb->fb.fix.xpanstep = 0; ofb->fb.fix.ypanstep = 1; ofb->fb.var.activate = FB_ACTIVATE_NOW; ofb->fb.var.height = -1; ofb->fb.var.width = -1; ofb->fb.var.vmode = FB_VMODE_NONINTERLACED; ofb->fb.fbops = &overlay_fb_ops; ofb->fb.flags = FBINFO_FLAG_DEFAULT; ofb->fb.node = -1; ofb->fb.pseudo_palette = NULL; ofb->id = id; ofb->ops = &ofb_ops[id]; ofb->usage = 0; ofb->fbi = fbi; init_completion(&ofb->branch_done); } static inline int pxafb_overlay_supported(void) { if (cpu_is_pxa27x() || cpu_is_pxa3xx()) return 1; return 0; } static int __devinit pxafb_overlay_map_video_memory(struct pxafb_info *pxafb, struct pxafb_layer *ofb) { /* We assume that user will use at most video_mem_size for overlay fb, * anyway, it's useless to use 16bpp main plane and 24bpp overlay */ ofb->video_mem = alloc_pages_exact(PAGE_ALIGN(pxafb->video_mem_size), GFP_KERNEL | __GFP_ZERO); if (ofb->video_mem == NULL) return -ENOMEM; ofb->video_mem_phys = virt_to_phys(ofb->video_mem); ofb->video_mem_size = 
PAGE_ALIGN(pxafb->video_mem_size); mutex_lock(&ofb->fb.mm_lock); ofb->fb.fix.smem_start = ofb->video_mem_phys; ofb->fb.fix.smem_len = pxafb->video_mem_size; mutex_unlock(&ofb->fb.mm_lock); ofb->fb.screen_base = ofb->video_mem; return 0; } static void __devinit pxafb_overlay_init(struct pxafb_info *fbi) { int i, ret; if (!pxafb_overlay_supported()) return; for (i = 0; i < 2; i++) { struct pxafb_layer *ofb = &fbi->overlay[i]; init_pxafb_overlay(fbi, ofb, i); ret = register_framebuffer(&ofb->fb); if (ret) { dev_err(fbi->dev, "failed to register overlay %d\n", i); continue; } ret = pxafb_overlay_map_video_memory(fbi, ofb); if (ret) { dev_err(fbi->dev, "failed to map video memory for overlay %d\n", i); unregister_framebuffer(&ofb->fb); continue; } ofb->registered = 1; } /* mask all IU/BS/EOF/SOF interrupts */ lcd_writel(fbi, LCCR5, ~0); pr_info("PXA Overlay driver loaded successfully!\n"); } static void __devexit pxafb_overlay_exit(struct pxafb_info *fbi) { int i; if (!pxafb_overlay_supported()) return; for (i = 0; i < 2; i++) { struct pxafb_layer *ofb = &fbi->overlay[i]; if (ofb->registered) { if (ofb->video_mem) free_pages_exact(ofb->video_mem, ofb->video_mem_size); unregister_framebuffer(&ofb->fb); } } } #else static inline void pxafb_overlay_init(struct pxafb_info *fbi) {} static inline void pxafb_overlay_exit(struct pxafb_info *fbi) {} #endif /* CONFIG_FB_PXA_OVERLAY */ /* * Calculate the PCD value from the clock rate (in picoseconds). * We take account of the PPCR clock setting. * From PXA Developer's Manual: * * PixelClock = LCLK * ------------- * 2 ( PCD + 1 ) * * PCD = LCLK * ------------- - 1 * 2(PixelClock) * * Where: * LCLK = LCD/Memory Clock * PCD = LCCR3[7:0] * * PixelClock here is in Hz while the pixclock argument given is the * period in picoseconds. Hence PixelClock = 1 / ( pixclock * 10^-12 ) * * The function get_lclk_frequency_10khz returns LCLK in units of * 10khz. 
Calling the result of this function lclk gives us the * following * * PCD = (lclk * 10^4 ) * ( pixclock * 10^-12 ) * -------------------------------------- - 1 * 2 * * Factoring the 10^4 and 10^-12 out gives 10^-8 == 1 / 100000000 as used below. */ static inline unsigned int get_pcd(struct pxafb_info *fbi, unsigned int pixclock) { unsigned long long pcd; /* FIXME: Need to take into account Double Pixel Clock mode * (DPC) bit? or perhaps set it based on the various clock * speeds */ pcd = (unsigned long long)(clk_get_rate(fbi->clk) / 10000); pcd *= pixclock; do_div(pcd, 100000000 * 2); /* no need for this, since we should subtract 1 anyway. they cancel */ /* pcd += 1; */ /* make up for integer math truncations */ return (unsigned int)pcd; } /* * Some touchscreens need hsync information from the video driver to * function correctly. We export it here. Note that 'hsync_time' and * the value returned from pxafb_get_hsync_time() is the *reciprocal* * of the hsync period in seconds. */ static inline void set_hsync_time(struct pxafb_info *fbi, unsigned int pcd) { unsigned long htime; if ((pcd == 0) || (fbi->fb.var.hsync_len == 0)) { fbi->hsync_time = 0; return; } htime = clk_get_rate(fbi->clk) / (pcd * fbi->fb.var.hsync_len); fbi->hsync_time = htime; } unsigned long pxafb_get_hsync_time(struct device *dev) { struct pxafb_info *fbi = dev_get_drvdata(dev); /* If display is blanked/suspended, hsync isn't active */ if (!fbi || (fbi->state != C_ENABLE)) return 0; return fbi->hsync_time; } EXPORT_SYMBOL(pxafb_get_hsync_time); static int setup_frame_dma(struct pxafb_info *fbi, int dma, int pal, unsigned long start, size_t size) { struct pxafb_dma_descriptor *dma_desc, *pal_desc; unsigned int dma_desc_off, pal_desc_off; if (dma < 0 || dma >= DMA_MAX * 2) return -EINVAL; dma_desc = &fbi->dma_buff->dma_desc[dma]; dma_desc_off = offsetof(struct pxafb_dma_buff, dma_desc[dma]); dma_desc->fsadr = start; dma_desc->fidr = 0; dma_desc->ldcmd = size; if (pal < 0 || pal >= PAL_MAX * 2) { 
dma_desc->fdadr = fbi->dma_buff_phys + dma_desc_off; fbi->fdadr[dma] = fbi->dma_buff_phys + dma_desc_off; } else { pal_desc = &fbi->dma_buff->pal_desc[pal]; pal_desc_off = offsetof(struct pxafb_dma_buff, pal_desc[pal]); pal_desc->fsadr = fbi->dma_buff_phys + pal * PALETTE_SIZE; pal_desc->fidr = 0; if ((fbi->lccr4 & LCCR4_PAL_FOR_MASK) == LCCR4_PAL_FOR_0) pal_desc->ldcmd = fbi->palette_size * sizeof(u16); else pal_desc->ldcmd = fbi->palette_size * sizeof(u32); pal_desc->ldcmd |= LDCMD_PAL; /* flip back and forth between palette and frame buffer */ pal_desc->fdadr = fbi->dma_buff_phys + dma_desc_off; dma_desc->fdadr = fbi->dma_buff_phys + pal_desc_off; fbi->fdadr[dma] = fbi->dma_buff_phys + dma_desc_off; } return 0; } static void setup_base_frame(struct pxafb_info *fbi, struct fb_var_screeninfo *var, int branch) { struct fb_fix_screeninfo *fix = &fbi->fb.fix; int nbytes, dma, pal, bpp = var->bits_per_pixel; unsigned long offset; dma = DMA_BASE + (branch ? DMA_MAX : 0); pal = (bpp >= 16) ? PAL_NONE : PAL_BASE + (branch ? 
PAL_MAX : 0); nbytes = fix->line_length * var->yres; offset = fix->line_length * var->yoffset + fbi->video_mem_phys; if (fbi->lccr0 & LCCR0_SDS) { nbytes = nbytes / 2; setup_frame_dma(fbi, dma + 1, PAL_NONE, offset + nbytes, nbytes); } setup_frame_dma(fbi, dma, pal, offset, nbytes); } #ifdef CONFIG_FB_PXA_SMARTPANEL static int setup_smart_dma(struct pxafb_info *fbi) { struct pxafb_dma_descriptor *dma_desc; unsigned long dma_desc_off, cmd_buff_off; dma_desc = &fbi->dma_buff->dma_desc[DMA_CMD]; dma_desc_off = offsetof(struct pxafb_dma_buff, dma_desc[DMA_CMD]); cmd_buff_off = offsetof(struct pxafb_dma_buff, cmd_buff); dma_desc->fdadr = fbi->dma_buff_phys + dma_desc_off; dma_desc->fsadr = fbi->dma_buff_phys + cmd_buff_off; dma_desc->fidr = 0; dma_desc->ldcmd = fbi->n_smart_cmds * sizeof(uint16_t); fbi->fdadr[DMA_CMD] = dma_desc->fdadr; return 0; } int pxafb_smart_flush(struct fb_info *info) { struct pxafb_info *fbi = container_of(info, struct pxafb_info, fb); uint32_t prsr; int ret = 0; /* disable controller until all registers are set up */ lcd_writel(fbi, LCCR0, fbi->reg_lccr0 & ~LCCR0_ENB); /* 1. make it an even number of commands to align on 32-bit boundary * 2. 
add the interrupt command to the end of the chain so we can * keep track of the end of the transfer */ while (fbi->n_smart_cmds & 1) fbi->smart_cmds[fbi->n_smart_cmds++] = SMART_CMD_NOOP; fbi->smart_cmds[fbi->n_smart_cmds++] = SMART_CMD_INTERRUPT; fbi->smart_cmds[fbi->n_smart_cmds++] = SMART_CMD_WAIT_FOR_VSYNC; setup_smart_dma(fbi); /* continue to execute next command */ prsr = lcd_readl(fbi, PRSR) | PRSR_ST_OK | PRSR_CON_NT; lcd_writel(fbi, PRSR, prsr); /* stop the processor in case it executed "wait for sync" cmd */ lcd_writel(fbi, CMDCR, 0x0001); /* don't send interrupts for fifo underruns on channel 6 */ lcd_writel(fbi, LCCR5, LCCR5_IUM(6)); lcd_writel(fbi, LCCR1, fbi->reg_lccr1); lcd_writel(fbi, LCCR2, fbi->reg_lccr2); lcd_writel(fbi, LCCR3, fbi->reg_lccr3); lcd_writel(fbi, LCCR4, fbi->reg_lccr4); lcd_writel(fbi, FDADR0, fbi->fdadr[0]); lcd_writel(fbi, FDADR6, fbi->fdadr[6]); /* begin sending */ lcd_writel(fbi, LCCR0, fbi->reg_lccr0 | LCCR0_ENB); if (wait_for_completion_timeout(&fbi->command_done, HZ/2) == 0) { pr_warning("%s: timeout waiting for command done\n", __func__); ret = -ETIMEDOUT; } /* quick disable */ prsr = lcd_readl(fbi, PRSR) & ~(PRSR_ST_OK | PRSR_CON_NT); lcd_writel(fbi, PRSR, prsr); lcd_writel(fbi, LCCR0, fbi->reg_lccr0 & ~LCCR0_ENB); lcd_writel(fbi, FDADR6, 0); fbi->n_smart_cmds = 0; return ret; } int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int n_cmds) { int i; struct pxafb_info *fbi = container_of(info, struct pxafb_info, fb); for (i = 0; i < n_cmds; i++, cmds++) { /* if it is a software delay, flush and delay */ if ((*cmds & 0xff00) == SMART_CMD_DELAY) { pxafb_smart_flush(info); mdelay(*cmds & 0xff); continue; } /* leave 2 commands for INTERRUPT and WAIT_FOR_SYNC */ if (fbi->n_smart_cmds == CMD_BUFF_SIZE - 8) pxafb_smart_flush(info); fbi->smart_cmds[fbi->n_smart_cmds++] = *cmds; } return 0; } static unsigned int __smart_timing(unsigned time_ns, unsigned long lcd_clk) { unsigned int t = (time_ns * (lcd_clk / 1000000) / 1000); 
return (t == 0) ? 1 : t; } static void setup_smart_timing(struct pxafb_info *fbi, struct fb_var_screeninfo *var) { struct pxafb_mach_info *inf = fbi->dev->platform_data; struct pxafb_mode_info *mode = &inf->modes[0]; unsigned long lclk = clk_get_rate(fbi->clk); unsigned t1, t2, t3, t4; t1 = max(mode->a0csrd_set_hld, mode->a0cswr_set_hld); t2 = max(mode->rd_pulse_width, mode->wr_pulse_width); t3 = mode->op_hold_time; t4 = mode->cmd_inh_time; fbi->reg_lccr1 = LCCR1_DisWdth(var->xres) | LCCR1_BegLnDel(__smart_timing(t1, lclk)) | LCCR1_EndLnDel(__smart_timing(t2, lclk)) | LCCR1_HorSnchWdth(__smart_timing(t3, lclk)); fbi->reg_lccr2 = LCCR2_DisHght(var->yres); fbi->reg_lccr3 = fbi->lccr3 | LCCR3_PixClkDiv(__smart_timing(t4, lclk)); fbi->reg_lccr3 |= (var->sync & FB_SYNC_HOR_HIGH_ACT) ? LCCR3_HSP : 0; fbi->reg_lccr3 |= (var->sync & FB_SYNC_VERT_HIGH_ACT) ? LCCR3_VSP : 0; /* FIXME: make this configurable */ fbi->reg_cmdcr = 1; } static int pxafb_smart_thread(void *arg) { struct pxafb_info *fbi = arg; struct pxafb_mach_info *inf = fbi->dev->platform_data; if (!inf->smart_update) { pr_err("%s: not properly initialized, thread terminated\n", __func__); return -EINVAL; } inf = fbi->dev->platform_data; pr_debug("%s(): task starting\n", __func__); set_freezable(); while (!kthread_should_stop()) { if (try_to_freeze()) continue; mutex_lock(&fbi->ctrlr_lock); if (fbi->state == C_ENABLE) { inf->smart_update(&fbi->fb); complete(&fbi->refresh_done); } mutex_unlock(&fbi->ctrlr_lock); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(30 * HZ / 1000); } pr_debug("%s(): task ending\n", __func__); return 0; } static int pxafb_smart_init(struct pxafb_info *fbi) { if (!(fbi->lccr0 & LCCR0_LCDT)) return 0; fbi->smart_cmds = (uint16_t *) fbi->dma_buff->cmd_buff; fbi->n_smart_cmds = 0; init_completion(&fbi->command_done); init_completion(&fbi->refresh_done); fbi->smart_thread = kthread_run(pxafb_smart_thread, fbi, "lcd_refresh"); if (IS_ERR(fbi->smart_thread)) { pr_err("%s: unable to 
create kernel thread\n", __func__); return PTR_ERR(fbi->smart_thread); } return 0; } #else static inline int pxafb_smart_init(struct pxafb_info *fbi) { return 0; } #endif /* CONFIG_FB_PXA_SMARTPANEL */ static void setup_parallel_timing(struct pxafb_info *fbi, struct fb_var_screeninfo *var) { unsigned int lines_per_panel, pcd = get_pcd(fbi, var->pixclock); fbi->reg_lccr1 = LCCR1_DisWdth(var->xres) + LCCR1_HorSnchWdth(var->hsync_len) + LCCR1_BegLnDel(var->left_margin) + LCCR1_EndLnDel(var->right_margin); /* * If we have a dual scan LCD, we need to halve * the YRES parameter. */ lines_per_panel = var->yres; if ((fbi->lccr0 & LCCR0_SDS) == LCCR0_Dual) lines_per_panel /= 2; fbi->reg_lccr2 = LCCR2_DisHght(lines_per_panel) + LCCR2_VrtSnchWdth(var->vsync_len) + LCCR2_BegFrmDel(var->upper_margin) + LCCR2_EndFrmDel(var->lower_margin); fbi->reg_lccr3 = fbi->lccr3 | (var->sync & FB_SYNC_HOR_HIGH_ACT ? LCCR3_HorSnchH : LCCR3_HorSnchL) | (var->sync & FB_SYNC_VERT_HIGH_ACT ? LCCR3_VrtSnchH : LCCR3_VrtSnchL); if (pcd) { fbi->reg_lccr3 |= LCCR3_PixClkDiv(pcd); set_hsync_time(fbi, pcd); } } /* * pxafb_activate_var(): * Configures LCD Controller based on entries in var parameter. * Settings are only written to the controller if changes were made. */ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *fbi) { u_long flags; /* Update shadow copy atomically */ local_irq_save(flags); #ifdef CONFIG_FB_PXA_SMARTPANEL if (fbi->lccr0 & LCCR0_LCDT) setup_smart_timing(fbi, var); else #endif setup_parallel_timing(fbi, var); setup_base_frame(fbi, var, 0); fbi->reg_lccr0 = fbi->lccr0 | (LCCR0_LDM | LCCR0_SFM | LCCR0_IUM | LCCR0_EFM | LCCR0_QDM | LCCR0_BM | LCCR0_OUM); fbi->reg_lccr3 |= pxafb_var_to_lccr3(var); fbi->reg_lccr4 = lcd_readl(fbi, LCCR4) & ~LCCR4_PAL_FOR_MASK; fbi->reg_lccr4 |= (fbi->lccr4 & LCCR4_PAL_FOR_MASK); local_irq_restore(flags); /* * Only update the registers if the controller is enabled * and something has changed. 
*/ if ((lcd_readl(fbi, LCCR0) != fbi->reg_lccr0) || (lcd_readl(fbi, LCCR1) != fbi->reg_lccr1) || (lcd_readl(fbi, LCCR2) != fbi->reg_lccr2) || (lcd_readl(fbi, LCCR3) != fbi->reg_lccr3) || (lcd_readl(fbi, LCCR4) != fbi->reg_lccr4) || (lcd_readl(fbi, FDADR0) != fbi->fdadr[0]) || ((fbi->lccr0 & LCCR0_SDS) && (lcd_readl(fbi, FDADR1) != fbi->fdadr[1]))) pxafb_schedule_work(fbi, C_REENABLE); return 0; } /* * NOTE! The following functions are purely helpers for set_ctrlr_state. * Do not call them directly; set_ctrlr_state does the correct serialisation * to ensure that things happen in the right way 100% of time time. * -- rmk */ static inline void __pxafb_backlight_power(struct pxafb_info *fbi, int on) { pr_debug("pxafb: backlight o%s\n", on ? "n" : "ff"); if (fbi->backlight_power) fbi->backlight_power(on); } static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on) { pr_debug("pxafb: LCD power o%s\n", on ? "n" : "ff"); if (fbi->lcd_power) fbi->lcd_power(on, &fbi->fb.var); } static void pxafb_enable_controller(struct pxafb_info *fbi) { pr_debug("pxafb: Enabling LCD controller\n"); pr_debug("fdadr0 0x%08x\n", (unsigned int) fbi->fdadr[0]); pr_debug("fdadr1 0x%08x\n", (unsigned int) fbi->fdadr[1]); pr_debug("reg_lccr0 0x%08x\n", (unsigned int) fbi->reg_lccr0); pr_debug("reg_lccr1 0x%08x\n", (unsigned int) fbi->reg_lccr1); pr_debug("reg_lccr2 0x%08x\n", (unsigned int) fbi->reg_lccr2); pr_debug("reg_lccr3 0x%08x\n", (unsigned int) fbi->reg_lccr3); /* enable LCD controller clock */ clk_prepare_enable(fbi->clk); if (fbi->lccr0 & LCCR0_LCDT) return; /* Sequence from 11.7.10 */ lcd_writel(fbi, LCCR4, fbi->reg_lccr4); lcd_writel(fbi, LCCR3, fbi->reg_lccr3); lcd_writel(fbi, LCCR2, fbi->reg_lccr2); lcd_writel(fbi, LCCR1, fbi->reg_lccr1); lcd_writel(fbi, LCCR0, fbi->reg_lccr0 & ~LCCR0_ENB); lcd_writel(fbi, FDADR0, fbi->fdadr[0]); if (fbi->lccr0 & LCCR0_SDS) lcd_writel(fbi, FDADR1, fbi->fdadr[1]); lcd_writel(fbi, LCCR0, fbi->reg_lccr0 | LCCR0_ENB); } static void 
pxafb_disable_controller(struct pxafb_info *fbi) { uint32_t lccr0; #ifdef CONFIG_FB_PXA_SMARTPANEL if (fbi->lccr0 & LCCR0_LCDT) { wait_for_completion_timeout(&fbi->refresh_done, 200 * HZ / 1000); return; } #endif /* Clear LCD Status Register */ lcd_writel(fbi, LCSR, 0xffffffff); lccr0 = lcd_readl(fbi, LCCR0) & ~LCCR0_LDM; lcd_writel(fbi, LCCR0, lccr0); lcd_writel(fbi, LCCR0, lccr0 | LCCR0_DIS); wait_for_completion_timeout(&fbi->disable_done, 200 * HZ / 1000); /* disable LCD controller clock */ clk_disable_unprepare(fbi->clk); } /* * pxafb_handle_irq: Handle 'LCD DONE' interrupts. */ static irqreturn_t pxafb_handle_irq(int irq, void *dev_id) { struct pxafb_info *fbi = dev_id; unsigned int lccr0, lcsr; lcsr = lcd_readl(fbi, LCSR); if (lcsr & LCSR_LDD) { lccr0 = lcd_readl(fbi, LCCR0); lcd_writel(fbi, LCCR0, lccr0 | LCCR0_LDM); complete(&fbi->disable_done); } #ifdef CONFIG_FB_PXA_SMARTPANEL if (lcsr & LCSR_CMD_INT) complete(&fbi->command_done); #endif lcd_writel(fbi, LCSR, lcsr); #ifdef CONFIG_FB_PXA_OVERLAY { unsigned int lcsr1 = lcd_readl(fbi, LCSR1); if (lcsr1 & LCSR1_BS(1)) complete(&fbi->overlay[0].branch_done); if (lcsr1 & LCSR1_BS(2)) complete(&fbi->overlay[1].branch_done); lcd_writel(fbi, LCSR1, lcsr1); } #endif return IRQ_HANDLED; } /* * This function must be called from task context only, since it will * sleep when disabling the LCD controller, or if we get two contending * processes trying to alter state. */ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state) { u_int old_state; mutex_lock(&fbi->ctrlr_lock); old_state = fbi->state; /* * Hack around fbcon initialisation. */ if (old_state == C_STARTUP && state == C_REENABLE) state = C_ENABLE; switch (state) { case C_DISABLE_CLKCHANGE: /* * Disable controller for clock change. If the * controller is already disabled, then do nothing. 
*/ if (old_state != C_DISABLE && old_state != C_DISABLE_PM) { fbi->state = state; /* TODO __pxafb_lcd_power(fbi, 0); */ pxafb_disable_controller(fbi); } break; case C_DISABLE_PM: case C_DISABLE: /* * Disable controller */ if (old_state != C_DISABLE) { fbi->state = state; __pxafb_backlight_power(fbi, 0); __pxafb_lcd_power(fbi, 0); if (old_state != C_DISABLE_CLKCHANGE) pxafb_disable_controller(fbi); } break; case C_ENABLE_CLKCHANGE: /* * Enable the controller after clock change. Only * do this if we were disabled for the clock change. */ if (old_state == C_DISABLE_CLKCHANGE) { fbi->state = C_ENABLE; pxafb_enable_controller(fbi); /* TODO __pxafb_lcd_power(fbi, 1); */ } break; case C_REENABLE: /* * Re-enable the controller only if it was already * enabled. This is so we reprogram the control * registers. */ if (old_state == C_ENABLE) { __pxafb_lcd_power(fbi, 0); pxafb_disable_controller(fbi); pxafb_enable_controller(fbi); __pxafb_lcd_power(fbi, 1); } break; case C_ENABLE_PM: /* * Re-enable the controller after PM. This is not * perfect - think about the case where we were doing * a clock change, and we suspended half-way through. */ if (old_state != C_DISABLE_PM) break; /* fall through */ case C_ENABLE: /* * Power up the LCD screen, enable controller, and * turn on the backlight. */ if (old_state != C_ENABLE) { fbi->state = C_ENABLE; pxafb_enable_controller(fbi); __pxafb_lcd_power(fbi, 1); __pxafb_backlight_power(fbi, 1); } break; } mutex_unlock(&fbi->ctrlr_lock); } /* * Our LCD controller task (which is called when we blank or unblank) * via keventd. */ static void pxafb_task(struct work_struct *work) { struct pxafb_info *fbi = container_of(work, struct pxafb_info, task); u_int state = xchg(&fbi->task_state, -1); set_ctrlr_state(fbi, state); } #ifdef CONFIG_CPU_FREQ /* * CPU clock speed change handler. We need to adjust the LCD timing * parameters when the CPU clock is adjusted by the power management * subsystem. 
* * TODO: Determine why f->new != 10*get_lclk_frequency_10khz() */ static int pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data) { struct pxafb_info *fbi = TO_INF(nb, freq_transition); /* TODO struct cpufreq_freqs *f = data; */ u_int pcd; switch (val) { case CPUFREQ_PRECHANGE: #ifdef CONFIG_FB_PXA_OVERLAY if (!(fbi->overlay[0].usage || fbi->overlay[1].usage)) #endif set_ctrlr_state(fbi, C_DISABLE_CLKCHANGE); break; case CPUFREQ_POSTCHANGE: pcd = get_pcd(fbi, fbi->fb.var.pixclock); set_hsync_time(fbi, pcd); fbi->reg_lccr3 = (fbi->reg_lccr3 & ~0xff) | LCCR3_PixClkDiv(pcd); set_ctrlr_state(fbi, C_ENABLE_CLKCHANGE); break; } return 0; } static int pxafb_freq_policy(struct notifier_block *nb, unsigned long val, void *data) { struct pxafb_info *fbi = TO_INF(nb, freq_policy); struct fb_var_screeninfo *var = &fbi->fb.var; struct cpufreq_policy *policy = data; switch (val) { case CPUFREQ_ADJUST: case CPUFREQ_INCOMPATIBLE: pr_debug("min dma period: %d ps, " "new clock %d kHz\n", pxafb_display_dma_period(var), policy->max); /* TODO: fill in min/max values */ break; } return 0; } #endif #ifdef CONFIG_PM /* * Power management hooks. Note that we won't be called from IRQ context, * unlike the blank functions above, so we may sleep. 
*/ static int pxafb_suspend(struct device *dev) { struct pxafb_info *fbi = dev_get_drvdata(dev); set_ctrlr_state(fbi, C_DISABLE_PM); return 0; } static int pxafb_resume(struct device *dev) { struct pxafb_info *fbi = dev_get_drvdata(dev); set_ctrlr_state(fbi, C_ENABLE_PM); return 0; } static const struct dev_pm_ops pxafb_pm_ops = { .suspend = pxafb_suspend, .resume = pxafb_resume, }; #endif static int __devinit pxafb_init_video_memory(struct pxafb_info *fbi) { int size = PAGE_ALIGN(fbi->video_mem_size); fbi->video_mem = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); if (fbi->video_mem == NULL) return -ENOMEM; fbi->video_mem_phys = virt_to_phys(fbi->video_mem); fbi->video_mem_size = size; fbi->fb.fix.smem_start = fbi->video_mem_phys; fbi->fb.fix.smem_len = fbi->video_mem_size; fbi->fb.screen_base = fbi->video_mem; return fbi->video_mem ? 0 : -ENOMEM; } static void pxafb_decode_mach_info(struct pxafb_info *fbi, struct pxafb_mach_info *inf) { unsigned int lcd_conn = inf->lcd_conn; struct pxafb_mode_info *m; int i; fbi->cmap_inverse = inf->cmap_inverse; fbi->cmap_static = inf->cmap_static; fbi->lccr4 = inf->lccr4; switch (lcd_conn & LCD_TYPE_MASK) { case LCD_TYPE_MONO_STN: fbi->lccr0 = LCCR0_CMS; break; case LCD_TYPE_MONO_DSTN: fbi->lccr0 = LCCR0_CMS | LCCR0_SDS; break; case LCD_TYPE_COLOR_STN: fbi->lccr0 = 0; break; case LCD_TYPE_COLOR_DSTN: fbi->lccr0 = LCCR0_SDS; break; case LCD_TYPE_COLOR_TFT: fbi->lccr0 = LCCR0_PAS; break; case LCD_TYPE_SMART_PANEL: fbi->lccr0 = LCCR0_LCDT | LCCR0_PAS; break; default: /* fall back to backward compatibility way */ fbi->lccr0 = inf->lccr0; fbi->lccr3 = inf->lccr3; goto decode_mode; } if (lcd_conn == LCD_MONO_STN_8BPP) fbi->lccr0 |= LCCR0_DPD; fbi->lccr0 |= (lcd_conn & LCD_ALTERNATE_MAPPING) ? LCCR0_LDDALT : 0; fbi->lccr3 = LCCR3_Acb((inf->lcd_conn >> 10) & 0xff); fbi->lccr3 |= (lcd_conn & LCD_BIAS_ACTIVE_LOW) ? LCCR3_OEP : 0; fbi->lccr3 |= (lcd_conn & LCD_PCLK_EDGE_FALL) ? 
LCCR3_PCP : 0; decode_mode: pxafb_setmode(&fbi->fb.var, &inf->modes[0]); /* decide video memory size as follows: * 1. default to mode of maximum resolution * 2. allow platform to override * 3. allow module parameter to override */ for (i = 0, m = &inf->modes[0]; i < inf->num_modes; i++, m++) fbi->video_mem_size = max_t(size_t, fbi->video_mem_size, m->xres * m->yres * m->bpp / 8); if (inf->video_mem_size > fbi->video_mem_size) fbi->video_mem_size = inf->video_mem_size; if (video_mem_size > fbi->video_mem_size) fbi->video_mem_size = video_mem_size; } static struct pxafb_info * __devinit pxafb_init_fbinfo(struct device *dev) { struct pxafb_info *fbi; void *addr; struct pxafb_mach_info *inf = dev->platform_data; /* Alloc the pxafb_info and pseudo_palette in one step */ fbi = kmalloc(sizeof(struct pxafb_info) + sizeof(u32) * 16, GFP_KERNEL); if (!fbi) return NULL; memset(fbi, 0, sizeof(struct pxafb_info)); fbi->dev = dev; fbi->clk = clk_get(dev, NULL); if (IS_ERR(fbi->clk)) { kfree(fbi); return NULL; } strcpy(fbi->fb.fix.id, PXA_NAME); fbi->fb.fix.type = FB_TYPE_PACKED_PIXELS; fbi->fb.fix.type_aux = 0; fbi->fb.fix.xpanstep = 0; fbi->fb.fix.ypanstep = 1; fbi->fb.fix.ywrapstep = 0; fbi->fb.fix.accel = FB_ACCEL_NONE; fbi->fb.var.nonstd = 0; fbi->fb.var.activate = FB_ACTIVATE_NOW; fbi->fb.var.height = -1; fbi->fb.var.width = -1; fbi->fb.var.accel_flags = FB_ACCELF_TEXT; fbi->fb.var.vmode = FB_VMODE_NONINTERLACED; fbi->fb.fbops = &pxafb_ops; fbi->fb.flags = FBINFO_DEFAULT; fbi->fb.node = -1; addr = fbi; addr = addr + sizeof(struct pxafb_info); fbi->fb.pseudo_palette = addr; fbi->state = C_STARTUP; fbi->task_state = (u_char)-1; pxafb_decode_mach_info(fbi, inf); #ifdef CONFIG_FB_PXA_OVERLAY /* place overlay(s) on top of base */ if (pxafb_overlay_supported()) fbi->lccr0 |= LCCR0_OUC; #endif init_waitqueue_head(&fbi->ctrlr_wait); INIT_WORK(&fbi->task, pxafb_task); mutex_init(&fbi->ctrlr_lock); init_completion(&fbi->disable_done); return fbi; } #ifdef CONFIG_FB_PXA_PARAMETERS 
static int __devinit parse_opt_mode(struct device *dev, const char *this_opt) { struct pxafb_mach_info *inf = dev->platform_data; const char *name = this_opt+5; unsigned int namelen = strlen(name); int res_specified = 0, bpp_specified = 0; unsigned int xres = 0, yres = 0, bpp = 0; int yres_specified = 0; int i; for (i = namelen-1; i >= 0; i--) { switch (name[i]) { case '-': namelen = i; if (!bpp_specified && !yres_specified) { bpp = simple_strtoul(&name[i+1], NULL, 0); bpp_specified = 1; } else goto done; break; case 'x': if (!yres_specified) { yres = simple_strtoul(&name[i+1], NULL, 0); yres_specified = 1; } else goto done; break; case '0' ... '9': break; default: goto done; } } if (i < 0 && yres_specified) { xres = simple_strtoul(name, NULL, 0); res_specified = 1; } done: if (res_specified) { dev_info(dev, "overriding resolution: %dx%d\n", xres, yres); inf->modes[0].xres = xres; inf->modes[0].yres = yres; } if (bpp_specified) switch (bpp) { case 1: case 2: case 4: case 8: case 16: inf->modes[0].bpp = bpp; dev_info(dev, "overriding bit depth: %d\n", bpp); break; default: dev_err(dev, "Depth %d is not valid\n", bpp); return -EINVAL; } return 0; } static int __devinit parse_opt(struct device *dev, char *this_opt) { struct pxafb_mach_info *inf = dev->platform_data; struct pxafb_mode_info *mode = &inf->modes[0]; char s[64]; s[0] = '\0'; if (!strncmp(this_opt, "vmem:", 5)) { video_mem_size = memparse(this_opt + 5, NULL); } else if (!strncmp(this_opt, "mode:", 5)) { return parse_opt_mode(dev, this_opt); } else if (!strncmp(this_opt, "pixclock:", 9)) { mode->pixclock = simple_strtoul(this_opt+9, NULL, 0); sprintf(s, "pixclock: %ld\n", mode->pixclock); } else if (!strncmp(this_opt, "left:", 5)) { mode->left_margin = simple_strtoul(this_opt+5, NULL, 0); sprintf(s, "left: %u\n", mode->left_margin); } else if (!strncmp(this_opt, "right:", 6)) { mode->right_margin = simple_strtoul(this_opt+6, NULL, 0); sprintf(s, "right: %u\n", mode->right_margin); } else if 
(!strncmp(this_opt, "upper:", 6)) { mode->upper_margin = simple_strtoul(this_opt+6, NULL, 0); sprintf(s, "upper: %u\n", mode->upper_margin); } else if (!strncmp(this_opt, "lower:", 6)) { mode->lower_margin = simple_strtoul(this_opt+6, NULL, 0); sprintf(s, "lower: %u\n", mode->lower_margin); } else if (!strncmp(this_opt, "hsynclen:", 9)) { mode->hsync_len = simple_strtoul(this_opt+9, NULL, 0); sprintf(s, "hsynclen: %u\n", mode->hsync_len); } else if (!strncmp(this_opt, "vsynclen:", 9)) { mode->vsync_len = simple_strtoul(this_opt+9, NULL, 0); sprintf(s, "vsynclen: %u\n", mode->vsync_len); } else if (!strncmp(this_opt, "hsync:", 6)) { if (simple_strtoul(this_opt+6, NULL, 0) == 0) { sprintf(s, "hsync: Active Low\n"); mode->sync &= ~FB_SYNC_HOR_HIGH_ACT; } else { sprintf(s, "hsync: Active High\n"); mode->sync |= FB_SYNC_HOR_HIGH_ACT; } } else if (!strncmp(this_opt, "vsync:", 6)) { if (simple_strtoul(this_opt+6, NULL, 0) == 0) { sprintf(s, "vsync: Active Low\n"); mode->sync &= ~FB_SYNC_VERT_HIGH_ACT; } else { sprintf(s, "vsync: Active High\n"); mode->sync |= FB_SYNC_VERT_HIGH_ACT; } } else if (!strncmp(this_opt, "dpc:", 4)) { if (simple_strtoul(this_opt+4, NULL, 0) == 0) { sprintf(s, "double pixel clock: false\n"); inf->lccr3 &= ~LCCR3_DPC; } else { sprintf(s, "double pixel clock: true\n"); inf->lccr3 |= LCCR3_DPC; } } else if (!strncmp(this_opt, "outputen:", 9)) { if (simple_strtoul(this_opt+9, NULL, 0) == 0) { sprintf(s, "output enable: active low\n"); inf->lccr3 = (inf->lccr3 & ~LCCR3_OEP) | LCCR3_OutEnL; } else { sprintf(s, "output enable: active high\n"); inf->lccr3 = (inf->lccr3 & ~LCCR3_OEP) | LCCR3_OutEnH; } } else if (!strncmp(this_opt, "pixclockpol:", 12)) { if (simple_strtoul(this_opt+12, NULL, 0) == 0) { sprintf(s, "pixel clock polarity: falling edge\n"); inf->lccr3 = (inf->lccr3 & ~LCCR3_PCP) | LCCR3_PixFlEdg; } else { sprintf(s, "pixel clock polarity: rising edge\n"); inf->lccr3 = (inf->lccr3 & ~LCCR3_PCP) | LCCR3_PixRsEdg; } } else if (!strncmp(this_opt, 
"color", 5)) { inf->lccr0 = (inf->lccr0 & ~LCCR0_CMS) | LCCR0_Color; } else if (!strncmp(this_opt, "mono", 4)) { inf->lccr0 = (inf->lccr0 & ~LCCR0_CMS) | LCCR0_Mono; } else if (!strncmp(this_opt, "active", 6)) { inf->lccr0 = (inf->lccr0 & ~LCCR0_PAS) | LCCR0_Act; } else if (!strncmp(this_opt, "passive", 7)) { inf->lccr0 = (inf->lccr0 & ~LCCR0_PAS) | LCCR0_Pas; } else if (!strncmp(this_opt, "single", 6)) { inf->lccr0 = (inf->lccr0 & ~LCCR0_SDS) | LCCR0_Sngl; } else if (!strncmp(this_opt, "dual", 4)) { inf->lccr0 = (inf->lccr0 & ~LCCR0_SDS) | LCCR0_Dual; } else if (!strncmp(this_opt, "4pix", 4)) { inf->lccr0 = (inf->lccr0 & ~LCCR0_DPD) | LCCR0_4PixMono; } else if (!strncmp(this_opt, "8pix", 4)) { inf->lccr0 = (inf->lccr0 & ~LCCR0_DPD) | LCCR0_8PixMono; } else { dev_err(dev, "unknown option: %s\n", this_opt); return -EINVAL; } if (s[0] != '\0') dev_info(dev, "override %s", s); return 0; } static int __devinit pxafb_parse_options(struct device *dev, char *options) { char *this_opt; int ret; if (!options || !*options) return 0; dev_dbg(dev, "options are \"%s\"\n", options ? options : "null"); /* could be made table driven or similar?... */ while ((this_opt = strsep(&options, ",")) != NULL) { ret = parse_opt(dev, this_opt); if (ret) return ret; } return 0; } static char g_options[256] __devinitdata = ""; #ifndef MODULE static int __init pxafb_setup_options(void) { char *options = NULL; if (fb_get_options("pxafb", &options)) return -ENODEV; if (options) strlcpy(g_options, options, sizeof(g_options)); return 0; } #else #define pxafb_setup_options() (0) module_param_string(options, g_options, sizeof(g_options), 0); MODULE_PARM_DESC(options, "LCD parameters (see Documentation/fb/pxafb.txt)"); #endif #else #define pxafb_parse_options(...) (0) #define pxafb_setup_options() (0) #endif #ifdef DEBUG_VAR /* Check for various illegal bit-combinations. Currently only * a warning is given. 
*/ static void __devinit pxafb_check_options(struct device *dev, struct pxafb_mach_info *inf) { if (inf->lcd_conn) return; if (inf->lccr0 & LCCR0_INVALID_CONFIG_MASK) dev_warn(dev, "machine LCCR0 setting contains " "illegal bits: %08x\n", inf->lccr0 & LCCR0_INVALID_CONFIG_MASK); if (inf->lccr3 & LCCR3_INVALID_CONFIG_MASK) dev_warn(dev, "machine LCCR3 setting contains " "illegal bits: %08x\n", inf->lccr3 & LCCR3_INVALID_CONFIG_MASK); if (inf->lccr0 & LCCR0_DPD && ((inf->lccr0 & LCCR0_PAS) != LCCR0_Pas || (inf->lccr0 & LCCR0_SDS) != LCCR0_Sngl || (inf->lccr0 & LCCR0_CMS) != LCCR0_Mono)) dev_warn(dev, "Double Pixel Data (DPD) mode is " "only valid in passive mono" " single panel mode\n"); if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Act && (inf->lccr0 & LCCR0_SDS) == LCCR0_Dual) dev_warn(dev, "Dual panel only valid in passive mode\n"); if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Pas && (inf->modes->upper_margin || inf->modes->lower_margin)) dev_warn(dev, "Upper and lower margins must be 0 in " "passive mode\n"); } #else #define pxafb_check_options(...) 
do {} while (0) #endif static int __devinit pxafb_probe(struct platform_device *dev) { struct pxafb_info *fbi; struct pxafb_mach_info *inf; struct resource *r; int irq, ret; dev_dbg(&dev->dev, "pxafb_probe\n"); inf = dev->dev.platform_data; ret = -ENOMEM; fbi = NULL; if (!inf) goto failed; ret = pxafb_parse_options(&dev->dev, g_options); if (ret < 0) goto failed; pxafb_check_options(&dev->dev, inf); dev_dbg(&dev->dev, "got a %dx%dx%d LCD\n", inf->modes->xres, inf->modes->yres, inf->modes->bpp); if (inf->modes->xres == 0 || inf->modes->yres == 0 || inf->modes->bpp == 0) { dev_err(&dev->dev, "Invalid resolution or bit depth\n"); ret = -EINVAL; goto failed; } fbi = pxafb_init_fbinfo(&dev->dev); if (!fbi) { /* only reason for pxafb_init_fbinfo to fail is kmalloc */ dev_err(&dev->dev, "Failed to initialize framebuffer device\n"); ret = -ENOMEM; goto failed; } if (cpu_is_pxa3xx() && inf->acceleration_enabled) fbi->fb.fix.accel = FB_ACCEL_PXA3XX; fbi->backlight_power = inf->pxafb_backlight_power; fbi->lcd_power = inf->pxafb_lcd_power; r = platform_get_resource(dev, IORESOURCE_MEM, 0); if (r == NULL) { dev_err(&dev->dev, "no I/O memory resource defined\n"); ret = -ENODEV; goto failed_fbi; } r = request_mem_region(r->start, resource_size(r), dev->name); if (r == NULL) { dev_err(&dev->dev, "failed to request I/O memory\n"); ret = -EBUSY; goto failed_fbi; } fbi->mmio_base = ioremap(r->start, resource_size(r)); if (fbi->mmio_base == NULL) { dev_err(&dev->dev, "failed to map I/O memory\n"); ret = -EBUSY; goto failed_free_res; } fbi->dma_buff_size = PAGE_ALIGN(sizeof(struct pxafb_dma_buff)); fbi->dma_buff = dma_alloc_coherent(fbi->dev, fbi->dma_buff_size, &fbi->dma_buff_phys, GFP_KERNEL); if (fbi->dma_buff == NULL) { dev_err(&dev->dev, "failed to allocate memory for DMA\n"); ret = -ENOMEM; goto failed_free_io; } ret = pxafb_init_video_memory(fbi); if (ret) { dev_err(&dev->dev, "Failed to allocate video RAM: %d\n", ret); ret = -ENOMEM; goto failed_free_dma; } irq = 
platform_get_irq(dev, 0); if (irq < 0) { dev_err(&dev->dev, "no IRQ defined\n"); ret = -ENODEV; goto failed_free_mem; } ret = request_irq(irq, pxafb_handle_irq, 0, "LCD", fbi); if (ret) { dev_err(&dev->dev, "request_irq failed: %d\n", ret); ret = -EBUSY; goto failed_free_mem; } ret = pxafb_smart_init(fbi); if (ret) { dev_err(&dev->dev, "failed to initialize smartpanel\n"); goto failed_free_irq; } /* * This makes sure that our colour bitfield * descriptors are correctly initialised. */ ret = pxafb_check_var(&fbi->fb.var, &fbi->fb); if (ret) { dev_err(&dev->dev, "failed to get suitable mode\n"); goto failed_free_irq; } ret = pxafb_set_par(&fbi->fb); if (ret) { dev_err(&dev->dev, "Failed to set parameters\n"); goto failed_free_irq; } platform_set_drvdata(dev, fbi); ret = register_framebuffer(&fbi->fb); if (ret < 0) { dev_err(&dev->dev, "Failed to register framebuffer device: %d\n", ret); goto failed_free_cmap; } pxafb_overlay_init(fbi); #ifdef CONFIG_CPU_FREQ fbi->freq_transition.notifier_call = pxafb_freq_transition; fbi->freq_policy.notifier_call = pxafb_freq_policy; cpufreq_register_notifier(&fbi->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); cpufreq_register_notifier(&fbi->freq_policy, CPUFREQ_POLICY_NOTIFIER); #endif /* * Ok, now enable the LCD controller */ set_ctrlr_state(fbi, C_ENABLE); return 0; failed_free_cmap: if (fbi->fb.cmap.len) fb_dealloc_cmap(&fbi->fb.cmap); failed_free_irq: free_irq(irq, fbi); failed_free_mem: free_pages_exact(fbi->video_mem, fbi->video_mem_size); failed_free_dma: dma_free_coherent(&dev->dev, fbi->dma_buff_size, fbi->dma_buff, fbi->dma_buff_phys); failed_free_io: iounmap(fbi->mmio_base); failed_free_res: release_mem_region(r->start, resource_size(r)); failed_fbi: clk_put(fbi->clk); platform_set_drvdata(dev, NULL); kfree(fbi); failed: return ret; } static int __devexit pxafb_remove(struct platform_device *dev) { struct pxafb_info *fbi = platform_get_drvdata(dev); struct resource *r; int irq; struct fb_info *info; if (!fbi) return 0; 
info = &fbi->fb; pxafb_overlay_exit(fbi); unregister_framebuffer(info); pxafb_disable_controller(fbi); if (fbi->fb.cmap.len) fb_dealloc_cmap(&fbi->fb.cmap); irq = platform_get_irq(dev, 0); free_irq(irq, fbi); free_pages_exact(fbi->video_mem, fbi->video_mem_size); dma_free_writecombine(&dev->dev, fbi->dma_buff_size, fbi->dma_buff, fbi->dma_buff_phys); iounmap(fbi->mmio_base); r = platform_get_resource(dev, IORESOURCE_MEM, 0); release_mem_region(r->start, resource_size(r)); clk_put(fbi->clk); kfree(fbi); return 0; } static struct platform_driver pxafb_driver = { .probe = pxafb_probe, .remove = __devexit_p(pxafb_remove), .driver = { .owner = THIS_MODULE, .name = "pxa2xx-fb", #ifdef CONFIG_PM .pm = &pxafb_pm_ops, #endif }, }; static int __init pxafb_init(void) { if (pxafb_setup_options()) return -EINVAL; return platform_driver_register(&pxafb_driver); } static void __exit pxafb_exit(void) { platform_driver_unregister(&pxafb_driver); } module_init(pxafb_init); module_exit(pxafb_exit); MODULE_DESCRIPTION("loadable framebuffer driver for PXA"); MODULE_LICENSE("GPL");
gpl-2.0
ChangYeoun/Kite_Stock
drivers/media/video/tcm825x.c
5196
22018
/* * drivers/media/video/tcm825x.c * * TCM825X camera sensor driver. * * Copyright (C) 2007 Nokia Corporation. * * Contact: Sakari Ailus <sakari.ailus@nokia.com> * * Based on code from David Cohen <david.cohen@indt.org.br> * * This driver was based on ov9640 sensor driver from MontaVista * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/i2c.h> #include <linux/module.h> #include <media/v4l2-int-device.h> #include "tcm825x.h" /* * The sensor has two fps modes: the lower one just gives half the fps * at the same xclk than the high one. 
*/ #define MAX_FPS 30 #define MIN_FPS 8 #define MAX_HALF_FPS (MAX_FPS / 2) #define HIGH_FPS_MODE_LOWER_LIMIT 14 #define DEFAULT_FPS MAX_HALF_FPS struct tcm825x_sensor { const struct tcm825x_platform_data *platform_data; struct v4l2_int_device *v4l2_int_device; struct i2c_client *i2c_client; struct v4l2_pix_format pix; struct v4l2_fract timeperframe; }; /* list of image formats supported by TCM825X sensor */ static const struct v4l2_fmtdesc tcm825x_formats[] = { { .description = "YUYV (YUV 4:2:2), packed", .pixelformat = V4L2_PIX_FMT_UYVY, }, { /* Note: V4L2 defines RGB565 as: * * Byte 0 Byte 1 * g2 g1 g0 r4 r3 r2 r1 r0 b4 b3 b2 b1 b0 g5 g4 g3 * * We interpret RGB565 as: * * Byte 0 Byte 1 * g2 g1 g0 b4 b3 b2 b1 b0 r4 r3 r2 r1 r0 g5 g4 g3 */ .description = "RGB565, le", .pixelformat = V4L2_PIX_FMT_RGB565, }, }; #define TCM825X_NUM_CAPTURE_FORMATS ARRAY_SIZE(tcm825x_formats) /* * TCM825X register configuration for all combinations of pixel format and * image size */ static const struct tcm825x_reg subqcif = { 0x20, TCM825X_PICSIZ }; static const struct tcm825x_reg qcif = { 0x18, TCM825X_PICSIZ }; static const struct tcm825x_reg cif = { 0x14, TCM825X_PICSIZ }; static const struct tcm825x_reg qqvga = { 0x0c, TCM825X_PICSIZ }; static const struct tcm825x_reg qvga = { 0x04, TCM825X_PICSIZ }; static const struct tcm825x_reg vga = { 0x00, TCM825X_PICSIZ }; static const struct tcm825x_reg yuv422 = { 0x00, TCM825X_PICFMT }; static const struct tcm825x_reg rgb565 = { 0x02, TCM825X_PICFMT }; /* Our own specific controls */ #define V4L2_CID_ALC V4L2_CID_PRIVATE_BASE #define V4L2_CID_H_EDGE_EN V4L2_CID_PRIVATE_BASE + 1 #define V4L2_CID_V_EDGE_EN V4L2_CID_PRIVATE_BASE + 2 #define V4L2_CID_LENS V4L2_CID_PRIVATE_BASE + 3 #define V4L2_CID_MAX_EXPOSURE_TIME V4L2_CID_PRIVATE_BASE + 4 #define V4L2_CID_LAST_PRIV V4L2_CID_MAX_EXPOSURE_TIME /* Video controls */ static struct vcontrol { struct v4l2_queryctrl qc; u16 reg; u16 start_bit; } video_control[] = { { { .id = V4L2_CID_GAIN, .type = 
V4L2_CTRL_TYPE_INTEGER, .name = "Gain", .minimum = 0, .maximum = 63, .step = 1, }, .reg = TCM825X_AG, .start_bit = 0, }, { { .id = V4L2_CID_RED_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Red Balance", .minimum = 0, .maximum = 255, .step = 1, }, .reg = TCM825X_MRG, .start_bit = 0, }, { { .id = V4L2_CID_BLUE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Blue Balance", .minimum = 0, .maximum = 255, .step = 1, }, .reg = TCM825X_MBG, .start_bit = 0, }, { { .id = V4L2_CID_AUTO_WHITE_BALANCE, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Auto White Balance", .minimum = 0, .maximum = 1, .step = 0, }, .reg = TCM825X_AWBSW, .start_bit = 7, }, { { .id = V4L2_CID_EXPOSURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Exposure Time", .minimum = 0, .maximum = 0x1fff, .step = 1, }, .reg = TCM825X_ESRSPD_U, .start_bit = 0, }, { { .id = V4L2_CID_HFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Mirror Image", .minimum = 0, .maximum = 1, .step = 0, }, .reg = TCM825X_H_INV, .start_bit = 6, }, { { .id = V4L2_CID_VFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Vertical Flip", .minimum = 0, .maximum = 1, .step = 0, }, .reg = TCM825X_V_INV, .start_bit = 7, }, /* Private controls */ { { .id = V4L2_CID_ALC, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Auto Luminance Control", .minimum = 0, .maximum = 1, .step = 0, }, .reg = TCM825X_ALCSW, .start_bit = 7, }, { { .id = V4L2_CID_H_EDGE_EN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Horizontal Edge Enhancement", .minimum = 0, .maximum = 0xff, .step = 1, }, .reg = TCM825X_HDTG, .start_bit = 0, }, { { .id = V4L2_CID_V_EDGE_EN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Vertical Edge Enhancement", .minimum = 0, .maximum = 0xff, .step = 1, }, .reg = TCM825X_VDTG, .start_bit = 0, }, { { .id = V4L2_CID_LENS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Lens Shading Compensation", .minimum = 0, .maximum = 0x3f, .step = 1, }, .reg = TCM825X_LENS, .start_bit = 0, }, { { .id = V4L2_CID_MAX_EXPOSURE_TIME, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Maximum Exposure 
Time", .minimum = 0, .maximum = 0x3, .step = 1, }, .reg = TCM825X_ESRLIM, .start_bit = 5, }, }; static const struct tcm825x_reg *tcm825x_siz_reg[NUM_IMAGE_SIZES] = { &subqcif, &qqvga, &qcif, &qvga, &cif, &vga }; static const struct tcm825x_reg *tcm825x_fmt_reg[NUM_PIXEL_FORMATS] = { &yuv422, &rgb565 }; /* * Read a value from a register in an TCM825X sensor device. The value is * returned in 'val'. * Returns zero if successful, or non-zero otherwise. */ static int tcm825x_read_reg(struct i2c_client *client, int reg) { int err; struct i2c_msg msg[2]; u8 reg_buf, data_buf = 0; if (!client->adapter) return -ENODEV; msg[0].addr = client->addr; msg[0].flags = 0; msg[0].len = 1; msg[0].buf = &reg_buf; msg[1].addr = client->addr; msg[1].flags = I2C_M_RD; msg[1].len = 1; msg[1].buf = &data_buf; reg_buf = reg; err = i2c_transfer(client->adapter, msg, 2); if (err < 0) return err; return data_buf; } /* * Write a value to a register in an TCM825X sensor device. * Returns zero if successful, or non-zero otherwise. */ static int tcm825x_write_reg(struct i2c_client *client, u8 reg, u8 val) { int err; struct i2c_msg msg[1]; unsigned char data[2]; if (!client->adapter) return -ENODEV; msg->addr = client->addr; msg->flags = 0; msg->len = 2; msg->buf = data; data[0] = reg; data[1] = val; err = i2c_transfer(client->adapter, msg, 1); if (err >= 0) return 0; return err; } static int __tcm825x_write_reg_mask(struct i2c_client *client, u8 reg, u8 val, u8 mask) { int rc; /* need to do read - modify - write */ rc = tcm825x_read_reg(client, reg); if (rc < 0) return rc; rc &= (~mask); /* Clear the masked bits */ val &= mask; /* Enforce mask on value */ val |= rc; /* write the new value to the register */ rc = tcm825x_write_reg(client, reg, val); if (rc) return rc; return 0; } #define tcm825x_write_reg_mask(client, regmask, val) \ __tcm825x_write_reg_mask(client, TCM825X_ADDR((regmask)), val, \ TCM825X_MASK((regmask))) /* * Initialize a list of TCM825X registers. 
* The list of registers is terminated by the pair of values * { TCM825X_REG_TERM, TCM825X_VAL_TERM }. * Returns zero if successful, or non-zero otherwise. */ static int tcm825x_write_default_regs(struct i2c_client *client, const struct tcm825x_reg *reglist) { int err; const struct tcm825x_reg *next = reglist; while (!((next->reg == TCM825X_REG_TERM) && (next->val == TCM825X_VAL_TERM))) { err = tcm825x_write_reg(client, next->reg, next->val); if (err) { dev_err(&client->dev, "register writing failed\n"); return err; } next++; } return 0; } static struct vcontrol *find_vctrl(int id) { int i; if (id < V4L2_CID_BASE) return NULL; for (i = 0; i < ARRAY_SIZE(video_control); i++) if (video_control[i].qc.id == id) return &video_control[i]; return NULL; } /* * Find the best match for a requested image capture size. The best match * is chosen as the nearest match that has the same number or fewer pixels * as the requested size, or the smallest image size if the requested size * has fewer pixels than the smallest image. */ static enum image_size tcm825x_find_size(struct v4l2_int_device *s, unsigned int width, unsigned int height) { enum image_size isize; unsigned long pixels = width * height; struct tcm825x_sensor *sensor = s->priv; for (isize = subQCIF; isize < VGA; isize++) { if (tcm825x_sizes[isize + 1].height * tcm825x_sizes[isize + 1].width > pixels) { dev_dbg(&sensor->i2c_client->dev, "size %d\n", isize); return isize; } } dev_dbg(&sensor->i2c_client->dev, "format default VGA\n"); return VGA; } /* * Configure the TCM825X for current image size, pixel format, and * frame period. fper is the frame period (in seconds) expressed as a * fraction. Returns zero if successful, or non-zero otherwise. The * actual frame period is returned in fper. 
*/ static int tcm825x_configure(struct v4l2_int_device *s) { struct tcm825x_sensor *sensor = s->priv; struct v4l2_pix_format *pix = &sensor->pix; enum image_size isize = tcm825x_find_size(s, pix->width, pix->height); struct v4l2_fract *fper = &sensor->timeperframe; enum pixel_format pfmt; int err; u32 tgt_fps; u8 val; /* common register initialization */ err = tcm825x_write_default_regs( sensor->i2c_client, sensor->platform_data->default_regs()); if (err) return err; /* configure image size */ val = tcm825x_siz_reg[isize]->val; dev_dbg(&sensor->i2c_client->dev, "configuring image size %d\n", isize); err = tcm825x_write_reg_mask(sensor->i2c_client, tcm825x_siz_reg[isize]->reg, val); if (err) return err; /* configure pixel format */ switch (pix->pixelformat) { default: case V4L2_PIX_FMT_RGB565: pfmt = RGB565; break; case V4L2_PIX_FMT_UYVY: pfmt = YUV422; break; } dev_dbg(&sensor->i2c_client->dev, "configuring pixel format %d\n", pfmt); val = tcm825x_fmt_reg[pfmt]->val; err = tcm825x_write_reg_mask(sensor->i2c_client, tcm825x_fmt_reg[pfmt]->reg, val); if (err) return err; /* * For frame rate < 15, the FPS reg (addr 0x02, bit 7) must be * set. Frame rate will be halved from the normal. 
*/ tgt_fps = fper->denominator / fper->numerator; if (tgt_fps <= HIGH_FPS_MODE_LOWER_LIMIT) { val = tcm825x_read_reg(sensor->i2c_client, 0x02); val |= 0x80; tcm825x_write_reg(sensor->i2c_client, 0x02, val); } return 0; } static int ioctl_queryctrl(struct v4l2_int_device *s, struct v4l2_queryctrl *qc) { struct vcontrol *control; control = find_vctrl(qc->id); if (control == NULL) return -EINVAL; *qc = control->qc; return 0; } static int ioctl_g_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc) { struct tcm825x_sensor *sensor = s->priv; struct i2c_client *client = sensor->i2c_client; int val, r; struct vcontrol *lvc; /* exposure time is special, spread across 2 registers */ if (vc->id == V4L2_CID_EXPOSURE) { int val_lower, val_upper; val_upper = tcm825x_read_reg(client, TCM825X_ADDR(TCM825X_ESRSPD_U)); if (val_upper < 0) return val_upper; val_lower = tcm825x_read_reg(client, TCM825X_ADDR(TCM825X_ESRSPD_L)); if (val_lower < 0) return val_lower; vc->value = ((val_upper & 0x1f) << 8) | (val_lower); return 0; } lvc = find_vctrl(vc->id); if (lvc == NULL) return -EINVAL; r = tcm825x_read_reg(client, TCM825X_ADDR(lvc->reg)); if (r < 0) return r; val = r & TCM825X_MASK(lvc->reg); val >>= lvc->start_bit; if (val < 0) return val; if (vc->id == V4L2_CID_HFLIP || vc->id == V4L2_CID_VFLIP) val ^= sensor->platform_data->is_upside_down(); vc->value = val; return 0; } static int ioctl_s_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc) { struct tcm825x_sensor *sensor = s->priv; struct i2c_client *client = sensor->i2c_client; struct vcontrol *lvc; int val = vc->value; /* exposure time is special, spread across 2 registers */ if (vc->id == V4L2_CID_EXPOSURE) { int val_lower, val_upper; val_lower = val & TCM825X_MASK(TCM825X_ESRSPD_L); val_upper = (val >> 8) & TCM825X_MASK(TCM825X_ESRSPD_U); if (tcm825x_write_reg_mask(client, TCM825X_ESRSPD_U, val_upper)) return -EIO; if (tcm825x_write_reg_mask(client, TCM825X_ESRSPD_L, val_lower)) return -EIO; return 0; } lvc = 
find_vctrl(vc->id); if (lvc == NULL) return -EINVAL; if (vc->id == V4L2_CID_HFLIP || vc->id == V4L2_CID_VFLIP) val ^= sensor->platform_data->is_upside_down(); val = val << lvc->start_bit; if (tcm825x_write_reg_mask(client, lvc->reg, val)) return -EIO; return 0; } static int ioctl_enum_fmt_cap(struct v4l2_int_device *s, struct v4l2_fmtdesc *fmt) { int index = fmt->index; switch (fmt->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: if (index >= TCM825X_NUM_CAPTURE_FORMATS) return -EINVAL; break; default: return -EINVAL; } fmt->flags = tcm825x_formats[index].flags; strlcpy(fmt->description, tcm825x_formats[index].description, sizeof(fmt->description)); fmt->pixelformat = tcm825x_formats[index].pixelformat; return 0; } static int ioctl_try_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f) { struct tcm825x_sensor *sensor = s->priv; enum image_size isize; int ifmt; struct v4l2_pix_format *pix = &f->fmt.pix; isize = tcm825x_find_size(s, pix->width, pix->height); dev_dbg(&sensor->i2c_client->dev, "isize = %d num_capture = %lu\n", isize, (unsigned long)TCM825X_NUM_CAPTURE_FORMATS); pix->width = tcm825x_sizes[isize].width; pix->height = tcm825x_sizes[isize].height; for (ifmt = 0; ifmt < TCM825X_NUM_CAPTURE_FORMATS; ifmt++) if (pix->pixelformat == tcm825x_formats[ifmt].pixelformat) break; if (ifmt == TCM825X_NUM_CAPTURE_FORMATS) ifmt = 0; /* Default = YUV 4:2:2 */ pix->pixelformat = tcm825x_formats[ifmt].pixelformat; pix->field = V4L2_FIELD_NONE; pix->bytesperline = pix->width * TCM825X_BYTES_PER_PIXEL; pix->sizeimage = pix->bytesperline * pix->height; pix->priv = 0; dev_dbg(&sensor->i2c_client->dev, "format = 0x%08x\n", pix->pixelformat); switch (pix->pixelformat) { case V4L2_PIX_FMT_UYVY: default: pix->colorspace = V4L2_COLORSPACE_JPEG; break; case V4L2_PIX_FMT_RGB565: pix->colorspace = V4L2_COLORSPACE_SRGB; break; } return 0; } static int ioctl_s_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f) { struct tcm825x_sensor *sensor = s->priv; struct v4l2_pix_format 
*pix = &f->fmt.pix; int rval; rval = ioctl_try_fmt_cap(s, f); if (rval) return rval; rval = tcm825x_configure(s); sensor->pix = *pix; return rval; } static int ioctl_g_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f) { struct tcm825x_sensor *sensor = s->priv; f->fmt.pix = sensor->pix; return 0; } static int ioctl_g_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a) { struct tcm825x_sensor *sensor = s->priv; struct v4l2_captureparm *cparm = &a->parm.capture; if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; memset(a, 0, sizeof(*a)); a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; cparm->capability = V4L2_CAP_TIMEPERFRAME; cparm->timeperframe = sensor->timeperframe; return 0; } static int ioctl_s_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a) { struct tcm825x_sensor *sensor = s->priv; struct v4l2_fract *timeperframe = &a->parm.capture.timeperframe; u32 tgt_fps; /* target frames per secound */ int rval; if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if ((timeperframe->numerator == 0) || (timeperframe->denominator == 0)) { timeperframe->denominator = DEFAULT_FPS; timeperframe->numerator = 1; } tgt_fps = timeperframe->denominator / timeperframe->numerator; if (tgt_fps > MAX_FPS) { timeperframe->denominator = MAX_FPS; timeperframe->numerator = 1; } else if (tgt_fps < MIN_FPS) { timeperframe->denominator = MIN_FPS; timeperframe->numerator = 1; } sensor->timeperframe = *timeperframe; rval = tcm825x_configure(s); return rval; } static int ioctl_s_power(struct v4l2_int_device *s, int on) { struct tcm825x_sensor *sensor = s->priv; return sensor->platform_data->power_set(on); } /* * Given the image capture format in pix, the nominal frame period in * timeperframe, calculate the required xclk frequency. 
* * TCM825X input frequency characteristics are: * Minimum 11.9 MHz, Typical 24.57 MHz and maximum 25/27 MHz */ static int ioctl_g_ifparm(struct v4l2_int_device *s, struct v4l2_ifparm *p) { struct tcm825x_sensor *sensor = s->priv; struct v4l2_fract *timeperframe = &sensor->timeperframe; u32 tgt_xclk; /* target xclk */ u32 tgt_fps; /* target frames per secound */ int rval; rval = sensor->platform_data->ifparm(p); if (rval) return rval; tgt_fps = timeperframe->denominator / timeperframe->numerator; tgt_xclk = (tgt_fps <= HIGH_FPS_MODE_LOWER_LIMIT) ? (2457 * tgt_fps) / MAX_HALF_FPS : (2457 * tgt_fps) / MAX_FPS; tgt_xclk *= 10000; tgt_xclk = min(tgt_xclk, (u32)TCM825X_XCLK_MAX); tgt_xclk = max(tgt_xclk, (u32)TCM825X_XCLK_MIN); p->u.bt656.clock_curr = tgt_xclk; return 0; } static int ioctl_g_needs_reset(struct v4l2_int_device *s, void *buf) { struct tcm825x_sensor *sensor = s->priv; return sensor->platform_data->needs_reset(s, buf, &sensor->pix); } static int ioctl_reset(struct v4l2_int_device *s) { return -EBUSY; } static int ioctl_init(struct v4l2_int_device *s) { return tcm825x_configure(s); } static int ioctl_dev_exit(struct v4l2_int_device *s) { return 0; } static int ioctl_dev_init(struct v4l2_int_device *s) { struct tcm825x_sensor *sensor = s->priv; int r; r = tcm825x_read_reg(sensor->i2c_client, 0x01); if (r < 0) return r; if (r == 0) { dev_err(&sensor->i2c_client->dev, "device not detected\n"); return -EIO; } return 0; } static struct v4l2_int_ioctl_desc tcm825x_ioctl_desc[] = { { vidioc_int_dev_init_num, (v4l2_int_ioctl_func *)ioctl_dev_init }, { vidioc_int_dev_exit_num, (v4l2_int_ioctl_func *)ioctl_dev_exit }, { vidioc_int_s_power_num, (v4l2_int_ioctl_func *)ioctl_s_power }, { vidioc_int_g_ifparm_num, (v4l2_int_ioctl_func *)ioctl_g_ifparm }, { vidioc_int_g_needs_reset_num, (v4l2_int_ioctl_func *)ioctl_g_needs_reset }, { vidioc_int_reset_num, (v4l2_int_ioctl_func *)ioctl_reset }, { vidioc_int_init_num, (v4l2_int_ioctl_func *)ioctl_init }, { 
vidioc_int_enum_fmt_cap_num, (v4l2_int_ioctl_func *)ioctl_enum_fmt_cap }, { vidioc_int_try_fmt_cap_num, (v4l2_int_ioctl_func *)ioctl_try_fmt_cap }, { vidioc_int_g_fmt_cap_num, (v4l2_int_ioctl_func *)ioctl_g_fmt_cap }, { vidioc_int_s_fmt_cap_num, (v4l2_int_ioctl_func *)ioctl_s_fmt_cap }, { vidioc_int_g_parm_num, (v4l2_int_ioctl_func *)ioctl_g_parm }, { vidioc_int_s_parm_num, (v4l2_int_ioctl_func *)ioctl_s_parm }, { vidioc_int_queryctrl_num, (v4l2_int_ioctl_func *)ioctl_queryctrl }, { vidioc_int_g_ctrl_num, (v4l2_int_ioctl_func *)ioctl_g_ctrl }, { vidioc_int_s_ctrl_num, (v4l2_int_ioctl_func *)ioctl_s_ctrl }, }; static struct v4l2_int_slave tcm825x_slave = { .ioctls = tcm825x_ioctl_desc, .num_ioctls = ARRAY_SIZE(tcm825x_ioctl_desc), }; static struct tcm825x_sensor tcm825x; static struct v4l2_int_device tcm825x_int_device = { .module = THIS_MODULE, .name = TCM825X_NAME, .priv = &tcm825x, .type = v4l2_int_type_slave, .u = { .slave = &tcm825x_slave, }, }; static int tcm825x_probe(struct i2c_client *client, const struct i2c_device_id *did) { struct tcm825x_sensor *sensor = &tcm825x; if (i2c_get_clientdata(client)) return -EBUSY; sensor->platform_data = client->dev.platform_data; if (sensor->platform_data == NULL || !sensor->platform_data->is_okay()) return -ENODEV; sensor->v4l2_int_device = &tcm825x_int_device; sensor->i2c_client = client; i2c_set_clientdata(client, sensor); /* Make the default capture format QVGA RGB565 */ sensor->pix.width = tcm825x_sizes[QVGA].width; sensor->pix.height = tcm825x_sizes[QVGA].height; sensor->pix.pixelformat = V4L2_PIX_FMT_RGB565; return v4l2_int_device_register(sensor->v4l2_int_device); } static int tcm825x_remove(struct i2c_client *client) { struct tcm825x_sensor *sensor = i2c_get_clientdata(client); if (!client->adapter) return -ENODEV; /* our client isn't attached */ v4l2_int_device_unregister(sensor->v4l2_int_device); return 0; } static const struct i2c_device_id tcm825x_id[] = { { "tcm825x", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, 
tcm825x_id); static struct i2c_driver tcm825x_i2c_driver = { .driver = { .name = TCM825X_NAME, }, .probe = tcm825x_probe, .remove = tcm825x_remove, .id_table = tcm825x_id, }; static struct tcm825x_sensor tcm825x = { .timeperframe = { .numerator = 1, .denominator = DEFAULT_FPS, }, }; static int __init tcm825x_init(void) { int rval; rval = i2c_add_driver(&tcm825x_i2c_driver); if (rval) printk(KERN_INFO "%s: failed registering " TCM825X_NAME "\n", __func__); return rval; } static void __exit tcm825x_exit(void) { i2c_del_driver(&tcm825x_i2c_driver); } /* * FIXME: Menelaus isn't ready (?) at module_init stage, so use * late_initcall for now. */ late_initcall(tcm825x_init); module_exit(tcm825x_exit); MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>"); MODULE_DESCRIPTION("TCM825x camera sensor driver"); MODULE_LICENSE("GPL");
gpl-2.0
84506232/AK-OnePone
drivers/rtc/rtc-ep93xx.c
5452
5415
/* * A driver for the RTC embedded in the Cirrus Logic EP93XX processors * Copyright (c) 2006 Tower Technologies * * Author: Alessandro Zummo <a.zummo@towertech.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/rtc.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/gfp.h> #define EP93XX_RTC_DATA 0x000 #define EP93XX_RTC_MATCH 0x004 #define EP93XX_RTC_STATUS 0x008 #define EP93XX_RTC_STATUS_INTR (1<<0) #define EP93XX_RTC_LOAD 0x00C #define EP93XX_RTC_CONTROL 0x010 #define EP93XX_RTC_CONTROL_MIE (1<<0) #define EP93XX_RTC_SWCOMP 0x108 #define EP93XX_RTC_SWCOMP_DEL_MASK 0x001f0000 #define EP93XX_RTC_SWCOMP_DEL_SHIFT 16 #define EP93XX_RTC_SWCOMP_INT_MASK 0x0000ffff #define EP93XX_RTC_SWCOMP_INT_SHIFT 0 #define DRV_VERSION "0.3" /* * struct device dev.platform_data is used to store our private data * because struct rtc_device does not have a variable to hold it. 
*/ struct ep93xx_rtc { void __iomem *mmio_base; struct rtc_device *rtc; }; static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload, unsigned short *delete) { struct ep93xx_rtc *ep93xx_rtc = dev->platform_data; unsigned long comp; comp = __raw_readl(ep93xx_rtc->mmio_base + EP93XX_RTC_SWCOMP); if (preload) *preload = (comp & EP93XX_RTC_SWCOMP_INT_MASK) >> EP93XX_RTC_SWCOMP_INT_SHIFT; if (delete) *delete = (comp & EP93XX_RTC_SWCOMP_DEL_MASK) >> EP93XX_RTC_SWCOMP_DEL_SHIFT; return 0; } static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct ep93xx_rtc *ep93xx_rtc = dev->platform_data; unsigned long time; time = __raw_readl(ep93xx_rtc->mmio_base + EP93XX_RTC_DATA); rtc_time_to_tm(time, tm); return 0; } static int ep93xx_rtc_set_mmss(struct device *dev, unsigned long secs) { struct ep93xx_rtc *ep93xx_rtc = dev->platform_data; __raw_writel(secs + 1, ep93xx_rtc->mmio_base + EP93XX_RTC_LOAD); return 0; } static int ep93xx_rtc_proc(struct device *dev, struct seq_file *seq) { unsigned short preload, delete; ep93xx_rtc_get_swcomp(dev, &preload, &delete); seq_printf(seq, "preload\t\t: %d\n", preload); seq_printf(seq, "delete\t\t: %d\n", delete); return 0; } static const struct rtc_class_ops ep93xx_rtc_ops = { .read_time = ep93xx_rtc_read_time, .set_mmss = ep93xx_rtc_set_mmss, .proc = ep93xx_rtc_proc, }; static ssize_t ep93xx_rtc_show_comp_preload(struct device *dev, struct device_attribute *attr, char *buf) { unsigned short preload; ep93xx_rtc_get_swcomp(dev, &preload, NULL); return sprintf(buf, "%d\n", preload); } static DEVICE_ATTR(comp_preload, S_IRUGO, ep93xx_rtc_show_comp_preload, NULL); static ssize_t ep93xx_rtc_show_comp_delete(struct device *dev, struct device_attribute *attr, char *buf) { unsigned short delete; ep93xx_rtc_get_swcomp(dev, NULL, &delete); return sprintf(buf, "%d\n", delete); } static DEVICE_ATTR(comp_delete, S_IRUGO, ep93xx_rtc_show_comp_delete, NULL); static struct attribute *ep93xx_rtc_attrs[] = { 
&dev_attr_comp_preload.attr, &dev_attr_comp_delete.attr, NULL }; static const struct attribute_group ep93xx_rtc_sysfs_files = { .attrs = ep93xx_rtc_attrs, }; static int __init ep93xx_rtc_probe(struct platform_device *pdev) { struct ep93xx_rtc *ep93xx_rtc; struct resource *res; int err; ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL); if (!ep93xx_rtc) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENXIO; if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), pdev->name)) return -EBUSY; ep93xx_rtc->mmio_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!ep93xx_rtc->mmio_base) return -ENXIO; pdev->dev.platform_data = ep93xx_rtc; platform_set_drvdata(pdev, ep93xx_rtc); ep93xx_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); if (IS_ERR(ep93xx_rtc->rtc)) { err = PTR_ERR(ep93xx_rtc->rtc); goto exit; } err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); if (err) goto fail; return 0; fail: rtc_device_unregister(ep93xx_rtc->rtc); exit: platform_set_drvdata(pdev, NULL); pdev->dev.platform_data = NULL; return err; } static int __exit ep93xx_rtc_remove(struct platform_device *pdev) { struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev); sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); platform_set_drvdata(pdev, NULL); rtc_device_unregister(ep93xx_rtc->rtc); pdev->dev.platform_data = NULL; return 0; } /* work with hotplug and coldplug */ MODULE_ALIAS("platform:ep93xx-rtc"); static struct platform_driver ep93xx_rtc_driver = { .driver = { .name = "ep93xx-rtc", .owner = THIS_MODULE, }, .remove = __exit_p(ep93xx_rtc_remove), }; static int __init ep93xx_rtc_init(void) { return platform_driver_probe(&ep93xx_rtc_driver, ep93xx_rtc_probe); } static void __exit ep93xx_rtc_exit(void) { platform_driver_unregister(&ep93xx_rtc_driver); } MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>"); MODULE_DESCRIPTION("EP93XX 
RTC driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); module_init(ep93xx_rtc_init); module_exit(ep93xx_rtc_exit);
gpl-2.0
invisiblek/android_kernel_samsung_msm8960
drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
8268
24566
/****************************************************************************** Copyright(c) 2004 Intel Corporation. All rights reserved. Portions of this file are based on the WEP enablement code provided by the Host AP project hostap-drivers v0.1.3 Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen <jkmaline@cc.hut.fi> Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi> This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. The full GNU General Public License is included in this distribution in the file called LICENSE. Contact Information: James P. Ketrenos <ipw2100-admin@linux.intel.com> Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ******************************************************************************/ #include <linux/wireless.h> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/module.h> #include "ieee80211.h" struct modes_unit { char *mode_string; int mode_size; }; struct modes_unit ieee80211_modes[] = { {"a",1}, {"b",1}, {"g",1}, {"?",1}, {"N-24G",5}, {"N-5G",4}, }; #define iwe_stream_add_event_rsl iwe_stream_add_event #define MAX_CUSTOM_LEN 64 static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee, char *start, char *stop, struct ieee80211_network *network, struct iw_request_info *info) { char custom[MAX_CUSTOM_LEN]; char proto_name[IFNAMSIZ]; char *pname = proto_name; char *p; struct iw_event iwe; int i, j; u16 max_rate, rate; static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; /* First entry *MUST* be the AP MAC address */ iwe.cmd = SIOCGIWAP; iwe.u.ap_addr.sa_family = ARPHRD_ETHER; memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN); start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_ADDR_LEN); /* Remaining entries will be displayed in the order we provide them */ /* Add the ESSID */ iwe.cmd = SIOCGIWESSID; iwe.u.data.flags = 1; // if (network->flags & NETWORK_EMPTY_ESSID) { if (network->ssid_len == 0) { iwe.u.data.length = sizeof("<hidden>"); start = iwe_stream_add_point(info, start, stop, &iwe, "<hidden>"); } else { iwe.u.data.length = min(network->ssid_len, (u8)32); start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid); } /* Add the protocol name */ iwe.cmd = SIOCGIWNAME; for(i=0; i<ARRAY_SIZE(ieee80211_modes); i++) { if(network->mode&(1<<i)) { sprintf(pname,ieee80211_modes[i].mode_string,ieee80211_modes[i].mode_size); pname +=ieee80211_modes[i].mode_size; } } *pname = '\0'; snprintf(iwe.u.name, IFNAMSIZ, "IEEE802.11%s", proto_name); start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_CHAR_LEN); /* Add mode */ iwe.cmd = SIOCGIWMODE; if (network->capability 
& (WLAN_CAPABILITY_BSS | WLAN_CAPABILITY_IBSS)) { if (network->capability & WLAN_CAPABILITY_BSS) iwe.u.mode = IW_MODE_MASTER; else iwe.u.mode = IW_MODE_ADHOC; start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_UINT_LEN); } /* Add frequency/channel */ iwe.cmd = SIOCGIWFREQ; /* iwe.u.freq.m = ieee80211_frequency(network->channel, network->mode); iwe.u.freq.e = 3; */ iwe.u.freq.m = network->channel; iwe.u.freq.e = 0; iwe.u.freq.i = 0; start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_FREQ_LEN); /* Add encryption capability */ iwe.cmd = SIOCGIWENCODE; if (network->capability & WLAN_CAPABILITY_PRIVACY) iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; else iwe.u.data.flags = IW_ENCODE_DISABLED; iwe.u.data.length = 0; start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid); /* Add basic and extended rates */ max_rate = 0; p = custom; p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): "); for (i = 0, j = 0; i < network->rates_len; ) { if (j < network->rates_ex_len && ((network->rates_ex[j] & 0x7F) < (network->rates[i] & 0x7F))) rate = network->rates_ex[j++] & 0x7F; else rate = network->rates[i++] & 0x7F; if (rate > max_rate) max_rate = rate; p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "%d%s ", rate >> 1, (rate & 1) ? ".5" : ""); } for (; j < network->rates_ex_len; j++) { rate = network->rates_ex[j] & 0x7F; p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "%d%s ", rate >> 1, (rate & 1) ? ".5" : ""); if (rate > max_rate) max_rate = rate; } if (network->mode >= IEEE_N_24G)//add N rate here; { PHT_CAPABILITY_ELE ht_cap = NULL; bool is40M = false, isShortGI = false; u8 max_mcs = 0; if (!memcmp(network->bssht.bdHTCapBuf, EWC11NHTCap, 4)) ht_cap = (PHT_CAPABILITY_ELE)&network->bssht.bdHTCapBuf[4]; else ht_cap = (PHT_CAPABILITY_ELE)&network->bssht.bdHTCapBuf[0]; is40M = (ht_cap->ChlWidth)?1:0; isShortGI = (ht_cap->ChlWidth)? 
((ht_cap->ShortGI40Mhz)?1:0): ((ht_cap->ShortGI20Mhz)?1:0); max_mcs = HTGetHighestMCSRate(ieee, ht_cap->MCS, MCS_FILTER_ALL); rate = MCS_DATA_RATE[is40M][isShortGI][max_mcs&0x7f]; if (rate > max_rate) max_rate = rate; } iwe.cmd = SIOCGIWRATE; iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; iwe.u.bitrate.value = max_rate * 500000; start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_PARAM_LEN); iwe.cmd = IWEVCUSTOM; iwe.u.data.length = p - custom; if (iwe.u.data.length) start = iwe_stream_add_point(info, start, stop, &iwe, custom); /* Add quality statistics */ /* TODO: Fix these values... */ iwe.cmd = IWEVQUAL; iwe.u.qual.qual = network->stats.signal; iwe.u.qual.level = network->stats.rssi; iwe.u.qual.noise = network->stats.noise; iwe.u.qual.updated = network->stats.mask & IEEE80211_STATMASK_WEMASK; if (!(network->stats.mask & IEEE80211_STATMASK_RSSI)) iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID; if (!(network->stats.mask & IEEE80211_STATMASK_NOISE)) iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID; if (!(network->stats.mask & IEEE80211_STATMASK_SIGNAL)) iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID; iwe.u.qual.updated = 7; start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_QUAL_LEN); iwe.cmd = IWEVCUSTOM; p = custom; iwe.u.data.length = p - custom; if (iwe.u.data.length) start = iwe_stream_add_point(info, start, stop, &iwe, custom); #if (WIRELESS_EXT < 18) if (ieee->wpa_enabled && network->wpa_ie_len){ char buf[MAX_WPA_IE_LEN * 2 + 30]; // printk("WPA IE\n"); u8 *p = buf; p += sprintf(p, "wpa_ie="); for (i = 0; i < network->wpa_ie_len; i++) { p += sprintf(p, "%02x", network->wpa_ie[i]); } memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVCUSTOM; iwe.u.data.length = strlen(buf); start = iwe_stream_add_point(info, start, stop, &iwe, buf); } if (ieee->wpa_enabled && network->rsn_ie_len){ char buf[MAX_WPA_IE_LEN * 2 + 30]; u8 *p = buf; p += sprintf(p, "rsn_ie="); for (i = 0; i < network->rsn_ie_len; i++) { p += sprintf(p, "%02x", network->rsn_ie[i]); } 
memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVCUSTOM; iwe.u.data.length = strlen(buf); start = iwe_stream_add_point(info, start, stop, &iwe, buf); } #else memset(&iwe, 0, sizeof(iwe)); if (network->wpa_ie_len) { char buf[MAX_WPA_IE_LEN]; memcpy(buf, network->wpa_ie, network->wpa_ie_len); iwe.cmd = IWEVGENIE; iwe.u.data.length = network->wpa_ie_len; start = iwe_stream_add_point(info, start, stop, &iwe, buf); } memset(&iwe, 0, sizeof(iwe)); if (network->rsn_ie_len) { char buf[MAX_WPA_IE_LEN]; memcpy(buf, network->rsn_ie, network->rsn_ie_len); iwe.cmd = IWEVGENIE; iwe.u.data.length = network->rsn_ie_len; start = iwe_stream_add_point(info, start, stop, &iwe, buf); } #endif /* Add EXTRA: Age to display seconds since last beacon/probe response * for given network. */ iwe.cmd = IWEVCUSTOM; p = custom; p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Last beacon: %lums ago", (jiffies - network->last_scanned) / (HZ / 100)); iwe.u.data.length = p - custom; if (iwe.u.data.length) start = iwe_stream_add_point(info, start, stop, &iwe, custom); return start; } int ieee80211_wx_get_scan(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ieee80211_network *network; unsigned long flags; char *ev = extra; // char *stop = ev + IW_SCAN_MAX_DATA; char *stop = ev + wrqu->data.length;//IW_SCAN_MAX_DATA; //char *stop = ev + IW_SCAN_MAX_DATA; int i = 0; int err = 0; IEEE80211_DEBUG_WX("Getting scan\n"); down(&ieee->wx_sem); spin_lock_irqsave(&ieee->lock, flags); list_for_each_entry(network, &ieee->network_list, list) { i++; if((stop-ev)<200) { err = -E2BIG; break; } if (ieee->scan_age == 0 || time_after(network->last_scanned + ieee->scan_age, jiffies)) ev = rtl819x_translate_scan(ieee, ev, stop, network, info); else IEEE80211_DEBUG_SCAN( "Not showing network '%s (" "%pM)' due to age (%lums).\n", escape_essid(network->ssid, network->ssid_len), network->bssid, (jiffies - network->last_scanned) / (HZ / 100)); } 
spin_unlock_irqrestore(&ieee->lock, flags); up(&ieee->wx_sem); wrqu->data.length = ev - extra; wrqu->data.flags = 0; IEEE80211_DEBUG_WX("exit: %d networks returned.\n", i); return err; } int ieee80211_wx_set_encode(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *keybuf) { struct iw_point *erq = &(wrqu->encoding); struct net_device *dev = ieee->dev; struct ieee80211_security sec = { .flags = 0 }; int i, key, key_provided, len; struct ieee80211_crypt_data **crypt; IEEE80211_DEBUG_WX("SET_ENCODE\n"); key = erq->flags & IW_ENCODE_INDEX; if (key) { if (key > WEP_KEYS) return -EINVAL; key--; key_provided = 1; } else { key_provided = 0; key = ieee->tx_keyidx; } IEEE80211_DEBUG_WX("Key: %d [%s]\n", key, key_provided ? "provided" : "default"); crypt = &ieee->crypt[key]; if (erq->flags & IW_ENCODE_DISABLED) { if (key_provided && *crypt) { IEEE80211_DEBUG_WX("Disabling encryption on key %d.\n", key); ieee80211_crypt_delayed_deinit(ieee, crypt); } else IEEE80211_DEBUG_WX("Disabling encryption.\n"); /* Check all the keys to see if any are still configured, * and if no key index was provided, de-init them all */ for (i = 0; i < WEP_KEYS; i++) { if (ieee->crypt[i] != NULL) { if (key_provided) break; ieee80211_crypt_delayed_deinit( ieee, &ieee->crypt[i]); } } if (i == WEP_KEYS) { sec.enabled = 0; sec.level = SEC_LEVEL_0; sec.flags |= SEC_ENABLED | SEC_LEVEL; } goto done; } sec.enabled = 1; sec.flags |= SEC_ENABLED; if (*crypt != NULL && (*crypt)->ops != NULL && strcmp((*crypt)->ops->name, "WEP") != 0) { /* changing to use WEP; deinit previously used algorithm * on this key */ ieee80211_crypt_delayed_deinit(ieee, crypt); } if (*crypt == NULL) { struct ieee80211_crypt_data *new_crypt; /* take WEP into use */ new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data), GFP_KERNEL); if (new_crypt == NULL) return -ENOMEM; new_crypt->ops = ieee80211_get_crypto_ops("WEP"); if (!new_crypt->ops) { request_module("ieee80211_crypt_wep"); new_crypt->ops 
= ieee80211_get_crypto_ops("WEP"); } if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) new_crypt->priv = new_crypt->ops->init(key); if (!new_crypt->ops || !new_crypt->priv) { kfree(new_crypt); new_crypt = NULL; printk(KERN_WARNING "%s: could not initialize WEP: " "load module ieee80211_crypt_wep\n", dev->name); return -EOPNOTSUPP; } *crypt = new_crypt; } /* If a new key was provided, set it up */ if (erq->length > 0) { len = erq->length <= 5 ? 5 : 13; memcpy(sec.keys[key], keybuf, erq->length); if (len > erq->length) memset(sec.keys[key] + erq->length, 0, len - erq->length); IEEE80211_DEBUG_WX("Setting key %d to '%s' (%d:%d bytes)\n", key, escape_essid(sec.keys[key], len), erq->length, len); sec.key_sizes[key] = len; (*crypt)->ops->set_key(sec.keys[key], len, NULL, (*crypt)->priv); sec.flags |= (1 << key); /* This ensures a key will be activated if no key is * explicitely set */ if (key == sec.active_key) sec.flags |= SEC_ACTIVE_KEY; ieee->tx_keyidx = key; } else { len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN, NULL, (*crypt)->priv); if (len == 0) { /* Set a default key of all 0 */ printk("Setting key %d to all zero.\n", key); IEEE80211_DEBUG_WX("Setting key %d to all zero.\n", key); memset(sec.keys[key], 0, 13); (*crypt)->ops->set_key(sec.keys[key], 13, NULL, (*crypt)->priv); sec.key_sizes[key] = 13; sec.flags |= (1 << key); } /* No key data - just set the default TX key index */ if (key_provided) { IEEE80211_DEBUG_WX( "Setting key %d to default Tx key.\n", key); ieee->tx_keyidx = key; sec.active_key = key; sec.flags |= SEC_ACTIVE_KEY; } } done: ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED); ieee->auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY; sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY; sec.flags |= SEC_AUTH_MODE; IEEE80211_DEBUG_WX("Auth: %s\n", sec.auth_mode == WLAN_AUTH_OPEN ? "OPEN" : "SHARED KEY"); /* For now we just support WEP, so only set that security level... 
* TODO: When WPA is added this is one place that needs to change */ sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */ if (ieee->set_security) ieee->set_security(dev, &sec); /* Do not reset port if card is in Managed mode since resetting will * generate new IEEE 802.11 authentication which may end up in looping * with IEEE 802.1X. If your hardware requires a reset after WEP * configuration (for example... Prism2), implement the reset_port in * the callbacks structures used to initialize the 802.11 stack. */ if (ieee->reset_on_keychange && ieee->iw_mode != IW_MODE_INFRA && ieee->reset_port && ieee->reset_port(dev)) { printk(KERN_DEBUG "%s: reset_port failed\n", dev->name); return -EINVAL; } return 0; } int ieee80211_wx_get_encode(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *keybuf) { struct iw_point *erq = &(wrqu->encoding); int len, key; struct ieee80211_crypt_data *crypt; IEEE80211_DEBUG_WX("GET_ENCODE\n"); if(ieee->iw_mode == IW_MODE_MONITOR) return -1; key = erq->flags & IW_ENCODE_INDEX; if (key) { if (key > WEP_KEYS) return -EINVAL; key--; } else key = ieee->tx_keyidx; crypt = ieee->crypt[key]; erq->flags = key + 1; if (crypt == NULL || crypt->ops == NULL) { erq->length = 0; erq->flags |= IW_ENCODE_DISABLED; return 0; } len = crypt->ops->get_key(keybuf, SCM_KEY_LEN, NULL, crypt->priv); erq->length = (len >= 0 ? 
len : 0); erq->flags |= IW_ENCODE_ENABLED; if (ieee->open_wep) erq->flags |= IW_ENCODE_OPEN; else erq->flags |= IW_ENCODE_RESTRICTED; return 0; } #if (WIRELESS_EXT >= 18) int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; struct net_device *dev = ieee->dev; struct iw_point *encoding = &wrqu->encoding; struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; int i, idx; int group_key = 0; const char *alg, *module; struct ieee80211_crypto_ops *ops; struct ieee80211_crypt_data **crypt; struct ieee80211_security sec = { .flags = 0, }; //printk("======>encoding flag:%x,ext flag:%x, ext alg:%d\n", encoding->flags,ext->ext_flags, ext->alg); idx = encoding->flags & IW_ENCODE_INDEX; if (idx) { if (idx < 1 || idx > WEP_KEYS) return -EINVAL; idx--; } else idx = ieee->tx_keyidx; if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) { crypt = &ieee->crypt[idx]; group_key = 1; } else { /* some Cisco APs use idx>0 for unicast in dynamic WEP */ //printk("not group key, flags:%x, ext->alg:%d\n", ext->ext_flags, ext->alg); if (idx != 0 && ext->alg != IW_ENCODE_ALG_WEP) return -EINVAL; if (ieee->iw_mode == IW_MODE_INFRA) crypt = &ieee->crypt[idx]; else return -EINVAL; } sec.flags |= SEC_ENABLED;// | SEC_ENCRYPT; if ((encoding->flags & IW_ENCODE_DISABLED) || ext->alg == IW_ENCODE_ALG_NONE) { if (*crypt) ieee80211_crypt_delayed_deinit(ieee, crypt); for (i = 0; i < WEP_KEYS; i++) if (ieee->crypt[i] != NULL) break; if (i == WEP_KEYS) { sec.enabled = 0; // sec.encrypt = 0; sec.level = SEC_LEVEL_0; sec.flags |= SEC_LEVEL; } //printk("disabled: flag:%x\n", encoding->flags); goto done; } sec.enabled = 1; // sec.encrypt = 1; switch (ext->alg) { case IW_ENCODE_ALG_WEP: alg = "WEP"; module = "ieee80211_crypt_wep"; break; case IW_ENCODE_ALG_TKIP: alg = "TKIP"; module = "ieee80211_crypt_tkip"; break; case IW_ENCODE_ALG_CCMP: alg = "CCMP"; module = "ieee80211_crypt_ccmp"; break; default: IEEE80211_DEBUG_WX("%s: 
unknown crypto alg %d\n", dev->name, ext->alg); ret = -EINVAL; goto done; } printk("alg name:%s\n",alg); ops = ieee80211_get_crypto_ops(alg); if (ops == NULL) { request_module(module); ops = ieee80211_get_crypto_ops(alg); } if (ops == NULL) { IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n", dev->name, ext->alg); printk("========>unknown crypto alg %d\n", ext->alg); ret = -EINVAL; goto done; } if (*crypt == NULL || (*crypt)->ops != ops) { struct ieee80211_crypt_data *new_crypt; ieee80211_crypt_delayed_deinit(ieee, crypt); new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL); if (new_crypt == NULL) { ret = -ENOMEM; goto done; } new_crypt->ops = ops; if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) new_crypt->priv = new_crypt->ops->init(idx); if (new_crypt->priv == NULL) { kfree(new_crypt); ret = -EINVAL; goto done; } *crypt = new_crypt; } if (ext->key_len > 0 && (*crypt)->ops->set_key && (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq, (*crypt)->priv) < 0) { IEEE80211_DEBUG_WX("%s: key setting failed\n", dev->name); printk("key setting failed\n"); ret = -EINVAL; goto done; } //skip_host_crypt: //printk("skip_host_crypt:ext_flags:%x\n", ext->ext_flags); if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { ieee->tx_keyidx = idx; sec.active_key = idx; sec.flags |= SEC_ACTIVE_KEY; } if (ext->alg != IW_ENCODE_ALG_NONE) { //memcpy(sec.keys[idx], ext->key, ext->key_len); sec.key_sizes[idx] = ext->key_len; sec.flags |= (1 << idx); if (ext->alg == IW_ENCODE_ALG_WEP) { // sec.encode_alg[idx] = SEC_ALG_WEP; sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_1; } else if (ext->alg == IW_ENCODE_ALG_TKIP) { // sec.encode_alg[idx] = SEC_ALG_TKIP; sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_2; } else if (ext->alg == IW_ENCODE_ALG_CCMP) { // sec.encode_alg[idx] = SEC_ALG_CCMP; sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_3; } /* Don't set sec level for group keys. 
*/ if (group_key) sec.flags &= ~SEC_LEVEL; } done: if (ieee->set_security) ieee->set_security(ieee->dev, &sec); if (ieee->reset_on_keychange && ieee->iw_mode != IW_MODE_INFRA && ieee->reset_port && ieee->reset_port(dev)) { IEEE80211_DEBUG_WX("%s: reset_port failed\n", dev->name); return -EINVAL; } return ret; } int ieee80211_wx_get_encode_ext(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_point *encoding = &wrqu->encoding; struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; struct ieee80211_crypt_data *crypt; int idx, max_key_len; max_key_len = encoding->length - sizeof(*ext); if (max_key_len < 0) return -EINVAL; idx = encoding->flags & IW_ENCODE_INDEX; if (idx) { if (idx < 1 || idx > WEP_KEYS) return -EINVAL; idx--; } else idx = ieee->tx_keyidx; if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) && ext->alg != IW_ENCODE_ALG_WEP) if (idx != 0 || ieee->iw_mode != IW_MODE_INFRA) return -EINVAL; crypt = ieee->crypt[idx]; encoding->flags = idx + 1; memset(ext, 0, sizeof(*ext)); if (crypt == NULL || crypt->ops == NULL ) { ext->alg = IW_ENCODE_ALG_NONE; ext->key_len = 0; encoding->flags |= IW_ENCODE_DISABLED; } else { if (strcmp(crypt->ops->name, "WEP") == 0 ) ext->alg = IW_ENCODE_ALG_WEP; else if (strcmp(crypt->ops->name, "TKIP")) ext->alg = IW_ENCODE_ALG_TKIP; else if (strcmp(crypt->ops->name, "CCMP")) ext->alg = IW_ENCODE_ALG_CCMP; else return -EINVAL; ext->key_len = crypt->ops->get_key(ext->key, SCM_KEY_LEN, NULL, crypt->priv); encoding->flags |= IW_ENCODE_ENABLED; if (ext->key_len && (ext->alg == IW_ENCODE_ALG_TKIP || ext->alg == IW_ENCODE_ALG_CCMP)) ext->ext_flags |= IW_ENCODE_EXT_TX_SEQ_VALID; } return 0; } int ieee80211_wx_set_mlme(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_mlme *mlme = (struct iw_mlme *) extra; switch (mlme->cmd) { case IW_MLME_DEAUTH: case IW_MLME_DISASSOC: ieee80211_disassociate(ieee); break; default: return 
-EOPNOTSUPP; } return 0; } int ieee80211_wx_set_auth(struct ieee80211_device *ieee, struct iw_request_info *info, struct iw_param *data, char *extra) { switch (data->flags & IW_AUTH_INDEX) { case IW_AUTH_WPA_VERSION: /*need to support wpa2 here*/ //printk("wpa version:%x\n", data->value); break; case IW_AUTH_CIPHER_PAIRWISE: case IW_AUTH_CIPHER_GROUP: case IW_AUTH_KEY_MGMT: /* * * Host AP driver does not use these parameters and allows * * wpa_supplicant to control them internally. * */ break; case IW_AUTH_TKIP_COUNTERMEASURES: ieee->tkip_countermeasures = data->value; break; case IW_AUTH_DROP_UNENCRYPTED: ieee->drop_unencrypted = data->value; break; case IW_AUTH_80211_AUTH_ALG: //printk("======>%s():data->value is %d\n",__FUNCTION__,data->value); // ieee->open_wep = (data->value&IW_AUTH_ALG_OPEN_SYSTEM)?1:0; if(data->value & IW_AUTH_ALG_SHARED_KEY){ ieee->open_wep = 0; ieee->auth_mode = 1; } else if(data->value & IW_AUTH_ALG_OPEN_SYSTEM){ ieee->open_wep = 1; ieee->auth_mode = 0; } else if(data->value & IW_AUTH_ALG_LEAP){ ieee->open_wep = 1; ieee->auth_mode = 2; //printk("hahahaa:LEAP\n"); } else return -EINVAL; //printk("open_wep:%d\n", ieee->open_wep); break; case IW_AUTH_WPA_ENABLED: ieee->wpa_enabled = (data->value)?1:0; //printk("enalbe wpa:%d\n", ieee->wpa_enabled); break; case IW_AUTH_RX_UNENCRYPTED_EAPOL: ieee->ieee802_1x = data->value; break; case IW_AUTH_PRIVACY_INVOKED: ieee->privacy_invoked = data->value; break; default: return -EOPNOTSUPP; } return 0; } #endif int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len) { u8 *buf; if (len>MAX_WPA_IE_LEN || (len && ie == NULL)) { // printk("return error out, len:%d\n", len); return -EINVAL; } if (len) { if (len != ie[1]+2) { printk("len:%zu, ie:%d\n", len, ie[1]); return -EINVAL; } buf = kmemdup(ie, len, GFP_KERNEL); if (buf == NULL) return -ENOMEM; kfree(ieee->wpa_ie); ieee->wpa_ie = buf; ieee->wpa_ie_len = len; } else{ kfree(ieee->wpa_ie); ieee->wpa_ie = NULL; ieee->wpa_ie_len = 0; } 
return 0; } EXPORT_SYMBOL(ieee80211_wx_set_gen_ie); #if (WIRELESS_EXT >= 18) EXPORT_SYMBOL(ieee80211_wx_set_mlme); EXPORT_SYMBOL(ieee80211_wx_set_auth); EXPORT_SYMBOL(ieee80211_wx_set_encode_ext); EXPORT_SYMBOL(ieee80211_wx_get_encode_ext); #endif EXPORT_SYMBOL(ieee80211_wx_get_scan); EXPORT_SYMBOL(ieee80211_wx_set_encode); EXPORT_SYMBOL(ieee80211_wx_get_encode);
gpl-2.0
moksha11/nvm_3.9
arch/sh/drivers/heartbeat.c
12876
4267
/*
 * Generic heartbeat driver for regular LED banks
 *
 * Copyright (C) 2007 - 2010 Paul Mundt
 *
 * Most SH reference boards include a number of individual LEDs that can
 * be independently controlled (either via a pre-defined hardware
 * function or via the LED class, if desired -- the hardware tends to
 * encapsulate some of the same "triggers" that the LED class supports,
 * so there's not too much value in it).
 *
 * Additionally, most of these boards also have a LED bank that we've
 * traditionally used for strobing the load average. This use case is
 * handled by this driver, rather than giving each LED bit position its
 * own struct device.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/heartbeat.h>

#define DRV_NAME "heartbeat"
#define DRV_VERSION "0.1.2"

/* Default LED layout: one LED per bit, bits 0..7 of the register. */
static unsigned char default_bit_pos[] = { 0, 1, 2, 3, 4, 5, 6, 7 };

/*
 * Toggle a single LED in the bank: compute the new state for the
 * selected bit (optionally inverted for active-low banks), then
 * read-modify-write the register while preserving all bits outside
 * of the driver's mask.  Register width is selected by hd->regsize.
 */
static inline void heartbeat_toggle_bit(struct heartbeat_data *hd,
					unsigned bit, unsigned int inverted)
{
	unsigned int new;

	new = (1 << hd->bit_pos[bit]);
	if (inverted)
		new = ~new;

	new &= hd->mask;

	switch (hd->regsize) {
	case 32:
		new |= ioread32(hd->base) & ~hd->mask;
		iowrite32(new, hd->base);
		break;
	case 16:
		new |= ioread16(hd->base) & ~hd->mask;
		iowrite16(new, hd->base);
		break;
	default:
		new |= ioread8(hd->base) & ~hd->mask;
		iowrite8(new, hd->base);
		break;
	}
}

/*
 * Timer callback: sweep a lit LED back and forth across the bank,
 * reversing direction at either end, and reschedule at an interval
 * derived from the 1-minute load average (avenrun[0]) so the strobe
 * rate tracks system load.
 */
static void heartbeat_timer(unsigned long data)
{
	struct heartbeat_data *hd = (struct heartbeat_data *)data;
	static unsigned bit = 0, up = 1;

	heartbeat_toggle_bit(hd, bit, hd->flags & HEARTBEAT_INVERTED);

	bit += up;
	if ((bit == 0) || (bit == (hd->nr_bits)-1))
		up = -up;

	mod_timer(&hd->timer, jiffies + (110 - ((300 << FSHIFT) /
			((avenrun[0] / 5) + (3 << FSHIFT)))));
}

/*
 * Probe: map the LED bank register, fill in defaults for any fields
 * the board did not supply via platform_data (bit layout, register
 * width), and start the strobe timer.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int heartbeat_drv_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct heartbeat_data *hd;
	int i;

	if (unlikely(pdev->num_resources != 1)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	/*
	 * Board code may hand us a pre-filled heartbeat_data through
	 * platform_data; otherwise allocate a zeroed one and rely on
	 * the defaults below.
	 */
	if (pdev->dev.platform_data) {
		hd = pdev->dev.platform_data;
	} else {
		hd = kzalloc(sizeof(struct heartbeat_data), GFP_KERNEL);
		if (unlikely(!hd))
			return -ENOMEM;
	}

	hd->base = ioremap_nocache(res->start, resource_size(res));
	if (unlikely(!hd->base)) {
		dev_err(&pdev->dev, "ioremap failed\n");

		/* only free what we allocated ourselves */
		if (!pdev->dev.platform_data)
			kfree(hd);

		return -ENXIO;
	}

	if (!hd->nr_bits) {
		hd->bit_pos = default_bit_pos;
		hd->nr_bits = ARRAY_SIZE(default_bit_pos);
	}

	hd->mask = 0;
	for (i = 0; i < hd->nr_bits; i++)
		hd->mask |= (1 << hd->bit_pos[i]);

	if (!hd->regsize) {
		switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
		case IORESOURCE_MEM_32BIT:
			hd->regsize = 32;
			break;
		case IORESOURCE_MEM_16BIT:
			hd->regsize = 16;
			break;
		case IORESOURCE_MEM_8BIT:
		default:
			hd->regsize = 8;
			break;
		}
	}

	setup_timer(&hd->timer, heartbeat_timer, (unsigned long)hd);
	platform_set_drvdata(pdev, hd);

	/*
	 * Kick off the strobe.  Note that mod_timer() returns whether
	 * the timer was already pending (0/1), not an errno, so its
	 * return value must not be propagated as the probe result.
	 */
	mod_timer(&hd->timer, jiffies + 1);

	return 0;
}

/* Remove: stop the timer, unmap the register and undo the probe. */
static int heartbeat_drv_remove(struct platform_device *pdev)
{
	struct heartbeat_data *hd = platform_get_drvdata(pdev);

	del_timer_sync(&hd->timer);
	iounmap(hd->base);

	platform_set_drvdata(pdev, NULL);

	if (!pdev->dev.platform_data)
		kfree(hd);

	return 0;
}

static struct platform_driver heartbeat_driver = {
	.probe		= heartbeat_drv_probe,
	.remove		= heartbeat_drv_remove,
	.driver		= {
		.name	= DRV_NAME,
	},
};

static int __init heartbeat_init(void)
{
	printk(KERN_NOTICE DRV_NAME ": version %s loaded\n", DRV_VERSION);
	return platform_driver_register(&heartbeat_driver);
}

static void __exit heartbeat_exit(void)
{
	platform_driver_unregister(&heartbeat_driver);
}
module_init(heartbeat_init);
module_exit(heartbeat_exit);

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR("Paul Mundt");
MODULE_LICENSE("GPL v2");
gpl-2.0
spock1104/android_kernel_zte_nex
drivers/media/dvb/dvb-core/dvb_filter.c
14668
12922
#include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include "dvb_filter.h" #if 0 static unsigned int bitrates[3][16] = {{0,32,64,96,128,160,192,224,256,288,320,352,384,416,448,0}, {0,32,48,56,64,80,96,112,128,160,192,224,256,320,384,0}, {0,32,40,48,56,64,80,96,112,128,160,192,224,256,320,0}}; #endif static u32 freq[4] = {480, 441, 320, 0}; static unsigned int ac3_bitrates[32] = {32,40,48,56,64,80,96,112,128,160,192,224,256,320,384,448,512,576,640, 0,0,0,0,0,0,0,0,0,0,0,0,0}; static u32 ac3_frames[3][32] = {{64,80,96,112,128,160,192,224,256,320,384,448,512,640,768,896,1024, 1152,1280,0,0,0,0,0,0,0,0,0,0,0,0,0}, {69,87,104,121,139,174,208,243,278,348,417,487,557,696,835,975,1114, 1253,1393,0,0,0,0,0,0,0,0,0,0,0,0,0}, {96,120,144,168,192,240,288,336,384,480,576,672,768,960,1152,1344, 1536,1728,1920,0,0,0,0,0,0,0,0,0,0,0,0,0}}; #if 0 static void setup_ts2pes(ipack *pa, ipack *pv, u16 *pida, u16 *pidv, void (*pes_write)(u8 *buf, int count, void *data), void *priv) { dvb_filter_ipack_init(pa, IPACKS, pes_write); dvb_filter_ipack_init(pv, IPACKS, pes_write); pa->pid = pida; pv->pid = pidv; pa->data = priv; pv->data = priv; } #endif #if 0 static void ts_to_pes(ipack *p, u8 *buf) // don't need count (=188) { u8 off = 0; if (!buf || !p ){ printk("NULL POINTER IDIOT\n"); return; } if (buf[1]&PAY_START) { if (p->plength == MMAX_PLENGTH-6 && p->found>6){ p->plength = p->found-6; p->found = 0; send_ipack(p); dvb_filter_ipack_reset(p); } } if (buf[3] & ADAPT_FIELD) { // adaptation field? 
off = buf[4] + 1; if (off+4 > 187) return; } dvb_filter_instant_repack(buf+4+off, TS_SIZE-4-off, p); } #endif #if 0 /* needs 5 byte input, returns picture coding type*/ static int read_picture_header(u8 *headr, struct mpg_picture *pic, int field, int pr) { u8 pct; if (pr) printk( "Pic header: "); pic->temporal_reference[field] = (( headr[0] << 2 ) | (headr[1] & 0x03) )& 0x03ff; if (pr) printk( " temp ref: 0x%04x", pic->temporal_reference[field]); pct = ( headr[1] >> 2 ) & 0x07; pic->picture_coding_type[field] = pct; if (pr) { switch(pct){ case I_FRAME: printk( " I-FRAME"); break; case B_FRAME: printk( " B-FRAME"); break; case P_FRAME: printk( " P-FRAME"); break; } } pic->vinfo.vbv_delay = (( headr[1] >> 5 ) | ( headr[2] << 3) | ( (headr[3] & 0x1F) << 11) ) & 0xffff; if (pr) printk( " vbv delay: 0x%04x", pic->vinfo.vbv_delay); pic->picture_header_parameter = ( headr[3] & 0xe0 ) | ((headr[4] & 0x80) >> 3); if ( pct == B_FRAME ){ pic->picture_header_parameter |= ( headr[4] >> 3 ) & 0x0f; } if (pr) printk( " pic head param: 0x%x", pic->picture_header_parameter); return pct; } #endif #if 0 /* needs 4 byte input */ static int read_gop_header(u8 *headr, struct mpg_picture *pic, int pr) { if (pr) printk("GOP header: "); pic->time_code = (( headr[0] << 17 ) | ( headr[1] << 9) | ( headr[2] << 1 ) | (headr[3] &0x01)) & 0x1ffffff; if (pr) printk(" time: %d:%d.%d ", (headr[0]>>2)& 0x1F, ((headr[0]<<4)& 0x30)| ((headr[1]>>4)& 0x0F), ((headr[1]<<3)& 0x38)| ((headr[2]>>5)& 0x0F)); if ( ( headr[3] & 0x40 ) != 0 ){ pic->closed_gop = 1; } else { pic->closed_gop = 0; } if (pr) printk("closed: %d", pic->closed_gop); if ( ( headr[3] & 0x20 ) != 0 ){ pic->broken_link = 1; } else { pic->broken_link = 0; } if (pr) printk(" broken: %d\n", pic->broken_link); return 0; } #endif #if 0 /* needs 8 byte input */ static int read_sequence_header(u8 *headr, struct dvb_video_info *vi, int pr) { int sw; int form = -1; if (pr) printk("Reading sequence header\n"); vi->horizontal_size = ((headr[1] &0xF0) 
>> 4) | (headr[0] << 4); vi->vertical_size = ((headr[1] &0x0F) << 8) | (headr[2]); sw = (int)((headr[3]&0xF0) >> 4) ; switch( sw ){ case 1: if (pr) printk("Videostream: ASPECT: 1:1"); vi->aspect_ratio = 100; break; case 2: if (pr) printk("Videostream: ASPECT: 4:3"); vi->aspect_ratio = 133; break; case 3: if (pr) printk("Videostream: ASPECT: 16:9"); vi->aspect_ratio = 177; break; case 4: if (pr) printk("Videostream: ASPECT: 2.21:1"); vi->aspect_ratio = 221; break; case 5 ... 15: if (pr) printk("Videostream: ASPECT: reserved"); vi->aspect_ratio = 0; break; default: vi->aspect_ratio = 0; return -1; } if (pr) printk(" Size = %dx%d",vi->horizontal_size,vi->vertical_size); sw = (int)(headr[3]&0x0F); switch ( sw ) { case 1: if (pr) printk(" FRate: 23.976 fps"); vi->framerate = 23976; form = -1; break; case 2: if (pr) printk(" FRate: 24 fps"); vi->framerate = 24000; form = -1; break; case 3: if (pr) printk(" FRate: 25 fps"); vi->framerate = 25000; form = VIDEO_MODE_PAL; break; case 4: if (pr) printk(" FRate: 29.97 fps"); vi->framerate = 29970; form = VIDEO_MODE_NTSC; break; case 5: if (pr) printk(" FRate: 30 fps"); vi->framerate = 30000; form = VIDEO_MODE_NTSC; break; case 6: if (pr) printk(" FRate: 50 fps"); vi->framerate = 50000; form = VIDEO_MODE_PAL; break; case 7: if (pr) printk(" FRate: 60 fps"); vi->framerate = 60000; form = VIDEO_MODE_NTSC; break; } vi->bit_rate = (headr[4] << 10) | (headr[5] << 2) | (headr[6] & 0x03); vi->vbv_buffer_size = (( headr[6] & 0xF8) >> 3 ) | (( headr[7] & 0x1F )<< 5); if (pr){ printk(" BRate: %d Mbit/s",4*(vi->bit_rate)/10000); printk(" vbvbuffer %d",16*1024*(vi->vbv_buffer_size)); printk("\n"); } vi->video_format = form; return 0; } #endif #if 0 static int get_vinfo(u8 *mbuf, int count, struct dvb_video_info *vi, int pr) { u8 *headr; int found = 0; int c = 0; while (found < 4 && c+4 < count){ u8 *b; b = mbuf+c; if ( b[0] == 0x00 && b[1] == 0x00 && b[2] == 0x01 && b[3] == 0xb3) found = 4; else { c++; } } if (! 
found) return -1; c += 4; if (c+12 >= count) return -1; headr = mbuf+c; if (read_sequence_header(headr, vi, pr) < 0) return -1; vi->off = c-4; return 0; } #endif #if 0 static int get_ainfo(u8 *mbuf, int count, struct dvb_audio_info *ai, int pr) { u8 *headr; int found = 0; int c = 0; int fr = 0; while (found < 2 && c < count){ u8 b[2]; memcpy( b, mbuf+c, 2); if ( b[0] == 0xff && (b[1] & 0xf8) == 0xf8) found = 2; else { c++; } } if (!found) return -1; if (c+3 >= count) return -1; headr = mbuf+c; ai->layer = (headr[1] & 0x06) >> 1; if (pr) printk("Audiostream: Layer: %d", 4-ai->layer); ai->bit_rate = bitrates[(3-ai->layer)][(headr[2] >> 4 )]*1000; if (pr){ if (ai->bit_rate == 0) printk(" Bit rate: free"); else if (ai->bit_rate == 0xf) printk(" BRate: reserved"); else printk(" BRate: %d kb/s", ai->bit_rate/1000); } fr = (headr[2] & 0x0c ) >> 2; ai->frequency = freq[fr]*100; if (pr){ if (ai->frequency == 3) printk(" Freq: reserved\n"); else printk(" Freq: %d kHz\n",ai->frequency); } ai->off = c; return 0; } #endif int dvb_filter_get_ac3info(u8 *mbuf, int count, struct dvb_audio_info *ai, int pr) { u8 *headr; int found = 0; int c = 0; u8 frame = 0; int fr = 0; while ( !found && c < count){ u8 *b = mbuf+c; if ( b[0] == 0x0b && b[1] == 0x77 ) found = 1; else { c++; } } if (!found) return -1; if (pr) printk("Audiostream: AC3"); ai->off = c; if (c+5 >= count) return -1; ai->layer = 0; // 0 for AC3 headr = mbuf+c+2; frame = (headr[2]&0x3f); ai->bit_rate = ac3_bitrates[frame >> 1]*1000; if (pr) printk(" BRate: %d kb/s", (int) ai->bit_rate/1000); ai->frequency = (headr[2] & 0xc0 ) >> 6; fr = (headr[2] & 0xc0 ) >> 6; ai->frequency = freq[fr]*100; if (pr) printk (" Freq: %d Hz\n", (int) ai->frequency); ai->framesize = ac3_frames[fr][frame >> 1]; if ((frame & 1) && (fr == 1)) ai->framesize++; ai->framesize = ai->framesize << 1; if (pr) printk (" Framesize %d\n",(int) ai->framesize); return 0; } EXPORT_SYMBOL(dvb_filter_get_ac3info); #if 0 static u8 *skip_pes_header(u8 **bufp) { u8 
*inbuf = *bufp; u8 *buf = inbuf; u8 *pts = NULL; int skip = 0; static const int mpeg1_skip_table[16] = { 1, 0xffff, 5, 10, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff }; if ((inbuf[6] & 0xc0) == 0x80){ /* mpeg2 */ if (buf[7] & PTS_ONLY) pts = buf+9; else pts = NULL; buf = inbuf + 9 + inbuf[8]; } else { /* mpeg1 */ for (buf = inbuf + 6; *buf == 0xff; buf++) if (buf == inbuf + 6 + 16) { break; } if ((*buf & 0xc0) == 0x40) buf += 2; skip = mpeg1_skip_table [*buf >> 4]; if (skip == 5 || skip == 10) pts = buf; else pts = NULL; buf += mpeg1_skip_table [*buf >> 4]; } *bufp = buf; return pts; } #endif #if 0 static void initialize_quant_matrix( u32 *matrix ) { int i; matrix[0] = 0x08101013; matrix[1] = 0x10131616; matrix[2] = 0x16161616; matrix[3] = 0x1a181a1b; matrix[4] = 0x1b1b1a1a; matrix[5] = 0x1a1a1b1b; matrix[6] = 0x1b1d1d1d; matrix[7] = 0x2222221d; matrix[8] = 0x1d1d1b1b; matrix[9] = 0x1d1d2020; matrix[10] = 0x22222526; matrix[11] = 0x25232322; matrix[12] = 0x23262628; matrix[13] = 0x28283030; matrix[14] = 0x2e2e3838; matrix[15] = 0x3a454553; for ( i = 16 ; i < 32 ; i++ ) matrix[i] = 0x10101010; } #endif #if 0 static void initialize_mpg_picture(struct mpg_picture *pic) { int i; /* set MPEG1 */ pic->mpeg1_flag = 1; pic->profile_and_level = 0x4A ; /* MP@LL */ pic->progressive_sequence = 1; pic->low_delay = 0; pic->sequence_display_extension_flag = 0; for ( i = 0 ; i < 4 ; i++ ){ pic->frame_centre_horizontal_offset[i] = 0; pic->frame_centre_vertical_offset[i] = 0; } pic->last_frame_centre_horizontal_offset = 0; pic->last_frame_centre_vertical_offset = 0; pic->picture_display_extension_flag[0] = 0; pic->picture_display_extension_flag[1] = 0; pic->sequence_header_flag = 0; pic->gop_flag = 0; pic->sequence_end_flag = 0; } #endif #if 0 static void mpg_set_picture_parameter( int32_t field_type, struct mpg_picture *pic ) { int16_t last_h_offset; int16_t last_v_offset; int16_t *p_h_offset; int16_t *p_v_offset; if ( 
pic->mpeg1_flag ){ pic->picture_structure[field_type] = VIDEO_FRAME_PICTURE; pic->top_field_first = 0; pic->repeat_first_field = 0; pic->progressive_frame = 1; pic->picture_coding_parameter = 0x000010; } /* Reset flag */ pic->picture_display_extension_flag[field_type] = 0; last_h_offset = pic->last_frame_centre_horizontal_offset; last_v_offset = pic->last_frame_centre_vertical_offset; if ( field_type == FIRST_FIELD ){ p_h_offset = pic->frame_centre_horizontal_offset; p_v_offset = pic->frame_centre_vertical_offset; *p_h_offset = last_h_offset; *(p_h_offset + 1) = last_h_offset; *(p_h_offset + 2) = last_h_offset; *p_v_offset = last_v_offset; *(p_v_offset + 1) = last_v_offset; *(p_v_offset + 2) = last_v_offset; } else { pic->frame_centre_horizontal_offset[3] = last_h_offset; pic->frame_centre_vertical_offset[3] = last_v_offset; } } #endif #if 0 static void init_mpg_picture( struct mpg_picture *pic, int chan, int32_t field_type) { pic->picture_header = 0; pic->sequence_header_data = ( INIT_HORIZONTAL_SIZE << 20 ) | ( INIT_VERTICAL_SIZE << 8 ) | ( INIT_ASPECT_RATIO << 4 ) | ( INIT_FRAME_RATE ); pic->mpeg1_flag = 0; pic->vinfo.horizontal_size = INIT_DISP_HORIZONTAL_SIZE; pic->vinfo.vertical_size = INIT_DISP_VERTICAL_SIZE; pic->picture_display_extension_flag[field_type] = 0; pic->pts_flag[field_type] = 0; pic->sequence_gop_header = 0; pic->picture_header = 0; pic->sequence_header_flag = 0; pic->gop_flag = 0; pic->sequence_end_flag = 0; pic->sequence_display_extension_flag = 0; pic->last_frame_centre_horizontal_offset = 0; pic->last_frame_centre_vertical_offset = 0; pic->channel = chan; } #endif void dvb_filter_pes2ts_init(struct dvb_filter_pes2ts *p2ts, unsigned short pid, dvb_filter_pes2ts_cb_t *cb, void *priv) { unsigned char *buf=p2ts->buf; buf[0]=0x47; buf[1]=(pid>>8); buf[2]=pid&0xff; p2ts->cc=0; p2ts->cb=cb; p2ts->priv=priv; } EXPORT_SYMBOL(dvb_filter_pes2ts_init); int dvb_filter_pes2ts(struct dvb_filter_pes2ts *p2ts, unsigned char *pes, int len, int payload_start) 
{ unsigned char *buf=p2ts->buf; int ret=0, rest; //len=6+((pes[4]<<8)|pes[5]); if (payload_start) buf[1]|=0x40; else buf[1]&=~0x40; while (len>=184) { buf[3]=0x10|((p2ts->cc++)&0x0f); memcpy(buf+4, pes, 184); if ((ret=p2ts->cb(p2ts->priv, buf))) return ret; len-=184; pes+=184; buf[1]&=~0x40; } if (!len) return 0; buf[3]=0x30|((p2ts->cc++)&0x0f); rest=183-len; if (rest) { buf[5]=0x00; if (rest-1) memset(buf+6, 0xff, rest-1); } buf[4]=rest; memcpy(buf+5+rest, pes, len); return p2ts->cb(p2ts->priv, buf); } EXPORT_SYMBOL(dvb_filter_pes2ts);
gpl-2.0
pepedog/linux-imx6-3.14
net/batman-adv/gateway_client.c
77
24602
/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors: * * Marek Lindner * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include "main.h" #include "sysfs.h" #include "gateway_client.h" #include "gateway_common.h" #include "hard-interface.h" #include "originator.h" #include "translation-table.h" #include "routing.h" #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/udp.h> #include <linux/if_vlan.h> /* These are the offsets of the "hw type" and "hw address length" in the dhcp * packet starting at the beginning of the dhcp header */ #define BATADV_DHCP_HTYPE_OFFSET 1 #define BATADV_DHCP_HLEN_OFFSET 2 /* Value of htype representing Ethernet */ #define BATADV_DHCP_HTYPE_ETHERNET 0x01 /* This is the offset of the "chaddr" field in the dhcp packet starting at the * beginning of the dhcp header */ #define BATADV_DHCP_CHADDR_OFFSET 28 static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node) { if (atomic_dec_and_test(&gw_node->refcount)) { batadv_orig_node_free_ref(gw_node->orig_node); kfree_rcu(gw_node, rcu); } } static struct batadv_gw_node * batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv) { struct batadv_gw_node *gw_node; rcu_read_lock(); gw_node = rcu_dereference(bat_priv->gw.curr_gw); if (!gw_node) goto out; if (!atomic_inc_not_zero(&gw_node->refcount)) gw_node = NULL; out: rcu_read_unlock(); return gw_node; } struct batadv_orig_node * batadv_gw_get_selected_orig(struct batadv_priv *bat_priv) { struct batadv_gw_node 
*gw_node; struct batadv_orig_node *orig_node = NULL; gw_node = batadv_gw_get_selected_gw_node(bat_priv); if (!gw_node) goto out; rcu_read_lock(); orig_node = gw_node->orig_node; if (!orig_node) goto unlock; if (!atomic_inc_not_zero(&orig_node->refcount)) orig_node = NULL; unlock: rcu_read_unlock(); out: if (gw_node) batadv_gw_node_free_ref(gw_node); return orig_node; } static void batadv_gw_select(struct batadv_priv *bat_priv, struct batadv_gw_node *new_gw_node) { struct batadv_gw_node *curr_gw_node; spin_lock_bh(&bat_priv->gw.list_lock); if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount)) new_gw_node = NULL; curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1); rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node); if (curr_gw_node) batadv_gw_node_free_ref(curr_gw_node); spin_unlock_bh(&bat_priv->gw.list_lock); } /** * batadv_gw_reselect - force a gateway reselection * @bat_priv: the bat priv with all the soft interface information * * Set a flag to remind the GW component to perform a new gateway reselection. * However this function does not ensure that the current gateway is going to be * deselected. The reselection mechanism may elect the same gateway once again. * * This means that invoking batadv_gw_reselect() does not guarantee a gateway * change and therefore a uevent is not necessarily expected. 
*/ void batadv_gw_reselect(struct batadv_priv *bat_priv) { atomic_set(&bat_priv->gw.reselect, 1); } static struct batadv_gw_node * batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv) { struct batadv_neigh_node *router; struct batadv_neigh_ifinfo *router_ifinfo; struct batadv_gw_node *gw_node, *curr_gw = NULL; uint32_t max_gw_factor = 0, tmp_gw_factor = 0; uint32_t gw_divisor; uint8_t max_tq = 0; uint8_t tq_avg; struct batadv_orig_node *orig_node; gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE; gw_divisor *= 64; rcu_read_lock(); hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { if (gw_node->deleted) continue; orig_node = gw_node->orig_node; router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT); if (!router) continue; router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT); if (!router_ifinfo) goto next; if (!atomic_inc_not_zero(&gw_node->refcount)) goto next; tq_avg = router_ifinfo->bat_iv.tq_avg; switch (atomic_read(&bat_priv->gw_sel_class)) { case 1: /* fast connection */ tmp_gw_factor = tq_avg * tq_avg; tmp_gw_factor *= gw_node->bandwidth_down; tmp_gw_factor *= 100 * 100; tmp_gw_factor /= gw_divisor; if ((tmp_gw_factor > max_gw_factor) || ((tmp_gw_factor == max_gw_factor) && (tq_avg > max_tq))) { if (curr_gw) batadv_gw_node_free_ref(curr_gw); curr_gw = gw_node; atomic_inc(&curr_gw->refcount); } break; default: /* 2: stable connection (use best statistic) * 3: fast-switch (use best statistic but change as * soon as a better gateway appears) * XX: late-switch (use best statistic but change as * soon as a better gateway appears which has * $routing_class more tq points) */ if (tq_avg > max_tq) { if (curr_gw) batadv_gw_node_free_ref(curr_gw); curr_gw = gw_node; atomic_inc(&curr_gw->refcount); } break; } if (tq_avg > max_tq) max_tq = tq_avg; if (tmp_gw_factor > max_gw_factor) max_gw_factor = tmp_gw_factor; batadv_gw_node_free_ref(gw_node); next: batadv_neigh_node_free_ref(router); if (router_ifinfo) 
batadv_neigh_ifinfo_free_ref(router_ifinfo); } rcu_read_unlock(); return curr_gw; } /** * batadv_gw_check_client_stop - check if client mode has been switched off * @bat_priv: the bat priv with all the soft interface information * * This function assumes the caller has checked that the gw state *is actually * changing*. This function is not supposed to be called when there is no state * change. */ void batadv_gw_check_client_stop(struct batadv_priv *bat_priv) { struct batadv_gw_node *curr_gw; if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT) return; curr_gw = batadv_gw_get_selected_gw_node(bat_priv); if (!curr_gw) return; /* deselect the current gateway so that next time that client mode is * enabled a proper GW_ADD event can be sent */ batadv_gw_select(bat_priv, NULL); /* if batman-adv is switching the gw client mode off and a gateway was * already selected, send a DEL uevent */ batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL, NULL); batadv_gw_node_free_ref(curr_gw); } void batadv_gw_election(struct batadv_priv *bat_priv) { struct batadv_gw_node *curr_gw = NULL, *next_gw = NULL; struct batadv_neigh_node *router = NULL; struct batadv_neigh_ifinfo *router_ifinfo = NULL; char gw_addr[18] = { '\0' }; if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT) goto out; curr_gw = batadv_gw_get_selected_gw_node(bat_priv); if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw) goto out; next_gw = batadv_gw_get_best_gw_node(bat_priv); if (curr_gw == next_gw) goto out; if (next_gw) { sprintf(gw_addr, "%pM", next_gw->orig_node->orig); router = batadv_orig_router_get(next_gw->orig_node, BATADV_IF_DEFAULT); if (!router) { batadv_gw_reselect(bat_priv); goto out; } router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT); if (!router_ifinfo) { batadv_gw_reselect(bat_priv); goto out; } } if ((curr_gw) && (!next_gw)) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Removing selected gateway - no gateway in range\n"); 
batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL, NULL); } else if ((!curr_gw) && (next_gw)) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Adding route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n", next_gw->orig_node->orig, next_gw->bandwidth_down / 10, next_gw->bandwidth_down % 10, next_gw->bandwidth_up / 10, next_gw->bandwidth_up % 10, router_ifinfo->bat_iv.tq_avg); batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_ADD, gw_addr); } else { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Changing route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n", next_gw->orig_node->orig, next_gw->bandwidth_down / 10, next_gw->bandwidth_down % 10, next_gw->bandwidth_up / 10, next_gw->bandwidth_up % 10, router_ifinfo->bat_iv.tq_avg); batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_CHANGE, gw_addr); } batadv_gw_select(bat_priv, next_gw); out: if (curr_gw) batadv_gw_node_free_ref(curr_gw); if (next_gw) batadv_gw_node_free_ref(next_gw); if (router) batadv_neigh_node_free_ref(router); if (router_ifinfo) batadv_neigh_ifinfo_free_ref(router_ifinfo); } void batadv_gw_check_election(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node) { struct batadv_neigh_ifinfo *router_orig_tq = NULL; struct batadv_neigh_ifinfo *router_gw_tq = NULL; struct batadv_orig_node *curr_gw_orig; struct batadv_neigh_node *router_gw = NULL, *router_orig = NULL; uint8_t gw_tq_avg, orig_tq_avg; curr_gw_orig = batadv_gw_get_selected_orig(bat_priv); if (!curr_gw_orig) goto reselect; router_gw = batadv_orig_router_get(curr_gw_orig, BATADV_IF_DEFAULT); if (!router_gw) goto reselect; router_gw_tq = batadv_neigh_ifinfo_get(router_gw, BATADV_IF_DEFAULT); if (!router_gw_tq) goto reselect; /* this node already is the gateway */ if (curr_gw_orig == orig_node) goto out; router_orig = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT); if (!router_orig) goto out; router_orig_tq = batadv_neigh_ifinfo_get(router_orig, BATADV_IF_DEFAULT); if (!router_orig_tq) goto out; gw_tq_avg 
= router_gw_tq->bat_iv.tq_avg; orig_tq_avg = router_orig_tq->bat_iv.tq_avg; /* the TQ value has to be better */ if (orig_tq_avg < gw_tq_avg) goto out; /* if the routing class is greater than 3 the value tells us how much * greater the TQ value of the new gateway must be */ if ((atomic_read(&bat_priv->gw_sel_class) > 3) && (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class))) goto out; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n", gw_tq_avg, orig_tq_avg); reselect: batadv_gw_reselect(bat_priv); out: if (curr_gw_orig) batadv_orig_node_free_ref(curr_gw_orig); if (router_gw) batadv_neigh_node_free_ref(router_gw); if (router_orig) batadv_neigh_node_free_ref(router_orig); if (router_gw_tq) batadv_neigh_ifinfo_free_ref(router_gw_tq); if (router_orig_tq) batadv_neigh_ifinfo_free_ref(router_orig_tq); return; } /** * batadv_gw_node_add - add gateway node to list of available gateways * @bat_priv: the bat priv with all the soft interface information * @orig_node: originator announcing gateway capabilities * @gateway: announced bandwidth information */ static void batadv_gw_node_add(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, struct batadv_tvlv_gateway_data *gateway) { struct batadv_gw_node *gw_node; if (gateway->bandwidth_down == 0) return; if (!atomic_inc_not_zero(&orig_node->refcount)) return; gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC); if (!gw_node) { batadv_orig_node_free_ref(orig_node); return; } INIT_HLIST_NODE(&gw_node->list); gw_node->orig_node = orig_node; atomic_set(&gw_node->refcount, 1); spin_lock_bh(&bat_priv->gw.list_lock); hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list); spin_unlock_bh(&bat_priv->gw.list_lock); batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n", orig_node->orig, ntohl(gateway->bandwidth_down) / 10, ntohl(gateway->bandwidth_down) % 10, ntohl(gateway->bandwidth_up) / 10, 
ntohl(gateway->bandwidth_up) % 10); } /** * batadv_gw_node_get - retrieve gateway node from list of available gateways * @bat_priv: the bat priv with all the soft interface information * @orig_node: originator announcing gateway capabilities * * Returns gateway node if found or NULL otherwise. */ static struct batadv_gw_node * batadv_gw_node_get(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node) { struct batadv_gw_node *gw_node_tmp, *gw_node = NULL; rcu_read_lock(); hlist_for_each_entry_rcu(gw_node_tmp, &bat_priv->gw.list, list) { if (gw_node_tmp->orig_node != orig_node) continue; if (gw_node_tmp->deleted) continue; if (!atomic_inc_not_zero(&gw_node_tmp->refcount)) continue; gw_node = gw_node_tmp; break; } rcu_read_unlock(); return gw_node; } /** * batadv_gw_node_update - update list of available gateways with changed * bandwidth information * @bat_priv: the bat priv with all the soft interface information * @orig_node: originator announcing gateway capabilities * @gateway: announced bandwidth information */ void batadv_gw_node_update(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, struct batadv_tvlv_gateway_data *gateway) { struct batadv_gw_node *gw_node, *curr_gw = NULL; gw_node = batadv_gw_node_get(bat_priv, orig_node); if (!gw_node) { batadv_gw_node_add(bat_priv, orig_node, gateway); goto out; } if ((gw_node->bandwidth_down == ntohl(gateway->bandwidth_down)) && (gw_node->bandwidth_up == ntohl(gateway->bandwidth_up))) goto out; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Gateway bandwidth of originator %pM changed from %u.%u/%u.%u MBit to %u.%u/%u.%u MBit\n", orig_node->orig, gw_node->bandwidth_down / 10, gw_node->bandwidth_down % 10, gw_node->bandwidth_up / 10, gw_node->bandwidth_up % 10, ntohl(gateway->bandwidth_down) / 10, ntohl(gateway->bandwidth_down) % 10, ntohl(gateway->bandwidth_up) / 10, ntohl(gateway->bandwidth_up) % 10); gw_node->bandwidth_down = ntohl(gateway->bandwidth_down); gw_node->bandwidth_up = 
ntohl(gateway->bandwidth_up); gw_node->deleted = 0; if (ntohl(gateway->bandwidth_down) == 0) { gw_node->deleted = jiffies; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Gateway %pM removed from gateway list\n", orig_node->orig); /* Note: We don't need a NULL check here, since curr_gw never * gets dereferenced. */ curr_gw = batadv_gw_get_selected_gw_node(bat_priv); if (gw_node == curr_gw) batadv_gw_reselect(bat_priv); } out: if (curr_gw) batadv_gw_node_free_ref(curr_gw); if (gw_node) batadv_gw_node_free_ref(gw_node); } void batadv_gw_node_delete(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node) { struct batadv_tvlv_gateway_data gateway; gateway.bandwidth_down = 0; gateway.bandwidth_up = 0; batadv_gw_node_update(bat_priv, orig_node, &gateway); } void batadv_gw_node_purge(struct batadv_priv *bat_priv) { struct batadv_gw_node *gw_node, *curr_gw; struct hlist_node *node_tmp; unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT); int do_reselect = 0; curr_gw = batadv_gw_get_selected_gw_node(bat_priv); spin_lock_bh(&bat_priv->gw.list_lock); hlist_for_each_entry_safe(gw_node, node_tmp, &bat_priv->gw.list, list) { if (((!gw_node->deleted) || (time_before(jiffies, gw_node->deleted + timeout))) && atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) continue; if (curr_gw == gw_node) do_reselect = 1; hlist_del_rcu(&gw_node->list); batadv_gw_node_free_ref(gw_node); } spin_unlock_bh(&bat_priv->gw.list_lock); /* gw_reselect() needs to acquire the gw_list_lock */ if (do_reselect) batadv_gw_reselect(bat_priv); if (curr_gw) batadv_gw_node_free_ref(curr_gw); } /* fails if orig_node has no router */ static int batadv_write_buffer_text(struct batadv_priv *bat_priv, struct seq_file *seq, const struct batadv_gw_node *gw_node) { struct batadv_gw_node *curr_gw; struct batadv_neigh_node *router; struct batadv_neigh_ifinfo *router_ifinfo = NULL; int ret = -1; router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); if (!router) goto out; 
router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT); if (!router_ifinfo) goto out; curr_gw = batadv_gw_get_selected_gw_node(bat_priv); ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n", (curr_gw == gw_node ? "=>" : " "), gw_node->orig_node->orig, router_ifinfo->bat_iv.tq_avg, router->addr, router->if_incoming->net_dev->name, gw_node->bandwidth_down / 10, gw_node->bandwidth_down % 10, gw_node->bandwidth_up / 10, gw_node->bandwidth_up % 10); if (curr_gw) batadv_gw_node_free_ref(curr_gw); out: if (router_ifinfo) batadv_neigh_ifinfo_free_ref(router_ifinfo); if (router) batadv_neigh_node_free_ref(router); return ret; } int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; struct batadv_priv *bat_priv = netdev_priv(net_dev); struct batadv_hard_iface *primary_if; struct batadv_gw_node *gw_node; int gw_count = 0; primary_if = batadv_seq_print_text_primary_if_get(seq); if (!primary_if) goto out; seq_printf(seq, " %-12s (%s/%i) %17s [%10s]: advertised uplink bandwidth ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n", "Gateway", "#", BATADV_TQ_MAX_VALUE, "Nexthop", "outgoingIF", BATADV_SOURCE_VERSION, primary_if->net_dev->name, primary_if->net_dev->dev_addr, net_dev->name); rcu_read_lock(); hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { if (gw_node->deleted) continue; /* fails if orig_node has no router */ if (batadv_write_buffer_text(bat_priv, seq, gw_node) < 0) continue; gw_count++; } rcu_read_unlock(); if (gw_count == 0) seq_puts(seq, "No gateways in range ...\n"); out: if (primary_if) batadv_hardif_free_ref(primary_if); return 0; } /** * batadv_gw_dhcp_recipient_get - check if a packet is a DHCP message * @skb: the packet to check * @header_len: a pointer to the batman-adv header size * @chaddr: buffer where the client address will be stored. 
Valid * only if the function returns BATADV_DHCP_TO_CLIENT * * Returns: * - BATADV_DHCP_NO if the packet is not a dhcp message or if there was an error * while parsing it * - BATADV_DHCP_TO_SERVER if this is a message going to the DHCP server * - BATADV_DHCP_TO_CLIENT if this is a message going to a DHCP client * * This function may re-allocate the data buffer of the skb passed as argument. */ enum batadv_dhcp_recipient batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len, uint8_t *chaddr) { enum batadv_dhcp_recipient ret = BATADV_DHCP_NO; struct ethhdr *ethhdr; struct iphdr *iphdr; struct ipv6hdr *ipv6hdr; struct udphdr *udphdr; struct vlan_ethhdr *vhdr; int chaddr_offset; __be16 proto; uint8_t *p; /* check for ethernet header */ if (!pskb_may_pull(skb, *header_len + ETH_HLEN)) return BATADV_DHCP_NO; ethhdr = (struct ethhdr *)skb->data; proto = ethhdr->h_proto; *header_len += ETH_HLEN; /* check for initial vlan header */ if (proto == htons(ETH_P_8021Q)) { if (!pskb_may_pull(skb, *header_len + VLAN_HLEN)) return BATADV_DHCP_NO; vhdr = (struct vlan_ethhdr *)skb->data; proto = vhdr->h_vlan_encapsulated_proto; *header_len += VLAN_HLEN; } /* check for ip header */ switch (proto) { case htons(ETH_P_IP): if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr))) return BATADV_DHCP_NO; iphdr = (struct iphdr *)(skb->data + *header_len); *header_len += iphdr->ihl * 4; /* check for udp header */ if (iphdr->protocol != IPPROTO_UDP) return BATADV_DHCP_NO; break; case htons(ETH_P_IPV6): if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr))) return BATADV_DHCP_NO; ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len); *header_len += sizeof(*ipv6hdr); /* check for udp header */ if (ipv6hdr->nexthdr != IPPROTO_UDP) return BATADV_DHCP_NO; break; default: return BATADV_DHCP_NO; } if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr))) return BATADV_DHCP_NO; /* skb->data might have been reallocated by pskb_may_pull() */ ethhdr = (struct ethhdr *)skb->data; if 
(ntohs(ethhdr->h_proto) == ETH_P_8021Q) ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN); udphdr = (struct udphdr *)(skb->data + *header_len); *header_len += sizeof(*udphdr); /* check for bootp port */ switch (proto) { case htons(ETH_P_IP): if (udphdr->dest == htons(67)) ret = BATADV_DHCP_TO_SERVER; else if (udphdr->source == htons(67)) ret = BATADV_DHCP_TO_CLIENT; break; case htons(ETH_P_IPV6): if (udphdr->dest == htons(547)) ret = BATADV_DHCP_TO_SERVER; else if (udphdr->source == htons(547)) ret = BATADV_DHCP_TO_CLIENT; break; } chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET; /* store the client address if the message is going to a client */ if (ret == BATADV_DHCP_TO_CLIENT && pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) { /* check if the DHCP packet carries an Ethernet DHCP */ p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET; if (*p != BATADV_DHCP_HTYPE_ETHERNET) return BATADV_DHCP_NO; /* check if the DHCP packet carries a valid Ethernet address */ p = skb->data + *header_len + BATADV_DHCP_HLEN_OFFSET; if (*p != ETH_ALEN) return BATADV_DHCP_NO; memcpy(chaddr, skb->data + chaddr_offset, ETH_ALEN); } return ret; } /** * batadv_gw_out_of_range - check if the dhcp request destination is the best gw * @bat_priv: the bat priv with all the soft interface information * @skb: the outgoing packet * * Check if the skb is a DHCP request and if it is sent to the current best GW * server. Due to topology changes it may be the case that the GW server * previously selected is not the best one anymore. * * Returns true if the packet destination is unicast and it is not the best gw, * false otherwise. * * This call might reallocate skb data. * Must be invoked only when the DHCP packet is going TO a DHCP SERVER. 
*/ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb) { struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL; struct batadv_orig_node *orig_dst_node = NULL; struct batadv_gw_node *gw_node = NULL, *curr_gw = NULL; struct batadv_neigh_ifinfo *curr_ifinfo, *old_ifinfo; struct ethhdr *ethhdr = (struct ethhdr *)skb->data; bool out_of_range = false; uint8_t curr_tq_avg; unsigned short vid; vid = batadv_get_vid(skb, 0); orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, ethhdr->h_dest, vid); if (!orig_dst_node) goto out; gw_node = batadv_gw_node_get(bat_priv, orig_dst_node); if (!gw_node->bandwidth_down == 0) goto out; switch (atomic_read(&bat_priv->gw_mode)) { case BATADV_GW_MODE_SERVER: /* If we are a GW then we are our best GW. We can artificially * set the tq towards ourself as the maximum value */ curr_tq_avg = BATADV_TQ_MAX_VALUE; break; case BATADV_GW_MODE_CLIENT: curr_gw = batadv_gw_get_selected_gw_node(bat_priv); if (!curr_gw) goto out; /* packet is going to our gateway */ if (curr_gw->orig_node == orig_dst_node) goto out; /* If the dhcp packet has been sent to a different gw, * we have to evaluate whether the old gw is still * reliable enough */ neigh_curr = batadv_find_router(bat_priv, curr_gw->orig_node, NULL); if (!neigh_curr) goto out; curr_ifinfo = batadv_neigh_ifinfo_get(neigh_curr, BATADV_IF_DEFAULT); if (!curr_ifinfo) goto out; curr_tq_avg = curr_ifinfo->bat_iv.tq_avg; batadv_neigh_ifinfo_free_ref(curr_ifinfo); break; case BATADV_GW_MODE_OFF: default: goto out; } neigh_old = batadv_find_router(bat_priv, orig_dst_node, NULL); if (!neigh_old) goto out; old_ifinfo = batadv_neigh_ifinfo_get(neigh_old, BATADV_IF_DEFAULT); if (!old_ifinfo) goto out; if ((curr_tq_avg - old_ifinfo->bat_iv.tq_avg) > BATADV_GW_THRESHOLD) out_of_range = true; batadv_neigh_ifinfo_free_ref(old_ifinfo); out: if (orig_dst_node) batadv_orig_node_free_ref(orig_dst_node); if (curr_gw) batadv_gw_node_free_ref(curr_gw); if (gw_node) 
batadv_gw_node_free_ref(gw_node); if (neigh_old) batadv_neigh_node_free_ref(neigh_old); if (neigh_curr) batadv_neigh_node_free_ref(neigh_curr); return out_of_range; }
gpl-2.0
stevenrennermpls/lavfilters
common/baseclasses/transfrm.cpp
77
27582
//------------------------------------------------------------------------------ // File: Transfrm.cpp // // Desc: DirectShow base classes - implements class for simple transform // filters such as video decompressors. // // Copyright (c) 1992-2001 Microsoft Corporation. All rights reserved. //------------------------------------------------------------------------------ #include <streams.h> #include <measure.h> // ================================================================= // Implements the CTransformFilter class // ================================================================= CTransformFilter::CTransformFilter(__in_opt LPCTSTR pName, __inout_opt LPUNKNOWN pUnk, REFCLSID clsid) : CBaseFilter(pName,pUnk,&m_csFilter, clsid), m_pInput(NULL), m_pOutput(NULL), m_bEOSDelivered(FALSE), m_bQualityChanged(FALSE), m_bSampleSkipped(FALSE) { #ifdef PERF RegisterPerfId(); #endif // PERF } #ifdef UNICODE CTransformFilter::CTransformFilter(__in_opt LPCSTR pName, __inout_opt LPUNKNOWN pUnk, REFCLSID clsid) : CBaseFilter(pName,pUnk,&m_csFilter, clsid), m_pInput(NULL), m_pOutput(NULL), m_bEOSDelivered(FALSE), m_bQualityChanged(FALSE), m_bSampleSkipped(FALSE) { #ifdef PERF RegisterPerfId(); #endif // PERF } #endif // destructor CTransformFilter::~CTransformFilter() { // Delete the pins delete m_pInput; delete m_pOutput; } // Transform place holder - should never be called HRESULT CTransformFilter::Transform(IMediaSample * pIn, IMediaSample *pOut) { UNREFERENCED_PARAMETER(pIn); UNREFERENCED_PARAMETER(pOut); DbgBreak("CTransformFilter::Transform() should never be called"); return E_UNEXPECTED; } // return the number of pins we provide int CTransformFilter::GetPinCount() { return 2; } // return a non-addrefed CBasePin * for the user to addref if he holds onto it // for longer than his pointer to us. We create the pins dynamically when they // are asked for rather than in the constructor. 
This is because we want to // give the derived class an oppportunity to return different pin objects // We return the objects as and when they are needed. If either of these fails // then we return NULL, the assumption being that the caller will realise the // whole deal is off and destroy us - which in turn will delete everything. CBasePin * CTransformFilter::GetPin(int n) { HRESULT hr = S_OK; // Create an input pin if necessary if (m_pInput == NULL) { m_pInput = new CTransformInputPin(NAME("Transform input pin"), this, // Owner filter &hr, // Result code L"XForm In"); // Pin name // Can't fail ASSERT(SUCCEEDED(hr)); if (m_pInput == NULL) { return NULL; } m_pOutput = (CTransformOutputPin *) new CTransformOutputPin(NAME("Transform output pin"), this, // Owner filter &hr, // Result code L"XForm Out"); // Pin name // Can't fail ASSERT(SUCCEEDED(hr)); if (m_pOutput == NULL) { delete m_pInput; m_pInput = NULL; } } // Return the appropriate pin if (n == 0) { return m_pInput; } else if (n == 1) { return m_pOutput; } else { return NULL; } } // // FindPin // // If Id is In or Out then return the IPin* for that pin // creating the pin if need be. Otherwise return NULL with an error. STDMETHODIMP CTransformFilter::FindPin(LPCWSTR Id, __deref_out IPin **ppPin) { CheckPointer(ppPin,E_POINTER); ValidateReadWritePtr(ppPin,sizeof(IPin *)); if (0==lstrcmpW(Id,L"In")) { *ppPin = GetPin(0); } else if (0==lstrcmpW(Id,L"Out")) { *ppPin = GetPin(1); } else { *ppPin = NULL; return VFW_E_NOT_FOUND; } HRESULT hr = NOERROR; // AddRef() returned pointer - but GetPin could fail if memory is low. if (*ppPin) { (*ppPin)->AddRef(); } else { hr = E_OUTOFMEMORY; // probably. There's no pin anyway. } return hr; } // override these two functions if you want to inform something // about entry to or exit from streaming state. 
HRESULT CTransformFilter::StartStreaming() { return NOERROR; } HRESULT CTransformFilter::StopStreaming() { return NOERROR; } // override this to grab extra interfaces on connection HRESULT CTransformFilter::CheckConnect(PIN_DIRECTION dir, IPin *pPin) { UNREFERENCED_PARAMETER(dir); UNREFERENCED_PARAMETER(pPin); return NOERROR; } // place holder to allow derived classes to release any extra interfaces HRESULT CTransformFilter::BreakConnect(PIN_DIRECTION dir) { UNREFERENCED_PARAMETER(dir); return NOERROR; } // Let derived classes know about connection completion HRESULT CTransformFilter::CompleteConnect(PIN_DIRECTION direction,IPin *pReceivePin) { UNREFERENCED_PARAMETER(direction); UNREFERENCED_PARAMETER(pReceivePin); return NOERROR; } // override this to know when the media type is really set HRESULT CTransformFilter::SetMediaType(PIN_DIRECTION direction,const CMediaType *pmt) { UNREFERENCED_PARAMETER(direction); UNREFERENCED_PARAMETER(pmt); return NOERROR; } // Set up our output sample HRESULT CTransformFilter::InitializeOutputSample(IMediaSample *pSample, __deref_out IMediaSample **ppOutSample) { IMediaSample *pOutSample; // default - times are the same AM_SAMPLE2_PROPERTIES * const pProps = m_pInput->SampleProps(); DWORD dwFlags = m_bSampleSkipped ? AM_GBF_PREVFRAMESKIPPED : 0; // This will prevent the image renderer from switching us to DirectDraw // when we can't do it without skipping frames because we're not on a // keyframe. If it really has to switch us, it still will, but then we // will have to wait for the next keyframe if (!(pProps->dwSampleFlags & AM_SAMPLE_SPLICEPOINT)) { dwFlags |= AM_GBF_NOTASYNCPOINT; } ASSERT(m_pOutput->m_pAllocator != NULL); HRESULT hr = m_pOutput->m_pAllocator->GetBuffer( &pOutSample , pProps->dwSampleFlags & AM_SAMPLE_TIMEVALID ? &pProps->tStart : NULL , pProps->dwSampleFlags & AM_SAMPLE_STOPVALID ? 
&pProps->tStop : NULL , dwFlags ); *ppOutSample = pOutSample; if (FAILED(hr)) { return hr; } ASSERT(pOutSample); IMediaSample2 *pOutSample2; if (SUCCEEDED(pOutSample->QueryInterface(IID_IMediaSample2, (void **)&pOutSample2))) { /* Modify it */ AM_SAMPLE2_PROPERTIES OutProps; EXECUTE_ASSERT(SUCCEEDED(pOutSample2->GetProperties( FIELD_OFFSET(AM_SAMPLE2_PROPERTIES, tStart), (PBYTE)&OutProps) )); OutProps.dwTypeSpecificFlags = pProps->dwTypeSpecificFlags; OutProps.dwSampleFlags = (OutProps.dwSampleFlags & AM_SAMPLE_TYPECHANGED) | (pProps->dwSampleFlags & ~AM_SAMPLE_TYPECHANGED); OutProps.tStart = pProps->tStart; OutProps.tStop = pProps->tStop; OutProps.cbData = FIELD_OFFSET(AM_SAMPLE2_PROPERTIES, dwStreamId); hr = pOutSample2->SetProperties( FIELD_OFFSET(AM_SAMPLE2_PROPERTIES, dwStreamId), (PBYTE)&OutProps ); if (pProps->dwSampleFlags & AM_SAMPLE_DATADISCONTINUITY) { m_bSampleSkipped = FALSE; } pOutSample2->Release(); } else { if (pProps->dwSampleFlags & AM_SAMPLE_TIMEVALID) { pOutSample->SetTime(&pProps->tStart, &pProps->tStop); } if (pProps->dwSampleFlags & AM_SAMPLE_SPLICEPOINT) { pOutSample->SetSyncPoint(TRUE); } if (pProps->dwSampleFlags & AM_SAMPLE_DATADISCONTINUITY) { pOutSample->SetDiscontinuity(TRUE); m_bSampleSkipped = FALSE; } // Copy the media times LONGLONG MediaStart, MediaEnd; if (pSample->GetMediaTime(&MediaStart,&MediaEnd) == NOERROR) { pOutSample->SetMediaTime(&MediaStart,&MediaEnd); } } return S_OK; } // override this to customize the transform process HRESULT CTransformFilter::Receive(IMediaSample *pSample) { /* Check for other streams and pass them on */ AM_SAMPLE2_PROPERTIES * const pProps = m_pInput->SampleProps(); if (pProps->dwStreamId != AM_STREAM_MEDIA) { return m_pOutput->m_pInputPin->Receive(pSample); } HRESULT hr; ASSERT(pSample); IMediaSample * pOutSample; // If no output to deliver to then no point sending us data ASSERT (m_pOutput != NULL) ; // Set up the output sample hr = InitializeOutputSample(pSample, &pOutSample); if (FAILED(hr)) { 
return hr; } // Start timing the transform (if PERF is defined) MSR_START(m_idTransform); // have the derived class transform the data hr = Transform(pSample, pOutSample); // Stop the clock and log it (if PERF is defined) MSR_STOP(m_idTransform); if (FAILED(hr)) { DbgLog((LOG_TRACE,1,TEXT("Error from transform"))); } else { // the Transform() function can return S_FALSE to indicate that the // sample should not be delivered; we only deliver the sample if it's // really S_OK (same as NOERROR, of course.) if (hr == NOERROR) { hr = m_pOutput->m_pInputPin->Receive(pOutSample); m_bSampleSkipped = FALSE; // last thing no longer dropped } else { // S_FALSE returned from Transform is a PRIVATE agreement // We should return NOERROR from Receive() in this cause because returning S_FALSE // from Receive() means that this is the end of the stream and no more data should // be sent. if (S_FALSE == hr) { // Release the sample before calling notify to avoid // deadlocks if the sample holds a lock on the system // such as DirectDraw buffers do pOutSample->Release(); m_bSampleSkipped = TRUE; if (!m_bQualityChanged) { NotifyEvent(EC_QUALITY_CHANGE,0,0); m_bQualityChanged = TRUE; } return NOERROR; } } } // release the output buffer. If the connected pin still needs it, // it will have addrefed it itself. pOutSample->Release(); return hr; } // Return S_FALSE to mean "pass the note on upstream" // Return NOERROR (Same as S_OK) // to mean "I've done something about it, don't pass it on" HRESULT CTransformFilter::AlterQuality(Quality q) { UNREFERENCED_PARAMETER(q); return S_FALSE; } // EndOfStream received. Default behaviour is to deliver straight // downstream, since we have no queued data. If you overrode Receive // and have queue data, then you need to handle this and deliver EOS after // all queued data is sent HRESULT CTransformFilter::EndOfStream(void) { HRESULT hr = NOERROR; if (m_pOutput != NULL) { hr = m_pOutput->DeliverEndOfStream(); } return hr; } // enter flush state. 
Receives already blocked // must override this if you have queued data or a worker thread HRESULT CTransformFilter::BeginFlush(void) { HRESULT hr = NOERROR; if (m_pOutput != NULL) { // block receives -- done by caller (CBaseInputPin::BeginFlush) // discard queued data -- we have no queued data // free anyone blocked on receive - not possible in this filter // call downstream hr = m_pOutput->DeliverBeginFlush(); } return hr; } // leave flush state. must override this if you have queued data // or a worker thread HRESULT CTransformFilter::EndFlush(void) { // sync with pushing thread -- we have no worker thread // ensure no more data to go downstream -- we have no queued data // call EndFlush on downstream pins ASSERT (m_pOutput != NULL); return m_pOutput->DeliverEndFlush(); // caller (the input pin's method) will unblock Receives } // override these so that the derived filter can catch them STDMETHODIMP CTransformFilter::Stop() { CAutoLock lck1(&m_csFilter); if (m_State == State_Stopped) { return NOERROR; } // Succeed the Stop if we are not completely connected ASSERT(m_pInput == NULL || m_pOutput != NULL); if (m_pInput == NULL || m_pInput->IsConnected() == FALSE || m_pOutput->IsConnected() == FALSE) { m_State = State_Stopped; m_bEOSDelivered = FALSE; return NOERROR; } ASSERT(m_pInput); ASSERT(m_pOutput); // decommit the input pin before locking or we can deadlock m_pInput->Inactive(); // synchronize with Receive calls CAutoLock lck2(&m_csReceive); m_pOutput->Inactive(); // allow a class derived from CTransformFilter // to know about starting and stopping streaming HRESULT hr = StopStreaming(); if (SUCCEEDED(hr)) { // complete the state transition m_State = State_Stopped; m_bEOSDelivered = FALSE; } return hr; } STDMETHODIMP CTransformFilter::Pause() { CAutoLock lck(&m_csFilter); HRESULT hr = NOERROR; if (m_State == State_Paused) { // (This space left deliberately blank) } // If we have no input pin or it isn't yet connected then when we are // asked to pause we 
deliver an end of stream to the downstream filter. // This makes sure that it doesn't sit there forever waiting for // samples which we cannot ever deliver without an input connection. else if (m_pInput == NULL || m_pInput->IsConnected() == FALSE) { if (m_pOutput && m_bEOSDelivered == FALSE) { m_pOutput->DeliverEndOfStream(); m_bEOSDelivered = TRUE; } m_State = State_Paused; } // We may have an input connection but no output connection // However, if we have an input pin we do have an output pin else if (m_pOutput->IsConnected() == FALSE) { m_State = State_Paused; } else { if (m_State == State_Stopped) { // allow a class derived from CTransformFilter // to know about starting and stopping streaming CAutoLock lck2(&m_csReceive); hr = StartStreaming(); } if (SUCCEEDED(hr)) { hr = CBaseFilter::Pause(); } } m_bSampleSkipped = FALSE; m_bQualityChanged = FALSE; return hr; } HRESULT CTransformFilter::NewSegment( REFERENCE_TIME tStart, REFERENCE_TIME tStop, double dRate) { if (m_pOutput != NULL) { return m_pOutput->DeliverNewSegment(tStart, tStop, dRate); } return S_OK; } // Check streaming status HRESULT CTransformInputPin::CheckStreaming() { ASSERT(m_pTransformFilter->m_pOutput != NULL); if (!m_pTransformFilter->m_pOutput->IsConnected()) { return VFW_E_NOT_CONNECTED; } else { // Shouldn't be able to get any data if we're not connected! 
ASSERT(IsConnected()); // we're flushing if (m_bFlushing) { return S_FALSE; } // Don't process stuff in Stopped state if (IsStopped()) { return VFW_E_WRONG_STATE; } if (m_bRunTimeError) { return VFW_E_RUNTIME_ERROR; } return S_OK; } } // ================================================================= // Implements the CTransformInputPin class // ================================================================= // constructor CTransformInputPin::CTransformInputPin( __in_opt LPCTSTR pObjectName, __inout CTransformFilter *pTransformFilter, __inout HRESULT * phr, __in_opt LPCWSTR pName) : CBaseInputPin(pObjectName, pTransformFilter, &pTransformFilter->m_csFilter, phr, pName) { DbgLog((LOG_TRACE,2,TEXT("CTransformInputPin::CTransformInputPin"))); m_pTransformFilter = pTransformFilter; } #ifdef UNICODE CTransformInputPin::CTransformInputPin( __in_opt LPCSTR pObjectName, __inout CTransformFilter *pTransformFilter, __inout HRESULT * phr, __in_opt LPCWSTR pName) : CBaseInputPin(pObjectName, pTransformFilter, &pTransformFilter->m_csFilter, phr, pName) { DbgLog((LOG_TRACE,2,TEXT("CTransformInputPin::CTransformInputPin"))); m_pTransformFilter = pTransformFilter; } #endif // provides derived filter a chance to grab extra interfaces HRESULT CTransformInputPin::CheckConnect(IPin *pPin) { HRESULT hr = m_pTransformFilter->CheckConnect(PINDIR_INPUT,pPin); if (FAILED(hr)) { return hr; } return CBaseInputPin::CheckConnect(pPin); } // provides derived filter a chance to release it's extra interfaces HRESULT CTransformInputPin::BreakConnect() { // Can't disconnect unless stopped ASSERT(IsStopped()); m_pTransformFilter->BreakConnect(PINDIR_INPUT); return CBaseInputPin::BreakConnect(); } // Let derived class know when the input pin is connected HRESULT CTransformInputPin::CompleteConnect(IPin *pReceivePin) { HRESULT hr = m_pTransformFilter->CompleteConnect(PINDIR_INPUT,pReceivePin); if (FAILED(hr)) { return hr; } return CBaseInputPin::CompleteConnect(pReceivePin); } // check that we can 
support a given media type HRESULT CTransformInputPin::CheckMediaType(const CMediaType* pmt) { // Check the input type HRESULT hr = m_pTransformFilter->CheckInputType(pmt); if (S_OK != hr) { return hr; } // if the output pin is still connected, then we have // to check the transform not just the input format if ((m_pTransformFilter->m_pOutput != NULL) && (m_pTransformFilter->m_pOutput->IsConnected())) { return m_pTransformFilter->CheckTransform( pmt, &m_pTransformFilter->m_pOutput->CurrentMediaType()); } else { return hr; } } // set the media type for this connection HRESULT CTransformInputPin::SetMediaType(const CMediaType* mtIn) { // Set the base class media type (should always succeed) HRESULT hr = CBasePin::SetMediaType(mtIn); if (FAILED(hr)) { return hr; } // check the transform can be done (should always succeed) ASSERT(SUCCEEDED(m_pTransformFilter->CheckInputType(mtIn))); return m_pTransformFilter->SetMediaType(PINDIR_INPUT,mtIn); } // ================================================================= // Implements IMemInputPin interface // ================================================================= // provide EndOfStream that passes straight downstream // (there is no queued data) STDMETHODIMP CTransformInputPin::EndOfStream(void) { CAutoLock lck(&m_pTransformFilter->m_csReceive); HRESULT hr = CheckStreaming(); if (S_OK == hr) { hr = m_pTransformFilter->EndOfStream(); } return hr; } // enter flushing state. Call default handler to block Receives, then // pass to overridable method in filter STDMETHODIMP CTransformInputPin::BeginFlush(void) { CAutoLock lck(&m_pTransformFilter->m_csFilter); // Are we actually doing anything? ASSERT(m_pTransformFilter->m_pOutput != NULL); if (!IsConnected() || !m_pTransformFilter->m_pOutput->IsConnected()) { return VFW_E_NOT_CONNECTED; } HRESULT hr = CBaseInputPin::BeginFlush(); if (FAILED(hr)) { return hr; } return m_pTransformFilter->BeginFlush(); } // leave flushing state. 
// Pass to overridable method in filter, then call base class // to unblock receives (finally) STDMETHODIMP CTransformInputPin::EndFlush(void) { CAutoLock lck(&m_pTransformFilter->m_csFilter); // Are we actually doing anything? ASSERT(m_pTransformFilter->m_pOutput != NULL); if (!IsConnected() || !m_pTransformFilter->m_pOutput->IsConnected()) { return VFW_E_NOT_CONNECTED; } HRESULT hr = m_pTransformFilter->EndFlush(); if (FAILED(hr)) { return hr; } return CBaseInputPin::EndFlush(); } // here's the next block of data from the stream. // AddRef it yourself if you need to hold it beyond the end // of this call. HRESULT CTransformInputPin::Receive(IMediaSample * pSample) { HRESULT hr; CAutoLock lck(&m_pTransformFilter->m_csReceive); ASSERT(pSample); // check all is well with the base class hr = CBaseInputPin::Receive(pSample); if (S_OK == hr) { hr = m_pTransformFilter->Receive(pSample); } return hr; } // override to pass downstream STDMETHODIMP CTransformInputPin::NewSegment( REFERENCE_TIME tStart, REFERENCE_TIME tStop, double dRate) { // Save the values in the pin CBasePin::NewSegment(tStart, tStop, dRate); return m_pTransformFilter->NewSegment(tStart, tStop, dRate); } // ================================================================= // Implements the CTransformOutputPin class // ================================================================= // constructor CTransformOutputPin::CTransformOutputPin( __in_opt LPCTSTR pObjectName, __inout CTransformFilter *pTransformFilter, __inout HRESULT * phr, __in_opt LPCWSTR pPinName) : CBaseOutputPin(pObjectName, pTransformFilter, &pTransformFilter->m_csFilter, phr, pPinName), m_pPosition(NULL) { DbgLog((LOG_TRACE,2,TEXT("CTransformOutputPin::CTransformOutputPin"))); m_pTransformFilter = pTransformFilter; } #ifdef UNICODE CTransformOutputPin::CTransformOutputPin( __in_opt LPCSTR pObjectName, __inout CTransformFilter *pTransformFilter, __inout HRESULT * phr, __in_opt LPCWSTR pPinName) : CBaseOutputPin(pObjectName, 
pTransformFilter, &pTransformFilter->m_csFilter, phr, pPinName), m_pPosition(NULL) { DbgLog((LOG_TRACE,2,TEXT("CTransformOutputPin::CTransformOutputPin"))); m_pTransformFilter = pTransformFilter; } #endif // destructor CTransformOutputPin::~CTransformOutputPin() { DbgLog((LOG_TRACE,2,TEXT("CTransformOutputPin::~CTransformOutputPin"))); if (m_pPosition) m_pPosition->Release(); } // overriden to expose IMediaPosition and IMediaSeeking control interfaces STDMETHODIMP CTransformOutputPin::NonDelegatingQueryInterface(REFIID riid, __deref_out void **ppv) { CheckPointer(ppv,E_POINTER); ValidateReadWritePtr(ppv,sizeof(PVOID)); *ppv = NULL; if (riid == IID_IMediaPosition || riid == IID_IMediaSeeking) { // we should have an input pin by now ASSERT(m_pTransformFilter->m_pInput != NULL); if (m_pPosition == NULL) { HRESULT hr = CreatePosPassThru( GetOwner(), FALSE, (IPin *)m_pTransformFilter->m_pInput, &m_pPosition); if (FAILED(hr)) { return hr; } } return m_pPosition->QueryInterface(riid, ppv); } else { return CBaseOutputPin::NonDelegatingQueryInterface(riid, ppv); } } // provides derived filter a chance to grab extra interfaces HRESULT CTransformOutputPin::CheckConnect(IPin *pPin) { // we should have an input connection first ASSERT(m_pTransformFilter->m_pInput != NULL); if ((m_pTransformFilter->m_pInput->IsConnected() == FALSE)) { return E_UNEXPECTED; } HRESULT hr = m_pTransformFilter->CheckConnect(PINDIR_OUTPUT,pPin); if (FAILED(hr)) { return hr; } return CBaseOutputPin::CheckConnect(pPin); } // provides derived filter a chance to release it's extra interfaces HRESULT CTransformOutputPin::BreakConnect() { // Can't disconnect unless stopped ASSERT(IsStopped()); m_pTransformFilter->BreakConnect(PINDIR_OUTPUT); return CBaseOutputPin::BreakConnect(); } // Let derived class know when the output pin is connected HRESULT CTransformOutputPin::CompleteConnect(IPin *pReceivePin) { HRESULT hr = m_pTransformFilter->CompleteConnect(PINDIR_OUTPUT,pReceivePin); if (FAILED(hr)) { return 
hr; } return CBaseOutputPin::CompleteConnect(pReceivePin); } // check a given transform - must have selected input type first HRESULT CTransformOutputPin::CheckMediaType(const CMediaType* pmtOut) { // must have selected input first ASSERT(m_pTransformFilter->m_pInput != NULL); if ((m_pTransformFilter->m_pInput->IsConnected() == FALSE)) { return E_INVALIDARG; } return m_pTransformFilter->CheckTransform( &m_pTransformFilter->m_pInput->CurrentMediaType(), pmtOut); } // called after we have agreed a media type to actually set it in which case // we run the CheckTransform function to get the output format type again HRESULT CTransformOutputPin::SetMediaType(const CMediaType* pmtOut) { HRESULT hr = NOERROR; ASSERT(m_pTransformFilter->m_pInput != NULL); ASSERT(m_pTransformFilter->m_pInput->CurrentMediaType().IsValid()); // Set the base class media type (should always succeed) hr = CBasePin::SetMediaType(pmtOut); if (FAILED(hr)) { return hr; } #ifdef DEBUG if (FAILED(m_pTransformFilter->CheckTransform(&m_pTransformFilter-> m_pInput->CurrentMediaType(),pmtOut))) { DbgLog((LOG_ERROR,0,TEXT("*** This filter is accepting an output media type"))); DbgLog((LOG_ERROR,0,TEXT(" that it can't currently transform to. 
I hope"))); DbgLog((LOG_ERROR,0,TEXT(" it's smart enough to reconnect its input."))); } #endif return m_pTransformFilter->SetMediaType(PINDIR_OUTPUT,pmtOut); } // pass the buffer size decision through to the main transform class HRESULT CTransformOutputPin::DecideBufferSize( IMemAllocator * pAllocator, __inout ALLOCATOR_PROPERTIES* pProp) { return m_pTransformFilter->DecideBufferSize(pAllocator, pProp); } // return a specific media type indexed by iPosition HRESULT CTransformOutputPin::GetMediaType( int iPosition, __inout CMediaType *pMediaType) { ASSERT(m_pTransformFilter->m_pInput != NULL); // We don't have any media types if our input is not connected if (m_pTransformFilter->m_pInput->IsConnected()) { return m_pTransformFilter->GetMediaType(iPosition,pMediaType); } else { return VFW_S_NO_MORE_ITEMS; } } // Override this if you can do something constructive to act on the // quality message. Consider passing it upstream as well // Pass the quality mesage on upstream. STDMETHODIMP CTransformOutputPin::Notify(IBaseFilter * pSender, Quality q) { UNREFERENCED_PARAMETER(pSender); ValidateReadPtr(pSender,sizeof(IBaseFilter)); // First see if we want to handle this ourselves HRESULT hr = m_pTransformFilter->AlterQuality(q); if (hr!=S_FALSE) { return hr; // either S_OK or a failure } // S_FALSE means we pass the message on. // Find the quality sink for our input pin and send it there ASSERT(m_pTransformFilter->m_pInput != NULL); return m_pTransformFilter->m_pInput->PassNotify(q); } // Notify // the following removes a very large number of level 4 warnings from the microsoft // compiler output, which are not useful at all in this case. #pragma warning(disable:4514)
gpl-2.0
smksyj/linux_modified_mlock
drivers/staging/comedi/drivers/ni_at_ao.c
77
10825
/* * ni_at_ao.c * Driver for NI AT-AO-6/10 boards * * COMEDI - Linux Control and Measurement Device Interface * Copyright (C) 2000,2002 David A. Schleef <ds@schleef.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * Driver: ni_at_ao * Description: National Instruments AT-AO-6/10 * Devices: (National Instruments) AT-AO-6 [at-ao-6] * (National Instruments) AT-AO-10 [at-ao-10] * Status: should work * Author: David A. Schleef <ds@schleef.org> * Updated: Sun Dec 26 12:26:28 EST 2004 * * Configuration options: * [0] - I/O port base address * [1] - IRQ (unused) * [2] - DMA (unused) * [3] - analog output range, set by jumpers on hardware * 0 for -10 to 10V bipolar * 1 for 0V to 10V unipolar */ #include <linux/module.h> #include "../comedidev.h" #include "8253.h" /* * Register map * * Register-level programming information can be found in NI * document 320379.pdf. 
*/ #define ATAO_DIO_REG 0x00 #define ATAO_CFG2_REG 0x02 #define ATAO_CFG2_CALLD_NOP (0 << 14) #define ATAO_CFG2_CALLD(x) ((((x) >> 3) + 1) << 14) #define ATAO_CFG2_FFRTEN (1 << 13) #define ATAO_CFG2_DACS(x) (1 << (((x) / 2) + 8)) #define ATAO_CFG2_LDAC(x) (1 << (((x) / 2) + 3)) #define ATAO_CFG2_PROMEN (1 << 2) #define ATAO_CFG2_SCLK (1 << 1) #define ATAO_CFG2_SDATA (1 << 0) #define ATAO_CFG3_REG 0x04 #define ATAO_CFG3_DMAMODE (1 << 6) #define ATAO_CFG3_CLKOUT (1 << 5) #define ATAO_CFG3_RCLKEN (1 << 4) #define ATAO_CFG3_DOUTEN2 (1 << 3) #define ATAO_CFG3_DOUTEN1 (1 << 2) #define ATAO_CFG3_EN2_5V (1 << 1) #define ATAO_CFG3_SCANEN (1 << 0) #define ATAO_82C53_BASE 0x06 #define ATAO_CFG1_REG 0x0a #define ATAO_CFG1_EXTINT2EN (1 << 15) #define ATAO_CFG1_EXTINT1EN (1 << 14) #define ATAO_CFG1_CNTINT2EN (1 << 13) #define ATAO_CFG1_CNTINT1EN (1 << 12) #define ATAO_CFG1_TCINTEN (1 << 11) #define ATAO_CFG1_CNT1SRC (1 << 10) #define ATAO_CFG1_CNT2SRC (1 << 9) #define ATAO_CFG1_FIFOEN (1 << 8) #define ATAO_CFG1_GRP2WR (1 << 7) #define ATAO_CFG1_EXTUPDEN (1 << 6) #define ATAO_CFG1_DMARQ (1 << 5) #define ATAO_CFG1_DMAEN (1 << 4) #define ATAO_CFG1_CH(x) (((x) & 0xf) << 0) #define ATAO_STATUS_REG 0x0a #define ATAO_STATUS_FH (1 << 6) #define ATAO_STATUS_FE (1 << 5) #define ATAO_STATUS_FF (1 << 4) #define ATAO_STATUS_INT2 (1 << 3) #define ATAO_STATUS_INT1 (1 << 2) #define ATAO_STATUS_TCINT (1 << 1) #define ATAO_STATUS_PROMOUT (1 << 0) #define ATAO_FIFO_WRITE_REG 0x0c #define ATAO_FIFO_CLEAR_REG 0x0c #define ATAO_AO_REG(x) (0x0c + ((x) * 2)) /* registers with _2_ are accessed when GRP2WR is set in CFG1 */ #define ATAO_2_DMATCCLR_REG 0x00 #define ATAO_2_INT1CLR_REG 0x02 #define ATAO_2_INT2CLR_REG 0x04 #define ATAO_2_RTSISHFT_REG 0x06 #define ATAO_2_RTSISHFT_RSI (1 << 0) #define ATAO_2_RTSISTRB_REG 0x07 struct atao_board { const char *name; int n_ao_chans; }; static const struct atao_board atao_boards[] = { { .name = "at-ao-6", .n_ao_chans = 6, }, { .name = "at-ao-10", .n_ao_chans = 10, 
}, }; struct atao_private { unsigned short cfg1; unsigned short cfg3; /* Used for caldac readback */ unsigned char caldac[21]; }; static void atao_select_reg_group(struct comedi_device *dev, int group) { struct atao_private *devpriv = dev->private; if (group) devpriv->cfg1 |= ATAO_CFG1_GRP2WR; else devpriv->cfg1 &= ~ATAO_CFG1_GRP2WR; outw(devpriv->cfg1, dev->iobase + ATAO_CFG1_REG); } static int atao_ao_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int chan = CR_CHAN(insn->chanspec); unsigned int val = s->readback[chan]; int i; if (chan == 0) atao_select_reg_group(dev, 1); for (i = 0; i < insn->n; i++) { val = data[i]; /* the hardware expects two's complement values */ outw(comedi_offset_munge(s, val), dev->iobase + ATAO_AO_REG(chan)); } s->readback[chan] = val; if (chan == 0) atao_select_reg_group(dev, 0); return insn->n; } static int atao_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (comedi_dio_update_state(s, data)) outw(s->state, dev->iobase + ATAO_DIO_REG); data[1] = inw(dev->iobase + ATAO_DIO_REG); return insn->n; } static int atao_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct atao_private *devpriv = dev->private; unsigned int chan = CR_CHAN(insn->chanspec); unsigned int mask; int ret; if (chan < 4) mask = 0x0f; else mask = 0xf0; ret = comedi_dio_insn_config(dev, s, insn, data, mask); if (ret) return ret; if (s->io_bits & 0x0f) devpriv->cfg3 |= ATAO_CFG3_DOUTEN1; else devpriv->cfg3 &= ~ATAO_CFG3_DOUTEN1; if (s->io_bits & 0xf0) devpriv->cfg3 |= ATAO_CFG3_DOUTEN2; else devpriv->cfg3 &= ~ATAO_CFG3_DOUTEN2; outw(devpriv->cfg3, dev->iobase + ATAO_CFG3_REG); return insn->n; } /* * There are three DAC8800 TrimDACs on the board. These are 8-channel, * 8-bit DACs that are used to calibrate the Analog Output channels. 
* The factory default calibration values are stored in the EEPROM. * The TrimDACs, and EEPROM addresses, are mapped as: * * Channel EEPROM Description * ----------------- ------ ----------------------------------- * 0 - DAC0 Chan 0 0x30 AO Channel 0 Offset * 1 - DAC0 Chan 1 0x31 AO Channel 0 Gain * 2 - DAC0 Chan 2 0x32 AO Channel 1 Offset * 3 - DAC0 Chan 3 0x33 AO Channel 1 Gain * 4 - DAC0 Chan 4 0x34 AO Channel 2 Offset * 5 - DAC0 Chan 5 0x35 AO Channel 2 Gain * 6 - DAC0 Chan 6 0x36 AO Channel 3 Offset * 7 - DAC0 Chan 7 0x37 AO Channel 3 Gain * 8 - DAC1 Chan 0 0x38 AO Channel 4 Offset * 9 - DAC1 Chan 1 0x39 AO Channel 4 Gain * 10 - DAC1 Chan 2 0x3a AO Channel 5 Offset * 11 - DAC1 Chan 3 0x3b AO Channel 5 Gain * 12 - DAC1 Chan 4 0x3c 2.5V Offset * 13 - DAC1 Chan 5 0x3d AO Channel 6 Offset (at-ao-10 only) * 14 - DAC1 Chan 6 0x3e AO Channel 6 Gain (at-ao-10 only) * 15 - DAC1 Chan 7 0x3f AO Channel 7 Offset (at-ao-10 only) * 16 - DAC2 Chan 0 0x40 AO Channel 7 Gain (at-ao-10 only) * 17 - DAC2 Chan 1 0x41 AO Channel 8 Offset (at-ao-10 only) * 18 - DAC2 Chan 2 0x42 AO Channel 8 Gain (at-ao-10 only) * 19 - DAC2 Chan 3 0x43 AO Channel 9 Offset (at-ao-10 only) * 20 - DAC2 Chan 4 0x44 AO Channel 9 Gain (at-ao-10 only) * DAC2 Chan 5 0x45 Reserved * DAC2 Chan 6 0x46 Reserved * DAC2 Chan 7 0x47 Reserved */ static int atao_calib_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int chan = CR_CHAN(insn->chanspec); if (insn->n) { unsigned int val = data[insn->n - 1]; unsigned int bitstring = ((chan & 0x7) << 8) | val; unsigned int bits; int bit; /* write the channel and last data value to the caldac */ /* clock the bitstring to the caldac; MSB -> LSB */ for (bit = 1 << 10; bit; bit >>= 1) { bits = (bit & bitstring) ? 
ATAO_CFG2_SDATA : 0; outw(bits, dev->iobase + ATAO_CFG2_REG); outw(bits | ATAO_CFG2_SCLK, dev->iobase + ATAO_CFG2_REG); } /* strobe the caldac to load the value */ outw(ATAO_CFG2_CALLD(chan), dev->iobase + ATAO_CFG2_REG); outw(ATAO_CFG2_CALLD_NOP, dev->iobase + ATAO_CFG2_REG); s->readback[chan] = val; } return insn->n; } static void atao_reset(struct comedi_device *dev) { struct atao_private *devpriv = dev->private; unsigned long timer_base = dev->iobase + ATAO_82C53_BASE; /* This is the reset sequence described in the manual */ devpriv->cfg1 = 0; outw(devpriv->cfg1, dev->iobase + ATAO_CFG1_REG); /* Put outputs of counter 1 and counter 2 in a high state */ i8254_set_mode(timer_base, 0, 0, I8254_MODE4 | I8254_BINARY); i8254_set_mode(timer_base, 0, 1, I8254_MODE4 | I8254_BINARY); i8254_write(timer_base, 0, 0, 0x0003); outw(ATAO_CFG2_CALLD_NOP, dev->iobase + ATAO_CFG2_REG); devpriv->cfg3 = 0; outw(devpriv->cfg3, dev->iobase + ATAO_CFG3_REG); inw(dev->iobase + ATAO_FIFO_CLEAR_REG); atao_select_reg_group(dev, 1); outw(0, dev->iobase + ATAO_2_INT1CLR_REG); outw(0, dev->iobase + ATAO_2_INT2CLR_REG); outw(0, dev->iobase + ATAO_2_DMATCCLR_REG); atao_select_reg_group(dev, 0); } static int atao_attach(struct comedi_device *dev, struct comedi_devconfig *it) { const struct atao_board *board = dev->board_ptr; struct atao_private *devpriv; struct comedi_subdevice *s; int ret; ret = comedi_request_region(dev, it->options[0], 0x20); if (ret) return ret; devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv)); if (!devpriv) return -ENOMEM; ret = comedi_alloc_subdevices(dev, 4); if (ret) return ret; /* Analog Output subdevice */ s = &dev->subdevices[0]; s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE; s->n_chan = board->n_ao_chans; s->maxdata = 0x0fff; s->range_table = it->options[3] ? 
&range_unipolar10 : &range_bipolar10; s->insn_write = atao_ao_insn_write; ret = comedi_alloc_subdev_readback(s); if (ret) return ret; /* Digital I/O subdevice */ s = &dev->subdevices[1]; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 8; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = atao_dio_insn_bits; s->insn_config = atao_dio_insn_config; /* caldac subdevice */ s = &dev->subdevices[2]; s->type = COMEDI_SUBD_CALIB; s->subdev_flags = SDF_WRITABLE | SDF_INTERNAL; s->n_chan = (board->n_ao_chans * 2) + 1; s->maxdata = 0xff; s->insn_write = atao_calib_insn_write; ret = comedi_alloc_subdev_readback(s); if (ret) return ret; /* EEPROM subdevice */ s = &dev->subdevices[3]; s->type = COMEDI_SUBD_UNUSED; atao_reset(dev); return 0; } static struct comedi_driver ni_at_ao_driver = { .driver_name = "ni_at_ao", .module = THIS_MODULE, .attach = atao_attach, .detach = comedi_legacy_detach, .board_name = &atao_boards[0].name, .offset = sizeof(struct atao_board), .num_names = ARRAY_SIZE(atao_boards), }; module_comedi_driver(ni_at_ao_driver); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi driver for NI AT-AO-6/10 boards"); MODULE_LICENSE("GPL");
gpl-2.0
wikimedia/operations-debs-linux
drivers/staging/comedi/drivers/rtd520.c
77
41020
/* * comedi/drivers/rtd520.c * Comedi driver for Real Time Devices (RTD) PCI4520/DM7520 * * COMEDI - Linux Control and Measurement Device Interface * Copyright (C) 2001 David A. Schleef <ds@schleef.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * Driver: rtd520 * Description: Real Time Devices PCI4520/DM7520 * Devices: (Real Time Devices) DM7520HR-1 [DM7520] * (Real Time Devices) DM7520HR-8 [DM7520] * (Real Time Devices) PCI4520 [PCI4520] * (Real Time Devices) PCI4520-8 [PCI4520] * Author: Dan Christian * Status: Works. Only tested on DM7520-8. Not SMP safe. * * Configuration options: not applicable, uses PCI auto config */ /* * Created by Dan Christian, NASA Ames Research Center. * * The PCI4520 is a PCI card. The DM7520 is a PC/104-plus card. * Both have: * 8/16 12 bit ADC with FIFO and channel gain table * 8 bits high speed digital out (for external MUX) (or 8 in or 8 out) * 8 bits high speed digital in with FIFO and interrupt on change (or 8 IO) * 2 12 bit DACs with FIFOs * 2 bits output * 2 bits input * bus mastering DMA * timers: ADC sample, pacer, burst, about, delay, DA1, DA2 * sample counter * 3 user timer/counters (8254) * external interrupt * * The DM7520 has slightly fewer features (fewer gain steps). * * These boards can support external multiplexors and multi-board * synchronization, but this driver doesn't support that. 
* * Board docs: http://www.rtdusa.com/PC104/DM/analog%20IO/dm7520.htm * Data sheet: http://www.rtdusa.com/pdf/dm7520.pdf * Example source: http://www.rtdusa.com/examples/dm/dm7520.zip * Call them and ask for the register level manual. * PCI chip: http://www.plxtech.com/products/io/pci9080 * * Notes: * This board is memory mapped. There is some IO stuff, but it isn't needed. * * I use a pretty loose naming style within the driver (rtd_blah). * All externally visible names should be rtd520_blah. * I use camelCase for structures (and inside them). * I may also use upper CamelCase for function names (old habit). * * This board is somewhat related to the RTD PCI4400 board. * * I borrowed heavily from the ni_mio_common, ni_atmio16d, mite, and * das1800, since they have the best documented code. Driver cb_pcidas64.c * uses the same DMA controller. * * As far as I can tell, the About interrupt doesn't work if Sample is * also enabled. It turns out that About really isn't needed, since * we always count down samples read. * * There was some timer/counter code, but it didn't follow the right API. */ /* * driver status: * * Analog-In supports instruction and command mode. * * With DMA, you can sample at 1.15Mhz with 70% idle on a 400Mhz K6-2 * (single channel, 64K read buffer). I get random system lockups when * using DMA with ALI-15xx based systems. I haven't been able to test * any other chipsets. The lockups happen soon after the start of an * acquistion, not in the middle of a long run. * * Without DMA, you can do 620Khz sampling with 20% idle on a 400Mhz K6-2 * (with a 256K read buffer). * * Digital-IO and Analog-Out only support instruction mode. 
*/ #include <linux/module.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/interrupt.h> #include "../comedidev.h" #include "comedi_fc.h" #include "plx9080.h" /* * Local Address Space 0 Offsets */ #define LAS0_USER_IO 0x0008 /* User I/O */ #define LAS0_ADC 0x0010 /* FIFO Status/Software A/D Start */ #define FS_DAC1_NOT_EMPTY (1 << 0) /* DAC1 FIFO not empty */ #define FS_DAC1_HEMPTY (1 << 1) /* DAC1 FIFO half empty */ #define FS_DAC1_NOT_FULL (1 << 2) /* DAC1 FIFO not full */ #define FS_DAC2_NOT_EMPTY (1 << 4) /* DAC2 FIFO not empty */ #define FS_DAC2_HEMPTY (1 << 5) /* DAC2 FIFO half empty */ #define FS_DAC2_NOT_FULL (1 << 6) /* DAC2 FIFO not full */ #define FS_ADC_NOT_EMPTY (1 << 8) /* ADC FIFO not empty */ #define FS_ADC_HEMPTY (1 << 9) /* ADC FIFO half empty */ #define FS_ADC_NOT_FULL (1 << 10) /* ADC FIFO not full */ #define FS_DIN_NOT_EMPTY (1 << 12) /* DIN FIFO not empty */ #define FS_DIN_HEMPTY (1 << 13) /* DIN FIFO half empty */ #define FS_DIN_NOT_FULL (1 << 14) /* DIN FIFO not full */ #define LAS0_DAC1 0x0014 /* Software D/A1 Update (w) */ #define LAS0_DAC2 0x0018 /* Software D/A2 Update (w) */ #define LAS0_DAC 0x0024 /* Software Simultaneous Update (w) */ #define LAS0_PACER 0x0028 /* Software Pacer Start/Stop */ #define LAS0_TIMER 0x002c /* Timer Status/HDIN Software Trig. 
*/ #define LAS0_IT 0x0030 /* Interrupt Status/Enable */ #define IRQM_ADC_FIFO_WRITE (1 << 0) /* ADC FIFO Write */ #define IRQM_CGT_RESET (1 << 1) /* Reset CGT */ #define IRQM_CGT_PAUSE (1 << 3) /* Pause CGT */ #define IRQM_ADC_ABOUT_CNT (1 << 4) /* About Counter out */ #define IRQM_ADC_DELAY_CNT (1 << 5) /* Delay Counter out */ #define IRQM_ADC_SAMPLE_CNT (1 << 6) /* ADC Sample Counter */ #define IRQM_DAC1_UCNT (1 << 7) /* DAC1 Update Counter */ #define IRQM_DAC2_UCNT (1 << 8) /* DAC2 Update Counter */ #define IRQM_UTC1 (1 << 9) /* User TC1 out */ #define IRQM_UTC1_INV (1 << 10) /* User TC1 out, inverted */ #define IRQM_UTC2 (1 << 11) /* User TC2 out */ #define IRQM_DIGITAL_IT (1 << 12) /* Digital Interrupt */ #define IRQM_EXTERNAL_IT (1 << 13) /* External Interrupt */ #define IRQM_ETRIG_RISING (1 << 14) /* Ext Trigger rising-edge */ #define IRQM_ETRIG_FALLING (1 << 15) /* Ext Trigger falling-edge */ #define LAS0_CLEAR 0x0034 /* Clear/Set Interrupt Clear Mask */ #define LAS0_OVERRUN 0x0038 /* Pending interrupts/Clear Overrun */ #define LAS0_PCLK 0x0040 /* Pacer Clock (24bit) */ #define LAS0_BCLK 0x0044 /* Burst Clock (10bit) */ #define LAS0_ADC_SCNT 0x0048 /* A/D Sample counter (10bit) */ #define LAS0_DAC1_UCNT 0x004c /* D/A1 Update counter (10 bit) */ #define LAS0_DAC2_UCNT 0x0050 /* D/A2 Update counter (10 bit) */ #define LAS0_DCNT 0x0054 /* Delay counter (16 bit) */ #define LAS0_ACNT 0x0058 /* About counter (16 bit) */ #define LAS0_DAC_CLK 0x005c /* DAC clock (16bit) */ #define LAS0_UTC0 0x0060 /* 8254 TC Counter 0 */ #define LAS0_UTC1 0x0064 /* 8254 TC Counter 1 */ #define LAS0_UTC2 0x0068 /* 8254 TC Counter 2 */ #define LAS0_UTC_CTRL 0x006c /* 8254 TC Control */ #define LAS0_DIO0 0x0070 /* Digital I/O Port 0 */ #define LAS0_DIO1 0x0074 /* Digital I/O Port 1 */ #define LAS0_DIO0_CTRL 0x0078 /* Digital I/O Control */ #define LAS0_DIO_STATUS 0x007c /* Digital I/O Status */ #define LAS0_BOARD_RESET 0x0100 /* Board reset */ #define LAS0_DMA0_SRC 0x0104 /* DMA 0 
Sources select */ #define LAS0_DMA1_SRC 0x0108 /* DMA 1 Sources select */ #define LAS0_ADC_CONVERSION 0x010c /* A/D Conversion Signal select */ #define LAS0_BURST_START 0x0110 /* Burst Clock Start Trigger select */ #define LAS0_PACER_START 0x0114 /* Pacer Clock Start Trigger select */ #define LAS0_PACER_STOP 0x0118 /* Pacer Clock Stop Trigger select */ #define LAS0_ACNT_STOP_ENABLE 0x011c /* About Counter Stop Enable */ #define LAS0_PACER_REPEAT 0x0120 /* Pacer Start Trigger Mode select */ #define LAS0_DIN_START 0x0124 /* HiSpd DI Sampling Signal select */ #define LAS0_DIN_FIFO_CLEAR 0x0128 /* Digital Input FIFO Clear */ #define LAS0_ADC_FIFO_CLEAR 0x012c /* A/D FIFO Clear */ #define LAS0_CGT_WRITE 0x0130 /* Channel Gain Table Write */ #define LAS0_CGL_WRITE 0x0134 /* Channel Gain Latch Write */ #define LAS0_CG_DATA 0x0138 /* Digital Table Write */ #define LAS0_CGT_ENABLE 0x013c /* Channel Gain Table Enable */ #define LAS0_CG_ENABLE 0x0140 /* Digital Table Enable */ #define LAS0_CGT_PAUSE 0x0144 /* Table Pause Enable */ #define LAS0_CGT_RESET 0x0148 /* Reset Channel Gain Table */ #define LAS0_CGT_CLEAR 0x014c /* Clear Channel Gain Table */ #define LAS0_DAC1_CTRL 0x0150 /* D/A1 output type/range */ #define LAS0_DAC1_SRC 0x0154 /* D/A1 update source */ #define LAS0_DAC1_CYCLE 0x0158 /* D/A1 cycle mode */ #define LAS0_DAC1_RESET 0x015c /* D/A1 FIFO reset */ #define LAS0_DAC1_FIFO_CLEAR 0x0160 /* D/A1 FIFO clear */ #define LAS0_DAC2_CTRL 0x0164 /* D/A2 output type/range */ #define LAS0_DAC2_SRC 0x0168 /* D/A2 update source */ #define LAS0_DAC2_CYCLE 0x016c /* D/A2 cycle mode */ #define LAS0_DAC2_RESET 0x0170 /* D/A2 FIFO reset */ #define LAS0_DAC2_FIFO_CLEAR 0x0174 /* D/A2 FIFO clear */ #define LAS0_ADC_SCNT_SRC 0x0178 /* A/D Sample Counter Source select */ #define LAS0_PACER_SELECT 0x0180 /* Pacer Clock select */ #define LAS0_SBUS0_SRC 0x0184 /* SyncBus 0 Source select */ #define LAS0_SBUS0_ENABLE 0x0188 /* SyncBus 0 enable */ #define LAS0_SBUS1_SRC 0x018c /* SyncBus 
1 Source select */ #define LAS0_SBUS1_ENABLE 0x0190 /* SyncBus 1 enable */ #define LAS0_SBUS2_SRC 0x0198 /* SyncBus 2 Source select */ #define LAS0_SBUS2_ENABLE 0x019c /* SyncBus 2 enable */ #define LAS0_ETRG_POLARITY 0x01a4 /* Ext. Trigger polarity select */ #define LAS0_EINT_POLARITY 0x01a8 /* Ext. Interrupt polarity select */ #define LAS0_UTC0_CLOCK 0x01ac /* UTC0 Clock select */ #define LAS0_UTC0_GATE 0x01b0 /* UTC0 Gate select */ #define LAS0_UTC1_CLOCK 0x01b4 /* UTC1 Clock select */ #define LAS0_UTC1_GATE 0x01b8 /* UTC1 Gate select */ #define LAS0_UTC2_CLOCK 0x01bc /* UTC2 Clock select */ #define LAS0_UTC2_GATE 0x01c0 /* UTC2 Gate select */ #define LAS0_UOUT0_SELECT 0x01c4 /* User Output 0 source select */ #define LAS0_UOUT1_SELECT 0x01c8 /* User Output 1 source select */ #define LAS0_DMA0_RESET 0x01cc /* DMA0 Request state machine reset */ #define LAS0_DMA1_RESET 0x01d0 /* DMA1 Request state machine reset */ /* * Local Address Space 1 Offsets */ #define LAS1_ADC_FIFO 0x0000 /* A/D FIFO (16bit) */ #define LAS1_HDIO_FIFO 0x0004 /* HiSpd DI FIFO (16bit) */ #define LAS1_DAC1_FIFO 0x0008 /* D/A1 FIFO (16bit) */ #define LAS1_DAC2_FIFO 0x000c /* D/A2 FIFO (16bit) */ /*====================================================================== Driver specific stuff (tunable) ======================================================================*/ /* We really only need 2 buffers. More than that means being much smarter about knowing which ones are full. */ #define DMA_CHAIN_COUNT 2 /* max DMA segments/buffers in a ring (min 2) */ /* Target period for periodic transfers. This sets the user read latency. 
*/ /* Note: There are certain rates where we give this up and transfer 1/2 FIFO */ /* If this is too low, efficiency is poor */ #define TRANS_TARGET_PERIOD 10000000 /* 10 ms (in nanoseconds) */ /* Set a practical limit on how long a list to support (affects memory use) */ /* The board support a channel list up to the FIFO length (1K or 8K) */ #define RTD_MAX_CHANLIST 128 /* max channel list that we allow */ /*====================================================================== Board specific stuff ======================================================================*/ #define RTD_CLOCK_RATE 8000000 /* 8Mhz onboard clock */ #define RTD_CLOCK_BASE 125 /* clock period in ns */ /* Note: these speed are slower than the spec, but fit the counter resolution*/ #define RTD_MAX_SPEED 1625 /* when sampling, in nanoseconds */ /* max speed if we don't have to wait for settling */ #define RTD_MAX_SPEED_1 875 /* if single channel, in nanoseconds */ #define RTD_MIN_SPEED 2097151875 /* (24bit counter) in nanoseconds */ /* min speed when only 1 channel (no burst counter) */ #define RTD_MIN_SPEED_1 5000000 /* 200Hz, in nanoseconds */ /* Setup continuous ring of 1/2 FIFO transfers. 
See RTD manual p91 */ #define DMA_MODE_BITS (\ PLX_LOCAL_BUS_16_WIDE_BITS \ | PLX_DMA_EN_READYIN_BIT \ | PLX_DMA_LOCAL_BURST_EN_BIT \ | PLX_EN_CHAIN_BIT \ | PLX_DMA_INTR_PCI_BIT \ | PLX_LOCAL_ADDR_CONST_BIT \ | PLX_DEMAND_MODE_BIT) #define DMA_TRANSFER_BITS (\ /* descriptors in PCI memory*/ PLX_DESC_IN_PCI_BIT \ /* interrupt at end of block */ | PLX_INTR_TERM_COUNT \ /* from board to PCI */ | PLX_XFER_LOCAL_TO_PCI) /*====================================================================== Comedi specific stuff ======================================================================*/ /* * The board has 3 input modes and the gains of 1,2,4,...32 (, 64, 128) */ static const struct comedi_lrange rtd_ai_7520_range = { 18, { /* +-5V input range gain steps */ BIP_RANGE(5.0), BIP_RANGE(5.0 / 2), BIP_RANGE(5.0 / 4), BIP_RANGE(5.0 / 8), BIP_RANGE(5.0 / 16), BIP_RANGE(5.0 / 32), /* +-10V input range gain steps */ BIP_RANGE(10.0), BIP_RANGE(10.0 / 2), BIP_RANGE(10.0 / 4), BIP_RANGE(10.0 / 8), BIP_RANGE(10.0 / 16), BIP_RANGE(10.0 / 32), /* +10V input range gain steps */ UNI_RANGE(10.0), UNI_RANGE(10.0 / 2), UNI_RANGE(10.0 / 4), UNI_RANGE(10.0 / 8), UNI_RANGE(10.0 / 16), UNI_RANGE(10.0 / 32), } }; /* PCI4520 has two more gains (6 more entries) */ static const struct comedi_lrange rtd_ai_4520_range = { 24, { /* +-5V input range gain steps */ BIP_RANGE(5.0), BIP_RANGE(5.0 / 2), BIP_RANGE(5.0 / 4), BIP_RANGE(5.0 / 8), BIP_RANGE(5.0 / 16), BIP_RANGE(5.0 / 32), BIP_RANGE(5.0 / 64), BIP_RANGE(5.0 / 128), /* +-10V input range gain steps */ BIP_RANGE(10.0), BIP_RANGE(10.0 / 2), BIP_RANGE(10.0 / 4), BIP_RANGE(10.0 / 8), BIP_RANGE(10.0 / 16), BIP_RANGE(10.0 / 32), BIP_RANGE(10.0 / 64), BIP_RANGE(10.0 / 128), /* +10V input range gain steps */ UNI_RANGE(10.0), UNI_RANGE(10.0 / 2), UNI_RANGE(10.0 / 4), UNI_RANGE(10.0 / 8), UNI_RANGE(10.0 / 16), UNI_RANGE(10.0 / 32), UNI_RANGE(10.0 / 64), UNI_RANGE(10.0 / 128), } }; /* Table order matches range values */ static const struct comedi_lrange 
rtd_ao_range = { 4, { UNI_RANGE(5), UNI_RANGE(10), BIP_RANGE(5), BIP_RANGE(10), } }; enum rtd_boardid { BOARD_DM7520, BOARD_PCI4520, }; struct rtd_boardinfo { const char *name; int range_bip10; /* start of +-10V range */ int range_uni10; /* start of +10V range */ const struct comedi_lrange *ai_range; }; static const struct rtd_boardinfo rtd520Boards[] = { [BOARD_DM7520] = { .name = "DM7520", .range_bip10 = 6, .range_uni10 = 12, .ai_range = &rtd_ai_7520_range, }, [BOARD_PCI4520] = { .name = "PCI4520", .range_bip10 = 8, .range_uni10 = 16, .ai_range = &rtd_ai_4520_range, }, }; struct rtd_private { /* memory mapped board structures */ void __iomem *las1; void __iomem *lcfg; long ai_count; /* total transfer size (samples) */ int xfer_count; /* # to transfer data. 0->1/2FIFO */ int flags; /* flag event modes */ unsigned fifosz; }; /* bit defines for "flags" */ #define SEND_EOS 0x01 /* send End Of Scan events */ #define DMA0_ACTIVE 0x02 /* DMA0 is active */ #define DMA1_ACTIVE 0x04 /* DMA1 is active */ /* Given a desired period and the clock period (both in ns), return the proper counter value (divider-1). Sets the original period to be the true value. Note: you have to check if the value is larger than the counter range! */ static int rtd_ns_to_timer_base(unsigned int *nanosec, unsigned int flags, int base) { int divider; switch (flags & CMDF_ROUND_MASK) { case CMDF_ROUND_NEAREST: default: divider = (*nanosec + base / 2) / base; break; case CMDF_ROUND_DOWN: divider = (*nanosec) / base; break; case CMDF_ROUND_UP: divider = (*nanosec + base - 1) / base; break; } if (divider < 2) divider = 2; /* min is divide by 2 */ /* Note: we don't check for max, because different timers have different ranges */ *nanosec = base * divider; return divider - 1; /* countdown is divisor+1 */ } /* Given a desired period (in ns), return the proper counter value (divider-1) for the internal clock. Sets the original period to be the true value. 
*/ static int rtd_ns_to_timer(unsigned int *ns, unsigned int flags) { return rtd_ns_to_timer_base(ns, flags, RTD_CLOCK_BASE); } /* Convert a single comedi channel-gain entry to a RTD520 table entry */ static unsigned short rtd_convert_chan_gain(struct comedi_device *dev, unsigned int chanspec, int index) { const struct rtd_boardinfo *board = dev->board_ptr; unsigned int chan = CR_CHAN(chanspec); unsigned int range = CR_RANGE(chanspec); unsigned int aref = CR_AREF(chanspec); unsigned short r = 0; r |= chan & 0xf; /* Note: we also setup the channel list bipolar flag array */ if (range < board->range_bip10) { /* +-5 range */ r |= 0x000; r |= (range & 0x7) << 4; } else if (range < board->range_uni10) { /* +-10 range */ r |= 0x100; r |= ((range - board->range_bip10) & 0x7) << 4; } else { /* +10 range */ r |= 0x200; r |= ((range - board->range_uni10) & 0x7) << 4; } switch (aref) { case AREF_GROUND: /* on-board ground */ break; case AREF_COMMON: r |= 0x80; /* ref external analog common */ break; case AREF_DIFF: r |= 0x400; /* differential inputs */ break; case AREF_OTHER: /* ??? 
*/ break; } return r; } /* Setup the channel-gain table from a comedi list */ static void rtd_load_channelgain_list(struct comedi_device *dev, unsigned int n_chan, unsigned int *list) { if (n_chan > 1) { /* setup channel gain table */ int ii; writel(0, dev->mmio + LAS0_CGT_CLEAR); writel(1, dev->mmio + LAS0_CGT_ENABLE); for (ii = 0; ii < n_chan; ii++) { writel(rtd_convert_chan_gain(dev, list[ii], ii), dev->mmio + LAS0_CGT_WRITE); } } else { /* just use the channel gain latch */ writel(0, dev->mmio + LAS0_CGT_ENABLE); writel(rtd_convert_chan_gain(dev, list[0], 0), dev->mmio + LAS0_CGL_WRITE); } } /* determine fifo size by doing adc conversions until the fifo half empty status flag clears */ static int rtd520_probe_fifo_depth(struct comedi_device *dev) { unsigned int chanspec = CR_PACK(0, 0, AREF_GROUND); unsigned i; static const unsigned limit = 0x2000; unsigned fifo_size = 0; writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR); rtd_load_channelgain_list(dev, 1, &chanspec); /* ADC conversion trigger source: SOFTWARE */ writel(0, dev->mmio + LAS0_ADC_CONVERSION); /* convert samples */ for (i = 0; i < limit; ++i) { unsigned fifo_status; /* trigger conversion */ writew(0, dev->mmio + LAS0_ADC); udelay(1); fifo_status = readl(dev->mmio + LAS0_ADC); if ((fifo_status & FS_ADC_HEMPTY) == 0) { fifo_size = 2 * i; break; } } if (i == limit) { dev_info(dev->class_dev, "failed to probe fifo size.\n"); return -EIO; } writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR); if (fifo_size != 0x400 && fifo_size != 0x2000) { dev_info(dev->class_dev, "unexpected fifo size of %i, expected 1024 or 8192.\n", fifo_size); return -EIO; } return fifo_size; } static int rtd_ai_eoc(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned long context) { unsigned int status; status = readl(dev->mmio + LAS0_ADC); if (status & FS_ADC_NOT_EMPTY) return 0; return -EBUSY; } static int rtd_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned 
int *data) { struct rtd_private *devpriv = dev->private; unsigned int range = CR_RANGE(insn->chanspec); int ret; int n; /* clear any old fifo data */ writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR); /* write channel to multiplexer and clear channel gain table */ rtd_load_channelgain_list(dev, 1, &insn->chanspec); /* ADC conversion trigger source: SOFTWARE */ writel(0, dev->mmio + LAS0_ADC_CONVERSION); /* convert n samples */ for (n = 0; n < insn->n; n++) { unsigned short d; /* trigger conversion */ writew(0, dev->mmio + LAS0_ADC); ret = comedi_timeout(dev, s, insn, rtd_ai_eoc, 0); if (ret) return ret; /* read data */ d = readw(devpriv->las1 + LAS1_ADC_FIFO); d = d >> 3; /* low 3 bits are marker lines */ /* convert bipolar data to comedi unsigned data */ if (comedi_range_is_bipolar(s, range)) d = comedi_offset_munge(s, d); data[n] = d & s->maxdata; } /* return the number of samples read/written */ return n; } /* Get what we know is there.... Fast! This uses 1/2 the bus cycles of read_dregs (below). The manual claims that we can do a lword read, but it doesn't work here. */ static int ai_read_n(struct comedi_device *dev, struct comedi_subdevice *s, int count) { struct rtd_private *devpriv = dev->private; struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; int ii; for (ii = 0; ii < count; ii++) { unsigned int range = CR_RANGE(cmd->chanlist[async->cur_chan]); unsigned short d; if (0 == devpriv->ai_count) { /* done */ d = readw(devpriv->las1 + LAS1_ADC_FIFO); continue; } d = readw(devpriv->las1 + LAS1_ADC_FIFO); d = d >> 3; /* low 3 bits are marker lines */ /* convert bipolar data to comedi unsigned data */ if (comedi_range_is_bipolar(s, range)) d = comedi_offset_munge(s, d); d &= s->maxdata; if (!comedi_buf_write_samples(s, &d, 1)) return -1; if (devpriv->ai_count > 0) /* < 0, means read forever */ devpriv->ai_count--; } return 0; } /* Handle all rtd520 interrupts. Runs atomically and is never re-entered. 
This is a "slow handler"; other interrupts may be active. The data conversion may someday happen in a "bottom half". */ static irqreturn_t rtd_interrupt(int irq, void *d) { struct comedi_device *dev = d; struct comedi_subdevice *s = dev->read_subdev; struct rtd_private *devpriv = dev->private; u32 overrun; u16 status; u16 fifo_status; if (!dev->attached) return IRQ_NONE; fifo_status = readl(dev->mmio + LAS0_ADC); /* check for FIFO full, this automatically halts the ADC! */ if (!(fifo_status & FS_ADC_NOT_FULL)) /* 0 -> full */ goto xfer_abort; status = readw(dev->mmio + LAS0_IT); /* if interrupt was not caused by our board, or handled above */ if (0 == status) return IRQ_HANDLED; if (status & IRQM_ADC_ABOUT_CNT) { /* sample count -> read FIFO */ /* * since the priority interrupt controller may have queued * a sample counter interrupt, even though we have already * finished, we must handle the possibility that there is * no data here */ if (!(fifo_status & FS_ADC_HEMPTY)) { /* FIFO half full */ if (ai_read_n(dev, s, devpriv->fifosz / 2) < 0) goto xfer_abort; if (0 == devpriv->ai_count) goto xfer_done; } else if (devpriv->xfer_count > 0) { if (fifo_status & FS_ADC_NOT_EMPTY) { /* FIFO not empty */ if (ai_read_n(dev, s, devpriv->xfer_count) < 0) goto xfer_abort; if (0 == devpriv->ai_count) goto xfer_done; } } } overrun = readl(dev->mmio + LAS0_OVERRUN) & 0xffff; if (overrun) goto xfer_abort; /* clear the interrupt */ writew(status, dev->mmio + LAS0_CLEAR); readw(dev->mmio + LAS0_CLEAR); comedi_handle_events(dev, s); return IRQ_HANDLED; xfer_abort: s->async->events |= COMEDI_CB_ERROR; xfer_done: s->async->events |= COMEDI_CB_EOA; /* clear the interrupt */ status = readw(dev->mmio + LAS0_IT); writew(status, dev->mmio + LAS0_CLEAR); readw(dev->mmio + LAS0_CLEAR); fifo_status = readl(dev->mmio + LAS0_ADC); overrun = readl(dev->mmio + LAS0_OVERRUN) & 0xffff; comedi_handle_events(dev, s); return IRQ_HANDLED; } /* cmdtest tests a particular command to see if it is valid. 
Using the cmdtest ioctl, a user can create a valid cmd and then have it executed by the cmd ioctl (asynchronously). cmdtest returns 1,2,3,4 or 0, depending on which tests the command passes. */ static int rtd_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; unsigned int arg; /* Step 1 : check if triggers are trivially valid */ err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW); err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER | TRIG_EXT); err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_TIMER | TRIG_EXT); err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT); err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE); if (err) return 1; /* Step 2a : make sure trigger sources are unique */ err |= cfc_check_trigger_is_unique(cmd->scan_begin_src); err |= cfc_check_trigger_is_unique(cmd->convert_src); err |= cfc_check_trigger_is_unique(cmd->stop_src); /* Step 2b : and mutually compatible */ if (err) return 2; /* Step 3: check if arguments are trivially valid */ err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0); if (cmd->scan_begin_src == TRIG_TIMER) { /* Note: these are time periods, not actual rates */ if (1 == cmd->chanlist_len) { /* no scanning */ if (cfc_check_trigger_arg_min(&cmd->scan_begin_arg, RTD_MAX_SPEED_1)) { rtd_ns_to_timer(&cmd->scan_begin_arg, CMDF_ROUND_UP); err |= -EINVAL; } if (cfc_check_trigger_arg_max(&cmd->scan_begin_arg, RTD_MIN_SPEED_1)) { rtd_ns_to_timer(&cmd->scan_begin_arg, CMDF_ROUND_DOWN); err |= -EINVAL; } } else { if (cfc_check_trigger_arg_min(&cmd->scan_begin_arg, RTD_MAX_SPEED)) { rtd_ns_to_timer(&cmd->scan_begin_arg, CMDF_ROUND_UP); err |= -EINVAL; } if (cfc_check_trigger_arg_max(&cmd->scan_begin_arg, RTD_MIN_SPEED)) { rtd_ns_to_timer(&cmd->scan_begin_arg, CMDF_ROUND_DOWN); err |= -EINVAL; } } } else { /* external trigger */ /* should be level/edge, hi/lo specification here */ /* should specify multiple external triggers */ err |= 
cfc_check_trigger_arg_max(&cmd->scan_begin_arg, 9); } if (cmd->convert_src == TRIG_TIMER) { if (1 == cmd->chanlist_len) { /* no scanning */ if (cfc_check_trigger_arg_min(&cmd->convert_arg, RTD_MAX_SPEED_1)) { rtd_ns_to_timer(&cmd->convert_arg, CMDF_ROUND_UP); err |= -EINVAL; } if (cfc_check_trigger_arg_max(&cmd->convert_arg, RTD_MIN_SPEED_1)) { rtd_ns_to_timer(&cmd->convert_arg, CMDF_ROUND_DOWN); err |= -EINVAL; } } else { if (cfc_check_trigger_arg_min(&cmd->convert_arg, RTD_MAX_SPEED)) { rtd_ns_to_timer(&cmd->convert_arg, CMDF_ROUND_UP); err |= -EINVAL; } if (cfc_check_trigger_arg_max(&cmd->convert_arg, RTD_MIN_SPEED)) { rtd_ns_to_timer(&cmd->convert_arg, CMDF_ROUND_DOWN); err |= -EINVAL; } } } else { /* external trigger */ /* see above */ err |= cfc_check_trigger_arg_max(&cmd->convert_arg, 9); } err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len); if (cmd->stop_src == TRIG_COUNT) err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1); else /* TRIG_NONE */ err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0); if (err) return 3; /* step 4: fix up any arguments */ if (cmd->scan_begin_src == TRIG_TIMER) { arg = cmd->scan_begin_arg; rtd_ns_to_timer(&arg, cmd->flags); err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, arg); } if (cmd->convert_src == TRIG_TIMER) { arg = cmd->convert_arg; rtd_ns_to_timer(&arg, cmd->flags); err |= cfc_check_trigger_arg_is(&cmd->convert_arg, arg); if (cmd->scan_begin_src == TRIG_TIMER) { arg = cmd->convert_arg * cmd->scan_end_arg; err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg, arg); } } if (err) return 4; return 0; } /* Execute a analog in command with many possible triggering options. The data get stored in the async structure of the subdevice. This is usually done by an interrupt handler. Userland gets to the data using read calls. 
*/ static int rtd_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct rtd_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; int timer; /* stop anything currently running */ /* pacer stop source: SOFTWARE */ writel(0, dev->mmio + LAS0_PACER_STOP); writel(0, dev->mmio + LAS0_PACER); /* stop pacer */ writel(0, dev->mmio + LAS0_ADC_CONVERSION); writew(0, dev->mmio + LAS0_IT); writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR); writel(0, dev->mmio + LAS0_OVERRUN); /* start configuration */ /* load channel list and reset CGT */ rtd_load_channelgain_list(dev, cmd->chanlist_len, cmd->chanlist); /* setup the common case and override if needed */ if (cmd->chanlist_len > 1) { /* pacer start source: SOFTWARE */ writel(0, dev->mmio + LAS0_PACER_START); /* burst trigger source: PACER */ writel(1, dev->mmio + LAS0_BURST_START); /* ADC conversion trigger source: BURST */ writel(2, dev->mmio + LAS0_ADC_CONVERSION); } else { /* single channel */ /* pacer start source: SOFTWARE */ writel(0, dev->mmio + LAS0_PACER_START); /* ADC conversion trigger source: PACER */ writel(1, dev->mmio + LAS0_ADC_CONVERSION); } writel((devpriv->fifosz / 2 - 1) & 0xffff, dev->mmio + LAS0_ACNT); if (TRIG_TIMER == cmd->scan_begin_src) { /* scan_begin_arg is in nanoseconds */ /* find out how many samples to wait before transferring */ if (cmd->flags & CMDF_WAKE_EOS) { /* * this may generate un-sustainable interrupt rates * the application is responsible for doing the * right thing */ devpriv->xfer_count = cmd->chanlist_len; devpriv->flags |= SEND_EOS; } else { /* arrange to transfer data periodically */ devpriv->xfer_count = (TRANS_TARGET_PERIOD * cmd->chanlist_len) / cmd->scan_begin_arg; if (devpriv->xfer_count < cmd->chanlist_len) { /* transfer after each scan (and avoid 0) */ devpriv->xfer_count = cmd->chanlist_len; } else { /* make a multiple of scan length */ devpriv->xfer_count = (devpriv->xfer_count + cmd->chanlist_len - 1) / cmd->chanlist_len; devpriv->xfer_count 
*= cmd->chanlist_len; } devpriv->flags |= SEND_EOS; } if (devpriv->xfer_count >= (devpriv->fifosz / 2)) { /* out of counter range, use 1/2 fifo instead */ devpriv->xfer_count = 0; devpriv->flags &= ~SEND_EOS; } else { /* interrupt for each transfer */ writel((devpriv->xfer_count - 1) & 0xffff, dev->mmio + LAS0_ACNT); } } else { /* unknown timing, just use 1/2 FIFO */ devpriv->xfer_count = 0; devpriv->flags &= ~SEND_EOS; } /* pacer clock source: INTERNAL 8MHz */ writel(1, dev->mmio + LAS0_PACER_SELECT); /* just interrupt, don't stop */ writel(1, dev->mmio + LAS0_ACNT_STOP_ENABLE); /* BUG??? these look like enumerated values, but they are bit fields */ /* First, setup when to stop */ switch (cmd->stop_src) { case TRIG_COUNT: /* stop after N scans */ devpriv->ai_count = cmd->stop_arg * cmd->chanlist_len; if ((devpriv->xfer_count > 0) && (devpriv->xfer_count > devpriv->ai_count)) { devpriv->xfer_count = devpriv->ai_count; } break; case TRIG_NONE: /* stop when cancel is called */ devpriv->ai_count = -1; /* read forever */ break; } /* Scan timing */ switch (cmd->scan_begin_src) { case TRIG_TIMER: /* periodic scanning */ timer = rtd_ns_to_timer(&cmd->scan_begin_arg, CMDF_ROUND_NEAREST); /* set PACER clock */ writel(timer & 0xffffff, dev->mmio + LAS0_PCLK); break; case TRIG_EXT: /* pacer start source: EXTERNAL */ writel(1, dev->mmio + LAS0_PACER_START); break; } /* Sample timing within a scan */ switch (cmd->convert_src) { case TRIG_TIMER: /* periodic */ if (cmd->chanlist_len > 1) { /* only needed for multi-channel */ timer = rtd_ns_to_timer(&cmd->convert_arg, CMDF_ROUND_NEAREST); /* setup BURST clock */ writel(timer & 0x3ff, dev->mmio + LAS0_BCLK); } break; case TRIG_EXT: /* external */ /* burst trigger source: EXTERNAL */ writel(2, dev->mmio + LAS0_BURST_START); break; } /* end configuration */ /* This doesn't seem to work. There is no way to clear an interrupt that the priority controller has queued! 
*/ writew(~0, dev->mmio + LAS0_CLEAR); readw(dev->mmio + LAS0_CLEAR); /* TODO: allow multiple interrupt sources */ if (devpriv->xfer_count > 0) /* transfer every N samples */ writew(IRQM_ADC_ABOUT_CNT, dev->mmio + LAS0_IT); else /* 1/2 FIFO transfers */ writew(IRQM_ADC_ABOUT_CNT, dev->mmio + LAS0_IT); /* BUG: start_src is ASSUMED to be TRIG_NOW */ /* BUG? it seems like things are running before the "start" */ readl(dev->mmio + LAS0_PACER); /* start pacer */ return 0; } /* Stop a running data acquisition. */ static int rtd_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { struct rtd_private *devpriv = dev->private; u32 overrun; u16 status; /* pacer stop source: SOFTWARE */ writel(0, dev->mmio + LAS0_PACER_STOP); writel(0, dev->mmio + LAS0_PACER); /* stop pacer */ writel(0, dev->mmio + LAS0_ADC_CONVERSION); writew(0, dev->mmio + LAS0_IT); devpriv->ai_count = 0; /* stop and don't transfer any more */ status = readw(dev->mmio + LAS0_IT); overrun = readl(dev->mmio + LAS0_OVERRUN) & 0xffff; writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR); return 0; } static int rtd_ao_eoc(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned long context) { unsigned int chan = CR_CHAN(insn->chanspec); unsigned int bit = (chan == 0) ? FS_DAC1_NOT_EMPTY : FS_DAC2_NOT_EMPTY; unsigned int status; status = readl(dev->mmio + LAS0_ADC); if (status & bit) return 0; return -EBUSY; } static int rtd_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct rtd_private *devpriv = dev->private; int i; int chan = CR_CHAN(insn->chanspec); int range = CR_RANGE(insn->chanspec); int ret; /* Configure the output range (table index matches the range values) */ writew(range & 7, dev->mmio + ((chan == 0) ? LAS0_DAC1_CTRL : LAS0_DAC2_CTRL)); /* Writing a list of values to an AO channel is probably not * very useful, but that's how the interface is defined. 
*/ for (i = 0; i < insn->n; ++i) { int val = data[i] << 3; /* VERIFY: comedi range and offset conversions */ if ((range > 1) /* bipolar */ && (data[i] < 2048)) { /* offset and sign extend */ val = (((int)data[i]) - 2048) << 3; } else { /* unipolor */ val = data[i] << 3; } /* a typical programming sequence */ writew(val, devpriv->las1 + ((chan == 0) ? LAS1_DAC1_FIFO : LAS1_DAC2_FIFO)); writew(0, dev->mmio + ((chan == 0) ? LAS0_DAC1 : LAS0_DAC2)); s->readback[chan] = data[i]; ret = comedi_timeout(dev, s, insn, rtd_ao_eoc, 0); if (ret) return ret; } /* return the number of samples read/written */ return i; } static int rtd_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (comedi_dio_update_state(s, data)) writew(s->state & 0xff, dev->mmio + LAS0_DIO0); data[1] = readw(dev->mmio + LAS0_DIO0) & 0xff; return insn->n; } static int rtd_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int ret; ret = comedi_dio_insn_config(dev, s, insn, data, 0); if (ret) return ret; /* TODO support digital match interrupts and strobes */ /* set direction */ writew(0x01, dev->mmio + LAS0_DIO_STATUS); writew(s->io_bits & 0xff, dev->mmio + LAS0_DIO0_CTRL); /* clear interrupts */ writew(0x00, dev->mmio + LAS0_DIO_STATUS); /* port1 can only be all input or all output */ /* there are also 2 user input lines and 2 user output lines */ return insn->n; } static void rtd_reset(struct comedi_device *dev) { struct rtd_private *devpriv = dev->private; writel(0, dev->mmio + LAS0_BOARD_RESET); udelay(100); /* needed? 
*/ writel(0, devpriv->lcfg + PLX_INTRCS_REG); writew(0, dev->mmio + LAS0_IT); writew(~0, dev->mmio + LAS0_CLEAR); readw(dev->mmio + LAS0_CLEAR); } /* * initialize board, per RTD spec * also, initialize shadow registers */ static void rtd_init_board(struct comedi_device *dev) { rtd_reset(dev); writel(0, dev->mmio + LAS0_OVERRUN); writel(0, dev->mmio + LAS0_CGT_CLEAR); writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR); writel(0, dev->mmio + LAS0_DAC1_RESET); writel(0, dev->mmio + LAS0_DAC2_RESET); /* clear digital IO fifo */ writew(0, dev->mmio + LAS0_DIO_STATUS); writeb((0 << 6) | 0x30, dev->mmio + LAS0_UTC_CTRL); writeb((1 << 6) | 0x30, dev->mmio + LAS0_UTC_CTRL); writeb((2 << 6) | 0x30, dev->mmio + LAS0_UTC_CTRL); writeb((3 << 6) | 0x00, dev->mmio + LAS0_UTC_CTRL); /* TODO: set user out source ??? */ } /* The RTD driver does this */ static void rtd_pci_latency_quirk(struct comedi_device *dev, struct pci_dev *pcidev) { unsigned char pci_latency; pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency); if (pci_latency < 32) { dev_info(dev->class_dev, "PCI latency changed from %d to %d\n", pci_latency, 32); pci_write_config_byte(pcidev, PCI_LATENCY_TIMER, 32); } } static int rtd_auto_attach(struct comedi_device *dev, unsigned long context) { struct pci_dev *pcidev = comedi_to_pci_dev(dev); const struct rtd_boardinfo *board = NULL; struct rtd_private *devpriv; struct comedi_subdevice *s; int ret; if (context < ARRAY_SIZE(rtd520Boards)) board = &rtd520Boards[context]; if (!board) return -ENODEV; dev->board_ptr = board; dev->board_name = board->name; devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv)); if (!devpriv) return -ENOMEM; ret = comedi_pci_enable(dev); if (ret) return ret; dev->mmio = pci_ioremap_bar(pcidev, 2); devpriv->las1 = pci_ioremap_bar(pcidev, 3); devpriv->lcfg = pci_ioremap_bar(pcidev, 0); if (!dev->mmio || !devpriv->las1 || !devpriv->lcfg) return -ENOMEM; rtd_pci_latency_quirk(dev, pcidev); if (pcidev->irq) { ret = request_irq(pcidev->irq, 
rtd_interrupt, IRQF_SHARED, dev->board_name, dev); if (ret == 0) dev->irq = pcidev->irq; } ret = comedi_alloc_subdevices(dev, 4); if (ret) return ret; s = &dev->subdevices[0]; /* analog input subdevice */ s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON | SDF_DIFF; s->n_chan = 16; s->maxdata = 0x0fff; s->range_table = board->ai_range; s->len_chanlist = RTD_MAX_CHANLIST; s->insn_read = rtd_ai_rinsn; if (dev->irq) { dev->read_subdev = s; s->subdev_flags |= SDF_CMD_READ; s->do_cmd = rtd_ai_cmd; s->do_cmdtest = rtd_ai_cmdtest; s->cancel = rtd_ai_cancel; } s = &dev->subdevices[1]; /* analog output subdevice */ s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE; s->n_chan = 2; s->maxdata = 0x0fff; s->range_table = &rtd_ao_range; s->insn_write = rtd_ao_winsn; ret = comedi_alloc_subdev_readback(s); if (ret) return ret; s = &dev->subdevices[2]; /* digital i/o subdevice */ s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; /* we only support port 0 right now. 
Ignoring port 1 and user IO */ s->n_chan = 8; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = rtd_dio_insn_bits; s->insn_config = rtd_dio_insn_config; /* timer/counter subdevices (not currently supported) */ s = &dev->subdevices[3]; s->type = COMEDI_SUBD_COUNTER; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 3; s->maxdata = 0xffff; rtd_init_board(dev); ret = rtd520_probe_fifo_depth(dev); if (ret < 0) return ret; devpriv->fifosz = ret; if (dev->irq) writel(ICS_PIE | ICS_PLIE, devpriv->lcfg + PLX_INTRCS_REG); return 0; } static void rtd_detach(struct comedi_device *dev) { struct rtd_private *devpriv = dev->private; if (devpriv) { /* Shut down any board ops by resetting it */ if (dev->mmio && devpriv->lcfg) rtd_reset(dev); if (dev->irq) { writel(readl(devpriv->lcfg + PLX_INTRCS_REG) & ~(ICS_PLIE | ICS_DMA0_E | ICS_DMA1_E), devpriv->lcfg + PLX_INTRCS_REG); free_irq(dev->irq, dev); } if (dev->mmio) iounmap(dev->mmio); if (devpriv->las1) iounmap(devpriv->las1); if (devpriv->lcfg) iounmap(devpriv->lcfg); } comedi_pci_disable(dev); } static struct comedi_driver rtd520_driver = { .driver_name = "rtd520", .module = THIS_MODULE, .auto_attach = rtd_auto_attach, .detach = rtd_detach, }; static int rtd520_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { return comedi_pci_auto_config(dev, &rtd520_driver, id->driver_data); } static const struct pci_device_id rtd520_pci_table[] = { { PCI_VDEVICE(RTD, 0x7520), BOARD_DM7520 }, { PCI_VDEVICE(RTD, 0x4520), BOARD_PCI4520 }, { 0 } }; MODULE_DEVICE_TABLE(pci, rtd520_pci_table); static struct pci_driver rtd520_pci_driver = { .name = "rtd520", .id_table = rtd520_pci_table, .probe = rtd520_pci_probe, .remove = comedi_pci_auto_unconfig, }; module_comedi_pci_driver(rtd520_driver, rtd520_pci_driver); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
kendling/android_kernel_google_dragon
drivers/net/bonding/bond_debugfs.c
333
2940
#include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/netdevice.h> #include "bonding.h" #include "bond_alb.h" #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS) #include <linux/debugfs.h> #include <linux/seq_file.h> static struct dentry *bonding_debug_root; /* Show RLB hash table */ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v) { struct bonding *bond = m->private; struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct rlb_client_info *client_info; u32 hash_index; if (BOND_MODE(bond) != BOND_MODE_ALB) return 0; seq_printf(m, "SourceIP DestinationIP " "Destination MAC DEV\n"); spin_lock_bh(&bond->mode_lock); hash_index = bond_info->rx_hashtbl_used_head; for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->used_next) { client_info = &(bond_info->rx_hashtbl[hash_index]); seq_printf(m, "%-15pI4 %-15pI4 %-17pM %s\n", &client_info->ip_src, &client_info->ip_dst, &client_info->mac_dst, client_info->slave->dev->name); } spin_unlock_bh(&bond->mode_lock); return 0; } static int bond_debug_rlb_hash_open(struct inode *inode, struct file *file) { return single_open(file, bond_debug_rlb_hash_show, inode->i_private); } static const struct file_operations bond_debug_rlb_hash_fops = { .owner = THIS_MODULE, .open = bond_debug_rlb_hash_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; void bond_debug_register(struct bonding *bond) { if (!bonding_debug_root) return; bond->debug_dir = debugfs_create_dir(bond->dev->name, bonding_debug_root); if (!bond->debug_dir) { netdev_warn(bond->dev, "failed to register to debugfs\n"); return; } debugfs_create_file("rlb_hash_table", 0400, bond->debug_dir, bond, &bond_debug_rlb_hash_fops); } void bond_debug_unregister(struct bonding *bond) { if (!bonding_debug_root) return; debugfs_remove_recursive(bond->debug_dir); } void bond_debug_reregister(struct bonding *bond) { struct dentry *d; if (!bonding_debug_root) return; d = 
debugfs_rename(bonding_debug_root, bond->debug_dir, bonding_debug_root, bond->dev->name); if (d) { bond->debug_dir = d; } else { netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n"); bond_debug_unregister(bond); } } void bond_create_debugfs(void) { bonding_debug_root = debugfs_create_dir("bonding", NULL); if (!bonding_debug_root) { pr_warn("Warning: Cannot create bonding directory in debugfs\n"); } } void bond_destroy_debugfs(void) { debugfs_remove_recursive(bonding_debug_root); bonding_debug_root = NULL; } #else /* !CONFIG_DEBUG_FS */ void bond_debug_register(struct bonding *bond) { } void bond_debug_unregister(struct bonding *bond) { } void bond_debug_reregister(struct bonding *bond) { } void bond_create_debugfs(void) { } void bond_destroy_debugfs(void) { } #endif /* CONFIG_DEBUG_FS */
gpl-2.0
dh-electronics/linux-am33x
drivers/media/usb/dvb-usb/dw2102.c
589
56017
/* DVB USB framework compliant Linux driver for the * DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101, * TeVii S600, S630, S650, S660, S480, S421, S632 * Prof 1100, 7500, * Geniatech SU3000, T220, * TechnoTrend S2-4600 Cards * Copyright (C) 2008-2012 Igor M. Liplianin (liplianin@me.by) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation, version 2. * * see Documentation/dvb/README.dvb-usb for more information */ #include "dw2102.h" #include "si21xx.h" #include "stv0299.h" #include "z0194a.h" #include "stv0288.h" #include "stb6000.h" #include "eds1547.h" #include "cx24116.h" #include "tda1002x.h" #include "mt312.h" #include "zl10039.h" #include "ts2020.h" #include "ds3000.h" #include "stv0900.h" #include "stv6110.h" #include "stb6100.h" #include "stb6100_proc.h" #include "m88rs2000.h" #include "tda18271.h" #include "cxd2820r.h" #include "m88ds3103.h" #include "ts2020.h" /* Max transfer size done by I2C transfer functions */ #define MAX_XFER_SIZE 64 #ifndef USB_PID_DW2102 #define USB_PID_DW2102 0x2102 #endif #ifndef USB_PID_DW2104 #define USB_PID_DW2104 0x2104 #endif #ifndef USB_PID_DW3101 #define USB_PID_DW3101 0x3101 #endif #ifndef USB_PID_CINERGY_S #define USB_PID_CINERGY_S 0x0064 #endif #ifndef USB_PID_TEVII_S630 #define USB_PID_TEVII_S630 0xd630 #endif #ifndef USB_PID_TEVII_S650 #define USB_PID_TEVII_S650 0xd650 #endif #ifndef USB_PID_TEVII_S660 #define USB_PID_TEVII_S660 0xd660 #endif #ifndef USB_PID_TEVII_S480_1 #define USB_PID_TEVII_S480_1 0xd481 #endif #ifndef USB_PID_TEVII_S480_2 #define USB_PID_TEVII_S480_2 0xd482 #endif #ifndef USB_PID_PROF_1100 #define USB_PID_PROF_1100 0xb012 #endif #ifndef USB_PID_TEVII_S421 #define USB_PID_TEVII_S421 0xd421 #endif #ifndef USB_PID_TEVII_S632 #define USB_PID_TEVII_S632 0xd632 #endif #ifndef USB_PID_GOTVIEW_SAT_HD #define USB_PID_GOTVIEW_SAT_HD 0x5456 #endif #define DW210X_READ_MSG 0 #define 
DW210X_WRITE_MSG 1 #define REG_1F_SYMBOLRATE_BYTE0 0x1f #define REG_20_SYMBOLRATE_BYTE1 0x20 #define REG_21_SYMBOLRATE_BYTE2 0x21 /* on my own*/ #define DW2102_VOLTAGE_CTRL (0x1800) #define SU3000_STREAM_CTRL (0x1900) #define DW2102_RC_QUERY (0x1a00) #define DW2102_LED_CTRL (0x1b00) #define DW2101_FIRMWARE "dvb-usb-dw2101.fw" #define DW2102_FIRMWARE "dvb-usb-dw2102.fw" #define DW2104_FIRMWARE "dvb-usb-dw2104.fw" #define DW3101_FIRMWARE "dvb-usb-dw3101.fw" #define S630_FIRMWARE "dvb-usb-s630.fw" #define S660_FIRMWARE "dvb-usb-s660.fw" #define P1100_FIRMWARE "dvb-usb-p1100.fw" #define P7500_FIRMWARE "dvb-usb-p7500.fw" #define err_str "did not find the firmware file. (%s) " \ "Please see linux/Documentation/dvb/ for more details " \ "on firmware-problems." struct dw2102_state { u8 initialized; u8 last_lock; struct i2c_client *i2c_client_tuner; /* fe hook functions*/ int (*old_set_voltage)(struct dvb_frontend *f, enum fe_sec_voltage v); int (*fe_read_status)(struct dvb_frontend *fe, enum fe_status *status); }; /* debug */ static int dvb_usb_dw2102_debug; module_param_named(debug, dvb_usb_dw2102_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info 2=xfer 4=rc(or-able))." DVB_USB_DEBUG_STATUS); /* demod probe */ static int demod_probe = 1; module_param_named(demod, demod_probe, int, 0644); MODULE_PARM_DESC(demod, "demod to probe (1=cx24116 2=stv0903+stv6110 " "4=stv0903+stb6100(or-able))."); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int dw210x_op_rw(struct usb_device *dev, u8 request, u16 value, u16 index, u8 * data, u16 len, int flags) { int ret; u8 *u8buf; unsigned int pipe = (flags == DW210X_READ_MSG) ? usb_rcvctrlpipe(dev, 0) : usb_sndctrlpipe(dev, 0); u8 request_type = (flags == DW210X_READ_MSG) ? 
USB_DIR_IN : USB_DIR_OUT; u8buf = kmalloc(len, GFP_KERNEL); if (!u8buf) return -ENOMEM; if (flags == DW210X_WRITE_MSG) memcpy(u8buf, data, len); ret = usb_control_msg(dev, pipe, request, request_type | USB_TYPE_VENDOR, value, index , u8buf, len, 2000); if (flags == DW210X_READ_MSG) memcpy(data, u8buf, len); kfree(u8buf); return ret; } /* I2C */ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int i = 0; u8 buf6[] = {0x2c, 0x05, 0xc0, 0, 0, 0, 0}; u16 value; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: /* read stv0299 register */ value = msg[0].buf[0];/* register */ for (i = 0; i < msg[1].len; i++) { dw210x_op_rw(d->udev, 0xb5, value + i, 0, buf6, 2, DW210X_READ_MSG); msg[1].buf[i] = buf6[0]; } break; case 1: switch (msg[0].addr) { case 0x68: /* write to stv0299 register */ buf6[0] = 0x2a; buf6[1] = msg[0].buf[0]; buf6[2] = msg[0].buf[1]; dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 3, DW210X_WRITE_MSG); break; case 0x60: if (msg[0].flags == 0) { /* write to tuner pll */ buf6[0] = 0x2c; buf6[1] = 5; buf6[2] = 0xc0; buf6[3] = msg[0].buf[0]; buf6[4] = msg[0].buf[1]; buf6[5] = msg[0].buf[2]; buf6[6] = msg[0].buf[3]; dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 7, DW210X_WRITE_MSG); } else { /* read from tuner */ dw210x_op_rw(d->udev, 0xb5, 0, 0, buf6, 1, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; } break; case (DW2102_RC_QUERY): dw210x_op_rw(d->udev, 0xb8, 0, 0, buf6, 2, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; msg[0].buf[1] = buf6[1]; break; case (DW2102_VOLTAGE_CTRL): buf6[0] = 0x30; buf6[1] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 2, DW210X_WRITE_MSG); break; } break; } mutex_unlock(&d->i2c_mutex); return num; } static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); u8 buf6[] = {0, 0, 0, 0, 0, 0, 0}; if (!d) 
return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: /* read si2109 register by number */ buf6[0] = msg[0].addr << 1; buf6[1] = msg[0].len; buf6[2] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6, msg[0].len + 2, DW210X_WRITE_MSG); /* read si2109 register */ dw210x_op_rw(d->udev, 0xc3, 0xd0, 0, buf6, msg[1].len + 2, DW210X_READ_MSG); memcpy(msg[1].buf, buf6 + 2, msg[1].len); break; case 1: switch (msg[0].addr) { case 0x68: /* write to si2109 register */ buf6[0] = msg[0].addr << 1; buf6[1] = msg[0].len; memcpy(buf6 + 2, msg[0].buf, msg[0].len); dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6, msg[0].len + 2, DW210X_WRITE_MSG); break; case(DW2102_RC_QUERY): dw210x_op_rw(d->udev, 0xb8, 0, 0, buf6, 2, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; msg[0].buf[1] = buf6[1]; break; case(DW2102_VOLTAGE_CTRL): buf6[0] = 0x30; buf6[1] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 2, DW210X_WRITE_MSG); break; } break; } mutex_unlock(&d->i2c_mutex); return num; } static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: { /* read */ /* first write first register number */ u8 ibuf[MAX_XFER_SIZE], obuf[3]; if (2 + msg[1].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[1].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; obuf[2] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); /* second read registers */ dw210x_op_rw(d->udev, 0xc3, 0xd1 , 0, ibuf, msg[1].len + 2, DW210X_READ_MSG); memcpy(msg[1].buf, ibuf + 2, msg[1].len); break; } case 1: switch (msg[0].addr) { case 0x68: { /* write to register */ u8 obuf[MAX_XFER_SIZE]; if (2 + msg[0].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[1].len); ret = 
-EOPNOTSUPP; goto unlock; } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); break; } case 0x61: { /* write to tuner */ u8 obuf[MAX_XFER_SIZE]; if (2 + msg[0].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[1].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); break; } case(DW2102_RC_QUERY): { u8 ibuf[2]; dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 2, DW210X_READ_MSG); memcpy(msg[0].buf, ibuf , 2); break; } case(DW2102_VOLTAGE_CTRL): { u8 obuf[2]; obuf[0] = 0x30; obuf[1] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xb2, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } } break; } ret = num; unlock: mutex_unlock(&d->i2c_mutex); return ret; } static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int len, i, j, ret; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (j = 0; j < num; j++) { switch (msg[j].addr) { case(DW2102_RC_QUERY): { u8 ibuf[2]; dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 2, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf , 2); break; } case(DW2102_VOLTAGE_CTRL): { u8 obuf[2]; obuf[0] = 0x30; obuf[1] = msg[j].buf[0]; dw210x_op_rw(d->udev, 0xb2, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } /*case 0x55: cx24116 case 0x6a: stv0903 case 0x68: ds3000, stv0903 case 0x60: ts2020, stv6110, stb6100 */ default: { if (msg[j].flags == I2C_M_RD) { /* read registers */ u8 ibuf[MAX_XFER_SIZE]; if (2 + msg[j].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[j].len); ret = -EOPNOTSUPP; goto unlock; } dw210x_op_rw(d->udev, 0xc3, (msg[j].addr << 1) + 1, 0, ibuf, msg[j].len + 2, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf + 2, msg[j].len); mdelay(10); } else if 
(((msg[j].buf[0] == 0xb0) && (msg[j].addr == 0x68)) || ((msg[j].buf[0] == 0xf7) && (msg[j].addr == 0x55))) { /* write firmware */ u8 obuf[19]; obuf[0] = msg[j].addr << 1; obuf[1] = (msg[j].len > 15 ? 17 : msg[j].len); obuf[2] = msg[j].buf[0]; len = msg[j].len - 1; i = 1; do { memcpy(obuf + 3, msg[j].buf + i, (len > 16 ? 16 : len)); dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, (len > 16 ? 16 : len) + 3, DW210X_WRITE_MSG); i += 16; len -= 16; } while (len > 0); } else { /* write registers */ u8 obuf[MAX_XFER_SIZE]; if (2 + msg[j].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[j].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[j].addr << 1; obuf[1] = msg[j].len; memcpy(obuf + 2, msg[j].buf, msg[j].len); dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[j].len + 2, DW210X_WRITE_MSG); } break; } } } ret = num; unlock: mutex_unlock(&d->i2c_mutex); return ret; } static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret; int i; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: { /* read */ /* first write first register number */ u8 ibuf[MAX_XFER_SIZE], obuf[3]; if (2 + msg[1].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[1].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; obuf[2] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); /* second read registers */ dw210x_op_rw(d->udev, 0xc3, 0x19 , 0, ibuf, msg[1].len + 2, DW210X_READ_MSG); memcpy(msg[1].buf, ibuf + 2, msg[1].len); break; } case 1: switch (msg[0].addr) { case 0x60: case 0x0c: { /* write to register */ u8 obuf[MAX_XFER_SIZE]; if (2 + msg[0].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[0].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); dw210x_op_rw(d->udev, 
0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); break; } case(DW2102_RC_QUERY): { u8 ibuf[2]; dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 2, DW210X_READ_MSG); memcpy(msg[0].buf, ibuf , 2); break; } } break; } for (i = 0; i < num; i++) { deb_xfer("%02x:%02x: %s ", i, msg[i].addr, msg[i].flags == 0 ? ">>>" : "<<<"); debug_dump(msg[i].buf, msg[i].len, deb_xfer); } ret = num; unlock: mutex_unlock(&d->i2c_mutex); return ret; } static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); struct usb_device *udev; int len, i, j, ret; if (!d) return -ENODEV; udev = d->udev; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (j = 0; j < num; j++) { switch (msg[j].addr) { case (DW2102_RC_QUERY): { u8 ibuf[5]; dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 5, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf + 3, 2); break; } case (DW2102_VOLTAGE_CTRL): { u8 obuf[2]; obuf[0] = 1; obuf[1] = msg[j].buf[1];/* off-on */ dw210x_op_rw(d->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); obuf[0] = 3; obuf[1] = msg[j].buf[0];/* 13v-18v */ dw210x_op_rw(d->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } case (DW2102_LED_CTRL): { u8 obuf[2]; obuf[0] = 5; obuf[1] = msg[j].buf[0]; dw210x_op_rw(d->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } /*case 0x55: cx24116 case 0x6a: stv0903 case 0x68: ds3000, stv0903, rs2000 case 0x60: ts2020, stv6110, stb6100 case 0xa0: eeprom */ default: { if (msg[j].flags == I2C_M_RD) { /* read registers */ u8 ibuf[MAX_XFER_SIZE]; if (msg[j].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[j].len); ret = -EOPNOTSUPP; goto unlock; } dw210x_op_rw(d->udev, 0x91, 0, 0, ibuf, msg[j].len, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf, msg[j].len); break; } else if ((msg[j].buf[0] == 0xb0) && (msg[j].addr == 0x68)) { /* write firmware */ u8 obuf[19]; obuf[0] = (msg[j].len > 16 ? 
18 : msg[j].len + 1); obuf[1] = msg[j].addr << 1; obuf[2] = msg[j].buf[0]; len = msg[j].len - 1; i = 1; do { memcpy(obuf + 3, msg[j].buf + i, (len > 16 ? 16 : len)); dw210x_op_rw(d->udev, 0x80, 0, 0, obuf, (len > 16 ? 16 : len) + 3, DW210X_WRITE_MSG); i += 16; len -= 16; } while (len > 0); } else if (j < (num - 1)) { /* write register addr before read */ u8 obuf[MAX_XFER_SIZE]; if (2 + msg[j].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[j].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[j + 1].len; obuf[1] = (msg[j].addr << 1); memcpy(obuf + 2, msg[j].buf, msg[j].len); dw210x_op_rw(d->udev, le16_to_cpu(udev->descriptor.idProduct) == 0x7500 ? 0x92 : 0x90, 0, 0, obuf, msg[j].len + 2, DW210X_WRITE_MSG); break; } else { /* write registers */ u8 obuf[MAX_XFER_SIZE]; if (2 + msg[j].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[j].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[j].len + 1; obuf[1] = (msg[j].addr << 1); memcpy(obuf + 2, msg[j].buf, msg[j].len); dw210x_op_rw(d->udev, 0x80, 0, 0, obuf, msg[j].len + 2, DW210X_WRITE_MSG); break; } break; } } } ret = num; unlock: mutex_unlock(&d->i2c_mutex); return ret; } static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); u8 obuf[0x40], ibuf[0x40]; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 1: switch (msg[0].addr) { case SU3000_STREAM_CTRL: obuf[0] = msg[0].buf[0] + 0x36; obuf[1] = 3; obuf[2] = 0; if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 0, 0) < 0) err("i2c transfer failed."); break; case DW2102_RC_QUERY: obuf[0] = 0x10; if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 2, 0) < 0) err("i2c transfer failed."); msg[0].buf[1] = ibuf[0]; msg[0].buf[0] = ibuf[1]; break; default: /* always i2c write*/ obuf[0] = 0x08; obuf[1] = msg[0].addr; obuf[2] = msg[0].len; memcpy(&obuf[3], msg[0].buf, msg[0].len); if (dvb_usb_generic_rw(d, obuf, 
msg[0].len + 3, ibuf, 1, 0) < 0) err("i2c transfer failed."); } break; case 2: /* always i2c read */ obuf[0] = 0x09; obuf[1] = msg[0].len; obuf[2] = msg[1].len; obuf[3] = msg[0].addr; memcpy(&obuf[4], msg[0].buf, msg[0].len); if (dvb_usb_generic_rw(d, obuf, msg[0].len + 4, ibuf, msg[1].len + 1, 0) < 0) err("i2c transfer failed."); memcpy(msg[1].buf, &ibuf[1], msg[1].len); break; default: warn("more than 2 i2c messages at a time is not handled yet."); break; } mutex_unlock(&d->i2c_mutex); return num; } static u32 dw210x_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm dw2102_i2c_algo = { .master_xfer = dw2102_i2c_transfer, .functionality = dw210x_i2c_func, }; static struct i2c_algorithm dw2102_serit_i2c_algo = { .master_xfer = dw2102_serit_i2c_transfer, .functionality = dw210x_i2c_func, }; static struct i2c_algorithm dw2102_earda_i2c_algo = { .master_xfer = dw2102_earda_i2c_transfer, .functionality = dw210x_i2c_func, }; static struct i2c_algorithm dw2104_i2c_algo = { .master_xfer = dw2104_i2c_transfer, .functionality = dw210x_i2c_func, }; static struct i2c_algorithm dw3101_i2c_algo = { .master_xfer = dw3101_i2c_transfer, .functionality = dw210x_i2c_func, }; static struct i2c_algorithm s6x0_i2c_algo = { .master_xfer = s6x0_i2c_transfer, .functionality = dw210x_i2c_func, }; static struct i2c_algorithm su3000_i2c_algo = { .master_xfer = su3000_i2c_transfer, .functionality = dw210x_i2c_func, }; static int dw210x_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) { int i; u8 ibuf[] = {0, 0}; u8 eeprom[256], eepromline[16]; for (i = 0; i < 256; i++) { if (dw210x_op_rw(d->udev, 0xb6, 0xa0 , i, ibuf, 2, DW210X_READ_MSG) < 0) { err("read eeprom failed."); return -1; } else { eepromline[i%16] = ibuf[0]; eeprom[i] = ibuf[0]; } if ((i % 16) == 15) { deb_xfer("%02x: ", i - 15); debug_dump(eepromline, 16, deb_xfer); } } memcpy(mac, eeprom + 8, 6); return 0; }; static int s6x0_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) { int 
i, ret; u8 ibuf[] = { 0 }, obuf[] = { 0 }; u8 eeprom[256], eepromline[16]; struct i2c_msg msg[] = { { .addr = 0xa0 >> 1, .flags = 0, .buf = obuf, .len = 1, }, { .addr = 0xa0 >> 1, .flags = I2C_M_RD, .buf = ibuf, .len = 1, } }; for (i = 0; i < 256; i++) { obuf[0] = i; ret = s6x0_i2c_transfer(&d->i2c_adap, msg, 2); if (ret != 2) { err("read eeprom failed."); return -1; } else { eepromline[i % 16] = ibuf[0]; eeprom[i] = ibuf[0]; } if ((i % 16) == 15) { deb_xfer("%02x: ", i - 15); debug_dump(eepromline, 16, deb_xfer); } } memcpy(mac, eeprom + 16, 6); return 0; }; static int su3000_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { static u8 command_start[] = {0x00}; static u8 command_stop[] = {0x01}; struct i2c_msg msg = { .addr = SU3000_STREAM_CTRL, .flags = 0, .buf = onoff ? command_start : command_stop, .len = 1 }; i2c_transfer(&adap->dev->i2c_adap, &msg, 1); return 0; } static int su3000_power_ctrl(struct dvb_usb_device *d, int i) { struct dw2102_state *state = (struct dw2102_state *)d->priv; u8 obuf[] = {0xde, 0}; info("%s: %d, initialized %d\n", __func__, i, state->initialized); if (i && !state->initialized) { state->initialized = 1; /* reset board */ dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0); } return 0; } static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) { int i; u8 obuf[] = { 0x1f, 0xf0 }; u8 ibuf[] = { 0 }; struct i2c_msg msg[] = { { .addr = 0x51, .flags = 0, .buf = obuf, .len = 2, }, { .addr = 0x51, .flags = I2C_M_RD, .buf = ibuf, .len = 1, } }; for (i = 0; i < 6; i++) { obuf[1] = 0xf0 + i; if (i2c_transfer(&d->i2c_adap, msg, 2) != 2) break; else mac[i] = ibuf[0]; } return 0; } static int su3000_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props, struct dvb_usb_device_description **desc, int *cold) { info("%s\n", __func__); *cold = 0; return 0; } static int dw210x_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage) { static u8 command_13v[] = {0x00, 0x01}; static u8 command_18v[] = 
{0x01, 0x01}; static u8 command_off[] = {0x00, 0x00}; struct i2c_msg msg = { .addr = DW2102_VOLTAGE_CTRL, .flags = 0, .buf = command_off, .len = 2, }; struct dvb_usb_adapter *udev_adap = (struct dvb_usb_adapter *)(fe->dvb->priv); if (voltage == SEC_VOLTAGE_18) msg.buf = command_18v; else if (voltage == SEC_VOLTAGE_13) msg.buf = command_13v; i2c_transfer(&udev_adap->dev->i2c_adap, &msg, 1); return 0; } static int s660_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage) { struct dvb_usb_adapter *d = (struct dvb_usb_adapter *)(fe->dvb->priv); struct dw2102_state *st = (struct dw2102_state *)d->dev->priv; dw210x_set_voltage(fe, voltage); if (st->old_set_voltage) st->old_set_voltage(fe, voltage); return 0; } static void dw210x_led_ctrl(struct dvb_frontend *fe, int offon) { static u8 led_off[] = { 0 }; static u8 led_on[] = { 1 }; struct i2c_msg msg = { .addr = DW2102_LED_CTRL, .flags = 0, .buf = led_off, .len = 1 }; struct dvb_usb_adapter *udev_adap = (struct dvb_usb_adapter *)(fe->dvb->priv); if (offon) msg.buf = led_on; i2c_transfer(&udev_adap->dev->i2c_adap, &msg, 1); } static int tt_s2_4600_read_status(struct dvb_frontend *fe, enum fe_status *status) { struct dvb_usb_adapter *d = (struct dvb_usb_adapter *)(fe->dvb->priv); struct dw2102_state *st = (struct dw2102_state *)d->dev->priv; int ret; ret = st->fe_read_status(fe, status); /* resync slave fifo when signal change from unlock to lock */ if ((*status & FE_HAS_LOCK) && (!st->last_lock)) su3000_streaming_ctrl(d, 1); st->last_lock = (*status & FE_HAS_LOCK) ? 
1 : 0; return ret; } static struct stv0299_config sharp_z0194a_config = { .demod_address = 0x68, .inittab = sharp_z0194a_inittab, .mclk = 88000000UL, .invert = 1, .skip_reinit = 0, .lock_output = STV0299_LOCKOUTPUT_1, .volt13_op0_op1 = STV0299_VOLT13_OP1, .min_delay_ms = 100, .set_symbol_rate = sharp_z0194a_set_symbol_rate, }; static struct cx24116_config dw2104_config = { .demod_address = 0x55, .mpg_clk_pos_pol = 0x01, }; static struct si21xx_config serit_sp1511lhb_config = { .demod_address = 0x68, .min_delay_ms = 100, }; static struct tda10023_config dw3101_tda10023_config = { .demod_address = 0x0c, .invert = 1, }; static struct mt312_config zl313_config = { .demod_address = 0x0e, }; static struct ds3000_config dw2104_ds3000_config = { .demod_address = 0x68, }; static struct ts2020_config dw2104_ts2020_config = { .tuner_address = 0x60, .clk_out_div = 1, .frequency_div = 1060000, }; static struct ds3000_config s660_ds3000_config = { .demod_address = 0x68, .ci_mode = 1, .set_lock_led = dw210x_led_ctrl, }; static struct ts2020_config s660_ts2020_config = { .tuner_address = 0x60, .clk_out_div = 1, .frequency_div = 1146000, }; static struct stv0900_config dw2104a_stv0900_config = { .demod_address = 0x6a, .demod_mode = 0, .xtal = 27000000, .clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */ .diseqc_mode = 2,/* 2/3 PWM */ .tun1_maddress = 0,/* 0x60 */ .tun1_adc = 0,/* 2 Vpp */ .path1_mode = 3, }; static struct stb6100_config dw2104a_stb6100_config = { .tuner_address = 0x60, .refclock = 27000000, }; static struct stv0900_config dw2104_stv0900_config = { .demod_address = 0x68, .demod_mode = 0, .xtal = 8000000, .clkmode = 3, .diseqc_mode = 2, .tun1_maddress = 0, .tun1_adc = 1,/* 1 Vpp */ .path1_mode = 3, }; static struct stv6110_config dw2104_stv6110_config = { .i2c_address = 0x60, .mclk = 16000000, .clk_div = 1, }; static struct stv0900_config prof_7500_stv0900_config = { .demod_address = 0x6a, .demod_mode = 0, .xtal = 27000000, .clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */ 
.diseqc_mode = 2,/* 2/3 PWM */ .tun1_maddress = 0,/* 0x60 */ .tun1_adc = 0,/* 2 Vpp */ .path1_mode = 3, .tun1_type = 3, .set_lock_led = dw210x_led_ctrl, }; static struct ds3000_config su3000_ds3000_config = { .demod_address = 0x68, .ci_mode = 1, .set_lock_led = dw210x_led_ctrl, }; static struct cxd2820r_config cxd2820r_config = { .i2c_address = 0x6c, /* (0xd8 >> 1) */ .ts_mode = 0x38, .ts_clock_inv = 1, }; static struct tda18271_config tda18271_config = { .output_opt = TDA18271_OUTPUT_LT_OFF, .gate = TDA18271_GATE_DIGITAL, }; static const struct m88ds3103_config tt_s2_4600_m88ds3103_config = { .i2c_addr = 0x68, .clock = 27000000, .i2c_wr_max = 33, .ts_mode = M88DS3103_TS_CI, .ts_clk = 16000, .ts_clk_pol = 0, .spec_inv = 0, .agc_inv = 0, .clock_out = M88DS3103_CLOCK_OUT_ENABLED, .envelope_mode = 0, .agc = 0x99, .lnb_hv_pol = 1, .lnb_en_pol = 0, }; static u8 m88rs2000_inittab[] = { DEMOD_WRITE, 0x9a, 0x30, DEMOD_WRITE, 0x00, 0x01, WRITE_DELAY, 0x19, 0x00, DEMOD_WRITE, 0x00, 0x00, DEMOD_WRITE, 0x9a, 0xb0, DEMOD_WRITE, 0x81, 0xc1, DEMOD_WRITE, 0x81, 0x81, DEMOD_WRITE, 0x86, 0xc6, DEMOD_WRITE, 0x9a, 0x30, DEMOD_WRITE, 0xf0, 0x80, DEMOD_WRITE, 0xf1, 0xbf, DEMOD_WRITE, 0xb0, 0x45, DEMOD_WRITE, 0xb2, 0x01, DEMOD_WRITE, 0x9a, 0xb0, 0xff, 0xaa, 0xff }; static struct m88rs2000_config s421_m88rs2000_config = { .demod_addr = 0x68, .inittab = m88rs2000_inittab, }; static int dw2104_frontend_attach(struct dvb_usb_adapter *d) { struct dvb_tuner_ops *tuner_ops = NULL; if (demod_probe & 4) { d->fe_adap[0].fe = dvb_attach(stv0900_attach, &dw2104a_stv0900_config, &d->dev->i2c_adap, 0); if (d->fe_adap[0].fe != NULL) { if (dvb_attach(stb6100_attach, d->fe_adap[0].fe, &dw2104a_stb6100_config, &d->dev->i2c_adap)) { tuner_ops = &d->fe_adap[0].fe->ops.tuner_ops; tuner_ops->set_frequency = stb6100_set_freq; tuner_ops->get_frequency = stb6100_get_freq; tuner_ops->set_bandwidth = stb6100_set_bandw; tuner_ops->get_bandwidth = stb6100_get_bandw; d->fe_adap[0].fe->ops.set_voltage = 
dw210x_set_voltage; info("Attached STV0900+STB6100!\n"); return 0; } } } if (demod_probe & 2) { d->fe_adap[0].fe = dvb_attach(stv0900_attach, &dw2104_stv0900_config, &d->dev->i2c_adap, 0); if (d->fe_adap[0].fe != NULL) { if (dvb_attach(stv6110_attach, d->fe_adap[0].fe, &dw2104_stv6110_config, &d->dev->i2c_adap)) { d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached STV0900+STV6110A!\n"); return 0; } } } if (demod_probe & 1) { d->fe_adap[0].fe = dvb_attach(cx24116_attach, &dw2104_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe != NULL) { d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached cx24116!\n"); return 0; } } d->fe_adap[0].fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe != NULL) { dvb_attach(ts2020_attach, d->fe_adap[0].fe, &dw2104_ts2020_config, &d->dev->i2c_adap); d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached DS3000!\n"); return 0; } return -EIO; } static struct dvb_usb_device_properties dw2102_properties; static struct dvb_usb_device_properties dw2104_properties; static struct dvb_usb_device_properties s6x0_properties; static int dw2102_frontend_attach(struct dvb_usb_adapter *d) { if (dw2102_properties.i2c_algo == &dw2102_serit_i2c_algo) { /*dw2102_properties.adapter->tuner_attach = NULL;*/ d->fe_adap[0].fe = dvb_attach(si21xx_attach, &serit_sp1511lhb_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe != NULL) { d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached si21xx!\n"); return 0; } } if (dw2102_properties.i2c_algo == &dw2102_earda_i2c_algo) { d->fe_adap[0].fe = dvb_attach(stv0288_attach, &earda_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe != NULL) { if (dvb_attach(stb6000_attach, d->fe_adap[0].fe, 0x61, &d->dev->i2c_adap)) { d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached stv0288!\n"); return 0; } } } if (dw2102_properties.i2c_algo == &dw2102_i2c_algo) { /*dw2102_properties.adapter->tuner_attach = 
dw2102_tuner_attach;*/ d->fe_adap[0].fe = dvb_attach(stv0299_attach, &sharp_z0194a_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe != NULL) { d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached stv0299!\n"); return 0; } } return -EIO; } static int dw3101_frontend_attach(struct dvb_usb_adapter *d) { d->fe_adap[0].fe = dvb_attach(tda10023_attach, &dw3101_tda10023_config, &d->dev->i2c_adap, 0x48); if (d->fe_adap[0].fe != NULL) { info("Attached tda10023!\n"); return 0; } return -EIO; } static int zl100313_frontend_attach(struct dvb_usb_adapter *d) { d->fe_adap[0].fe = dvb_attach(mt312_attach, &zl313_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe != NULL) { if (dvb_attach(zl10039_attach, d->fe_adap[0].fe, 0x60, &d->dev->i2c_adap)) { d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached zl100313+zl10039!\n"); return 0; } } return -EIO; } static int stv0288_frontend_attach(struct dvb_usb_adapter *d) { u8 obuf[] = {7, 1}; d->fe_adap[0].fe = dvb_attach(stv0288_attach, &earda_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe == NULL) return -EIO; if (NULL == dvb_attach(stb6000_attach, d->fe_adap[0].fe, 0x61, &d->dev->i2c_adap)) return -EIO; d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); info("Attached stv0288+stb6000!\n"); return 0; } static int ds3000_frontend_attach(struct dvb_usb_adapter *d) { struct dw2102_state *st = d->dev->priv; u8 obuf[] = {7, 1}; d->fe_adap[0].fe = dvb_attach(ds3000_attach, &s660_ds3000_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe == NULL) return -EIO; dvb_attach(ts2020_attach, d->fe_adap[0].fe, &s660_ts2020_config, &d->dev->i2c_adap); st->old_set_voltage = d->fe_adap[0].fe->ops.set_voltage; d->fe_adap[0].fe->ops.set_voltage = s660_set_voltage; dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); info("Attached ds3000+ts2020!\n"); return 0; } static int prof_7500_frontend_attach(struct dvb_usb_adapter *d) { u8 obuf[] = 
{7, 1}; d->fe_adap[0].fe = dvb_attach(stv0900_attach, &prof_7500_stv0900_config, &d->dev->i2c_adap, 0); if (d->fe_adap[0].fe == NULL) return -EIO; d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); info("Attached STV0900+STB6100A!\n"); return 0; } static int su3000_frontend_attach(struct dvb_usb_adapter *d) { u8 obuf[3] = { 0xe, 0x80, 0 }; u8 ibuf[] = { 0 }; if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) err("command 0x0e transfer failed."); obuf[0] = 0xe; obuf[1] = 0x02; obuf[2] = 1; if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) err("command 0x0e transfer failed."); msleep(300); obuf[0] = 0xe; obuf[1] = 0x83; obuf[2] = 0; if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) err("command 0x0e transfer failed."); obuf[0] = 0xe; obuf[1] = 0x83; obuf[2] = 1; if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) err("command 0x0e transfer failed."); obuf[0] = 0x51; if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0) err("command 0x51 transfer failed."); d->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe == NULL) return -EIO; if (dvb_attach(ts2020_attach, d->fe_adap[0].fe, &dw2104_ts2020_config, &d->dev->i2c_adap)) { info("Attached DS3000/TS2020!\n"); return 0; } info("Failed to attach DS3000/TS2020!\n"); return -EIO; } static int t220_frontend_attach(struct dvb_usb_adapter *d) { u8 obuf[3] = { 0xe, 0x87, 0 }; u8 ibuf[] = { 0 }; if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) err("command 0x0e transfer failed."); obuf[0] = 0xe; obuf[1] = 0x86; obuf[2] = 1; if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) err("command 0x0e transfer failed."); obuf[0] = 0xe; obuf[1] = 0x80; obuf[2] = 0; if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) err("command 0x0e transfer failed."); msleep(50); obuf[0] = 0xe; obuf[1] = 0x80; obuf[2] = 1; if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) 
err("command 0x0e transfer failed."); obuf[0] = 0x51; if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0) err("command 0x51 transfer failed."); d->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config, &d->dev->i2c_adap, NULL); if (d->fe_adap[0].fe != NULL) { if (dvb_attach(tda18271_attach, d->fe_adap[0].fe, 0x60, &d->dev->i2c_adap, &tda18271_config)) { info("Attached TDA18271HD/CXD2820R!\n"); return 0; } } info("Failed to attach TDA18271HD/CXD2820R!\n"); return -EIO; } static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d) { u8 obuf[] = { 0x51 }; u8 ibuf[] = { 0 }; if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0) err("command 0x51 transfer failed."); d->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &s421_m88rs2000_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe == NULL) return -EIO; if (dvb_attach(ts2020_attach, d->fe_adap[0].fe, &dw2104_ts2020_config, &d->dev->i2c_adap)) { info("Attached RS2000/TS2020!\n"); return 0; } info("Failed to attach RS2000/TS2020!\n"); return -EIO; } static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap) { struct dvb_usb_device *d = adap->dev; struct dw2102_state *state = d->priv; u8 obuf[3] = { 0xe, 0x80, 0 }; u8 ibuf[] = { 0 }; struct i2c_adapter *i2c_adapter; struct i2c_client *client; struct i2c_board_info info; struct ts2020_config ts2020_config = {}; if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0) err("command 0x0e transfer failed."); obuf[0] = 0xe; obuf[1] = 0x02; obuf[2] = 1; if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0) err("command 0x0e transfer failed."); msleep(300); obuf[0] = 0xe; obuf[1] = 0x83; obuf[2] = 0; if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0) err("command 0x0e transfer failed."); obuf[0] = 0xe; obuf[1] = 0x83; obuf[2] = 1; if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0) err("command 0x0e transfer failed."); obuf[0] = 0x51; if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 1, 0) < 0) err("command 0x51 transfer failed."); memset(&info, 0, sizeof(struct 
i2c_board_info)); adap->fe_adap[0].fe = dvb_attach(m88ds3103_attach, &tt_s2_4600_m88ds3103_config, &d->i2c_adap, &i2c_adapter); if (adap->fe_adap[0].fe == NULL) return -ENODEV; /* attach tuner */ ts2020_config.fe = adap->fe_adap[0].fe; strlcpy(info.type, "ts2022", I2C_NAME_SIZE); info.addr = 0x60; info.platform_data = &ts2020_config; request_module("ts2020"); client = i2c_new_device(i2c_adapter, &info); if (client == NULL || client->dev.driver == NULL) { dvb_frontend_detach(adap->fe_adap[0].fe); return -ENODEV; } if (!try_module_get(client->dev.driver->owner)) { i2c_unregister_device(client); dvb_frontend_detach(adap->fe_adap[0].fe); return -ENODEV; } /* delegate signal strength measurement to tuner */ adap->fe_adap[0].fe->ops.read_signal_strength = adap->fe_adap[0].fe->ops.tuner_ops.get_rf_strength; state->i2c_client_tuner = client; /* hook fe: need to resync the slave fifo when signal locks */ state->fe_read_status = adap->fe_adap[0].fe->ops.read_status; adap->fe_adap[0].fe->ops.read_status = tt_s2_4600_read_status; state->last_lock = 0; return 0; } static int dw2102_tuner_attach(struct dvb_usb_adapter *adap) { dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60, &adap->dev->i2c_adap, DVB_PLL_OPERA1); return 0; } static int dw3101_tuner_attach(struct dvb_usb_adapter *adap) { dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60, &adap->dev->i2c_adap, DVB_PLL_TUA6034); return 0; } static int dw2102_rc_query(struct dvb_usb_device *d) { u8 key[2]; struct i2c_msg msg = { .addr = DW2102_RC_QUERY, .flags = I2C_M_RD, .buf = key, .len = 2 }; if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) { if (msg.buf[0] != 0xff) { deb_rc("%s: rc code: %x, %x\n", __func__, key[0], key[1]); rc_keydown(d->rc_dev, RC_TYPE_UNKNOWN, key[0], 0); } } return 0; } static int prof_rc_query(struct dvb_usb_device *d) { u8 key[2]; struct i2c_msg msg = { .addr = DW2102_RC_QUERY, .flags = I2C_M_RD, .buf = key, .len = 2 }; if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) 
{ if (msg.buf[0] != 0xff) { deb_rc("%s: rc code: %x, %x\n", __func__, key[0], key[1]); rc_keydown(d->rc_dev, RC_TYPE_UNKNOWN, key[0]^0xff, 0); } } return 0; } static int su3000_rc_query(struct dvb_usb_device *d) { u8 key[2]; struct i2c_msg msg = { .addr = DW2102_RC_QUERY, .flags = I2C_M_RD, .buf = key, .len = 2 }; if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) { if (msg.buf[0] != 0xff) { deb_rc("%s: rc code: %x, %x\n", __func__, key[0], key[1]); rc_keydown(d->rc_dev, RC_TYPE_RC5, RC_SCANCODE_RC5(key[1], key[0]), 0); } } return 0; } enum dw2102_table_entry { CYPRESS_DW2102, CYPRESS_DW2101, CYPRESS_DW2104, TEVII_S650, TERRATEC_CINERGY_S, CYPRESS_DW3101, TEVII_S630, PROF_1100, TEVII_S660, PROF_7500, GENIATECH_SU3000, TERRATEC_CINERGY_S2, TEVII_S480_1, TEVII_S480_2, X3M_SPC1400HD, TEVII_S421, TEVII_S632, TERRATEC_CINERGY_S2_R2, GOTVIEW_SAT_HD, GENIATECH_T220, TECHNOTREND_S2_4600, TEVII_S482_1, TEVII_S482_2, }; static struct usb_device_id dw2102_table[] = { [CYPRESS_DW2102] = {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW2102)}, [CYPRESS_DW2101] = {USB_DEVICE(USB_VID_CYPRESS, 0x2101)}, [CYPRESS_DW2104] = {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW2104)}, [TEVII_S650] = {USB_DEVICE(0x9022, USB_PID_TEVII_S650)}, [TERRATEC_CINERGY_S] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_CINERGY_S)}, [CYPRESS_DW3101] = {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW3101)}, [TEVII_S630] = {USB_DEVICE(0x9022, USB_PID_TEVII_S630)}, [PROF_1100] = {USB_DEVICE(0x3011, USB_PID_PROF_1100)}, [TEVII_S660] = {USB_DEVICE(0x9022, USB_PID_TEVII_S660)}, [PROF_7500] = {USB_DEVICE(0x3034, 0x7500)}, [GENIATECH_SU3000] = {USB_DEVICE(0x1f4d, 0x3000)}, [TERRATEC_CINERGY_S2] = {USB_DEVICE(USB_VID_TERRATEC, 0x00a8)}, [TEVII_S480_1] = {USB_DEVICE(0x9022, USB_PID_TEVII_S480_1)}, [TEVII_S480_2] = {USB_DEVICE(0x9022, USB_PID_TEVII_S480_2)}, [X3M_SPC1400HD] = {USB_DEVICE(0x1f4d, 0x3100)}, [TEVII_S421] = {USB_DEVICE(0x9022, USB_PID_TEVII_S421)}, [TEVII_S632] = {USB_DEVICE(0x9022, USB_PID_TEVII_S632)}, 
[TERRATEC_CINERGY_S2_R2] = {USB_DEVICE(USB_VID_TERRATEC, 0x00b0)}, [GOTVIEW_SAT_HD] = {USB_DEVICE(0x1FE1, USB_PID_GOTVIEW_SAT_HD)}, [GENIATECH_T220] = {USB_DEVICE(0x1f4d, 0xD220)}, [TECHNOTREND_S2_4600] = {USB_DEVICE(USB_VID_TECHNOTREND, USB_PID_TECHNOTREND_CONNECT_S2_4600)}, [TEVII_S482_1] = {USB_DEVICE(0x9022, 0xd483)}, [TEVII_S482_2] = {USB_DEVICE(0x9022, 0xd484)}, { } }; MODULE_DEVICE_TABLE(usb, dw2102_table); static int dw2102_load_firmware(struct usb_device *dev, const struct firmware *frmwr) { u8 *b, *p; int ret = 0, i; u8 reset; u8 reset16[] = {0, 0, 0, 0, 0, 0, 0}; const struct firmware *fw; switch (le16_to_cpu(dev->descriptor.idProduct)) { case 0x2101: ret = request_firmware(&fw, DW2101_FIRMWARE, &dev->dev); if (ret != 0) { err(err_str, DW2101_FIRMWARE); return ret; } break; default: fw = frmwr; break; } info("start downloading DW210X firmware"); p = kmalloc(fw->size, GFP_KERNEL); reset = 1; /*stop the CPU*/ dw210x_op_rw(dev, 0xa0, 0x7f92, 0, &reset, 1, DW210X_WRITE_MSG); dw210x_op_rw(dev, 0xa0, 0xe600, 0, &reset, 1, DW210X_WRITE_MSG); if (p != NULL) { memcpy(p, fw->data, fw->size); for (i = 0; i < fw->size; i += 0x40) { b = (u8 *) p + i; if (dw210x_op_rw(dev, 0xa0, i, 0, b , 0x40, DW210X_WRITE_MSG) != 0x40) { err("error while transferring firmware"); ret = -EINVAL; break; } } /* restart the CPU */ reset = 0; if (ret || dw210x_op_rw(dev, 0xa0, 0x7f92, 0, &reset, 1, DW210X_WRITE_MSG) != 1) { err("could not restart the USB controller CPU."); ret = -EINVAL; } if (ret || dw210x_op_rw(dev, 0xa0, 0xe600, 0, &reset, 1, DW210X_WRITE_MSG) != 1) { err("could not restart the USB controller CPU."); ret = -EINVAL; } /* init registers */ switch (le16_to_cpu(dev->descriptor.idProduct)) { case USB_PID_TEVII_S650: dw2104_properties.rc.core.rc_codes = RC_MAP_TEVII_NEC; case USB_PID_DW2104: reset = 1; dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1, DW210X_WRITE_MSG); /* break omitted intentionally */ case USB_PID_DW3101: reset = 0; dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 
0, DW210X_WRITE_MSG); break; case USB_PID_CINERGY_S: case USB_PID_DW2102: dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0, DW210X_WRITE_MSG); dw210x_op_rw(dev, 0xb9, 0x0000, 0, &reset16[0], 2, DW210X_READ_MSG); /* check STV0299 frontend */ dw210x_op_rw(dev, 0xb5, 0, 0, &reset16[0], 2, DW210X_READ_MSG); if ((reset16[0] == 0xa1) || (reset16[0] == 0x80)) { dw2102_properties.i2c_algo = &dw2102_i2c_algo; dw2102_properties.adapter->fe[0].tuner_attach = &dw2102_tuner_attach; break; } else { /* check STV0288 frontend */ reset16[0] = 0xd0; reset16[1] = 1; reset16[2] = 0; dw210x_op_rw(dev, 0xc2, 0, 0, &reset16[0], 3, DW210X_WRITE_MSG); dw210x_op_rw(dev, 0xc3, 0xd1, 0, &reset16[0], 3, DW210X_READ_MSG); if (reset16[2] == 0x11) { dw2102_properties.i2c_algo = &dw2102_earda_i2c_algo; break; } } case 0x2101: dw210x_op_rw(dev, 0xbc, 0x0030, 0, &reset16[0], 2, DW210X_READ_MSG); dw210x_op_rw(dev, 0xba, 0x0000, 0, &reset16[0], 7, DW210X_READ_MSG); dw210x_op_rw(dev, 0xba, 0x0000, 0, &reset16[0], 7, DW210X_READ_MSG); dw210x_op_rw(dev, 0xb9, 0x0000, 0, &reset16[0], 2, DW210X_READ_MSG); break; } msleep(100); kfree(p); } return ret; } static struct dvb_usb_device_properties dw2102_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = DW2102_FIRMWARE, .no_reconnect = 1, .i2c_algo = &dw2102_serit_i2c_algo, .rc.core = { .rc_interval = 150, .rc_codes = RC_MAP_DM1105_NEC, .module_name = "dw2102", .allowed_protos = RC_BIT_NEC, .rc_query = dw2102_rc_query, }, .generic_bulk_ctrl_endpoint = 0x81, /* parameter for the MPEG2-data transfer */ .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = dw210x_read_mac_address, .adapter = { { .num_frontends = 1, .fe = {{ .frontend_attach = dw2102_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, }}, } }, .num_device_descs = 3, .devices = { {"DVBWorld DVB-S 2102 USB2.0", {&dw2102_table[CYPRESS_DW2102], NULL}, {NULL}, }, 
{"DVBWorld DVB-S 2101 USB2.0", {&dw2102_table[CYPRESS_DW2101], NULL}, {NULL}, }, {"TerraTec Cinergy S USB", {&dw2102_table[TERRATEC_CINERGY_S], NULL}, {NULL}, }, } }; static struct dvb_usb_device_properties dw2104_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = DW2104_FIRMWARE, .no_reconnect = 1, .i2c_algo = &dw2104_i2c_algo, .rc.core = { .rc_interval = 150, .rc_codes = RC_MAP_DM1105_NEC, .module_name = "dw2102", .allowed_protos = RC_BIT_NEC, .rc_query = dw2102_rc_query, }, .generic_bulk_ctrl_endpoint = 0x81, /* parameter for the MPEG2-data transfer */ .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = dw210x_read_mac_address, .adapter = { { .num_frontends = 1, .fe = {{ .frontend_attach = dw2104_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, }}, } }, .num_device_descs = 2, .devices = { { "DVBWorld DW2104 USB2.0", {&dw2102_table[CYPRESS_DW2104], NULL}, {NULL}, }, { "TeVii S650 USB2.0", {&dw2102_table[TEVII_S650], NULL}, {NULL}, }, } }; static struct dvb_usb_device_properties dw3101_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = DW3101_FIRMWARE, .no_reconnect = 1, .i2c_algo = &dw3101_i2c_algo, .rc.core = { .rc_interval = 150, .rc_codes = RC_MAP_DM1105_NEC, .module_name = "dw2102", .allowed_protos = RC_BIT_NEC, .rc_query = dw2102_rc_query, }, .generic_bulk_ctrl_endpoint = 0x81, /* parameter for the MPEG2-data transfer */ .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = dw210x_read_mac_address, .adapter = { { .num_frontends = 1, .fe = {{ .frontend_attach = dw3101_frontend_attach, .tuner_attach = dw3101_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, }}, } }, .num_device_descs = 1, .devices = { { "DVBWorld DVB-C 3101 USB2.0", {&dw2102_table[CYPRESS_DW3101], NULL}, {NULL}, }, } }; 
static struct dvb_usb_device_properties s6x0_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = sizeof(struct dw2102_state), .firmware = S630_FIRMWARE, .no_reconnect = 1, .i2c_algo = &s6x0_i2c_algo, .rc.core = { .rc_interval = 150, .rc_codes = RC_MAP_TEVII_NEC, .module_name = "dw2102", .allowed_protos = RC_BIT_NEC, .rc_query = dw2102_rc_query, }, .generic_bulk_ctrl_endpoint = 0x81, .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = s6x0_read_mac_address, .adapter = { { .num_frontends = 1, .fe = {{ .frontend_attach = zl100313_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, }}, } }, .num_device_descs = 1, .devices = { {"TeVii S630 USB", {&dw2102_table[TEVII_S630], NULL}, {NULL}, }, } }; static struct dvb_usb_device_properties *p1100; static struct dvb_usb_device_description d1100 = { "Prof 1100 USB ", {&dw2102_table[PROF_1100], NULL}, {NULL}, }; static struct dvb_usb_device_properties *s660; static struct dvb_usb_device_description d660 = { "TeVii S660 USB", {&dw2102_table[TEVII_S660], NULL}, {NULL}, }; static struct dvb_usb_device_description d480_1 = { "TeVii S480.1 USB", {&dw2102_table[TEVII_S480_1], NULL}, {NULL}, }; static struct dvb_usb_device_description d480_2 = { "TeVii S480.2 USB", {&dw2102_table[TEVII_S480_2], NULL}, {NULL}, }; static struct dvb_usb_device_properties *p7500; static struct dvb_usb_device_description d7500 = { "Prof 7500 USB DVB-S2", {&dw2102_table[PROF_7500], NULL}, {NULL}, }; static struct dvb_usb_device_properties *s421; static struct dvb_usb_device_description d421 = { "TeVii S421 PCI", {&dw2102_table[TEVII_S421], NULL}, {NULL}, }; static struct dvb_usb_device_description d632 = { "TeVii S632 USB", {&dw2102_table[TEVII_S632], NULL}, {NULL}, }; static struct dvb_usb_device_properties su3000_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = 
sizeof(struct dw2102_state), .power_ctrl = su3000_power_ctrl, .num_adapters = 1, .identify_state = su3000_identify_state, .i2c_algo = &su3000_i2c_algo, .rc.core = { .rc_interval = 150, .rc_codes = RC_MAP_SU3000, .module_name = "dw2102", .allowed_protos = RC_BIT_RC5, .rc_query = su3000_rc_query, }, .read_mac_address = su3000_read_mac_address, .generic_bulk_ctrl_endpoint = 0x01, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = su3000_streaming_ctrl, .frontend_attach = su3000_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } } }}, } }, .num_device_descs = 5, .devices = { { "SU3000HD DVB-S USB2.0", { &dw2102_table[GENIATECH_SU3000], NULL }, { NULL }, }, { "Terratec Cinergy S2 USB HD", { &dw2102_table[TERRATEC_CINERGY_S2], NULL }, { NULL }, }, { "X3M TV SPC1400HD PCI", { &dw2102_table[X3M_SPC1400HD], NULL }, { NULL }, }, { "Terratec Cinergy S2 USB HD Rev.2", { &dw2102_table[TERRATEC_CINERGY_S2_R2], NULL }, { NULL }, }, { "GOTVIEW Satellite HD", { &dw2102_table[GOTVIEW_SAT_HD], NULL }, { NULL }, }, } }; static struct dvb_usb_device_properties t220_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = sizeof(struct dw2102_state), .power_ctrl = su3000_power_ctrl, .num_adapters = 1, .identify_state = su3000_identify_state, .i2c_algo = &su3000_i2c_algo, .rc.core = { .rc_interval = 150, .rc_codes = RC_MAP_SU3000, .module_name = "dw2102", .allowed_protos = RC_BIT_RC5, .rc_query = su3000_rc_query, }, .read_mac_address = su3000_read_mac_address, .generic_bulk_ctrl_endpoint = 0x01, .adapter = { { .num_frontends = 1, .fe = { { .streaming_ctrl = su3000_streaming_ctrl, .frontend_attach = t220_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } } } }, } }, .num_device_descs = 1, .devices = { { "Geniatech T220 DVB-T/T2 USB2.0", { &dw2102_table[GENIATECH_T220], NULL }, { NULL }, }, } }; static struct 
dvb_usb_device_properties tt_s2_4600_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = sizeof(struct dw2102_state), .power_ctrl = su3000_power_ctrl, .num_adapters = 1, .identify_state = su3000_identify_state, .i2c_algo = &su3000_i2c_algo, .rc.core = { .rc_interval = 250, .rc_codes = RC_MAP_TT_1500, .module_name = "dw2102", .allowed_protos = RC_BIT_RC5, .rc_query = su3000_rc_query, }, .read_mac_address = su3000_read_mac_address, .generic_bulk_ctrl_endpoint = 0x01, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = su3000_streaming_ctrl, .frontend_attach = tt_s2_4600_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } } } }, } }, .num_device_descs = 3, .devices = { { "TechnoTrend TT-connect S2-4600", { &dw2102_table[TECHNOTREND_S2_4600], NULL }, { NULL }, }, { "TeVii S482 (tuner 1)", { &dw2102_table[TEVII_S482_1], NULL }, { NULL }, }, { "TeVii S482 (tuner 2)", { &dw2102_table[TEVII_S482_2], NULL }, { NULL }, }, } }; static int dw2102_probe(struct usb_interface *intf, const struct usb_device_id *id) { p1100 = kmemdup(&s6x0_properties, sizeof(struct dvb_usb_device_properties), GFP_KERNEL); if (!p1100) return -ENOMEM; /* copy default structure */ /* fill only different fields */ p1100->firmware = P1100_FIRMWARE; p1100->devices[0] = d1100; p1100->rc.core.rc_query = prof_rc_query; p1100->rc.core.rc_codes = RC_MAP_TBS_NEC; p1100->adapter->fe[0].frontend_attach = stv0288_frontend_attach; s660 = kmemdup(&s6x0_properties, sizeof(struct dvb_usb_device_properties), GFP_KERNEL); if (!s660) { kfree(p1100); return -ENOMEM; } s660->firmware = S660_FIRMWARE; s660->num_device_descs = 3; s660->devices[0] = d660; s660->devices[1] = d480_1; s660->devices[2] = d480_2; s660->adapter->fe[0].frontend_attach = ds3000_frontend_attach; p7500 = kmemdup(&s6x0_properties, sizeof(struct dvb_usb_device_properties), GFP_KERNEL); if (!p7500) { kfree(p1100); kfree(s660); return 
-ENOMEM; } p7500->firmware = P7500_FIRMWARE; p7500->devices[0] = d7500; p7500->rc.core.rc_query = prof_rc_query; p7500->rc.core.rc_codes = RC_MAP_TBS_NEC; p7500->adapter->fe[0].frontend_attach = prof_7500_frontend_attach; s421 = kmemdup(&su3000_properties, sizeof(struct dvb_usb_device_properties), GFP_KERNEL); if (!s421) { kfree(p1100); kfree(s660); kfree(p7500); return -ENOMEM; } s421->num_device_descs = 2; s421->devices[0] = d421; s421->devices[1] = d632; s421->adapter->fe[0].frontend_attach = m88rs2000_frontend_attach; if (0 == dvb_usb_device_init(intf, &dw2102_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &dw2104_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &dw3101_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &s6x0_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, p1100, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, s660, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, p7500, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, s421, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &su3000_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &t220_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &tt_s2_4600_properties, THIS_MODULE, NULL, adapter_nr)) return 0; return -ENODEV; } static void dw2102_disconnect(struct usb_interface *intf) { struct dvb_usb_device *d = usb_get_intfdata(intf); struct dw2102_state *st = (struct dw2102_state *)d->priv; struct i2c_client *client; /* remove I2C client for tuner */ client = st->i2c_client_tuner; if (client) { module_put(client->dev.driver->owner); i2c_unregister_device(client); } dvb_usb_device_exit(intf); } static struct usb_driver dw2102_driver = { .name = "dw2102", .probe = dw2102_probe, .disconnect = dw2102_disconnect, .id_table = dw2102_table, }; module_usb_driver(dw2102_driver); 
/* Module metadata: author, supported hardware, and the firmware images
 * that must be present for the download_firmware callbacks above. */
MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by");
MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104,"
			" DVB-C 3101 USB2.0,"
			" TeVii S600, S630, S650, S660, S480, S421, S632"
			" Prof 1100, 7500 USB2.0,"
			" Geniatech SU3000, T220,"
			" TechnoTrend S2-4600 devices");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(DW2101_FIRMWARE);
MODULE_FIRMWARE(DW2102_FIRMWARE);
MODULE_FIRMWARE(DW2104_FIRMWARE);
MODULE_FIRMWARE(DW3101_FIRMWARE);
MODULE_FIRMWARE(S630_FIRMWARE);
MODULE_FIRMWARE(S660_FIRMWARE);
MODULE_FIRMWARE(P1100_FIRMWARE);
MODULE_FIRMWARE(P7500_FIRMWARE);
gpl-2.0
antmicro/linux-sunxi
drivers/regulator/helpers.c
589
12015
/*
 * helpers.c -- Voltage/Current Regulator framework helper functions.
 *
 * Copyright 2007, 2008 Wolfson Microelectronics PLC.
 * Copyright 2008 SlimLogic Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/driver.h>
#include <linux/module.h>

/**
 * regulator_is_enabled_regmap - standard is_enabled() for regmap users
 *
 * @rdev: regulator to operate on
 *
 * Regulators that use regmap for their register I/O can set the
 * enable_reg and enable_mask fields in their descriptor and then use
 * this as their is_enabled operation, saving some code.
 *
 * Returns 1 if enabled, 0 if disabled, or a negative errno from
 * regmap_read().
 */
int regulator_is_enabled_regmap(struct regulator_dev *rdev)
{
	unsigned int val;
	int ret;

	ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
	if (ret != 0)
		return ret;

	/* Only the bits covered by enable_mask are meaningful. */
	val &= rdev->desc->enable_mask;

	if (rdev->desc->enable_is_inverted) {
		/* Inverted logic: matching the enable value means OFF. */
		if (rdev->desc->enable_val)
			return val != rdev->desc->enable_val;
		return val == 0;
	} else {
		/* enable_val == 0 means "any bit set under the mask". */
		if (rdev->desc->enable_val)
			return val == rdev->desc->enable_val;
		return val != 0;
	}
}
EXPORT_SYMBOL_GPL(regulator_is_enabled_regmap);

/**
 * regulator_enable_regmap - standard enable() for regmap users
 *
 * @rdev: regulator to operate on
 *
 * Regulators that use regmap for their register I/O can set the
 * enable_reg and enable_mask fields in their descriptor and then use
 * this as their enable() operation, saving some code.
 */
int regulator_enable_regmap(struct regulator_dev *rdev)
{
	unsigned int val;

	if (rdev->desc->enable_is_inverted) {
		/* Inverted: enabling means writing the disable value. */
		val = rdev->desc->disable_val;
	} else {
		val = rdev->desc->enable_val;
		if (!val)
			/* Default: set all bits under enable_mask. */
			val = rdev->desc->enable_mask;
	}

	return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
				  rdev->desc->enable_mask, val);
}
EXPORT_SYMBOL_GPL(regulator_enable_regmap);

/**
 * regulator_disable_regmap - standard disable() for regmap users
 *
 * @rdev: regulator to operate on
 *
 * Regulators that use regmap for their register I/O can set the
 * enable_reg and enable_mask fields in their descriptor and then use
 * this as their disable() operation, saving some code.
 */
int regulator_disable_regmap(struct regulator_dev *rdev)
{
	unsigned int val;

	if (rdev->desc->enable_is_inverted) {
		/* Inverted: disabling means writing the enable value. */
		val = rdev->desc->enable_val;
		if (!val)
			val = rdev->desc->enable_mask;
	} else {
		val = rdev->desc->disable_val;
	}

	return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
				  rdev->desc->enable_mask, val);
}
EXPORT_SYMBOL_GPL(regulator_disable_regmap);

/**
 * regulator_get_voltage_sel_regmap - standard get_voltage_sel for regmap users
 *
 * @rdev: regulator to operate on
 *
 * Regulators that use regmap for their register I/O can set the
 * vsel_reg and vsel_mask fields in their descriptor and then use this
 * as their get_voltage_vsel operation, saving some code.
 */
int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev)
{
	unsigned int val;
	int ret;

	ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val);
	if (ret != 0)
		return ret;

	/* Extract the selector field and shift it down to bit 0. */
	val &= rdev->desc->vsel_mask;
	val >>= ffs(rdev->desc->vsel_mask) - 1;

	return val;
}
EXPORT_SYMBOL_GPL(regulator_get_voltage_sel_regmap);

/**
 * regulator_set_voltage_sel_regmap - standard set_voltage_sel for regmap users
 *
 * @rdev: regulator to operate on
 * @sel: Selector to set
 *
 * Regulators that use regmap for their register I/O can set the
 * vsel_reg and vsel_mask fields in their descriptor and then use this
 * as their set_voltage_vsel operation, saving some code.
 */
int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel)
{
	int ret;

	/* Shift the selector up into the vsel_mask field position. */
	sel <<= ffs(rdev->desc->vsel_mask) - 1;

	ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
				  rdev->desc->vsel_mask, sel);
	if (ret)
		return ret;

	/* Some hardware latches the new selector only after an apply bit
	 * is written; do that when the descriptor declares one. */
	if (rdev->desc->apply_bit)
		ret = regmap_update_bits(rdev->regmap, rdev->desc->apply_reg,
					 rdev->desc->apply_bit,
					 rdev->desc->apply_bit);
	return ret;
}
EXPORT_SYMBOL_GPL(regulator_set_voltage_sel_regmap);

/**
 * regulator_map_voltage_iterate - map_voltage() based on list_voltage()
 *
 * @rdev: Regulator to operate on
 * @min_uV: Lower bound for voltage
 * @max_uV: Upper bound for voltage
 *
 * Drivers implementing set_voltage_sel() and list_voltage() can use
 * this as their map_voltage() operation.  It will find a suitable
 * voltage by calling list_voltage() until it gets something in bounds
 * for the requested voltages.
 */
int regulator_map_voltage_iterate(struct regulator_dev *rdev,
				  int min_uV, int max_uV)
{
	int best_val = INT_MAX;
	int selector = 0;
	int i, ret;

	/* Find the smallest voltage that falls within the specified
	 * range.
*/ for (i = 0; i < rdev->desc->n_voltages; i++) { ret = rdev->desc->ops->list_voltage(rdev, i); if (ret < 0) continue; if (ret < best_val && ret >= min_uV && ret <= max_uV) { best_val = ret; selector = i; } } if (best_val != INT_MAX) return selector; else return -EINVAL; } EXPORT_SYMBOL_GPL(regulator_map_voltage_iterate); /** * regulator_map_voltage_ascend - map_voltage() for ascendant voltage list * * @rdev: Regulator to operate on * @min_uV: Lower bound for voltage * @max_uV: Upper bound for voltage * * Drivers that have ascendant voltage list can use this as their * map_voltage() operation. */ int regulator_map_voltage_ascend(struct regulator_dev *rdev, int min_uV, int max_uV) { int i, ret; for (i = 0; i < rdev->desc->n_voltages; i++) { ret = rdev->desc->ops->list_voltage(rdev, i); if (ret < 0) continue; if (ret > max_uV) break; if (ret >= min_uV && ret <= max_uV) return i; } return -EINVAL; } EXPORT_SYMBOL_GPL(regulator_map_voltage_ascend); /** * regulator_map_voltage_linear - map_voltage() for simple linear mappings * * @rdev: Regulator to operate on * @min_uV: Lower bound for voltage * @max_uV: Upper bound for voltage * * Drivers providing min_uV and uV_step in their regulator_desc can * use this as their map_voltage() operation. 
*/ int regulator_map_voltage_linear(struct regulator_dev *rdev, int min_uV, int max_uV) { int ret, voltage; /* Allow uV_step to be 0 for fixed voltage */ if (rdev->desc->n_voltages == 1 && rdev->desc->uV_step == 0) { if (min_uV <= rdev->desc->min_uV && rdev->desc->min_uV <= max_uV) return 0; else return -EINVAL; } if (!rdev->desc->uV_step) { BUG_ON(!rdev->desc->uV_step); return -EINVAL; } if (min_uV < rdev->desc->min_uV) min_uV = rdev->desc->min_uV; ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step); if (ret < 0) return ret; ret += rdev->desc->linear_min_sel; /* Map back into a voltage to verify we're still in bounds */ voltage = rdev->desc->ops->list_voltage(rdev, ret); if (voltage < min_uV || voltage > max_uV) return -EINVAL; return ret; } EXPORT_SYMBOL_GPL(regulator_map_voltage_linear); /** * regulator_map_voltage_linear_range - map_voltage() for multiple linear ranges * * @rdev: Regulator to operate on * @min_uV: Lower bound for voltage * @max_uV: Upper bound for voltage * * Drivers providing linear_ranges in their descriptor can use this as * their map_voltage() callback. 
*/ int regulator_map_voltage_linear_range(struct regulator_dev *rdev, int min_uV, int max_uV) { const struct regulator_linear_range *range; int ret = -EINVAL; int voltage, i; if (!rdev->desc->n_linear_ranges) { BUG_ON(!rdev->desc->n_linear_ranges); return -EINVAL; } for (i = 0; i < rdev->desc->n_linear_ranges; i++) { int linear_max_uV; range = &rdev->desc->linear_ranges[i]; linear_max_uV = range->min_uV + (range->max_sel - range->min_sel) * range->uV_step; if (!(min_uV <= linear_max_uV && max_uV >= range->min_uV)) continue; if (min_uV <= range->min_uV) min_uV = range->min_uV; /* range->uV_step == 0 means fixed voltage range */ if (range->uV_step == 0) { ret = 0; } else { ret = DIV_ROUND_UP(min_uV - range->min_uV, range->uV_step); if (ret < 0) return ret; } ret += range->min_sel; break; } if (i == rdev->desc->n_linear_ranges) return -EINVAL; /* Map back into a voltage to verify we're still in bounds */ voltage = rdev->desc->ops->list_voltage(rdev, ret); if (voltage < min_uV || voltage > max_uV) return -EINVAL; return ret; } EXPORT_SYMBOL_GPL(regulator_map_voltage_linear_range); /** * regulator_list_voltage_linear - List voltages with simple calculation * * @rdev: Regulator device * @selector: Selector to convert into a voltage * * Regulators with a simple linear mapping between voltages and * selectors can set min_uV and uV_step in the regulator descriptor * and then use this function as their list_voltage() operation, */ int regulator_list_voltage_linear(struct regulator_dev *rdev, unsigned int selector) { if (selector >= rdev->desc->n_voltages) return -EINVAL; if (selector < rdev->desc->linear_min_sel) return 0; selector -= rdev->desc->linear_min_sel; return rdev->desc->min_uV + (rdev->desc->uV_step * selector); } EXPORT_SYMBOL_GPL(regulator_list_voltage_linear); /** * regulator_list_voltage_linear_range - List voltages for linear ranges * * @rdev: Regulator device * @selector: Selector to convert into a voltage * * Regulators with a series of simple linear 
mappings between voltages * and selectors can set linear_ranges in the regulator descriptor and * then use this function as their list_voltage() operation, */ int regulator_list_voltage_linear_range(struct regulator_dev *rdev, unsigned int selector) { const struct regulator_linear_range *range; int i; if (!rdev->desc->n_linear_ranges) { BUG_ON(!rdev->desc->n_linear_ranges); return -EINVAL; } for (i = 0; i < rdev->desc->n_linear_ranges; i++) { range = &rdev->desc->linear_ranges[i]; if (!(selector >= range->min_sel && selector <= range->max_sel)) continue; selector -= range->min_sel; return range->min_uV + (range->uV_step * selector); } return -EINVAL; } EXPORT_SYMBOL_GPL(regulator_list_voltage_linear_range); /** * regulator_list_voltage_table - List voltages with table based mapping * * @rdev: Regulator device * @selector: Selector to convert into a voltage * * Regulators with table based mapping between voltages and * selectors can set volt_table in the regulator descriptor * and then use this function as their list_voltage() operation. */ int regulator_list_voltage_table(struct regulator_dev *rdev, unsigned int selector) { if (!rdev->desc->volt_table) { BUG_ON(!rdev->desc->volt_table); return -EINVAL; } if (selector >= rdev->desc->n_voltages) return -EINVAL; return rdev->desc->volt_table[selector]; } EXPORT_SYMBOL_GPL(regulator_list_voltage_table); /** * regulator_set_bypass_regmap - Default set_bypass() using regmap * * @rdev: device to operate on. * @enable: state to set. */ int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable) { unsigned int val; if (enable) { val = rdev->desc->bypass_val_on; if (!val) val = rdev->desc->bypass_mask; } else { val = rdev->desc->bypass_val_off; } return regmap_update_bits(rdev->regmap, rdev->desc->bypass_reg, rdev->desc->bypass_mask, val); } EXPORT_SYMBOL_GPL(regulator_set_bypass_regmap); /** * regulator_get_bypass_regmap - Default get_bypass() using regmap * * @rdev: device to operate on. 
* @enable: current state. */ int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable) { unsigned int val; int ret; ret = regmap_read(rdev->regmap, rdev->desc->bypass_reg, &val); if (ret != 0) return ret; *enable = val & rdev->desc->bypass_mask; return 0; } EXPORT_SYMBOL_GPL(regulator_get_bypass_regmap);
gpl-2.0
gnensis/linux-2.6.15
arch/mips/math-emu/dp_sub.c
845
4977
/* IEEE754 floating point arithmetic
 * double precision: common utilities
 */
/*
 * MIPS floating point support
 * Copyright (C) 1994-2000 Algorithmics Ltd.
 * http://www.algor.co.uk
 *
 * ########################################################################
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * ########################################################################
 */

#include "ieee754dp.h"

/*
 * ieee754dp_sub - software-emulated IEEE754 double-precision subtraction.
 *
 * Classifies both operands (NaN/infinity/zero/denormal/normal) and handles
 * every special-case pairing in the switch below; only NORM/DNORM pairs
 * fall out of the switch into the arithmetic path, which negates y's sign
 * and performs an addition on the aligned mantissas.  Exception flags are
 * accumulated via SETCX and raised through the *xcpt helpers.
 */
ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y)
{
	COMPXDP;
	COMPYDP;

	EXPLODEXDP;
	EXPLODEYDP;

	CLEARCX;

	FLUSHXDP;
	FLUSHYDP;

	switch (CLPAIR(xc, yc)) {
	/* any signalling NaN operand => invalid operation */
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
		SETCX(IEEE754_INVALID_OPERATION);
		return ieee754dp_nanxcpt(ieee754dp_indef(), "sub", x, y);

	/* quiet NaN operand propagates through unchanged */
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
		return y;

	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
		return x;

	/* Infinity handling */
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
		/* inf - (-inf) = inf; inf - inf is invalid */
		if (xs != ys)
			return x;
		SETCX(IEEE754_INVALID_OPERATION);
		return ieee754dp_xcpt(ieee754dp_indef(), "sub", x, y);

	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
		/* finite - inf = inf with the opposite sign */
		return ieee754dp_inf(ys ^ 1);

	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
		return x;

	/* Zero handling */
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
		/* 0 - 0: sign of the result depends on the rounding mode */
		if (xs != ys)
			return x;
		else
			return ieee754dp_zero(ieee754_csr.rm == IEEE754_RD);

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
		return x;

	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
		/* quick fix up: 0 - y = -y */
		DPSIGN(y) ^= 1;
		return y;

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
		DPDNORMX;
		/* fall through */

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
		/* normalize ym,ye */
		DPDNORMY;
		break;

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
		/* normalize xm,xe */
		DPDNORMX;
		break;

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
		break;
	}
	/* flip sign of y and handle as add */
	ys ^= 1;

	assert(xm & DP_HIDDEN_BIT);
	assert(ym & DP_HIDDEN_BIT);

	/* provide guard, round and sticky bit space */
	xm <<= 3;
	ym <<= 3;

	if (xe > ye) {
		/* have to shift y fraction right to align */
		int s = xe - ye;
		ym = XDPSRS(ym, s);
		ye += s;
	} else if (ye > xe) {
		/* have to shift x fraction right to align */
		int s = ye - xe;
		xm = XDPSRS(xm, s);
		xe += s;
	}
	assert(xe == ye);
	assert(xe <= DP_EMAX);

	if (xs == ys) {
		/* generate 28 bit result of adding two 27 bit numbers */
		xm = xm + ym;
		xe = xe;
		xs = xs;

		if (xm >> (DP_MBITS + 1 + 3)) {	/* carry out */
			xm = XDPSRS1(xm);	/* shift preserving sticky */
			xe++;
		}
	} else {
		/* opposite signs: subtract the smaller magnitude from
		 * the larger, taking the larger operand's sign */
		if (xm >= ym) {
			xm = xm - ym;
			xe = xe;
			xs = xs;
		} else {
			xm = ym - xm;
			xe = xe;
			xs = ys;
		}
		if (xm == 0) {
			if (ieee754_csr.rm == IEEE754_RD)
				return ieee754dp_zero(1);	/* round negative inf. => sign = -1 */
			else
				return ieee754dp_zero(0);	/* other round modes => sign = 1 */
		}
		/* normalize to rounding precision */
		while ((xm >> (DP_MBITS + 3)) == 0) {
			xm <<= 1;
			xe--;
		}
	}
	DPNORMRET2(xs, xe, xm, "sub", x, y);
}
gpl-2.0
friedrich420/Sprint-Note-4-Android-5.1.1-Kernel
KernelN910P-5_1_1GIT/drivers/spi/spi-orion.c
845
11908
/*
 * Marvell Orion SPI controller driver
 *
 * Author: Shadi Ammouri <shadi@marvell.com>
 * Copyright (C) 2007-2008 Marvell Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <asm/unaligned.h>

#define DRIVER_NAME			"orion_spi"

#define ORION_NUM_CHIPSELECTS		1 /* only one slave is supported*/
#define ORION_SPI_WAIT_RDY_MAX_LOOP	2000 /* in usec */

#define ORION_SPI_IF_CTRL_REG		0x00
#define ORION_SPI_IF_CONFIG_REG		0x04
#define ORION_SPI_DATA_OUT_REG		0x08
#define ORION_SPI_DATA_IN_REG		0x0c
#define ORION_SPI_INT_CAUSE_REG		0x10

#define ORION_SPI_MODE_CPOL		(1 << 11)
#define ORION_SPI_MODE_CPHA		(1 << 12)
#define ORION_SPI_IF_8_16_BIT_MODE	(1 << 5)
#define ORION_SPI_CLK_PRESCALE_MASK	0x1F
#define ORION_SPI_MODE_MASK		(ORION_SPI_MODE_CPOL | \
					 ORION_SPI_MODE_CPHA)

struct orion_spi {
	struct spi_master	*master;
	void __iomem		*base;
	unsigned int		max_speed;
	unsigned int		min_speed;
	struct clk              *clk;
};

static inline void __iomem *spi_reg(struct orion_spi *orion_spi, u32 reg)
{
	return orion_spi->base + reg;
}

/* read-modify-write: set @mask bits in register @reg */
static inline void
orion_spi_setbits(struct orion_spi *orion_spi, u32 reg, u32 mask)
{
	void __iomem *reg_addr = spi_reg(orion_spi, reg);
	u32 val;

	val = readl(reg_addr);
	val |= mask;
	writel(val, reg_addr);
}

/* read-modify-write: clear @mask bits in register @reg */
static inline void
orion_spi_clrbits(struct orion_spi *orion_spi, u32 reg, u32 mask)
{
	void __iomem *reg_addr = spi_reg(orion_spi, reg);
	u32 val;

	val = readl(reg_addr);
	val &= ~mask;
	writel(val, reg_addr);
}

/* select 8- or 16-bit word size; anything else is rejected */
static int orion_spi_set_transfer_size(struct orion_spi *orion_spi, int size)
{
	if (size == 16) {
		orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
				  ORION_SPI_IF_8_16_BIT_MODE);
	} else if (size == 8) {
		orion_spi_clrbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
				  ORION_SPI_IF_8_16_BIT_MODE);
	} else {
		pr_debug("Bad bits per word value %d (only 8 or 16 are "
			 "allowed).\n", size);
		return -EINVAL;
	}

	return 0;
}

/* program the clock prescaler for the requested @speed (Hz) */
static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
{
	u32 tclk_hz;
	u32 rate;
	u32 prescale;
	u32 reg;
	struct orion_spi *orion_spi;

	orion_spi = spi_master_get_devdata(spi->master);

	tclk_hz = clk_get_rate(orion_spi->clk);

	/*
	 * the supported rates are: 4,6,8...30
	 * round up as we look for equal or less speed
	 */
	rate = DIV_ROUND_UP(tclk_hz, speed);
	rate = roundup(rate, 2);

	/* check if requested speed is too small */
	if (rate > 30)
		return -EINVAL;

	if (rate < 4)
		rate = 4;

	/* Convert the rate to SPI clock divisor value.	*/
	prescale = 0x10 + rate/2;

	reg = readl(spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
	reg = ((reg & ~ORION_SPI_CLK_PRESCALE_MASK) | prescale);
	writel(reg, spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));

	return 0;
}

/* apply the device's CPOL/CPHA mode bits to the interface config */
static void
orion_spi_mode_set(struct spi_device *spi)
{
	u32 reg;
	struct orion_spi *orion_spi;

	orion_spi = spi_master_get_devdata(spi->master);

	reg = readl(spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
	reg &= ~ORION_SPI_MODE_MASK;
	if (spi->mode & SPI_CPOL)
		reg |= ORION_SPI_MODE_CPOL;
	if (spi->mode & SPI_CPHA)
		reg |= ORION_SPI_MODE_CPHA;
	writel(reg, spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
}

/*
 * called only when no transfer is active on the bus
 */
static int
orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct orion_spi *orion_spi;
	unsigned int speed = spi->max_speed_hz;
	unsigned int bits_per_word = spi->bits_per_word;
	int	rc;

	orion_spi = spi_master_get_devdata(spi->master);

	/* per-transfer settings override the device defaults */
	if ((t != NULL) && t->speed_hz)
		speed = t->speed_hz;

	if ((t != NULL) && t->bits_per_word)
		bits_per_word = t->bits_per_word;

	orion_spi_mode_set(spi);

	rc = orion_spi_baudrate_set(spi, speed);
	if (rc)
		return rc;

	return orion_spi_set_transfer_size(orion_spi, bits_per_word);
}

static void orion_spi_set_cs(struct orion_spi *orion_spi, int enable)
{
	if (enable)
		orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
	else
		orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
}

/* busy-wait for the interface-ready interrupt cause; 1 = ready, -1 = timeout */
static inline int orion_spi_wait_till_ready(struct orion_spi *orion_spi)
{
	int i;

	for (i = 0; i < ORION_SPI_WAIT_RDY_MAX_LOOP; i++) {
		if (readl(spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG)))
			return 1;
		else
			udelay(1);
	}

	return -1;
}

/* shift one 8-bit word out/in; advances the buffer pointers */
static inline int
orion_spi_write_read_8bit(struct spi_device *spi,
			  const u8 **tx_buf, u8 **rx_buf)
{
	void __iomem *tx_reg, *rx_reg, *int_reg;
	struct orion_spi *orion_spi;

	orion_spi = spi_master_get_devdata(spi->master);
	tx_reg = spi_reg(orion_spi, ORION_SPI_DATA_OUT_REG);
	rx_reg = spi_reg(orion_spi, ORION_SPI_DATA_IN_REG);
	int_reg = spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG);

	/* clear the interrupt cause register */
	writel(0x0, int_reg);

	if (tx_buf && *tx_buf)
		writel(*(*tx_buf)++, tx_reg);
	else
		writel(0, tx_reg);

	if (orion_spi_wait_till_ready(orion_spi) < 0) {
		dev_err(&spi->dev, "TXS timed out\n");
		return -1;
	}

	if (rx_buf && *rx_buf)
		*(*rx_buf)++ = readl(rx_reg);

	return 1;
}

/* shift one 16-bit word out/in; handles unaligned buffers */
static inline int
orion_spi_write_read_16bit(struct spi_device *spi,
			   const u16 **tx_buf, u16 **rx_buf)
{
	void __iomem *tx_reg, *rx_reg, *int_reg;
	struct orion_spi *orion_spi;

	orion_spi = spi_master_get_devdata(spi->master);
	tx_reg = spi_reg(orion_spi, ORION_SPI_DATA_OUT_REG);
	rx_reg = spi_reg(orion_spi, ORION_SPI_DATA_IN_REG);
	int_reg = spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG);

	/* clear the interrupt cause register */
	writel(0x0, int_reg);

	if (tx_buf && *tx_buf)
		writel(__cpu_to_le16(get_unaligned((*tx_buf)++)), tx_reg);
	else
		writel(0, tx_reg);

	if (orion_spi_wait_till_ready(orion_spi) < 0) {
		dev_err(&spi->dev, "TXS timed out\n");
		return -1;
	}

	if (rx_buf && *rx_buf)
		put_unaligned(__le16_to_cpu(readl(rx_reg)), (*rx_buf)++);

	return 1;
}

/* run one spi_transfer word-by-word; returns number of bytes transferred */
static unsigned int
orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct orion_spi *orion_spi;
	unsigned int count;
	int word_len;

	orion_spi = spi_master_get_devdata(spi->master);
	word_len = spi->bits_per_word;
	count = xfer->len;

	if (word_len == 8) {
		const u8 *tx = xfer->tx_buf;
		u8 *rx = xfer->rx_buf;

		do {
			if (orion_spi_write_read_8bit(spi, &tx, &rx) < 0)
				goto out;
			count--;
		} while (count);
	} else if (word_len == 16) {
		const u16 *tx = xfer->tx_buf;
		u16 *rx = xfer->rx_buf;

		do {
			if (orion_spi_write_read_16bit(spi, &tx, &rx) < 0)
				goto out;
			count -= 2;
		} while (count);
	}

out:
	return xfer->len - count;
}

/* spi_master transfer_one_message() callback */
static int orion_spi_transfer_one_message(struct spi_master *master,
					   struct spi_message *m)
{
	struct orion_spi *orion_spi = spi_master_get_devdata(master);
	struct spi_device *spi = m->spi;
	struct spi_transfer *t = NULL;
	int par_override = 0;
	int status = 0;
	int cs_active = 0;

	/* Load defaults */
	status = orion_spi_setup_transfer(spi, NULL);

	if (status < 0)
		goto msg_done;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		/* make sure buffer length is even when working in 16
		 * bit mode*/
		if ((t->bits_per_word == 16) && (t->len & 1)) {
			dev_err(&spi->dev,
				"message rejected : "
				"odd data length %d while in 16 bit mode\n",
				t->len);
			status = -EIO;
			goto msg_done;
		}

		if (t->speed_hz && t->speed_hz < orion_spi->min_speed) {
			dev_err(&spi->dev,
				"message rejected : "
				"device min speed (%d Hz) exceeds "
				"required transfer speed (%d Hz)\n",
				orion_spi->min_speed, t->speed_hz);
			status = -EIO;
			goto msg_done;
		}

		/* reconfigure only when a transfer overrides the defaults */
		if (par_override || t->speed_hz || t->bits_per_word) {
			par_override = 1;
			status = orion_spi_setup_transfer(spi, t);
			if (status < 0)
				break;
			if (!t->speed_hz && !t->bits_per_word)
				par_override = 0;
		}

		if (!cs_active) {
			orion_spi_set_cs(orion_spi, 1);
			cs_active = 1;
		}

		if (t->len)
			m->actual_length += orion_spi_write_read(spi, t);

		if (t->delay_usecs)
			udelay(t->delay_usecs);

		if (t->cs_change) {
			orion_spi_set_cs(orion_spi, 0);
			cs_active = 0;
		}
	}

msg_done:
	if (cs_active)
		orion_spi_set_cs(orion_spi, 0);

	m->status = status;
	spi_finalize_current_message(master);

	return 0;
}

static int orion_spi_reset(struct orion_spi *orion_spi)
{
	/* Verify that the CS is deasserted */
	orion_spi_set_cs(orion_spi, 0);

	return 0;
}

/* spi_master setup() callback: sanity-check and clamp the device speed */
static int orion_spi_setup(struct spi_device *spi)
{
	struct orion_spi *orion_spi;

	orion_spi = spi_master_get_devdata(spi->master);

	if ((spi->max_speed_hz == 0)
			|| (spi->max_speed_hz > orion_spi->max_speed))
		spi->max_speed_hz = orion_spi->max_speed;

	if (spi->max_speed_hz < orion_spi->min_speed) {
		dev_err(&spi->dev, "setup: requested speed too low %d Hz\n",
			spi->max_speed_hz);
		return -EINVAL;
	}

	/*
	 * baudrate & width will be set orion_spi_setup_transfer
	 */
	return 0;
}

static int orion_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct orion_spi *spi;
	struct resource *r;
	unsigned long tclk_hz;
	int status = 0;

	master = spi_alloc_master(&pdev->dev, sizeof *spi);
	if (master == NULL) {
		dev_dbg(&pdev->dev, "master allocation failed\n");
		return -ENOMEM;
	}

	if (pdev->id != -1)
		master->bus_num = pdev->id;
	if (pdev->dev.of_node) {
		u32 cell_index;
		if (!of_property_read_u32(pdev->dev.of_node, "cell-index",
					  &cell_index))
			master->bus_num = cell_index;
	}

	/* we support only mode 0, and no options */
	master->mode_bits = SPI_CPHA | SPI_CPOL;

	master->setup = orion_spi_setup;
	master->transfer_one_message = orion_spi_transfer_one_message;
	master->num_chipselect = ORION_NUM_CHIPSELECTS;

	dev_set_drvdata(&pdev->dev, master);

	spi = spi_master_get_devdata(master);
	spi->master = master;

	spi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(spi->clk)) {
		status = PTR_ERR(spi->clk);
		goto out;
	}

	clk_prepare(spi->clk);
	clk_enable(spi->clk);
	tclk_hz = clk_get_rate(spi->clk);
	spi->max_speed = DIV_ROUND_UP(tclk_hz, 4);
	spi->min_speed = DIV_ROUND_UP(tclk_hz, 30);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		status = -ENODEV;
		goto out_rel_clk;
	}

	if (!request_mem_region(r->start, resource_size(r),
				dev_name(&pdev->dev))) {
		status = -EBUSY;
		goto out_rel_clk;
	}

	/*
	 * ioremap() can fail; the original code dereferenced the result
	 * unconditionally in orion_spi_reset() via readl().  Check it and
	 * unwind cleanly instead of oopsing on a NULL mapping.
	 */
	spi->base = ioremap(r->start, SZ_1K);
	if (!spi->base) {
		status = -ENOMEM;
		goto out_rel_mem;
	}

	if (orion_spi_reset(spi) < 0)
		goto out_unmap;

	master->dev.of_node = pdev->dev.of_node;
	status = spi_register_master(master);
	if (status < 0)
		goto out_unmap;

	return status;

out_unmap:
	iounmap(spi->base);
out_rel_mem:
	release_mem_region(r->start, resource_size(r));
out_rel_clk:
	clk_disable_unprepare(spi->clk);
	clk_put(spi->clk);
out:
	spi_master_put(master);
	return status;
}

static int orion_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master;
	struct resource *r;
	struct orion_spi *spi;

	master = dev_get_drvdata(&pdev->dev);
	spi = spi_master_get_devdata(master);

	clk_disable_unprepare(spi->clk);
	clk_put(spi->clk);

	/* release the MMIO mapping taken in probe (was previously leaked) */
	iounmap(spi->base);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(r->start, resource_size(r));

	spi_unregister_master(master);

	return 0;
}

MODULE_ALIAS("platform:" DRIVER_NAME);

static const struct of_device_id orion_spi_of_match_table[] = {
	{ .compatible = "marvell,orion-spi", },
	{}
};
MODULE_DEVICE_TABLE(of, orion_spi_of_match_table);

static struct platform_driver orion_spi_driver = {
	.driver = {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(orion_spi_of_match_table),
	},
	.probe		= orion_spi_probe,
	.remove		= orion_spi_remove,
};

module_platform_driver(orion_spi_driver);

MODULE_DESCRIPTION("Orion SPI driver");
MODULE_AUTHOR("Shadi Ammouri <shadi@marvell.com>");
MODULE_LICENSE("GPL");
gpl-2.0
Frontier314/kernel_ut4412
block/bsg.c
1101
24282
/* * bsg.c - block layer implementation of the sg v4 interface * * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com> * * This file is subject to the terms and conditions of the GNU General Public * License version 2. See the file "COPYING" in the main directory of this * archive for more details. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/file.h> #include <linux/blkdev.h> #include <linux/poll.h> #include <linux/cdev.h> #include <linux/jiffies.h> #include <linux/percpu.h> #include <linux/uio.h> #include <linux/idr.h> #include <linux/bsg.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_ioctl.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_driver.h> #include <scsi/sg.h> #define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver" #define BSG_VERSION "0.4" struct bsg_device { struct request_queue *queue; spinlock_t lock; struct list_head busy_list; struct list_head done_list; struct hlist_node dev_list; atomic_t ref_count; int queued_cmds; int done_cmds; wait_queue_head_t wq_done; wait_queue_head_t wq_free; char name[20]; int max_queue; unsigned long flags; }; enum { BSG_F_BLOCK = 1, }; #define BSG_DEFAULT_CMDS 64 #define BSG_MAX_DEVS 32768 #undef BSG_DEBUG #ifdef BSG_DEBUG #define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args) #else #define dprintk(fmt, args...) 
#endif static DEFINE_MUTEX(bsg_mutex); static DEFINE_IDR(bsg_minor_idr); #define BSG_LIST_ARRAY_SIZE 8 static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE]; static struct class *bsg_class; static int bsg_major; static struct kmem_cache *bsg_cmd_cachep; /* * our internal command type */ struct bsg_command { struct bsg_device *bd; struct list_head list; struct request *rq; struct bio *bio; struct bio *bidi_bio; int err; struct sg_io_v4 hdr; char sense[SCSI_SENSE_BUFFERSIZE]; }; static void bsg_free_command(struct bsg_command *bc) { struct bsg_device *bd = bc->bd; unsigned long flags; kmem_cache_free(bsg_cmd_cachep, bc); spin_lock_irqsave(&bd->lock, flags); bd->queued_cmds--; spin_unlock_irqrestore(&bd->lock, flags); wake_up(&bd->wq_free); } static struct bsg_command *bsg_alloc_command(struct bsg_device *bd) { struct bsg_command *bc = ERR_PTR(-EINVAL); spin_lock_irq(&bd->lock); if (bd->queued_cmds >= bd->max_queue) goto out; bd->queued_cmds++; spin_unlock_irq(&bd->lock); bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL); if (unlikely(!bc)) { spin_lock_irq(&bd->lock); bd->queued_cmds--; bc = ERR_PTR(-ENOMEM); goto out; } bc->bd = bd; INIT_LIST_HEAD(&bc->list); dprintk("%s: returning free cmd %p\n", bd->name, bc); return bc; out: spin_unlock_irq(&bd->lock); return bc; } static inline struct hlist_head *bsg_dev_idx_hash(int index) { return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)]; } static int bsg_io_schedule(struct bsg_device *bd) { DEFINE_WAIT(wait); int ret = 0; spin_lock_irq(&bd->lock); BUG_ON(bd->done_cmds > bd->queued_cmds); /* * -ENOSPC or -ENODATA? I'm going for -ENODATA, meaning "I have no * work to do", even though we return -ENOSPC after this same test * during bsg_write() -- there, it means our buffer can't have more * bsg_commands added to it, thus has no space left. 
*/ if (bd->done_cmds == bd->queued_cmds) { ret = -ENODATA; goto unlock; } if (!test_bit(BSG_F_BLOCK, &bd->flags)) { ret = -EAGAIN; goto unlock; } prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE); spin_unlock_irq(&bd->lock); io_schedule(); finish_wait(&bd->wq_done, &wait); return ret; unlock: spin_unlock_irq(&bd->lock); return ret; } static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, struct sg_io_v4 *hdr, struct bsg_device *bd, fmode_t has_write_perm) { if (hdr->request_len > BLK_MAX_CDB) { rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL); if (!rq->cmd) return -ENOMEM; } if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request, hdr->request_len)) return -EFAULT; if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) { if (blk_verify_command(rq->cmd, has_write_perm)) return -EPERM; } else if (!capable(CAP_SYS_RAWIO)) return -EPERM; /* * fill in request structure */ rq->cmd_len = hdr->request_len; rq->cmd_type = REQ_TYPE_BLOCK_PC; rq->timeout = msecs_to_jiffies(hdr->timeout); if (!rq->timeout) rq->timeout = q->sg_timeout; if (!rq->timeout) rq->timeout = BLK_DEFAULT_SG_TIMEOUT; if (rq->timeout < BLK_MIN_SG_TIMEOUT) rq->timeout = BLK_MIN_SG_TIMEOUT; return 0; } /* * Check if sg_io_v4 from user is allowed and valid */ static int bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw) { int ret = 0; if (hdr->guard != 'Q') return -EINVAL; switch (hdr->protocol) { case BSG_PROTOCOL_SCSI: switch (hdr->subprotocol) { case BSG_SUB_PROTOCOL_SCSI_CMD: case BSG_SUB_PROTOCOL_SCSI_TRANSPORT: break; default: ret = -EINVAL; } break; default: ret = -EINVAL; } *rw = hdr->dout_xfer_len ? WRITE : READ; return ret; } /* * map sg_io_v4 to a request. 
*/ static struct request * bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm, u8 *sense) { struct request_queue *q = bd->queue; struct request *rq, *next_rq = NULL; int ret, rw; unsigned int dxfer_len; void *dxferp = NULL; struct bsg_class_device *bcd = &q->bsg_dev; /* if the LLD has been removed then the bsg_unregister_queue will * eventually be called and the class_dev was freed, so we can no * longer use this request_queue. Return no such address. */ if (!bcd->class_dev) return ERR_PTR(-ENXIO); dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp, hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp, hdr->din_xfer_len); ret = bsg_validate_sgv4_hdr(q, hdr, &rw); if (ret) return ERR_PTR(ret); /* * map scatter-gather elements separately and string them to request */ rq = blk_get_request(q, rw, GFP_KERNEL); if (!rq) return ERR_PTR(-ENOMEM); ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm); if (ret) goto out; if (rw == WRITE && hdr->din_xfer_len) { if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) { ret = -EOPNOTSUPP; goto out; } next_rq = blk_get_request(q, READ, GFP_KERNEL); if (!next_rq) { ret = -ENOMEM; goto out; } rq->next_rq = next_rq; next_rq->cmd_type = rq->cmd_type; dxferp = (void*)(unsigned long)hdr->din_xferp; ret = blk_rq_map_user(q, next_rq, NULL, dxferp, hdr->din_xfer_len, GFP_KERNEL); if (ret) goto out; } if (hdr->dout_xfer_len) { dxfer_len = hdr->dout_xfer_len; dxferp = (void*)(unsigned long)hdr->dout_xferp; } else if (hdr->din_xfer_len) { dxfer_len = hdr->din_xfer_len; dxferp = (void*)(unsigned long)hdr->din_xferp; } else dxfer_len = 0; if (dxfer_len) { ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len, GFP_KERNEL); if (ret) goto out; } rq->sense = sense; rq->sense_len = 0; return rq; out: if (rq->cmd != rq->__cmd) kfree(rq->cmd); blk_put_request(rq); if (next_rq) { blk_rq_unmap_user(next_rq->bio); blk_put_request(next_rq); } return ERR_PTR(ret); } /* * async completion call-back from 
the block layer, when scsi/ide/whatever * calls end_that_request_last() on a request */ static void bsg_rq_end_io(struct request *rq, int uptodate) { struct bsg_command *bc = rq->end_io_data; struct bsg_device *bd = bc->bd; unsigned long flags; dprintk("%s: finished rq %p bc %p, bio %p stat %d\n", bd->name, rq, bc, bc->bio, uptodate); bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration); spin_lock_irqsave(&bd->lock, flags); list_move_tail(&bc->list, &bd->done_list); bd->done_cmds++; spin_unlock_irqrestore(&bd->lock, flags); wake_up(&bd->wq_done); } /* * do final setup of a 'bc' and submit the matching 'rq' to the block * layer for io */ static void bsg_add_command(struct bsg_device *bd, struct request_queue *q, struct bsg_command *bc, struct request *rq) { int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL)); /* * add bc command to busy queue and submit rq for io */ bc->rq = rq; bc->bio = rq->bio; if (rq->next_rq) bc->bidi_bio = rq->next_rq->bio; bc->hdr.duration = jiffies; spin_lock_irq(&bd->lock); list_add_tail(&bc->list, &bd->busy_list); spin_unlock_irq(&bd->lock); dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc); rq->end_io_data = bc; blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io); } static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd) { struct bsg_command *bc = NULL; spin_lock_irq(&bd->lock); if (bd->done_cmds) { bc = list_first_entry(&bd->done_list, struct bsg_command, list); list_del(&bc->list); bd->done_cmds--; } spin_unlock_irq(&bd->lock); return bc; } /* * Get a finished command from the done list */ static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd) { struct bsg_command *bc; int ret; do { bc = bsg_next_done_cmd(bd); if (bc) break; if (!test_bit(BSG_F_BLOCK, &bd->flags)) { bc = ERR_PTR(-EAGAIN); break; } ret = wait_event_interruptible(bd->wq_done, bd->done_cmds); if (ret) { bc = ERR_PTR(-ERESTARTSYS); break; } } while (1); dprintk("%s: returning done %p\n", bd->name, bc); return bc; } 
/*
 * Fill in the sg_io_v4 result fields (status bytes, sense data, residuals)
 * from a completed request, then unmap and release the request(s).
 *
 * Returns 0, a -EFAULT from the sense copy, or the request's negative
 * error code.
 */
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = rq->errors & 0xff;
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	/* copy sense data out if the caller provided a response buffer */
	if (rq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
					rq->sense_len);

		ret = copy_to_user((void*)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	/* bidi: report both residuals and tear down the chained read rq */
	if (rq->next_rq) {
		hdr->dout_resid = rq->resid_len;
		hdr->din_resid = rq->next_rq->resid_len;
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = rq->resid_len;
	else
		hdr->dout_resid = rq->resid_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && rq->errors < 0)
		ret = rq->errors;

	blk_rq_unmap_user(bio);
	/* free a command buffer that was kmalloc'ed at map time */
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);

	return ret;
}

/*
 * Wait for every outstanding command to finish, then reap and discard
 * their results. Used on device teardown. Returns the first error seen
 * while completing commands.
 */
static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it. The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		/* remember only the first failure */
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

/*
 * Copy completed sg_io_v4 headers back to userspace, one per done
 * command, up to count bytes. *bytes_read is advanced per header copied.
 */
static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	/* reads must be a whole number of headers */
	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

/* mirror the file's O_NONBLOCK flag into the device's BSG_F_BLOCK bit */
static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	/* ENOSPC/ENODATA/EAGAIN are expected flow-control results, not errors */
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

/*
 * read(2): reap completed commands; each sg_io_v4 header read back
 * corresponds to one finished command.
 */
static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	/* report the error only if nothing was read or it is a real error */
	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	return bytes_read;
}

/*
 * Parse and submit one sg_io_v4 command per header written. Commands
 * submitted before a failure stay in flight; *bytes_written reflects
 * only successfully queued headers.
 */
static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written,
		       fmode_t has_write_perm)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	/* writes must be a whole number of headers */
	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		/* ownership passed to the busy list; don't free below */
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

/* write(2) entry point: submit commands described by sg_io_v4 headers */
static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written,
			  file->f_mode & FMODE_WRITE);

	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || (bytes_written && err_block_err(ret)))
		bytes_written = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
	return bytes_written;
}

/* allocate and initialise a zeroed bsg_device, or NULL on OOM */
static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

/* kref release: run the LLD's release hook and drop the parent device */
static void bsg_kref_release_function(struct kref *kref)
{
	struct bsg_class_device *bcd =
		container_of(kref, struct bsg_class_device, ref);
	struct device *parent = bcd->parent;

	if (bcd->release)
		bcd->release(bcd->parent);

	put_device(parent);
}

/*
 * Drop a reference on the device; the last reference reaps all
 * outstanding commands and frees the bsg_device.
 */
static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	kfree(bd);
out:
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}

/*
 * Create a bsg_device bound to the given queue and hash it by minor.
 * Takes a queue reference; returns ERR_PTR on failure.
 */
static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
	int ret;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif
	ret = blk_get_queue(rq);
	if (ret)
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

/* look up an existing bsg_device for this queue; takes a ref if found */
static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}

/* find-or-create the bsg_device for the minor being opened */
static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (bcd)
		kref_get(&bcd->ref);
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);
	if (IS_ERR(bd))
		kref_put(&bcd->ref, bsg_kref_release_function);

	return bd;
}

/* open(2) on a /dev/bsg node */
static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

/* release(2): drop the per-open device reference */
static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

/* poll(2): readable when done commands exist, writable below max_queue */
static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds >= bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}

/*
 * ioctl(2): bsg-specific queue-depth controls, a pass-through for the
 * classic SCSI/sg ioctls, and a synchronous SG_IO path.
 */
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
	}
	case SG_IO: {
		/* synchronous single-command submission */
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;
		int at_head;
		u8 sense[SCSI_SENSE_BUFFERSIZE];

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;

		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
		blk_execute_rq(bd->queue, NULL, rq, at_head);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}

static const struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
	.llseek		=	default_llseek,
};

/* tear down the bsg node for a queue; reverses bsg_register_queue() */
void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	kref_put(&bcd->ref, bsg_kref_release_function);
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

/*
 * Register a bsg char-device node for the given request queue.
 * @release is called (with @parent) when the last reference drops.
 * Returns 0 on success or a negative errno.
 */
int bsg_register_queue(struct request_queue *q, struct device *parent,
		       const char *name, void (*release)(struct device *))
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret, minor;
	struct device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = dev_name(parent);

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
	if (!ret) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
	if (ret < 0)
		goto unlock;

	if (minor >= BSG_MAX_DEVS) {
		printk(KERN_ERR "bsg: too many bsg devices\n");
		ret = -EINVAL;
		goto remove_idr;
	}

	bcd->minor = minor;
	bcd->queue = q;
	bcd->parent = get_device(parent);
	bcd->release = release;
	kref_init(&bcd->ref);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

	/* unwind in reverse order of setup */
unregister_class_dev:
	device_unregister(class_dev);
put_dev:
	put_device(parent);
remove_idr:
	idr_remove(&bsg_minor_idr, minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);

static struct cdev bsg_cdev;

/* place device nodes under /dev/bsg/ */
static char *bsg_devnode(struct device *dev, mode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}

/* module init: slab cache, device class, char-dev region and cdev */
static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}
	bsg_class->devnode = bsg_devnode;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);
gpl-2.0
ion-storm/Unleashed-N4
arch/arm/mach-msm/qdsp6/audiov2/mp3.c
1101
4599
/* arch/arm/mach-msm/qdsp6/audiov2/mp3.c
 *
 * MP3 playback misc-device driver: userspace writes an MP3 bitstream to
 * /dev/msm_mp3 and the QDSP6 decodes and plays it.
 *
 * Copyright (C) 2009 Google, Inc.
 * Copyright (C) 2009 HTC Corporation
 * Copyright (c) 2009, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uaccess.h>

#include <linux/msm_audio.h>

#include <mach/msm_qdsp6_audiov2.h>
#include "dal_audio.h"
#include "dal_audio_format.h"

/* size of one DMA half-buffer, and of the whole double buffer */
#define BUFSZ (8192)
#define DMASZ (BUFSZ * 2)

/* per-open state for one playback session */
struct mp3 {
	struct mutex lock;		/* serialises ioctls against each other */
	struct audio_client *ac;	/* DSP audio session */
	struct msm_audio_config cfg;	/* sample rate / channel config */
};

/*
 * ioctl handler: start/stop playback and get/set the audio config.
 * AUDIO_GET_STATS is answered (with zeroed stats) without taking the lock.
 */
static long mp3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct mp3 *mp3 = file->private_data;
	struct adsp_open_command rpc;
	int rc = 0;

	if (cmd == AUDIO_GET_STATS) {
		struct msm_audio_stats stats;

		memset(&stats, 0, sizeof(stats));
		if (copy_to_user((void *) arg, &stats, sizeof(stats)))
			return -EFAULT;
		return 0;
	}

	mutex_lock(&mp3->lock);
	switch (cmd) {

	case AUDIO_SET_VOLUME:
		break;
	case AUDIO_START:
		/* open a DSP playback stream in MP3 format using cfg */
		memset(&rpc, 0, sizeof(rpc));
		rpc.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_OPEN_WRITE;
		rpc.stream_context = ADSP_AUDIO_DEVICE_CONTEXT_PLAYBACK;
		rpc.device = ADSP_AUDIO_DEVICE_ID_DEFAULT;
		rpc.format_block.standard.format = ADSP_AUDIO_FORMAT_MP3;
		rpc.format_block.standard.channels = mp3->cfg.channel_count;
		rpc.format_block.standard.bits_per_sample = 16;
		rpc.format_block.standard.sampling_rate = mp3->cfg.sample_rate;
		rpc.format_block.standard.is_signed = 1;
		rpc.format_block.standard.is_interleaved = 0;
		rpc.buf_max_size = BUFSZ;
		q6audio_start(mp3->ac, (void *) &rpc, sizeof(rpc));
		break;
	case AUDIO_STOP:
		break;
	case AUDIO_FLUSH:
		break;
	case AUDIO_SET_CONFIG:
		if (copy_from_user(&mp3->cfg, (void *) arg,
				   sizeof(struct msm_audio_config))) {
			rc = -EFAULT;
			break;
		}
		/* only mono or stereo is supported */
		if (mp3->cfg.channel_count < 1 || mp3->cfg.channel_count > 2) {
			rc = -EINVAL;
			break;
		}
		break;
	case AUDIO_GET_CONFIG:
		if (copy_to_user((void *) arg, &mp3->cfg,
				 sizeof(struct msm_audio_config))) {
			rc = -EFAULT;
		}
		break;
	default:
		rc = -EINVAL;
	}
	mutex_unlock(&mp3->lock);
	return rc;
}

/*
 * open(2): allocate session state, open a DSP write client, and set the
 * default config (stereo, 48 kHz, two BUFSZ buffers).
 */
static int mp3_open(struct inode *inode, struct file *file)
{

	struct mp3 *mp3;
	mp3 = kzalloc(sizeof(struct mp3), GFP_KERNEL);
	if (!mp3)
		return -ENOMEM;

	mutex_init(&mp3->lock);

	file->private_data = mp3;

	mp3->ac = q6audio_open(AUDIO_FLAG_WRITE, BUFSZ);
	if (!mp3->ac) {
		kfree(mp3);
		return -ENOMEM;
	}

	mp3->cfg.channel_count = 2;
	mp3->cfg.buffer_count = 2;
	mp3->cfg.buffer_size = BUFSZ;
	mp3->cfg.unused[0] = 0;
	mp3->cfg.unused[1] = 0;
	mp3->cfg.unused[2] = 0;
	mp3->cfg.sample_rate = 48000;

	return 0;
}

/*
 * write(2): copy the userspace bitstream into the ping-pong DMA buffers
 * and hand each filled buffer to the DSP, sleeping while both are busy.
 * Implicitly issues AUDIO_START on the first write.
 */
static ssize_t mp3_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mp3 *mp3 = file->private_data;
	struct audio_client *ac;
	struct audio_buffer *ab;
	const char __user *start = buf;
	int xfer;

	if (!mp3->ac)
		mp3_ioctl(file, AUDIO_START, 0);

	ac = mp3->ac;
	if (!ac)
		return -ENODEV;

	while (count > 0) {
		ab = ac->buf + ac->cpu_buf;

		/* wait until the DSP has drained this half-buffer */
		if (ab->used)
			wait_event(ac->wait, (ab->used == 0));

		xfer = count;
		if (xfer > ab->size)
			xfer = ab->size;

		if (copy_from_user(ab->data, buf, xfer))
			return -EFAULT;

		buf += xfer;
		count -= xfer;

		ab->used = xfer;
		q6audio_write(ac, ab);
		/* flip to the other half of the double buffer */
		ac->cpu_buf ^= 1;
	}

	return buf - start;
}

/* fsync(2): wait for the DSP to finish consuming queued audio */
static int mp3_fsync(struct file *f, int datasync)
{
	struct mp3 *mp3 = f->private_data;
	if (mp3->ac)
		return q6audio_async(mp3->ac);
	return -ENODEV;
}

/* release(2): close the DSP session and free the per-open state */
static int mp3_release(struct inode *inode, struct file *file)
{
	struct mp3 *mp3 = file->private_data;
	if (mp3->ac)
		q6audio_close(mp3->ac);
	kfree(mp3);
	return 0;
}

static const struct file_operations mp3_fops = {
	.owner		= THIS_MODULE,
	.open		= mp3_open,
	.write		= mp3_write,
	.fsync		= mp3_fsync,
	.release	= mp3_release,
	.unlocked_ioctl	= mp3_ioctl,
};

struct miscdevice mp3_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "msm_mp3",
	.fops	= &mp3_fops,
};

static int __init mp3_init(void)
{
	return misc_register(&mp3_misc);
}

device_initcall(mp3_init);
gpl-2.0
glfernando/linux-kernel-ipc
arch/alpha/kernel/core_mcpcia.c
1869
16238
/*
 *	linux/arch/alpha/kernel/core_mcpcia.c
 *
 * Based on code written by David A Rusling (david.rusling@reo.mts.dec.com).
 *
 * Code common to all MCbus-PCI Adaptor core logic chipsets
 */

#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_mcpcia.h>
#undef __EXTERN_INLINE

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/delay.h>

#include <asm/ptrace.h>

#include "proto.h"
#include "pci_impl.h"

/*
 * NOTE: Herein lie back-to-back mb instructions.  They are magic.
 * One plausible explanation is that the i/o controller does not properly
 * handle the system transaction.  Another involves timing.  Ho hum.
 */

/*
 * BIOS32-style PCI interface:
 */

#define DEBUG_CFG 0

#if DEBUG_CFG
# define DBG_CFG(args)	printk args
#else
# define DBG_CFG(args)
#endif

/*
 * Given a bus, device, and function number, compute resulting
 * configuration space address and setup the MCPCIA_HAXR2 register
 * accordingly.  It is therefore not safe to have concurrent
 * invocations to configuration space access routines, but there
 * really shouldn't be any need for this.
 *
 * Type 0 address layout:
 *	31:11	Device select bit.
 *	10:8	Function number
 *	 7:2	Register number
 *
 * Type 1 address layout:
 *	31:24	reserved
 *	23:16	bus number (8 bits = 128 possible buses)
 *	15:11	Device number (5 bits)
 *	10:8	function number
 *	 7:2	register number
 *
 * Notes:
 *	The function number selects which function of a multi-function device
 *	(e.g., SCSI and Ethernet).
 *
 *	The register selects a DWORD (32 bit) register offset.  Hence it
 *	doesn't get shifted by 2 bits as we want to "drop" the bottom two
 *	bits.
 */

/*
 * Read one 32-bit config-space dword at addr via hose.  A master abort
 * from an absent device arrives as a machine check; we arm
 * mcheck_expected so the handler swallows it, and return all-ones in
 * that case (standard "no device" value).
 */
static unsigned int
conf_read(unsigned long addr, unsigned char type1,
	  struct pci_controller *hose)
{
	unsigned long flags;
	unsigned long mid = MCPCIA_HOSE2MID(hose->index);
	unsigned int stat0, value, temp, cpu;

	cpu = smp_processor_id();

	local_irq_save(flags);

	DBG_CFG(("conf_read(addr=0x%lx, type1=%d, hose=%d)\n",
		 addr, type1, mid));

	/* Reset status register to avoid losing errors.  */
	stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
	*(vuip)MCPCIA_CAP_ERR(mid) = stat0;
	mb();
	temp = *(vuip)MCPCIA_CAP_ERR(mid);
	DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0));

	mb();
	draina();
	/* tell the mcheck handler a config-cycle fault may follow */
	mcheck_expected(cpu) = 1;
	mcheck_taken(cpu) = 0;
	mcheck_extra(cpu) = mid;
	mb();

	/* Access configuration space.  */
	value = *((vuip)addr);
	mb();
	mb();  /* magic */

	if (mcheck_taken(cpu)) {
		/* the device did not respond: report all-ones */
		mcheck_taken(cpu) = 0;
		value = 0xffffffffU;
		mb();
	}
	mcheck_expected(cpu) = 0;
	mb();

	DBG_CFG(("conf_read(): finished\n"));

	local_irq_restore(flags);
	return value;
}

/*
 * Write one 32-bit config-space dword; same machine-check dance as
 * conf_read(), but a failed write is silently dropped.
 */
static void
conf_write(unsigned long addr, unsigned int value, unsigned char type1,
	   struct pci_controller *hose)
{
	unsigned long flags;
	unsigned long mid = MCPCIA_HOSE2MID(hose->index);
	unsigned int stat0, temp, cpu;

	cpu = smp_processor_id();

	local_irq_save(flags);	/* avoid getting hit by machine check */

	/* Reset status register to avoid losing errors.  */
	stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
	*(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb();
	temp = *(vuip)MCPCIA_CAP_ERR(mid);
	DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0));

	draina();
	mcheck_expected(cpu) = 1;
	mcheck_extra(cpu) = mid;
	mb();

	/* Access configuration space.  */
	*((vuip)addr) = value;
	mb();
	mb();  /* magic */
	temp = *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */
	mcheck_expected(cpu) = 0;
	mb();

	DBG_CFG(("conf_write(): finished\n"));
	local_irq_restore(flags);
}

/*
 * Compute the sparse-space config address for (bus, devfn, where) on
 * hose.  All cycles are issued as type 1; a bus with no parent is the
 * hose's own (peer) bus and is renumbered to 0.  Returns 0 on success.
 */
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int devfn, int where,
	     struct pci_controller *hose, unsigned long *pci_addr,
	     unsigned char *type1)
{
	u8 bus = pbus->number;
	unsigned long addr;

	DBG_CFG(("mk_conf_addr(bus=%d,devfn=0x%x,hose=%d,where=0x%x,"
		 " pci_addr=0x%p, type1=0x%p)\n",
		 bus, devfn, hose->index, where, pci_addr, type1));

	/* Type 1 configuration cycle for *ALL* busses.  */
	*type1 = 1;

	if (!pbus->parent) /* No parent means peer PCI bus. */
		bus = 0;
	addr = (bus << 16) | (devfn << 8) | (where);
	addr <<= 5; /* swizzle for SPARSE */
	addr |= hose->config_space_base;

	*pci_addr = addr;
	DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
	return 0;
}

/* pci_ops.read: 1/2/4-byte config read via a sparse-space dword access */
static int
mcpcia_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		   int size, u32 *value)
{
	struct pci_controller *hose = bus->sysdata;
	unsigned long addr, w;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, hose, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* encode the access size into the sparse address */
	addr |= (size - 1) * 8;
	w = conf_read(addr, type1, hose);
	switch (size) {
	case 1:
		*value = __kernel_extbl(w, where & 3);
		break;
	case 2:
		*value = __kernel_extwl(w, where & 3);
		break;
	case 4:
		*value = w;
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}

/* pci_ops.write: 1/2/4-byte config write via a sparse-space dword access */
static int
mcpcia_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 value)
{
	struct pci_controller *hose = bus->sysdata;
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, hose, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr |= (size - 1) * 8;
	/* position the value at the correct byte lanes */
	value = __kernel_insql(value, where & 3);
	conf_write(addr, value, type1, hose);
	return PCIBIOS_SUCCESSFUL;
}

struct pci_ops mcpcia_pci_ops =
{
	.read =		mcpcia_read_config,
	.write =	mcpcia_write_config,
};

/* invalidate the hose's scatter-gather TLB (whole-TLB invalidate) */
void
mcpcia_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	wmb();
	*(vuip)MCPCIA_SG_TBIA(MCPCIA_HOSE2MID(hose->index)) = 0;
	mb();
}

/*
 * Probe for hose h by reading its revision register; an absent hose
 * machine-checks, which we arm for and absorb.  Returns nonzero when
 * the hose is present.
 */
static int __init
mcpcia_probe_hose(int h)
{
	int cpu = smp_processor_id();
	int mid = MCPCIA_HOSE2MID(h);
	unsigned int pci_rev;

	/* Gotta be REAL careful.  If hose is absent, we get an mcheck.  */

	mb();
	mb();
	draina();
	wrmces(7);

	mcheck_expected(cpu) = 2;	/* indicates probing */
	mcheck_taken(cpu) = 0;
	mcheck_extra(cpu) = mid;
	mb();

	/* Access the bus revision word.  */
	pci_rev = *(vuip)MCPCIA_REV(mid);

	mb();
	mb();  /* magic */
	if (mcheck_taken(cpu)) {
		mcheck_taken(cpu) = 0;
		pci_rev = 0xffffffff;
		mb();
	}
	mcheck_expected(cpu) = 0;
	mb();

	return (pci_rev >> 16) == PCI_CLASS_BRIDGE_HOST;
}

/*
 * Allocate and populate a pci_controller for hose h: sparse/dense
 * window bases plus the IO, MEM and HAE_MEM resources.
 */
static void __init
mcpcia_new_hose(int h)
{
	struct pci_controller *hose;
	struct resource *io, *mem, *hae_mem;
	int mid = MCPCIA_HOSE2MID(h);

	hose = alloc_pci_controller();
	if (h == 0)
		pci_isa_hose = hose;
	io = alloc_resource();
	mem = alloc_resource();
	hae_mem = alloc_resource();

	hose->io_space = io;
	hose->mem_space = hae_mem;
	hose->sparse_mem_base = MCPCIA_SPARSE(mid) - IDENT_ADDR;
	hose->dense_mem_base = MCPCIA_DENSE(mid) - IDENT_ADDR;
	hose->sparse_io_base = MCPCIA_IO(mid) - IDENT_ADDR;
	hose->dense_io_base = 0;
	hose->config_space_base = MCPCIA_CONF(mid);
	hose->index = h;

	io->start = MCPCIA_IO(mid) - MCPCIA_IO_BIAS;
	io->end = io->start + 0xffff;
	io->name = pci_io_names[h];
	io->flags = IORESOURCE_IO;

	mem->start = MCPCIA_DENSE(mid) - MCPCIA_MEM_BIAS;
	mem->end = mem->start + 0xffffffff;
	mem->name = pci_mem_names[h];
	mem->flags = IORESOURCE_MEM;

	/* HAE window is a sub-resource of mem */
	hae_mem->start = mem->start;
	hae_mem->end = mem->start + MCPCIA_MEM_MASK;
	hae_mem->name = pci_hae0_name;
	hae_mem->flags = IORESOURCE_MEM;

	if (request_resource(&ioport_resource, io) < 0)
		printk(KERN_ERR "Failed to request IO on hose %d\n", h);
	if (request_resource(&iomem_resource, mem) < 0)
		printk(KERN_ERR "Failed to request MEM on hose %d\n", h);
	if (request_resource(mem, hae_mem) < 0)
		printk(KERN_ERR "Failed to request HAE_MEM on hose %d\n", h);
}

/* acknowledge/clear all pending CAP error bits for mid */
static void
mcpcia_pci_clr_err(int mid)
{
	*(vuip)MCPCIA_CAP_ERR(mid);
	*(vuip)MCPCIA_CAP_ERR(mid) = 0xffffffff;   /* Clear them all.  */
	mb();
	*(vuip)MCPCIA_CAP_ERR(mid);  /* Re-read for force write.  */
}

/*
 * Bring up one hose: enable error reporting and program the three
 * PCI->memory DMA windows (two scatter-gather, one direct-mapped).
 */
static void __init
mcpcia_startup_hose(struct pci_controller *hose)
{
	int mid = MCPCIA_HOSE2MID(hose->index);
	unsigned int tmp;

	mcpcia_pci_clr_err(mid);

	/*
	 * Set up error reporting.
	 */
	tmp = *(vuip)MCPCIA_CAP_ERR(mid);
	tmp |= 0x0006;		/* master/target abort */
	*(vuip)MCPCIA_CAP_ERR(mid) = tmp;
	mb();
	tmp = *(vuip)MCPCIA_CAP_ERR(mid);

	/*
	 * Set up the PCI->physical memory translation windows.
	 *
	 * Window 0 is scatter-gather 8MB at 8MB (for isa)
	 * Window 1 is scatter-gather (up to) 1GB at 1GB (for pci)
	 * Window 2 is direct access 2GB at 2GB
	 */
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
	hose->sg_pci = iommu_arena_new(hose, 0x40000000,
				       size_for_memory(0x40000000), 0);

	__direct_map_base = 0x80000000;
	__direct_map_size = 0x80000000;

	*(vuip)MCPCIA_W0_BASE(mid) = hose->sg_isa->dma_base | 3;
	*(vuip)MCPCIA_W0_MASK(mid) = (hose->sg_isa->size - 1) & 0xfff00000;
	*(vuip)MCPCIA_T0_BASE(mid) = virt_to_phys(hose->sg_isa->ptes) >> 8;

	*(vuip)MCPCIA_W1_BASE(mid) = hose->sg_pci->dma_base | 3;
	*(vuip)MCPCIA_W1_MASK(mid) = (hose->sg_pci->size - 1) & 0xfff00000;
	*(vuip)MCPCIA_T1_BASE(mid) = virt_to_phys(hose->sg_pci->ptes) >> 8;

	*(vuip)MCPCIA_W2_BASE(mid) = __direct_map_base | 1;
	*(vuip)MCPCIA_W2_MASK(mid) = (__direct_map_size - 1) & 0xfff00000;
	*(vuip)MCPCIA_T2_BASE(mid) = 0;

	/* window 3 disabled */
	*(vuip)MCPCIA_W3_BASE(mid) = 0x0;

	mcpcia_pci_tbi(hose, 0, -1);

	*(vuip)MCPCIA_HBASE(mid) = 0x0;
	mb();

	*(vuip)MCPCIA_HAE_MEM(mid) = 0U;
	mb();
	*(vuip)MCPCIA_HAE_MEM(mid); /* read it back. */
	*(vuip)MCPCIA_HAE_IO(mid) = 0;
	mb();
	*(vuip)MCPCIA_HAE_IO(mid);  /* read it back. */
}

void __init
mcpcia_init_arch(void)
{
	/* With multiple PCI busses, we play with I/O as physical addrs.  */
	ioport_resource.end = ~0UL;

	/* Allocate hose 0.  That's the one that all the ISA junk hangs
	   off of, from which we'll be registering stuff here in a bit.
	   Other hose detection is done in mcpcia_init_hoses, which is
	   called from init_IRQ.  */

	mcpcia_new_hose(0);
}

/* This is called from init_IRQ, since we cannot take interrupts
   before then.  Which means we cannot do this in init_arch.  */

void __init
mcpcia_init_hoses(void)
{
	struct pci_controller *hose;
	int hose_count;
	int h;

	/* First, find how many hoses we have.  */
	hose_count = 0;
	for (h = 0; h < MCPCIA_MAX_HOSES; ++h) {
		if (mcpcia_probe_hose(h)) {
			if (h != 0)
				mcpcia_new_hose(h);
			hose_count++;
		}
	}

	printk("mcpcia_init_hoses: found %d hoses\n", hose_count);

	/* Now do init for each hose.  */
	for (hose = hose_head; hose; hose = hose->next)
		mcpcia_startup_hose(hose);
}

/* dump the EV5 processor-side machine-check logout frame */
static void
mcpcia_print_uncorrectable(struct el_MCPCIA_uncorrected_frame_mcheck *logout)
{
	struct el_common_EV5_uncorrectable_mcheck *frame;
	int i;

	frame = &logout->procdata;

	/* Print PAL fields */
	for (i = 0; i < 24; i += 2) {
		printk("  paltmp[%d-%d] = %16lx %16lx\n",
		       i, i+1, frame->paltemp[i], frame->paltemp[i+1]);
	}
	for (i = 0; i < 8; i += 2) {
		printk("  shadow[%d-%d] = %16lx %16lx\n",
		       i, i+1, frame->shadow[i], frame->shadow[i+1]);
	}
	printk("  Addr of excepting instruction  = %16lx\n",
	       frame->exc_addr);
	printk("  Summary of arithmetic traps    = %16lx\n",
	       frame->exc_sum);
	printk("  Exception mask                 = %16lx\n",
	       frame->exc_mask);
	printk("  Base address for PALcode       = %16lx\n",
	       frame->pal_base);
	printk("  Interrupt Status Reg           = %16lx\n",
	       frame->isr);
	printk("  CURRENT SETUP OF EV5 IBOX      = %16lx\n",
	       frame->icsr);
	printk("  I-CACHE Reg %s parity error   = %16lx\n",
	       (frame->ic_perr_stat & 0x800L) ?
	       "Data" : "Tag",
	       frame->ic_perr_stat);
	printk("  D-CACHE error Reg              = %16lx\n",
	       frame->dc_perr_stat);
	if (frame->dc_perr_stat & 0x2) {
		switch (frame->dc_perr_stat & 0x03c) {
		case 8:
			printk("    Data error in bank 1\n");
			break;
		case 4:
			printk("    Data error in bank 0\n");
			break;
		case 20:
			printk("    Tag error in bank 1\n");
			break;
		case 10:
			printk("    Tag error in bank 0\n");
			break;
		}
	}
	printk("  Effective VA                   = %16lx\n",
	       frame->va);
	printk("  Reason for D-stream            = %16lx\n",
	       frame->mm_stat);
	printk("  EV5 SCache address             = %16lx\n",
	       frame->sc_addr);
	printk("  EV5 SCache TAG/Data parity     = %16lx\n",
	       frame->sc_stat);
	printk("  EV5 BC_TAG_ADDR                = %16lx\n",
	       frame->bc_tag_addr);
	printk("  EV5 EI_ADDR: Phys addr of Xfer = %16lx\n",
	       frame->ei_addr);
	printk("  Fill Syndrome                  = %16lx\n",
	       frame->fill_syndrome);
	printk("  EI_STAT reg                    = %16lx\n",
	       frame->ei_stat);
	printk("  LD_LOCK                        = %16lx\n",
	       frame->ld_lock);
}

/* dump the per-hose IOD register subpackets from the system logout area */
static void
mcpcia_print_system_area(unsigned long la_ptr)
{
	struct el_common *frame;
	struct pci_controller *hose;

	/* firmware-defined layout of one IOD register snapshot */
	struct IOD_subpacket {
	  unsigned long base;
	  unsigned int whoami;
	  unsigned int rsvd1;
	  unsigned int pci_rev;
	  unsigned int cap_ctrl;
	  unsigned int hae_mem;
	  unsigned int hae_io;
	  unsigned int int_ctl;
	  unsigned int int_reg;
	  unsigned int int_mask0;
	  unsigned int int_mask1;
	  unsigned int mc_err0;
	  unsigned int mc_err1;
	  unsigned int cap_err;
	  unsigned int rsvd2;
	  unsigned int pci_err1;
	  unsigned int mdpa_stat;
	  unsigned int mdpa_syn;
	  unsigned int mdpb_stat;
	  unsigned int mdpb_syn;
	  unsigned int rsvd3;
	  unsigned int rsvd4;
	  unsigned int rsvd5;
	} *iodpp;

	frame = (struct el_common *)la_ptr;
	iodpp = (struct IOD_subpacket *) (la_ptr + frame->sys_offset);

	for (hose = hose_head; hose; hose = hose->next, iodpp++) {

	  printk("IOD %d Register Subpacket - Bridge Base Address %16lx\n",
		 hose->index, iodpp->base);
	  printk("  WHOAMI      = %8x\n", iodpp->whoami);
	  printk("  PCI_REV     = %8x\n", iodpp->pci_rev);
	  printk("  CAP_CTRL    = %8x\n", iodpp->cap_ctrl);
	  printk("  HAE_MEM     = %8x\n", iodpp->hae_mem);
	  printk("  HAE_IO      = %8x\n", iodpp->hae_io);
	  printk("  INT_CTL     = %8x\n", iodpp->int_ctl);
	  printk("  INT_REG     = %8x\n", iodpp->int_reg);
	  printk("  INT_MASK0   = %8x\n", iodpp->int_mask0);
	  printk("  INT_MASK1   = %8x\n", iodpp->int_mask1);
	  printk("  MC_ERR0     = %8x\n", iodpp->mc_err0);
	  printk("  MC_ERR1     = %8x\n", iodpp->mc_err1);
	  printk("  CAP_ERR     = %8x\n", iodpp->cap_err);
	  printk("  PCI_ERR1    = %8x\n", iodpp->pci_err1);
	  printk("  MDPA_STAT   = %8x\n", iodpp->mdpa_stat);
	  printk("  MDPA_SYN    = %8x\n", iodpp->mdpa_syn);
	  printk("  MDPB_STAT   = %8x\n", iodpp->mdpb_stat);
	  printk("  MDPB_SYN    = %8x\n", iodpp->mdpb_syn);
	}
}

/*
 * Machine-check handler.  "expected" distinguishes a config-cycle probe
 * (1), a hose probe (2), or a genuine unexpected error (0); only the
 * last gets the full logout dump.
 */
void
mcpcia_machine_check(unsigned long vector, unsigned long la_ptr)
{
	struct el_common *mchk_header;
	struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout;
	unsigned int cpu = smp_processor_id();
	int expected;

	mchk_header = (struct el_common *)la_ptr;
	mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr;
	expected = mcheck_expected(cpu);

	mb();
	mb();  /* magic */
	draina();

	switch (expected) {
	case 0:
	    {
		/* FIXME: how do we figure out which hose the
		   error was on?  */
		struct pci_controller *hose;
		for (hose = hose_head; hose; hose = hose->next)
			mcpcia_pci_clr_err(MCPCIA_HOSE2MID(hose->index));
		break;
	    }
	case 1:
		mcpcia_pci_clr_err(mcheck_extra(cpu));
		break;
	default:
		/* Otherwise, we're being called from mcpcia_probe_hose
		   and there's no hose clear an error from.  */
		break;
	}

	wrmces(0x7);
	mb();

	process_mcheck_info(vector, la_ptr, "MCPCIA", expected != 0);
	if (!expected && vector != 0x620 && vector != 0x630) {
		mcpcia_print_uncorrectable(mchk_logout);
		mcpcia_print_system_area(la_ptr);
	}
}
gpl-2.0
boa19861105/android_kernel_htc_b3uhl-JP
net/netfilter/nf_conntrack_labels.c
2125
2543
/* * test/set flag bits stored in conntrack extension area. * * (C) 2013 Astaro GmbH & Co KG * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/ctype.h> #include <linux/export.h> #include <linux/jhash.h> #include <linux/spinlock.h> #include <linux/types.h> #include <linux/slab.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <net/netfilter/nf_conntrack_labels.h> static unsigned int label_bits(const struct nf_conn_labels *l) { unsigned int longs = l->words; return longs * BITS_PER_LONG; } bool nf_connlabel_match(const struct nf_conn *ct, u16 bit) { struct nf_conn_labels *labels = nf_ct_labels_find(ct); if (!labels) return false; return bit < label_bits(labels) && test_bit(bit, labels->bits); } EXPORT_SYMBOL_GPL(nf_connlabel_match); int nf_connlabel_set(struct nf_conn *ct, u16 bit) { struct nf_conn_labels *labels = nf_ct_labels_find(ct); if (!labels || bit >= label_bits(labels)) return -ENOSPC; if (test_bit(bit, labels->bits)) return 0; if (!test_and_set_bit(bit, labels->bits)) nf_conntrack_event_cache(IPCT_LABEL, ct); return 0; } EXPORT_SYMBOL_GPL(nf_connlabel_set); #if IS_ENABLED(CONFIG_NF_CT_NETLINK) static void replace_u32(u32 *address, u32 mask, u32 new) { u32 old, tmp; do { old = *address; tmp = (old & mask) ^ new; } while (cmpxchg(address, old, tmp) != old); } int nf_connlabels_replace(struct nf_conn *ct, const u32 *data, const u32 *mask, unsigned int words32) { struct nf_conn_labels *labels; unsigned int size, i; u32 *dst; labels = nf_ct_labels_find(ct); if (!labels) return -ENOSPC; size = labels->words * sizeof(long); if (size < (words32 * sizeof(u32))) words32 = size / sizeof(u32); dst = (u32 *) labels->bits; if (words32) { for (i = 0; i < words32; i++) replace_u32(&dst[i], mask ? 
~mask[i] : 0, data[i]); } size /= sizeof(u32); for (i = words32; i < size; i++) /* pad */ replace_u32(&dst[i], 0, 0); nf_conntrack_event_cache(IPCT_LABEL, ct); return 0; } EXPORT_SYMBOL_GPL(nf_connlabels_replace); #endif static struct nf_ct_ext_type labels_extend __read_mostly = { .len = sizeof(struct nf_conn_labels), .align = __alignof__(struct nf_conn_labels), .id = NF_CT_EXT_LABELS, }; int nf_conntrack_labels_init(void) { return nf_ct_extend_register(&labels_extend); } void nf_conntrack_labels_fini(void) { nf_ct_extend_unregister(&labels_extend); }
gpl-2.0
ElysiumRom/android_kernel_samsung_msm8660-common
drivers/usb/musb/musb_gadget_ep0.c
2125
27667
/* * MUSB OTG peripheral driver ep0 handling * * Copyright 2005 Mentor Graphics Corporation * Copyright (C) 2005-2006 by Texas Instruments * Copyright (C) 2006-2007 Nokia Corporation * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #include <linux/kernel.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/device.h> #include <linux/interrupt.h> #include "musb_core.h" /* ep0 is always musb->endpoints[0].ep_in */ #define next_ep0_request(musb) next_in_request(&(musb)->endpoints[0]) /* * locking note: we use only the controller lock, for simpler correctness. * It's always held with IRQs blocked. * * It protects the ep0 request queue as well as ep0_state, not just the * controller and indexed registers. And that lock stays held unless it * needs to be dropped to allow reentering this driver ... like upcalls to * the gadget driver, or adjusting endpoint halt status. */ static char *decode_ep0stage(u8 stage) { switch (stage) { case MUSB_EP0_STAGE_IDLE: return "idle"; case MUSB_EP0_STAGE_SETUP: return "setup"; case MUSB_EP0_STAGE_TX: return "in"; case MUSB_EP0_STAGE_RX: return "out"; case MUSB_EP0_STAGE_ACKWAIT: return "wait"; case MUSB_EP0_STAGE_STATUSIN: return "in/status"; case MUSB_EP0_STAGE_STATUSOUT: return "out/status"; default: return "?"; } } /* handle a standard GET_STATUS request * Context: caller holds controller lock */ static int service_tx_status_request( struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) { void __iomem *mbase = musb->mregs; int handled = 1; u8 result[2], epnum = 0; const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK; result[1] = 0; switch (recip) { case USB_RECIP_DEVICE: result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED; result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP; #ifdef CONFIG_USB_MUSB_OTG if (musb->g.is_otg) { result[0] |= musb->g.b_hnp_enable << USB_DEVICE_B_HNP_ENABLE; result[0] |= musb->g.a_alt_hnp_support << USB_DEVICE_A_ALT_HNP_SUPPORT; result[0] |= musb->g.a_hnp_support << USB_DEVICE_A_HNP_SUPPORT; } #endif break; case USB_RECIP_INTERFACE: result[0] = 0; break; case USB_RECIP_ENDPOINT: { int is_in; struct musb_ep *ep; u16 tmp; void __iomem 
*regs; epnum = (u8) ctrlrequest->wIndex; if (!epnum) { result[0] = 0; break; } is_in = epnum & USB_DIR_IN; if (is_in) { epnum &= 0x0f; ep = &musb->endpoints[epnum].ep_in; } else { ep = &musb->endpoints[epnum].ep_out; } regs = musb->endpoints[epnum].regs; if (epnum >= MUSB_C_NUM_EPS || !ep->desc) { handled = -EINVAL; break; } musb_ep_select(mbase, epnum); if (is_in) tmp = musb_readw(regs, MUSB_TXCSR) & MUSB_TXCSR_P_SENDSTALL; else tmp = musb_readw(regs, MUSB_RXCSR) & MUSB_RXCSR_P_SENDSTALL; musb_ep_select(mbase, 0); result[0] = tmp ? 1 : 0; } break; default: /* class, vendor, etc ... delegate */ handled = 0; break; } /* fill up the fifo; caller updates csr0 */ if (handled > 0) { u16 len = le16_to_cpu(ctrlrequest->wLength); if (len > 2) len = 2; musb_write_fifo(&musb->endpoints[0], len, result); } return handled; } /* * handle a control-IN request, the end0 buffer contains the current request * that is supposed to be a standard control request. Assumes the fifo to * be at least 2 bytes long. 
* * @return 0 if the request was NOT HANDLED, * < 0 when error * > 0 when the request is processed * * Context: caller holds controller lock */ static int service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) { int handled = 0; /* not handled */ if ((ctrlrequest->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { switch (ctrlrequest->bRequest) { case USB_REQ_GET_STATUS: handled = service_tx_status_request(musb, ctrlrequest); break; /* case USB_REQ_SYNC_FRAME: */ default: break; } } return handled; } /* * Context: caller holds controller lock */ static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req) { musb_g_giveback(&musb->endpoints[0].ep_in, req, 0); } /* * Tries to start B-device HNP negotiation if enabled via sysfs */ static inline void musb_try_b_hnp_enable(struct musb *musb) { void __iomem *mbase = musb->mregs; u8 devctl; dev_dbg(musb->controller, "HNP: Setting HR\n"); devctl = musb_readb(mbase, MUSB_DEVCTL); musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR); } /* * Handle all control requests with no DATA stage, including standard * requests such as: * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized * always delegated to the gadget driver * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE * always handled here, except for class/vendor/... 
features * * Context: caller holds controller lock */ static int service_zero_data_request(struct musb *musb, struct usb_ctrlrequest *ctrlrequest) __releases(musb->lock) __acquires(musb->lock) { int handled = -EINVAL; void __iomem *mbase = musb->mregs; const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK; /* the gadget driver handles everything except what we MUST handle */ if ((ctrlrequest->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { switch (ctrlrequest->bRequest) { case USB_REQ_SET_ADDRESS: /* change it after the status stage */ musb->set_address = true; musb->address = (u8) (ctrlrequest->wValue & 0x7f); handled = 1; break; case USB_REQ_CLEAR_FEATURE: switch (recip) { case USB_RECIP_DEVICE: if (ctrlrequest->wValue != USB_DEVICE_REMOTE_WAKEUP) break; musb->may_wakeup = 0; handled = 1; break; case USB_RECIP_INTERFACE: break; case USB_RECIP_ENDPOINT:{ const u8 epnum = ctrlrequest->wIndex & 0x0f; struct musb_ep *musb_ep; struct musb_hw_ep *ep; struct musb_request *request; void __iomem *regs; int is_in; u16 csr; if (epnum == 0 || epnum >= MUSB_C_NUM_EPS || ctrlrequest->wValue != USB_ENDPOINT_HALT) break; ep = musb->endpoints + epnum; regs = ep->regs; is_in = ctrlrequest->wIndex & USB_DIR_IN; if (is_in) musb_ep = &ep->ep_in; else musb_ep = &ep->ep_out; if (!musb_ep->desc) break; handled = 1; /* Ignore request if endpoint is wedged */ if (musb_ep->wedged) break; musb_ep_select(mbase, epnum); if (is_in) { csr = musb_readw(regs, MUSB_TXCSR); csr |= MUSB_TXCSR_CLRDATATOG | MUSB_TXCSR_P_WZC_BITS; csr &= ~(MUSB_TXCSR_P_SENDSTALL | MUSB_TXCSR_P_SENTSTALL | MUSB_TXCSR_TXPKTRDY); musb_writew(regs, MUSB_TXCSR, csr); } else { csr = musb_readw(regs, MUSB_RXCSR); csr |= MUSB_RXCSR_CLRDATATOG | MUSB_RXCSR_P_WZC_BITS; csr &= ~(MUSB_RXCSR_P_SENDSTALL | MUSB_RXCSR_P_SENTSTALL); musb_writew(regs, MUSB_RXCSR, csr); } /* Maybe start the first request in the queue */ request = next_request(musb_ep); if (!musb_ep->busy && request) { dev_dbg(musb->controller, "restarting 
the request\n"); musb_ep_restart(musb, request); } /* select ep0 again */ musb_ep_select(mbase, 0); } break; default: /* class, vendor, etc ... delegate */ handled = 0; break; } break; case USB_REQ_SET_FEATURE: switch (recip) { case USB_RECIP_DEVICE: handled = 1; switch (ctrlrequest->wValue) { case USB_DEVICE_REMOTE_WAKEUP: musb->may_wakeup = 1; break; case USB_DEVICE_TEST_MODE: if (musb->g.speed != USB_SPEED_HIGH) goto stall; if (ctrlrequest->wIndex & 0xff) goto stall; switch (ctrlrequest->wIndex >> 8) { case 1: pr_debug("TEST_J\n"); /* TEST_J */ musb->test_mode_nr = MUSB_TEST_J; break; case 2: /* TEST_K */ pr_debug("TEST_K\n"); musb->test_mode_nr = MUSB_TEST_K; break; case 3: /* TEST_SE0_NAK */ pr_debug("TEST_SE0_NAK\n"); musb->test_mode_nr = MUSB_TEST_SE0_NAK; break; case 4: /* TEST_PACKET */ pr_debug("TEST_PACKET\n"); musb->test_mode_nr = MUSB_TEST_PACKET; break; case 0xc0: /* TEST_FORCE_HS */ pr_debug("TEST_FORCE_HS\n"); musb->test_mode_nr = MUSB_TEST_FORCE_HS; break; case 0xc1: /* TEST_FORCE_FS */ pr_debug("TEST_FORCE_FS\n"); musb->test_mode_nr = MUSB_TEST_FORCE_FS; break; case 0xc2: /* TEST_FIFO_ACCESS */ pr_debug("TEST_FIFO_ACCESS\n"); musb->test_mode_nr = MUSB_TEST_FIFO_ACCESS; break; case 0xc3: /* TEST_FORCE_HOST */ pr_debug("TEST_FORCE_HOST\n"); musb->test_mode_nr = MUSB_TEST_FORCE_HOST; break; default: goto stall; } /* enter test mode after irq */ if (handled > 0) musb->test_mode = true; break; #ifdef CONFIG_USB_MUSB_OTG case USB_DEVICE_B_HNP_ENABLE: if (!musb->g.is_otg) goto stall; musb->g.b_hnp_enable = 1; musb_try_b_hnp_enable(musb); break; case USB_DEVICE_A_HNP_SUPPORT: if (!musb->g.is_otg) goto stall; musb->g.a_hnp_support = 1; break; case USB_DEVICE_A_ALT_HNP_SUPPORT: if (!musb->g.is_otg) goto stall; musb->g.a_alt_hnp_support = 1; break; #endif case USB_DEVICE_DEBUG_MODE: handled = 0; break; stall: default: handled = -EINVAL; break; } break; case USB_RECIP_INTERFACE: break; case USB_RECIP_ENDPOINT:{ const u8 epnum = ctrlrequest->wIndex & 0x0f; 
struct musb_ep *musb_ep; struct musb_hw_ep *ep; void __iomem *regs; int is_in; u16 csr; if (epnum == 0 || epnum >= MUSB_C_NUM_EPS || ctrlrequest->wValue != USB_ENDPOINT_HALT) break; ep = musb->endpoints + epnum; regs = ep->regs; is_in = ctrlrequest->wIndex & USB_DIR_IN; if (is_in) musb_ep = &ep->ep_in; else musb_ep = &ep->ep_out; if (!musb_ep->desc) break; musb_ep_select(mbase, epnum); if (is_in) { csr = musb_readw(regs, MUSB_TXCSR); if (csr & MUSB_TXCSR_FIFONOTEMPTY) csr |= MUSB_TXCSR_FLUSHFIFO; csr |= MUSB_TXCSR_P_SENDSTALL | MUSB_TXCSR_CLRDATATOG | MUSB_TXCSR_P_WZC_BITS; musb_writew(regs, MUSB_TXCSR, csr); } else { csr = musb_readw(regs, MUSB_RXCSR); csr |= MUSB_RXCSR_P_SENDSTALL | MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG | MUSB_RXCSR_P_WZC_BITS; musb_writew(regs, MUSB_RXCSR, csr); } /* select ep0 again */ musb_ep_select(mbase, 0); handled = 1; } break; default: /* class, vendor, etc ... delegate */ handled = 0; break; } break; default: /* delegate SET_CONFIGURATION, etc */ handled = 0; } } else handled = 0; return handled; } /* we have an ep0out data packet * Context: caller holds controller lock */ static void ep0_rxstate(struct musb *musb) { void __iomem *regs = musb->control_ep->regs; struct musb_request *request; struct usb_request *req; u16 count, csr; request = next_ep0_request(musb); req = &request->request; /* read packet and ack; or stall because of gadget driver bug: * should have provided the rx buffer before setup() returned. 
*/ if (req) { void *buf = req->buf + req->actual; unsigned len = req->length - req->actual; /* read the buffer */ count = musb_readb(regs, MUSB_COUNT0); if (count > len) { req->status = -EOVERFLOW; count = len; } musb_read_fifo(&musb->endpoints[0], count, buf); req->actual += count; csr = MUSB_CSR0_P_SVDRXPKTRDY; if (count < 64 || req->actual == req->length) { musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; csr |= MUSB_CSR0_P_DATAEND; } else req = NULL; } else csr = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL; /* Completion handler may choose to stall, e.g. because the * message just received holds invalid data. */ if (req) { musb->ackpend = csr; musb_g_ep0_giveback(musb, req); if (!musb->ackpend) return; musb->ackpend = 0; } musb_ep_select(musb->mregs, 0); musb_writew(regs, MUSB_CSR0, csr); } /* * transmitting to the host (IN), this code might be called from IRQ * and from kernel thread. * * Context: caller holds controller lock */ static void ep0_txstate(struct musb *musb) { void __iomem *regs = musb->control_ep->regs; struct musb_request *req = next_ep0_request(musb); struct usb_request *request; u16 csr = MUSB_CSR0_TXPKTRDY; u8 *fifo_src; u8 fifo_count; if (!req) { /* WARN_ON(1); */ dev_dbg(musb->controller, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0)); return; } request = &req->request; /* load the data */ fifo_src = (u8 *) request->buf + request->actual; fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE, request->length - request->actual); musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src); request->actual += fifo_count; /* update the flags */ if (fifo_count < MUSB_MAX_END0_PACKET || (request->actual == request->length && !request->zero)) { musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT; csr |= MUSB_CSR0_P_DATAEND; } else request = NULL; /* report completions as soon as the fifo's loaded; there's no * win in waiting till this last packet gets acked. 
(other than * very precise fault reporting, needed by USB TMC; possible with * this hardware, but not usable from portable gadget drivers.) */ if (request) { musb->ackpend = csr; musb_g_ep0_giveback(musb, request); if (!musb->ackpend) return; musb->ackpend = 0; } /* send it out, triggering a "txpktrdy cleared" irq */ musb_ep_select(musb->mregs, 0); musb_writew(regs, MUSB_CSR0, csr); } /* * Read a SETUP packet (struct usb_ctrlrequest) from the hardware. * Fields are left in USB byte-order. * * Context: caller holds controller lock. */ static void musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req) { struct musb_request *r; void __iomem *regs = musb->control_ep->regs; musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req); /* NOTE: earlier 2.6 versions changed setup packets to host * order, but now USB packets always stay in USB byte order. */ dev_dbg(musb->controller, "SETUP req%02x.%02x v%04x i%04x l%d\n", req->bRequestType, req->bRequest, le16_to_cpu(req->wValue), le16_to_cpu(req->wIndex), le16_to_cpu(req->wLength)); /* clean up any leftover transfers */ r = next_ep0_request(musb); if (r) musb_g_ep0_giveback(musb, &r->request); /* For zero-data requests we want to delay the STATUS stage to * avoid SETUPEND errors. If we read data (OUT), delay accepting * packets until there's a buffer to store them in. * * If we write data, the controller acts happier if we enable * the TX FIFO right away, and give the controller a moment * to switch modes... 
*/ musb->set_address = false; musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY; if (req->wLength == 0) { if (req->bRequestType & USB_DIR_IN) musb->ackpend |= MUSB_CSR0_TXPKTRDY; musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT; } else if (req->bRequestType & USB_DIR_IN) { musb->ep0_state = MUSB_EP0_STAGE_TX; musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY); while ((musb_readw(regs, MUSB_CSR0) & MUSB_CSR0_RXPKTRDY) != 0) cpu_relax(); musb->ackpend = 0; } else musb->ep0_state = MUSB_EP0_STAGE_RX; } static int forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) __releases(musb->lock) __acquires(musb->lock) { int retval; if (!musb->gadget_driver) return -EOPNOTSUPP; spin_unlock(&musb->lock); retval = musb->gadget_driver->setup(&musb->g, ctrlrequest); spin_lock(&musb->lock); return retval; } /* * Handle peripheral ep0 interrupt * * Context: irq handler; we won't re-enter the driver that way. */ irqreturn_t musb_g_ep0_irq(struct musb *musb) { u16 csr; u16 len; void __iomem *mbase = musb->mregs; void __iomem *regs = musb->endpoints[0].regs; irqreturn_t retval = IRQ_NONE; musb_ep_select(mbase, 0); /* select ep0 */ csr = musb_readw(regs, MUSB_CSR0); len = musb_readb(regs, MUSB_COUNT0); dev_dbg(musb->controller, "csr %04x, count %d, myaddr %d, ep0stage %s\n", csr, len, musb_readb(mbase, MUSB_FADDR), decode_ep0stage(musb->ep0_state)); /* I sent a stall.. need to acknowledge it now.. 
*/ if (csr & MUSB_CSR0_P_SENTSTALL) { musb_writew(regs, MUSB_CSR0, csr & ~MUSB_CSR0_P_SENTSTALL); retval = IRQ_HANDLED; musb->ep0_state = MUSB_EP0_STAGE_IDLE; csr = musb_readw(regs, MUSB_CSR0); } /* request ended "early" */ if (csr & MUSB_CSR0_P_SETUPEND) { musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND); retval = IRQ_HANDLED; /* Transition into the early status phase */ switch (musb->ep0_state) { case MUSB_EP0_STAGE_TX: musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT; break; case MUSB_EP0_STAGE_RX: musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; break; default: ERR("SetupEnd came in a wrong ep0stage %s\n", decode_ep0stage(musb->ep0_state)); } csr = musb_readw(regs, MUSB_CSR0); /* NOTE: request may need completion */ } /* docs from Mentor only describe tx, rx, and idle/setup states. * we need to handle nuances around status stages, and also the * case where status and setup stages come back-to-back ... */ switch (musb->ep0_state) { case MUSB_EP0_STAGE_TX: /* irq on clearing txpktrdy */ if ((csr & MUSB_CSR0_TXPKTRDY) == 0) { ep0_txstate(musb); retval = IRQ_HANDLED; } break; case MUSB_EP0_STAGE_RX: /* irq on set rxpktrdy */ if (csr & MUSB_CSR0_RXPKTRDY) { ep0_rxstate(musb); retval = IRQ_HANDLED; } break; case MUSB_EP0_STAGE_STATUSIN: /* end of sequence #2 (OUT/RX state) or #3 (no data) */ /* update address (if needed) only @ the end of the * status phase per usb spec, which also guarantees * we get 10 msec to receive this irq... until this * is done we won't see the next packet. 
*/ if (musb->set_address) { musb->set_address = false; musb_writeb(mbase, MUSB_FADDR, musb->address); } /* enter test mode if needed (exit by reset) */ else if (musb->test_mode) { dev_dbg(musb->controller, "entering TESTMODE\n"); if (MUSB_TEST_PACKET == musb->test_mode_nr) musb_load_testpacket(musb); musb_writeb(mbase, MUSB_TESTMODE, musb->test_mode_nr); } /* FALLTHROUGH */ case MUSB_EP0_STAGE_STATUSOUT: /* end of sequence #1: write to host (TX state) */ { struct musb_request *req; req = next_ep0_request(musb); if (req) musb_g_ep0_giveback(musb, &req->request); } /* * In case when several interrupts can get coalesced, * check to see if we've already received a SETUP packet... */ if (csr & MUSB_CSR0_RXPKTRDY) goto setup; retval = IRQ_HANDLED; musb->ep0_state = MUSB_EP0_STAGE_IDLE; break; case MUSB_EP0_STAGE_IDLE: /* * This state is typically (but not always) indiscernible * from the status states since the corresponding interrupts * tend to happen within too little period of time (with only * a zero-length packet in between) and so get coalesced... */ retval = IRQ_HANDLED; musb->ep0_state = MUSB_EP0_STAGE_SETUP; /* FALLTHROUGH */ case MUSB_EP0_STAGE_SETUP: setup: if (csr & MUSB_CSR0_RXPKTRDY) { struct usb_ctrlrequest setup; int handled = 0; if (len != 8) { ERR("SETUP packet len %d != 8 ?\n", len); break; } musb_read_setup(musb, &setup); retval = IRQ_HANDLED; /* sometimes the RESET won't be reported */ if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) { u8 power; printk(KERN_NOTICE "%s: peripheral reset " "irq lost!\n", musb_driver_name); power = musb_readb(mbase, MUSB_POWER); musb->g.speed = (power & MUSB_POWER_HSMODE) ? 
USB_SPEED_HIGH : USB_SPEED_FULL; } switch (musb->ep0_state) { /* sequence #3 (no data stage), includes requests * we can't forward (notably SET_ADDRESS and the * device/endpoint feature set/clear operations) * plus SET_CONFIGURATION and others we must */ case MUSB_EP0_STAGE_ACKWAIT: handled = service_zero_data_request( musb, &setup); /* * We're expecting no data in any case, so * always set the DATAEND bit -- doing this * here helps avoid SetupEnd interrupt coming * in the idle stage when we're stalling... */ musb->ackpend |= MUSB_CSR0_P_DATAEND; /* status stage might be immediate */ if (handled > 0) musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; break; /* sequence #1 (IN to host), includes GET_STATUS * requests that we can't forward, GET_DESCRIPTOR * and others that we must */ case MUSB_EP0_STAGE_TX: handled = service_in_request(musb, &setup); if (handled > 0) { musb->ackpend = MUSB_CSR0_TXPKTRDY | MUSB_CSR0_P_DATAEND; musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT; } break; /* sequence #2 (OUT from host), always forward */ default: /* MUSB_EP0_STAGE_RX */ break; } dev_dbg(musb->controller, "handled %d, csr %04x, ep0stage %s\n", handled, csr, decode_ep0stage(musb->ep0_state)); /* unless we need to delegate this to the gadget * driver, we know how to wrap this up: csr0 has * not yet been written. */ if (handled < 0) goto stall; else if (handled > 0) goto finish; handled = forward_to_driver(musb, &setup); if (handled < 0) { musb_ep_select(mbase, 0); stall: dev_dbg(musb->controller, "stall (%d)\n", handled); musb->ackpend |= MUSB_CSR0_P_SENDSTALL; musb->ep0_state = MUSB_EP0_STAGE_IDLE; finish: musb_writew(regs, MUSB_CSR0, musb->ackpend); musb->ackpend = 0; } } break; case MUSB_EP0_STAGE_ACKWAIT: /* This should not happen. But happens with tusb6010 with * g_file_storage and high speed. Do nothing. 
*/ retval = IRQ_HANDLED; break; default: /* "can't happen" */ WARN_ON(1); musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL); musb->ep0_state = MUSB_EP0_STAGE_IDLE; break; } return retval; } static int musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc) { /* always enabled */ return -EINVAL; } static int musb_g_ep0_disable(struct usb_ep *e) { /* always enabled */ return -EINVAL; } static int musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags) { struct musb_ep *ep; struct musb_request *req; struct musb *musb; int status; unsigned long lockflags; void __iomem *regs; if (!e || !r) return -EINVAL; ep = to_musb_ep(e); musb = ep->musb; regs = musb->control_ep->regs; req = to_musb_request(r); req->musb = musb; req->request.actual = 0; req->request.status = -EINPROGRESS; req->tx = ep->is_in; spin_lock_irqsave(&musb->lock, lockflags); if (!list_empty(&ep->req_list)) { status = -EBUSY; goto cleanup; } switch (musb->ep0_state) { case MUSB_EP0_STAGE_RX: /* control-OUT data */ case MUSB_EP0_STAGE_TX: /* control-IN data */ case MUSB_EP0_STAGE_ACKWAIT: /* zero-length data */ status = 0; break; default: dev_dbg(musb->controller, "ep0 request queued in state %d\n", musb->ep0_state); status = -EINVAL; goto cleanup; } /* add request to the list */ list_add_tail(&req->list, &ep->req_list); dev_dbg(musb->controller, "queue to %s (%s), length=%d\n", ep->name, ep->is_in ? "IN/TX" : "OUT/RX", req->request.length); musb_ep_select(musb->mregs, 0); /* sequence #1, IN ... start writing the data */ if (musb->ep0_state == MUSB_EP0_STAGE_TX) ep0_txstate(musb); /* sequence #3, no-data ... 
issue IN status */ else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) { if (req->request.length) status = -EINVAL; else { musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; musb_writew(regs, MUSB_CSR0, musb->ackpend | MUSB_CSR0_P_DATAEND); musb->ackpend = 0; musb_g_ep0_giveback(ep->musb, r); } /* else for sequence #2 (OUT), caller provides a buffer * before the next packet arrives. deferred responses * (after SETUP is acked) are racey. */ } else if (musb->ackpend) { musb_writew(regs, MUSB_CSR0, musb->ackpend); musb->ackpend = 0; } cleanup: spin_unlock_irqrestore(&musb->lock, lockflags); return status; } static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req) { /* we just won't support this */ return -EINVAL; } static int musb_g_ep0_halt(struct usb_ep *e, int value) { struct musb_ep *ep; struct musb *musb; void __iomem *base, *regs; unsigned long flags; int status; u16 csr; if (!e || !value) return -EINVAL; ep = to_musb_ep(e); musb = ep->musb; base = musb->mregs; regs = musb->control_ep->regs; status = 0; spin_lock_irqsave(&musb->lock, flags); if (!list_empty(&ep->req_list)) { status = -EBUSY; goto cleanup; } musb_ep_select(base, 0); csr = musb->ackpend; switch (musb->ep0_state) { /* Stalls are usually issued after parsing SETUP packet, either * directly in irq context from setup() or else later. */ case MUSB_EP0_STAGE_TX: /* control-IN data */ case MUSB_EP0_STAGE_ACKWAIT: /* STALL for zero-length data */ case MUSB_EP0_STAGE_RX: /* control-OUT data */ csr = musb_readw(regs, MUSB_CSR0); /* FALLTHROUGH */ /* It's also OK to issue stalls during callbacks when a non-empty * DATA stage buffer has been read (or even written). 
*/ case MUSB_EP0_STAGE_STATUSIN: /* control-OUT status */ case MUSB_EP0_STAGE_STATUSOUT: /* control-IN status */ csr |= MUSB_CSR0_P_SENDSTALL; musb_writew(regs, MUSB_CSR0, csr); musb->ep0_state = MUSB_EP0_STAGE_IDLE; musb->ackpend = 0; break; default: dev_dbg(musb->controller, "ep0 can't halt in state %d\n", musb->ep0_state); status = -EINVAL; } cleanup: spin_unlock_irqrestore(&musb->lock, flags); return status; } const struct usb_ep_ops musb_g_ep0_ops = { .enable = musb_g_ep0_enable, .disable = musb_g_ep0_disable, .alloc_request = musb_alloc_request, .free_request = musb_free_request, .queue = musb_g_ep0_queue, .dequeue = musb_g_ep0_dequeue, .set_halt = musb_g_ep0_halt, };
gpl-2.0
cxgbit/cxgbit
drivers/media/usb/gspca/sq905c.c
2125
9825
/* * SQ905C subdriver * * Copyright (C) 2009 Theodore Kilgore * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * * This driver uses work done in * libgphoto2/camlibs/digigr8, Copyright (C) Theodore Kilgore. * * This driver has also used as a base the sq905c driver * and may contain code fragments from it. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "sq905c" #include <linux/workqueue.h> #include <linux/slab.h> #include "gspca.h" MODULE_AUTHOR("Theodore Kilgore <kilgota@auburn.edu>"); MODULE_DESCRIPTION("GSPCA/SQ905C USB Camera Driver"); MODULE_LICENSE("GPL"); /* Default timeouts, in ms */ #define SQ905C_CMD_TIMEOUT 500 #define SQ905C_DATA_TIMEOUT 1000 /* Maximum transfer size to use. */ #define SQ905C_MAX_TRANSFER 0x8000 #define FRAME_HEADER_LEN 0x50 /* Commands. These go in the "value" slot. */ #define SQ905C_CLEAR 0xa0 /* clear everything */ #define SQ905C_GET_ID 0x14f4 /* Read version number */ #define SQ905C_CAPTURE_LOW 0xa040 /* Starts capture at 160x120 */ #define SQ905C_CAPTURE_MED 0x1440 /* Starts capture at 320x240 */ #define SQ905C_CAPTURE_HI 0x2840 /* Starts capture at 320x240 */ /* For capture, this must go in the "index" slot. */ #define SQ905C_CAPTURE_INDEX 0x110f /* Structure to hold all of our device specific stuff */ struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ const struct v4l2_pix_format *cap_mode; /* Driver stuff */ struct work_struct work_struct; struct workqueue_struct *work_thread; }; /* * Most of these cameras will do 640x480 and 320x240. 160x120 works * in theory but gives very poor output. Therefore, not supported. * The 0x2770:0x9050 cameras have max resolution of 320x240. */ static struct v4l2_pix_format sq905c_mode[] = { { 320, 240, V4L2_PIX_FMT_SQ905C, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, { 640, 480, V4L2_PIX_FMT_SQ905C, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0} }; /* Send a command to the camera. */ static int sq905c_command(struct gspca_dev *gspca_dev, u16 command, u16 index) { int ret; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, command, index, NULL, 0, SQ905C_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } return 0; } static int sq905c_read(struct gspca_dev *gspca_dev, u16 command, u16 index, int size) { int ret; ret = usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, command, index, gspca_dev->usb_buf, size, SQ905C_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } return 0; } /* * This function is called as a workqueue function and runs whenever the camera * is streaming data. Because it is a workqueue function it is allowed to sleep * so we can use synchronous USB calls. To avoid possible collisions with other * threads attempting to use gspca_dev->usb_buf we take the usb_lock when * performing USB operations using it. In practice we don't really need this * as the camera doesn't provide any controls. 
*/ static void sq905c_dostream(struct work_struct *work) { struct sd *dev = container_of(work, struct sd, work_struct); struct gspca_dev *gspca_dev = &dev->gspca_dev; int bytes_left; /* bytes remaining in current frame. */ int data_len; /* size to use for the next read. */ int act_len; int packet_type; int ret; u8 *buffer; buffer = kmalloc(SQ905C_MAX_TRANSFER, GFP_KERNEL | GFP_DMA); if (!buffer) { pr_err("Couldn't allocate USB buffer\n"); goto quit_stream; } while (gspca_dev->present && gspca_dev->streaming) { #ifdef CONFIG_PM if (gspca_dev->frozen) break; #endif /* Request the header, which tells the size to download */ ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x81), buffer, FRAME_HEADER_LEN, &act_len, SQ905C_DATA_TIMEOUT); PDEBUG(D_STREAM, "Got %d bytes out of %d for header", act_len, FRAME_HEADER_LEN); if (ret < 0 || act_len < FRAME_HEADER_LEN) goto quit_stream; /* size is read from 4 bytes starting 0x40, little endian */ bytes_left = buffer[0x40]|(buffer[0x41]<<8)|(buffer[0x42]<<16) |(buffer[0x43]<<24); PDEBUG(D_STREAM, "bytes_left = 0x%x", bytes_left); /* We keep the header. It has other information, too. */ packet_type = FIRST_PACKET; gspca_frame_add(gspca_dev, packet_type, buffer, FRAME_HEADER_LEN); while (bytes_left > 0 && gspca_dev->present) { data_len = bytes_left > SQ905C_MAX_TRANSFER ? 
SQ905C_MAX_TRANSFER : bytes_left; ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x81), buffer, data_len, &act_len, SQ905C_DATA_TIMEOUT); if (ret < 0 || act_len < data_len) goto quit_stream; PDEBUG(D_STREAM, "Got %d bytes out of %d for frame", data_len, bytes_left); bytes_left -= data_len; if (bytes_left == 0) packet_type = LAST_PACKET; else packet_type = INTER_PACKET; gspca_frame_add(gspca_dev, packet_type, buffer, data_len); } } quit_stream: if (gspca_dev->present) { mutex_lock(&gspca_dev->usb_lock); sq905c_command(gspca_dev, SQ905C_CLEAR, 0); mutex_unlock(&gspca_dev->usb_lock); } kfree(buffer); } /* This function is called at probe time just before sd_init */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct cam *cam = &gspca_dev->cam; struct sd *dev = (struct sd *) gspca_dev; int ret; PDEBUG(D_PROBE, "SQ9050 camera detected" " (vid/pid 0x%04X:0x%04X)", id->idVendor, id->idProduct); ret = sq905c_command(gspca_dev, SQ905C_GET_ID, 0); if (ret < 0) { PERR("Get version command failed"); return ret; } ret = sq905c_read(gspca_dev, 0xf5, 0, 20); if (ret < 0) { PERR("Reading version command failed"); return ret; } /* Note we leave out the usb id and the manufacturing date */ PDEBUG(D_PROBE, "SQ9050 ID string: %02x - %*ph", gspca_dev->usb_buf[3], 6, gspca_dev->usb_buf + 14); cam->cam_mode = sq905c_mode; cam->nmodes = 2; if (gspca_dev->usb_buf[15] == 0) cam->nmodes = 1; /* We don't use the buffer gspca allocates so make it small. 
*/ cam->bulk_size = 32; cam->bulk = 1; INIT_WORK(&dev->work_struct, sq905c_dostream); return 0; } /* called on streamoff with alt==0 and on disconnect */ /* the usb_lock is held at entry - restore on exit */ static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; /* wait for the work queue to terminate */ mutex_unlock(&gspca_dev->usb_lock); /* This waits for sq905c_dostream to finish */ destroy_workqueue(dev->work_thread); dev->work_thread = NULL; mutex_lock(&gspca_dev->usb_lock); } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { int ret; /* connect to the camera and reset it. */ ret = sq905c_command(gspca_dev, SQ905C_CLEAR, 0); return ret; } /* Set up for getting frames. */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; int ret; dev->cap_mode = gspca_dev->cam.cam_mode; /* "Open the shutter" and set size, to start capture */ switch (gspca_dev->pixfmt.width) { case 640: PDEBUG(D_STREAM, "Start streaming at high resolution"); dev->cap_mode++; ret = sq905c_command(gspca_dev, SQ905C_CAPTURE_HI, SQ905C_CAPTURE_INDEX); break; default: /* 320 */ PDEBUG(D_STREAM, "Start streaming at medium resolution"); ret = sq905c_command(gspca_dev, SQ905C_CAPTURE_MED, SQ905C_CAPTURE_INDEX); } if (ret < 0) { PERR("Start streaming command failed"); return ret; } /* Start the workqueue function to do the streaming */ dev->work_thread = create_singlethread_workqueue(MODULE_NAME); queue_work(dev->work_thread, &dev->work_struct); return 0; } /* Table of supported USB devices */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x2770, 0x905c)}, {USB_DEVICE(0x2770, 0x9050)}, {USB_DEVICE(0x2770, 0x9051)}, {USB_DEVICE(0x2770, 0x9052)}, {USB_DEVICE(0x2770, 0x913d)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .start 
= sd_start, .stop0 = sd_stop0, }; /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
gpl-2.0
flaming-toast/linux-jeyu
fs/reiserfs/hashes.c
2637
3609
/* * Keyed 32-bit hash function using TEA in a Davis-Meyer function * H0 = Key * Hi = E Mi(Hi-1) + Hi-1 * * (see Applied Cryptography, 2nd edition, p448). * * Jeremy Fitzhardinge <jeremy@zip.com.au> 1998 * * Jeremy has agreed to the contents of reiserfs/README. -Hans * Yura's function is added (04/07/2000) */ #include <linux/kernel.h> #include "reiserfs.h" #include <asm/types.h> #define DELTA 0x9E3779B9 #define FULLROUNDS 10 /* 32 is overkill, 16 is strong crypto */ #define PARTROUNDS 6 /* 6 gets complete mixing */ /* a, b, c, d - data; h0, h1 - accumulated hash */ #define TEACORE(rounds) \ do { \ u32 sum = 0; \ int n = rounds; \ u32 b0, b1; \ \ b0 = h0; \ b1 = h1; \ \ do \ { \ sum += DELTA; \ b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b); \ b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); \ } while(--n); \ \ h0 += b0; \ h1 += b1; \ } while(0) u32 keyed_hash(const signed char *msg, int len) { u32 k[] = { 0x9464a485, 0x542e1a94, 0x3e846bff, 0xb75bcfc3 }; u32 h0 = k[0], h1 = k[1]; u32 a, b, c, d; u32 pad; int i; /* assert(len >= 0 && len < 256); */ pad = (u32) len | ((u32) len << 8); pad |= pad << 16; while (len >= 16) { a = (u32) msg[0] | (u32) msg[1] << 8 | (u32) msg[2] << 16 | (u32) msg[3] << 24; b = (u32) msg[4] | (u32) msg[5] << 8 | (u32) msg[6] << 16 | (u32) msg[7] << 24; c = (u32) msg[8] | (u32) msg[9] << 8 | (u32) msg[10] << 16 | (u32) msg[11] << 24; d = (u32) msg[12] | (u32) msg[13] << 8 | (u32) msg[14] << 16 | (u32) msg[15] << 24; TEACORE(PARTROUNDS); len -= 16; msg += 16; } if (len >= 12) { a = (u32) msg[0] | (u32) msg[1] << 8 | (u32) msg[2] << 16 | (u32) msg[3] << 24; b = (u32) msg[4] | (u32) msg[5] << 8 | (u32) msg[6] << 16 | (u32) msg[7] << 24; c = (u32) msg[8] | (u32) msg[9] << 8 | (u32) msg[10] << 16 | (u32) msg[11] << 24; d = pad; for (i = 12; i < len; i++) { d <<= 8; d |= msg[i]; } } else if (len >= 8) { a = (u32) msg[0] | (u32) msg[1] << 8 | (u32) msg[2] << 16 | (u32) msg[3] << 24; b = (u32) msg[4] | (u32) msg[5] << 8 | (u32) msg[6] << 16 | (u32) 
msg[7] << 24; c = d = pad; for (i = 8; i < len; i++) { c <<= 8; c |= msg[i]; } } else if (len >= 4) { a = (u32) msg[0] | (u32) msg[1] << 8 | (u32) msg[2] << 16 | (u32) msg[3] << 24; b = c = d = pad; for (i = 4; i < len; i++) { b <<= 8; b |= msg[i]; } } else { a = b = c = d = pad; for (i = 0; i < len; i++) { a <<= 8; a |= msg[i]; } } TEACORE(FULLROUNDS); /* return 0;*/ return h0 ^ h1; } /* * What follows in this file is copyright 2000 by Hans Reiser, and the * licensing of what follows is governed by reiserfs/README */ u32 yura_hash(const signed char *msg, int len) { int j, pow; u32 a, c; int i; for (pow = 1, i = 1; i < len; i++) pow = pow * 10; if (len == 1) a = msg[0] - 48; else a = (msg[0] - 48) * pow; for (i = 1; i < len; i++) { c = msg[i] - 48; for (pow = 1, j = i; j < len - 1; j++) pow = pow * 10; a = a + c * pow; } for (; i < 40; i++) { c = '0' - 48; for (pow = 1, j = i; j < len - 1; j++) pow = pow * 10; a = a + c * pow; } for (; i < 256; i++) { c = i; for (pow = 1, j = i; j < len - 1; j++) pow = pow * 10; a = a + c * pow; } a = a << 7; return a; } u32 r5_hash(const signed char *msg, int len) { u32 a = 0; while (*msg) { a += *msg << 4; a += *msg >> 4; a *= 11; msg++; } return a; }
gpl-2.0
souljaboy11792/linux
drivers/usb/otg/nop-usb-xceiv.c
3149
4065
/* * drivers/usb/otg/nop-usb-xceiv.c * * NOP USB transceiver for all USB transceiver which are either built-in * into USB IP or which are mostly autonomous. * * Copyright (C) 2009 Texas Instruments Inc * Author: Ajay Kumar Gupta <ajay.gupta@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Current status: * This provides a "nop" transceiver for PHYs which are * autonomous such as isp1504, isp1707, etc. 
*/ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/usb/otg.h> #include <linux/slab.h> struct nop_usb_xceiv { struct otg_transceiver otg; struct device *dev; }; static struct platform_device *pd; void usb_nop_xceiv_register(void) { if (pd) return; pd = platform_device_register_simple("nop_usb_xceiv", -1, NULL, 0); if (!pd) { printk(KERN_ERR "Unable to register usb nop transceiver\n"); return; } } EXPORT_SYMBOL(usb_nop_xceiv_register); void usb_nop_xceiv_unregister(void) { platform_device_unregister(pd); pd = NULL; } EXPORT_SYMBOL(usb_nop_xceiv_unregister); static inline struct nop_usb_xceiv *xceiv_to_nop(struct otg_transceiver *x) { return container_of(x, struct nop_usb_xceiv, otg); } static int nop_set_suspend(struct otg_transceiver *x, int suspend) { return 0; } static int nop_set_peripheral(struct otg_transceiver *x, struct usb_gadget *gadget) { struct nop_usb_xceiv *nop; if (!x) return -ENODEV; nop = xceiv_to_nop(x); if (!gadget) { nop->otg.gadget = NULL; return -ENODEV; } nop->otg.gadget = gadget; nop->otg.state = OTG_STATE_B_IDLE; return 0; } static int nop_set_host(struct otg_transceiver *x, struct usb_bus *host) { struct nop_usb_xceiv *nop; if (!x) return -ENODEV; nop = xceiv_to_nop(x); if (!host) { nop->otg.host = NULL; return -ENODEV; } nop->otg.host = host; return 0; } static int __devinit nop_usb_xceiv_probe(struct platform_device *pdev) { struct nop_usb_xceiv *nop; int err; nop = kzalloc(sizeof *nop, GFP_KERNEL); if (!nop) return -ENOMEM; nop->dev = &pdev->dev; nop->otg.dev = nop->dev; nop->otg.label = "nop-xceiv"; nop->otg.state = OTG_STATE_UNDEFINED; nop->otg.set_host = nop_set_host; nop->otg.set_peripheral = nop_set_peripheral; nop->otg.set_suspend = nop_set_suspend; err = otg_set_transceiver(&nop->otg); if (err) { dev_err(&pdev->dev, "can't register transceiver, err: %d\n", err); goto exit; } platform_set_drvdata(pdev, nop); ATOMIC_INIT_NOTIFIER_HEAD(&nop->otg.notifier); return 0; exit: 
kfree(nop); return err; } static int __devexit nop_usb_xceiv_remove(struct platform_device *pdev) { struct nop_usb_xceiv *nop = platform_get_drvdata(pdev); otg_set_transceiver(NULL); platform_set_drvdata(pdev, NULL); kfree(nop); return 0; } static struct platform_driver nop_usb_xceiv_driver = { .probe = nop_usb_xceiv_probe, .remove = __devexit_p(nop_usb_xceiv_remove), .driver = { .name = "nop_usb_xceiv", .owner = THIS_MODULE, }, }; static int __init nop_usb_xceiv_init(void) { return platform_driver_register(&nop_usb_xceiv_driver); } subsys_initcall(nop_usb_xceiv_init); static void __exit nop_usb_xceiv_exit(void) { platform_driver_unregister(&nop_usb_xceiv_driver); } module_exit(nop_usb_xceiv_exit); MODULE_ALIAS("platform:nop_usb_xceiv"); MODULE_AUTHOR("Texas Instruments Inc"); MODULE_DESCRIPTION("NOP USB Transceiver driver"); MODULE_LICENSE("GPL");
gpl-2.0
smaeul/kernel_samsung_aries
net/irda/irsysctl.c
3661
7269
/********************************************************************* * * Filename: irsysctl.c * Version: 1.0 * Description: Sysctl interface for IrDA * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Sun May 24 22:12:06 1998 * Modified at: Fri Jun 4 02:50:15 1999 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1997, 1999 Dag Brattli, All Rights Reserved. * Copyright (c) 2000-2001 Jean Tourrilhes <jt@hpl.hp.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ #include <linux/mm.h> #include <linux/ctype.h> #include <linux/sysctl.h> #include <linux/init.h> #include <net/irda/irda.h> /* irda_debug */ #include <net/irda/irlmp.h> #include <net/irda/timer.h> #include <net/irda/irias_object.h> extern int sysctl_discovery; extern int sysctl_discovery_slots; extern int sysctl_discovery_timeout; extern int sysctl_slot_timeout; extern int sysctl_fast_poll_increase; extern char sysctl_devname[]; extern int sysctl_max_baud_rate; extern int sysctl_min_tx_turn_time; extern int sysctl_max_tx_data_size; extern int sysctl_max_tx_window; extern int sysctl_max_noreply_time; extern int sysctl_warn_noreply_time; extern int sysctl_lap_keepalive_time; extern struct irlmp_cb *irlmp; /* this is needed for the proc_dointvec_minmax - Jean II */ static int max_discovery_slots = 16; /* ??? */ static int min_discovery_slots = 1; /* IrLAP 6.13.2 says 25ms to 10+70ms - allow higher since some devices * seems to require it. 
(from Dag's comment) */ static int max_slot_timeout = 160; static int min_slot_timeout = 20; static int max_max_baud_rate = 16000000; /* See qos.c - IrLAP spec */ static int min_max_baud_rate = 2400; static int max_min_tx_turn_time = 10000; /* See qos.c - IrLAP spec */ static int min_min_tx_turn_time; static int max_max_tx_data_size = 2048; /* See qos.c - IrLAP spec */ static int min_max_tx_data_size = 64; static int max_max_tx_window = 7; /* See qos.c - IrLAP spec */ static int min_max_tx_window = 1; static int max_max_noreply_time = 40; /* See qos.c - IrLAP spec */ static int min_max_noreply_time = 3; static int max_warn_noreply_time = 3; /* 3s == standard */ static int min_warn_noreply_time = 1; /* 1s == min WD_TIMER */ static int max_lap_keepalive_time = 10000; /* 10s */ static int min_lap_keepalive_time = 100; /* 100us */ /* For other sysctl, I've no idea of the range. Maybe Dag could help * us on that - Jean II */ static int do_devname(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret; ret = proc_dostring(table, write, buffer, lenp, ppos); if (ret == 0 && write) { struct ias_value *val; val = irias_new_string_value(sysctl_devname); if (val) irias_object_change_attribute("Device", "DeviceName", val); } return ret; } static int do_discovery(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret; ret = proc_dointvec(table, write, buffer, lenp, ppos); if (ret) return ret; if (irlmp == NULL) return -ENODEV; if (sysctl_discovery) irlmp_start_discovery_timer(irlmp, sysctl_discovery_timeout*HZ); else del_timer_sync(&irlmp->discovery_timer); return ret; } /* One file */ static ctl_table irda_table[] = { { .procname = "discovery", .data = &sysctl_discovery, .maxlen = sizeof(int), .mode = 0644, .proc_handler = do_discovery, }, { .procname = "devname", .data = sysctl_devname, .maxlen = 65, .mode = 0644, .proc_handler = do_devname, }, #ifdef CONFIG_IRDA_DEBUG { .procname = "debug", .data = &irda_debug, 
.maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, #endif #ifdef CONFIG_IRDA_FAST_RR { .procname = "fast_poll_increase", .data = &sysctl_fast_poll_increase, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, #endif { .procname = "discovery_slots", .data = &sysctl_discovery_slots, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_discovery_slots, .extra2 = &max_discovery_slots }, { .procname = "discovery_timeout", .data = &sysctl_discovery_timeout, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .procname = "slot_timeout", .data = &sysctl_slot_timeout, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_slot_timeout, .extra2 = &max_slot_timeout }, { .procname = "max_baud_rate", .data = &sysctl_max_baud_rate, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_max_baud_rate, .extra2 = &max_max_baud_rate }, { .procname = "min_tx_turn_time", .data = &sysctl_min_tx_turn_time, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_min_tx_turn_time, .extra2 = &max_min_tx_turn_time }, { .procname = "max_tx_data_size", .data = &sysctl_max_tx_data_size, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_max_tx_data_size, .extra2 = &max_max_tx_data_size }, { .procname = "max_tx_window", .data = &sysctl_max_tx_window, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_max_tx_window, .extra2 = &max_max_tx_window }, { .procname = "max_noreply_time", .data = &sysctl_max_noreply_time, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_max_noreply_time, .extra2 = &max_max_noreply_time }, { .procname = "warn_noreply_time", .data = &sysctl_warn_noreply_time, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = 
&min_warn_noreply_time, .extra2 = &max_warn_noreply_time }, { .procname = "lap_keepalive_time", .data = &sysctl_lap_keepalive_time, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_lap_keepalive_time, .extra2 = &max_lap_keepalive_time }, { } }; static struct ctl_path irda_path[] = { { .procname = "net", }, { .procname = "irda", }, { } }; static struct ctl_table_header *irda_table_header; /* * Function irda_sysctl_register (void) * * Register our sysctl interface * */ int __init irda_sysctl_register(void) { irda_table_header = register_sysctl_paths(irda_path, irda_table); if (!irda_table_header) return -ENOMEM; return 0; } /* * Function irda_sysctl_unregister (void) * * Unregister our sysctl interface * */ void irda_sysctl_unregister(void) { unregister_sysctl_table(irda_table_header); }
gpl-2.0
mythos234/SimplKernel-LL-G920F
arch/sh/cchips/hd6446x/hd64461.c
3917
2644
/* * Copyright (C) 2000 YAEGASHI Takeshi * Hitachi HD64461 companion chip support */ #include <linux/sched.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/io.h> #include <asm/irq.h> #include <asm/hd64461.h> /* This belongs in cpu specific */ #define INTC_ICR1 0xA4140010UL static void hd64461_mask_irq(struct irq_data *data) { unsigned int irq = data->irq; unsigned short nimr; unsigned short mask = 1 << (irq - HD64461_IRQBASE); nimr = __raw_readw(HD64461_NIMR); nimr |= mask; __raw_writew(nimr, HD64461_NIMR); } static void hd64461_unmask_irq(struct irq_data *data) { unsigned int irq = data->irq; unsigned short nimr; unsigned short mask = 1 << (irq - HD64461_IRQBASE); nimr = __raw_readw(HD64461_NIMR); nimr &= ~mask; __raw_writew(nimr, HD64461_NIMR); } static void hd64461_mask_and_ack_irq(struct irq_data *data) { hd64461_mask_irq(data); #ifdef CONFIG_HD64461_ENABLER if (data->irq == HD64461_IRQBASE + 13) __raw_writeb(0x00, HD64461_PCC1CSCR); #endif } static struct irq_chip hd64461_irq_chip = { .name = "HD64461-IRQ", .irq_mask = hd64461_mask_irq, .irq_mask_ack = hd64461_mask_and_ack_irq, .irq_unmask = hd64461_unmask_irq, }; static void hd64461_irq_demux(unsigned int irq, struct irq_desc *desc) { unsigned short intv = __raw_readw(HD64461_NIRR); unsigned int ext_irq = HD64461_IRQBASE; intv &= (1 << HD64461_IRQ_NUM) - 1; for (; intv; intv >>= 1, ext_irq++) { if (!(intv & 1)) continue; generic_handle_irq(ext_irq); } } int __init setup_hd64461(void) { int irq_base, i; printk(KERN_INFO "HD64461 configured at 0x%x on irq %d(mapped into %d to %d)\n", HD64461_IOBASE, CONFIG_HD64461_IRQ, HD64461_IRQBASE, HD64461_IRQBASE + 15); /* Should be at processor specific part.. 
*/ #if defined(CONFIG_CPU_SUBTYPE_SH7709) __raw_writew(0x2240, INTC_ICR1); #endif __raw_writew(0xffff, HD64461_NIMR); irq_base = irq_alloc_descs(HD64461_IRQBASE, HD64461_IRQBASE, 16, -1); if (IS_ERR_VALUE(irq_base)) { pr_err("%s: failed hooking irqs for HD64461\n", __func__); return irq_base; } for (i = 0; i < 16; i++) irq_set_chip_and_handler(irq_base + i, &hd64461_irq_chip, handle_level_irq); irq_set_chained_handler(CONFIG_HD64461_IRQ, hd64461_irq_demux); irq_set_irq_type(CONFIG_HD64461_IRQ, IRQ_TYPE_LEVEL_LOW); #ifdef CONFIG_HD64461_ENABLER printk(KERN_INFO "HD64461: enabling PCMCIA devices\n"); __raw_writeb(0x4c, HD64461_PCC1CSCIER); __raw_writeb(0x00, HD64461_PCC1CSCR); #endif return 0; } module_init(setup_hd64461);
gpl-2.0
rooque/android_kernel_xiaomi_cancro
drivers/rtc/rtc-em3027.c
4941
3689
/* * An rtc/i2c driver for the EM Microelectronic EM3027 * Copyright 2011 CompuLab, Ltd. * * Author: Mike Rapoport <mike@compulab.co.il> * * Based on rtc-ds1672.c by Alessandro Zummo <a.zummo@towertech.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/i2c.h> #include <linux/rtc.h> #include <linux/bcd.h> #include <linux/module.h> /* Registers */ #define EM3027_REG_ON_OFF_CTRL 0x00 #define EM3027_REG_IRQ_CTRL 0x01 #define EM3027_REG_IRQ_FLAGS 0x02 #define EM3027_REG_STATUS 0x03 #define EM3027_REG_RST_CTRL 0x04 #define EM3027_REG_WATCH_SEC 0x08 #define EM3027_REG_WATCH_MIN 0x09 #define EM3027_REG_WATCH_HOUR 0x0a #define EM3027_REG_WATCH_DATE 0x0b #define EM3027_REG_WATCH_DAY 0x0c #define EM3027_REG_WATCH_MON 0x0d #define EM3027_REG_WATCH_YEAR 0x0e #define EM3027_REG_ALARM_SEC 0x10 #define EM3027_REG_ALARM_MIN 0x11 #define EM3027_REG_ALARM_HOUR 0x12 #define EM3027_REG_ALARM_DATE 0x13 #define EM3027_REG_ALARM_DAY 0x14 #define EM3027_REG_ALARM_MON 0x15 #define EM3027_REG_ALARM_YEAR 0x16 static struct i2c_driver em3027_driver; static int em3027_get_time(struct device *dev, struct rtc_time *tm) { struct i2c_client *client = to_i2c_client(dev); unsigned char addr = EM3027_REG_WATCH_SEC; unsigned char buf[7]; struct i2c_msg msgs[] = { {client->addr, 0, 1, &addr}, /* setup read addr */ {client->addr, I2C_M_RD, 7, buf}, /* read time/date */ }; /* read time/date registers */ if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) { dev_err(&client->dev, "%s: read error\n", __func__); return -EIO; } tm->tm_sec = bcd2bin(buf[0]); tm->tm_min = bcd2bin(buf[1]); tm->tm_hour = bcd2bin(buf[2]); tm->tm_mday = bcd2bin(buf[3]); tm->tm_wday = bcd2bin(buf[4]); tm->tm_mon = bcd2bin(buf[5]); tm->tm_year = bcd2bin(buf[6]) + 100; return 0; } static int em3027_set_time(struct device *dev, struct rtc_time *tm) { struct i2c_client *client = 
to_i2c_client(dev); unsigned char buf[8]; struct i2c_msg msg = { client->addr, 0, 8, buf, /* write time/date */ }; buf[0] = EM3027_REG_WATCH_SEC; buf[1] = bin2bcd(tm->tm_sec); buf[2] = bin2bcd(tm->tm_min); buf[3] = bin2bcd(tm->tm_hour); buf[4] = bin2bcd(tm->tm_mday); buf[5] = bin2bcd(tm->tm_wday); buf[6] = bin2bcd(tm->tm_mon); buf[7] = bin2bcd(tm->tm_year % 100); /* write time/date registers */ if ((i2c_transfer(client->adapter, &msg, 1)) != 1) { dev_err(&client->dev, "%s: write error\n", __func__); return -EIO; } return 0; } static const struct rtc_class_ops em3027_rtc_ops = { .read_time = em3027_get_time, .set_time = em3027_set_time, }; static int em3027_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct rtc_device *rtc; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -ENODEV; rtc = rtc_device_register(em3027_driver.driver.name, &client->dev, &em3027_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) return PTR_ERR(rtc); i2c_set_clientdata(client, rtc); return 0; } static int em3027_remove(struct i2c_client *client) { struct rtc_device *rtc = i2c_get_clientdata(client); if (rtc) rtc_device_unregister(rtc); return 0; } static struct i2c_device_id em3027_id[] = { { "em3027", 0 }, { } }; static struct i2c_driver em3027_driver = { .driver = { .name = "rtc-em3027", }, .probe = &em3027_probe, .remove = &em3027_remove, .id_table = em3027_id, }; module_i2c_driver(em3027_driver); MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>"); MODULE_DESCRIPTION("EM Microelectronic EM3027 RTC driver"); MODULE_LICENSE("GPL");
gpl-2.0
jmztaylor/android_kernel_htc_a3ul_new
drivers/media/video/cpia2/cpia2_v4l.c
5453
40773
/**************************************************************************** * * Filename: cpia2_v4l.c * * Copyright 2001, STMicrolectronics, Inc. * Contact: steve.miller@st.com * Copyright 2001,2005, Scott J. Bertin <scottbertin@yahoo.com> * * Description: * This is a USB driver for CPia2 based video cameras. * The infrastructure of this driver is based on the cpia usb driver by * Jochen Scharrlach and Johannes Erdfeldt. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* * Stripped of 2.4 stuff ready for main kernel submit by * Alan Cox <alan@lxorguk.ukuu.org.uk> ****************************************************************************/ #define CPIA_VERSION "3.0.1" #include <linux/module.h> #include <linux/time.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/videodev2.h> #include <linux/stringify.h> #include <media/v4l2-ioctl.h> #include "cpia2.h" #include "cpia2dev.h" static int video_nr = -1; module_param(video_nr, int, 0); MODULE_PARM_DESC(video_nr,"video device to register (0=/dev/video0, etc)"); static int buffer_size = 68*1024; module_param(buffer_size, int, 0); MODULE_PARM_DESC(buffer_size, "Size for each frame buffer in bytes (default 68k)"); static int num_buffers = 3; module_param(num_buffers, int, 0); MODULE_PARM_DESC(num_buffers, "Number of frame buffers (1-" __stringify(VIDEO_MAX_FRAME) ", default 3)"); static int alternate = DEFAULT_ALT; module_param(alternate, int, 0); MODULE_PARM_DESC(alternate, "USB Alternate (" __stringify(USBIF_ISO_1) "-" __stringify(USBIF_ISO_6) ", default " __stringify(DEFAULT_ALT) ")"); static int flicker_freq = 60; module_param(flicker_freq, int, 0); MODULE_PARM_DESC(flicker_freq, "Flicker frequency (" __stringify(50) "or" __stringify(60) ", default " __stringify(60) ")"); static int flicker_mode = NEVER_FLICKER; module_param(flicker_mode, int, 0); MODULE_PARM_DESC(flicker_mode, "Flicker supression (" __stringify(NEVER_FLICKER) "or" __stringify(ANTI_FLICKER_ON) ", default " __stringify(NEVER_FLICKER) ")"); MODULE_AUTHOR("Steve Miller (STMicroelectronics) <steve.miller@st.com>"); MODULE_DESCRIPTION("V4L-driver for STMicroelectronics CPiA2 based cameras"); MODULE_SUPPORTED_DEVICE("video"); MODULE_LICENSE("GPL"); MODULE_VERSION(CPIA_VERSION); #define ABOUT "V4L-Driver for Vision CPiA2 based cameras" struct control_menu_info { int value; char name[32]; }; static struct control_menu_info framerate_controls[] = { { CPIA2_VP_FRAMERATE_6_25, "6.25 fps" 
}, { CPIA2_VP_FRAMERATE_7_5, "7.5 fps" }, { CPIA2_VP_FRAMERATE_12_5, "12.5 fps" }, { CPIA2_VP_FRAMERATE_15, "15 fps" }, { CPIA2_VP_FRAMERATE_25, "25 fps" }, { CPIA2_VP_FRAMERATE_30, "30 fps" }, }; #define NUM_FRAMERATE_CONTROLS (ARRAY_SIZE(framerate_controls)) static struct control_menu_info flicker_controls[] = { { NEVER_FLICKER, "Off" }, { FLICKER_50, "50 Hz" }, { FLICKER_60, "60 Hz" }, }; #define NUM_FLICKER_CONTROLS (ARRAY_SIZE(flicker_controls)) static struct control_menu_info lights_controls[] = { { 0, "Off" }, { 64, "Top" }, { 128, "Bottom" }, { 192, "Both" }, }; #define NUM_LIGHTS_CONTROLS (ARRAY_SIZE(lights_controls)) #define GPIO_LIGHTS_MASK 192 static struct v4l2_queryctrl controls[] = { { .id = V4L2_CID_BRIGHTNESS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Brightness", .minimum = 0, .maximum = 255, .step = 1, .default_value = DEFAULT_BRIGHTNESS, }, { .id = V4L2_CID_CONTRAST, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Contrast", .minimum = 0, .maximum = 255, .step = 1, .default_value = DEFAULT_CONTRAST, }, { .id = V4L2_CID_SATURATION, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Saturation", .minimum = 0, .maximum = 255, .step = 1, .default_value = DEFAULT_SATURATION, }, { .id = V4L2_CID_HFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Mirror Horizontally", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, { .id = V4L2_CID_VFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Flip Vertically", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, { .id = CPIA2_CID_TARGET_KB, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Target KB", .minimum = 0, .maximum = 255, .step = 1, .default_value = DEFAULT_TARGET_KB, }, { .id = CPIA2_CID_GPIO, .type = V4L2_CTRL_TYPE_INTEGER, .name = "GPIO", .minimum = 0, .maximum = 255, .step = 1, .default_value = 0, }, { .id = CPIA2_CID_FLICKER_MODE, .type = V4L2_CTRL_TYPE_MENU, .name = "Flicker Reduction", .minimum = 0, .maximum = NUM_FLICKER_CONTROLS-1, .step = 1, .default_value = 0, }, { .id = CPIA2_CID_FRAMERATE, .type = 
V4L2_CTRL_TYPE_MENU, .name = "Framerate", .minimum = 0, .maximum = NUM_FRAMERATE_CONTROLS-1, .step = 1, .default_value = NUM_FRAMERATE_CONTROLS-1, }, { .id = CPIA2_CID_USB_ALT, .type = V4L2_CTRL_TYPE_INTEGER, .name = "USB Alternate", .minimum = USBIF_ISO_1, .maximum = USBIF_ISO_6, .step = 1, .default_value = DEFAULT_ALT, }, { .id = CPIA2_CID_LIGHTS, .type = V4L2_CTRL_TYPE_MENU, .name = "Lights", .minimum = 0, .maximum = NUM_LIGHTS_CONTROLS-1, .step = 1, .default_value = 0, }, { .id = CPIA2_CID_RESET_CAMERA, .type = V4L2_CTRL_TYPE_BUTTON, .name = "Reset Camera", .minimum = 0, .maximum = 0, .step = 0, .default_value = 0, }, }; #define NUM_CONTROLS (ARRAY_SIZE(controls)) /****************************************************************************** * * cpia2_open * *****************************************************************************/ static int cpia2_open(struct file *file) { struct camera_data *cam = video_drvdata(file); struct cpia2_fh *fh; if (!cam) { ERR("Internal error, camera_data not found!\n"); return -ENODEV; } if (!cam->present) return -ENODEV; if (cam->open_count == 0) { if (cpia2_allocate_buffers(cam)) return -ENOMEM; /* reset the camera */ if (cpia2_reset_camera(cam) < 0) return -EIO; cam->APP_len = 0; cam->COM_len = 0; } fh = kmalloc(sizeof(*fh), GFP_KERNEL); if (!fh) return -ENOMEM; file->private_data = fh; fh->prio = V4L2_PRIORITY_UNSET; v4l2_prio_open(&cam->prio, &fh->prio); fh->mmapped = 0; ++cam->open_count; cpia2_dbg_dump_registers(cam); return 0; } /****************************************************************************** * * cpia2_close * *****************************************************************************/ static int cpia2_close(struct file *file) { struct video_device *dev = video_devdata(file); struct camera_data *cam = video_get_drvdata(dev); struct cpia2_fh *fh = file->private_data; if (cam->present && (cam->open_count == 1 || fh->prio == V4L2_PRIORITY_RECORD)) { cpia2_usb_stream_stop(cam); if (cam->open_count == 1) { 
/* save camera state for later open */ cpia2_save_camera_state(cam); cpia2_set_low_power(cam); cpia2_free_buffers(cam); } } if (fh->mmapped) cam->mmapped = 0; v4l2_prio_close(&cam->prio, fh->prio); file->private_data = NULL; kfree(fh); if (--cam->open_count == 0) { cpia2_free_buffers(cam); if (!cam->present) { video_unregister_device(dev); kfree(cam); return 0; } } return 0; } /****************************************************************************** * * cpia2_v4l_read * *****************************************************************************/ static ssize_t cpia2_v4l_read(struct file *file, char __user *buf, size_t count, loff_t *off) { struct camera_data *cam = video_drvdata(file); int noblock = file->f_flags&O_NONBLOCK; struct cpia2_fh *fh = file->private_data; if(!cam) return -EINVAL; /* Priority check */ if(fh->prio != V4L2_PRIORITY_RECORD) { return -EBUSY; } return cpia2_read(cam, buf, count, noblock); } /****************************************************************************** * * cpia2_v4l_poll * *****************************************************************************/ static unsigned int cpia2_v4l_poll(struct file *filp, struct poll_table_struct *wait) { struct camera_data *cam = video_drvdata(filp); struct cpia2_fh *fh = filp->private_data; if(!cam) return POLLERR; /* Priority check */ if(fh->prio != V4L2_PRIORITY_RECORD) { return POLLERR; } return cpia2_poll(cam, filp, wait); } static int sync(struct camera_data *cam, int frame_nr) { struct framebuf *frame = &cam->buffers[frame_nr]; while (1) { if (frame->status == FRAME_READY) return 0; if (!cam->streaming) { frame->status = FRAME_READY; frame->length = 0; return 0; } mutex_unlock(&cam->v4l2_lock); wait_event_interruptible(cam->wq_stream, !cam->streaming || frame->status == FRAME_READY); mutex_lock(&cam->v4l2_lock); if (signal_pending(current)) return -ERESTARTSYS; if(!cam->present) return -ENOTTY; } } /****************************************************************************** * 
* ioctl_set_gpio * *****************************************************************************/ static long cpia2_default(struct file *file, void *fh, bool valid_prio, int cmd, void *arg) { struct camera_data *cam = video_drvdata(file); __u32 gpio_val; if (cmd != CPIA2_CID_GPIO) return -EINVAL; gpio_val = *(__u32*) arg; if (gpio_val &~ 0xFFU) return -EINVAL; return cpia2_set_gpio(cam, (unsigned char)gpio_val); } /****************************************************************************** * * ioctl_querycap * * V4L2 device capabilities * *****************************************************************************/ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *vc) { struct camera_data *cam = video_drvdata(file); strcpy(vc->driver, "cpia2"); if (cam->params.pnp_id.product == 0x151) strcpy(vc->card, "QX5 Microscope"); else strcpy(vc->card, "CPiA2 Camera"); switch (cam->params.pnp_id.device_type) { case DEVICE_STV_672: strcat(vc->card, " (672/"); break; case DEVICE_STV_676: strcat(vc->card, " (676/"); break; default: strcat(vc->card, " (XXX/"); break; } switch (cam->params.version.sensor_flags) { case CPIA2_VP_SENSOR_FLAGS_404: strcat(vc->card, "404)"); break; case CPIA2_VP_SENSOR_FLAGS_407: strcat(vc->card, "407)"); break; case CPIA2_VP_SENSOR_FLAGS_409: strcat(vc->card, "409)"); break; case CPIA2_VP_SENSOR_FLAGS_410: strcat(vc->card, "410)"); break; case CPIA2_VP_SENSOR_FLAGS_500: strcat(vc->card, "500)"); break; default: strcat(vc->card, "XXX)"); break; } if (usb_make_path(cam->dev, vc->bus_info, sizeof(vc->bus_info)) <0) memset(vc->bus_info,0, sizeof(vc->bus_info)); vc->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; return 0; } /****************************************************************************** * * ioctl_input * * V4L2 input get/set/enumerate * *****************************************************************************/ static int cpia2_enum_input(struct file *file, void *fh, 
struct v4l2_input *i) { if (i->index) return -EINVAL; strcpy(i->name, "Camera"); i->type = V4L2_INPUT_TYPE_CAMERA; return 0; } static int cpia2_g_input(struct file *file, void *fh, unsigned int *i) { *i = 0; return 0; } static int cpia2_s_input(struct file *file, void *fh, unsigned int i) { return i ? -EINVAL : 0; } /****************************************************************************** * * ioctl_enum_fmt * * V4L2 format enumerate * *****************************************************************************/ static int cpia2_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f) { int index = f->index; if (index < 0 || index > 1) return -EINVAL; memset(f, 0, sizeof(*f)); f->index = index; f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; f->flags = V4L2_FMT_FLAG_COMPRESSED; switch(index) { case 0: strcpy(f->description, "MJPEG"); f->pixelformat = V4L2_PIX_FMT_MJPEG; break; case 1: strcpy(f->description, "JPEG"); f->pixelformat = V4L2_PIX_FMT_JPEG; break; default: return -EINVAL; } return 0; } /****************************************************************************** * * ioctl_try_fmt * * V4L2 format try * *****************************************************************************/ static int cpia2_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f) { struct camera_data *cam = video_drvdata(file); if (f->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG && f->fmt.pix.pixelformat != V4L2_PIX_FMT_JPEG) return -EINVAL; f->fmt.pix.field = V4L2_FIELD_NONE; f->fmt.pix.bytesperline = 0; f->fmt.pix.sizeimage = cam->frame_size; f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG; f->fmt.pix.priv = 0; switch (cpia2_match_video_size(f->fmt.pix.width, f->fmt.pix.height)) { case VIDEOSIZE_VGA: f->fmt.pix.width = 640; f->fmt.pix.height = 480; break; case VIDEOSIZE_CIF: f->fmt.pix.width = 352; f->fmt.pix.height = 288; break; case VIDEOSIZE_QVGA: f->fmt.pix.width = 320; f->fmt.pix.height = 240; break; case VIDEOSIZE_288_216: f->fmt.pix.width = 288; 
f->fmt.pix.height = 216; break; case VIDEOSIZE_256_192: f->fmt.pix.width = 256; f->fmt.pix.height = 192; break; case VIDEOSIZE_224_168: f->fmt.pix.width = 224; f->fmt.pix.height = 168; break; case VIDEOSIZE_192_144: f->fmt.pix.width = 192; f->fmt.pix.height = 144; break; case VIDEOSIZE_QCIF: default: f->fmt.pix.width = 176; f->fmt.pix.height = 144; break; } return 0; } /****************************************************************************** * * ioctl_set_fmt * * V4L2 format set * *****************************************************************************/ static int cpia2_s_fmt_vid_cap(struct file *file, void *_fh, struct v4l2_format *f) { struct camera_data *cam = video_drvdata(file); struct cpia2_fh *fh = _fh; int err, frame; err = v4l2_prio_check(&cam->prio, fh->prio); if (err) return err; err = cpia2_try_fmt_vid_cap(file, _fh, f); if(err != 0) return err; /* Ensure that only this process can change the format. */ err = v4l2_prio_change(&cam->prio, &fh->prio, V4L2_PRIORITY_RECORD); if(err != 0) { return err; } cam->pixelformat = f->fmt.pix.pixelformat; /* NOTE: This should be set to 1 for MJPEG, but some apps don't handle * the missing Huffman table properly. */ cam->params.compression.inhibit_htables = 0; /*f->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG;*/ /* we set the video window to something smaller or equal to what * is requested by the user??? 
*/ DBG("Requested width = %d, height = %d\n", f->fmt.pix.width, f->fmt.pix.height); if (f->fmt.pix.width != cam->width || f->fmt.pix.height != cam->height) { cam->width = f->fmt.pix.width; cam->height = f->fmt.pix.height; cam->params.roi.width = f->fmt.pix.width; cam->params.roi.height = f->fmt.pix.height; cpia2_set_format(cam); } for (frame = 0; frame < cam->num_frames; ++frame) { if (cam->buffers[frame].status == FRAME_READING) if ((err = sync(cam, frame)) < 0) return err; cam->buffers[frame].status = FRAME_EMPTY; } return 0; } /****************************************************************************** * * ioctl_get_fmt * * V4L2 format get * *****************************************************************************/ static int cpia2_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f) { struct camera_data *cam = video_drvdata(file); f->fmt.pix.width = cam->width; f->fmt.pix.height = cam->height; f->fmt.pix.pixelformat = cam->pixelformat; f->fmt.pix.field = V4L2_FIELD_NONE; f->fmt.pix.bytesperline = 0; f->fmt.pix.sizeimage = cam->frame_size; f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG; f->fmt.pix.priv = 0; return 0; } /****************************************************************************** * * ioctl_cropcap * * V4L2 query cropping capabilities * NOTE: cropping is currently disabled * *****************************************************************************/ static int cpia2_cropcap(struct file *file, void *fh, struct v4l2_cropcap *c) { struct camera_data *cam = video_drvdata(file); if (c->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; c->bounds.left = 0; c->bounds.top = 0; c->bounds.width = cam->width; c->bounds.height = cam->height; c->defrect.left = 0; c->defrect.top = 0; c->defrect.width = cam->width; c->defrect.height = cam->height; c->pixelaspect.numerator = 1; c->pixelaspect.denominator = 1; return 0; } /****************************************************************************** * * ioctl_queryctrl * * V4L2 query 
possible control variables * *****************************************************************************/ static int cpia2_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *c) { struct camera_data *cam = video_drvdata(file); int i; for(i=0; i<NUM_CONTROLS; ++i) { if(c->id == controls[i].id) { memcpy(c, controls+i, sizeof(*c)); break; } } if(i == NUM_CONTROLS) return -EINVAL; /* Some devices have additional limitations */ switch(c->id) { case V4L2_CID_BRIGHTNESS: /*** * Don't let the register be set to zero - bug in VP4 * flash of full brightness ***/ if (cam->params.pnp_id.device_type == DEVICE_STV_672) c->minimum = 1; break; case V4L2_CID_VFLIP: // VP5 Only if(cam->params.pnp_id.device_type == DEVICE_STV_672) c->flags |= V4L2_CTRL_FLAG_DISABLED; break; case CPIA2_CID_FRAMERATE: if(cam->params.pnp_id.device_type == DEVICE_STV_672 && cam->params.version.sensor_flags==CPIA2_VP_SENSOR_FLAGS_500){ // Maximum 15fps for(i=0; i<c->maximum; ++i) { if(framerate_controls[i].value == CPIA2_VP_FRAMERATE_15) { c->maximum = i; c->default_value = i; } } } break; case CPIA2_CID_FLICKER_MODE: // Flicker control only valid for 672. if(cam->params.pnp_id.device_type != DEVICE_STV_672) c->flags |= V4L2_CTRL_FLAG_DISABLED; break; case CPIA2_CID_LIGHTS: // Light control only valid for the QX5 Microscope. 
if(cam->params.pnp_id.product != 0x151) c->flags |= V4L2_CTRL_FLAG_DISABLED; break; default: break; } return 0; } /****************************************************************************** * * ioctl_querymenu * * V4L2 query possible control variables * *****************************************************************************/ static int cpia2_querymenu(struct file *file, void *fh, struct v4l2_querymenu *m) { struct camera_data *cam = video_drvdata(file); switch(m->id) { case CPIA2_CID_FLICKER_MODE: if (m->index >= NUM_FLICKER_CONTROLS) return -EINVAL; strcpy(m->name, flicker_controls[m->index].name); break; case CPIA2_CID_FRAMERATE: { int maximum = NUM_FRAMERATE_CONTROLS - 1; if(cam->params.pnp_id.device_type == DEVICE_STV_672 && cam->params.version.sensor_flags==CPIA2_VP_SENSOR_FLAGS_500){ // Maximum 15fps int i; for(i=0; i<maximum; ++i) { if(framerate_controls[i].value == CPIA2_VP_FRAMERATE_15) maximum = i; } } if (m->index > maximum) return -EINVAL; strcpy(m->name, framerate_controls[m->index].name); break; } case CPIA2_CID_LIGHTS: if (m->index >= NUM_LIGHTS_CONTROLS) return -EINVAL; strcpy(m->name, lights_controls[m->index].name); break; default: return -EINVAL; } return 0; } /****************************************************************************** * * ioctl_g_ctrl * * V4L2 get the value of a control variable * *****************************************************************************/ static int cpia2_g_ctrl(struct file *file, void *fh, struct v4l2_control *c) { struct camera_data *cam = video_drvdata(file); switch(c->id) { case V4L2_CID_BRIGHTNESS: cpia2_do_command(cam, CPIA2_CMD_GET_VP_BRIGHTNESS, TRANSFER_READ, 0); c->value = cam->params.color_params.brightness; break; case V4L2_CID_CONTRAST: cpia2_do_command(cam, CPIA2_CMD_GET_CONTRAST, TRANSFER_READ, 0); c->value = cam->params.color_params.contrast; break; case V4L2_CID_SATURATION: cpia2_do_command(cam, CPIA2_CMD_GET_VP_SATURATION, TRANSFER_READ, 0); c->value = 
cam->params.color_params.saturation; break; case V4L2_CID_HFLIP: cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS, TRANSFER_READ, 0); c->value = (cam->params.vp_params.user_effects & CPIA2_VP_USER_EFFECTS_MIRROR) != 0; break; case V4L2_CID_VFLIP: cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS, TRANSFER_READ, 0); c->value = (cam->params.vp_params.user_effects & CPIA2_VP_USER_EFFECTS_FLIP) != 0; break; case CPIA2_CID_TARGET_KB: c->value = cam->params.vc_params.target_kb; break; case CPIA2_CID_GPIO: cpia2_do_command(cam, CPIA2_CMD_GET_VP_GPIO_DATA, TRANSFER_READ, 0); c->value = cam->params.vp_params.gpio_data; break; case CPIA2_CID_FLICKER_MODE: { int i, mode; cpia2_do_command(cam, CPIA2_CMD_GET_FLICKER_MODES, TRANSFER_READ, 0); if(cam->params.flicker_control.cam_register & CPIA2_VP_FLICKER_MODES_NEVER_FLICKER) { mode = NEVER_FLICKER; } else { if(cam->params.flicker_control.cam_register & CPIA2_VP_FLICKER_MODES_50HZ) { mode = FLICKER_50; } else { mode = FLICKER_60; } } for(i=0; i<NUM_FLICKER_CONTROLS; i++) { if(flicker_controls[i].value == mode) { c->value = i; break; } } if(i == NUM_FLICKER_CONTROLS) return -EINVAL; break; } case CPIA2_CID_FRAMERATE: { int maximum = NUM_FRAMERATE_CONTROLS - 1; int i; for(i=0; i<= maximum; i++) { if(cam->params.vp_params.frame_rate == framerate_controls[i].value) break; } if(i > maximum) return -EINVAL; c->value = i; break; } case CPIA2_CID_USB_ALT: c->value = cam->params.camera_state.stream_mode; break; case CPIA2_CID_LIGHTS: { int i; cpia2_do_command(cam, CPIA2_CMD_GET_VP_GPIO_DATA, TRANSFER_READ, 0); for(i=0; i<NUM_LIGHTS_CONTROLS; i++) { if((cam->params.vp_params.gpio_data&GPIO_LIGHTS_MASK) == lights_controls[i].value) { break; } } if(i == NUM_LIGHTS_CONTROLS) return -EINVAL; c->value = i; break; } case CPIA2_CID_RESET_CAMERA: return -EINVAL; default: return -EINVAL; } DBG("Get control id:%d, value:%d\n", c->id, c->value); return 0; } /****************************************************************************** * * 
ioctl_s_ctrl * * V4L2 set the value of a control variable * *****************************************************************************/ static int cpia2_s_ctrl(struct file *file, void *fh, struct v4l2_control *c) { struct camera_data *cam = video_drvdata(file); int i; int retval = 0; DBG("Set control id:%d, value:%d\n", c->id, c->value); /* Check that the value is in range */ for(i=0; i<NUM_CONTROLS; i++) { if(c->id == controls[i].id) { if(c->value < controls[i].minimum || c->value > controls[i].maximum) { return -EINVAL; } break; } } if(i == NUM_CONTROLS) return -EINVAL; switch(c->id) { case V4L2_CID_BRIGHTNESS: cpia2_set_brightness(cam, c->value); break; case V4L2_CID_CONTRAST: cpia2_set_contrast(cam, c->value); break; case V4L2_CID_SATURATION: cpia2_set_saturation(cam, c->value); break; case V4L2_CID_HFLIP: cpia2_set_property_mirror(cam, c->value); break; case V4L2_CID_VFLIP: cpia2_set_property_flip(cam, c->value); break; case CPIA2_CID_TARGET_KB: retval = cpia2_set_target_kb(cam, c->value); break; case CPIA2_CID_GPIO: retval = cpia2_set_gpio(cam, c->value); break; case CPIA2_CID_FLICKER_MODE: retval = cpia2_set_flicker_mode(cam, flicker_controls[c->value].value); break; case CPIA2_CID_FRAMERATE: retval = cpia2_set_fps(cam, framerate_controls[c->value].value); break; case CPIA2_CID_USB_ALT: retval = cpia2_usb_change_streaming_alternate(cam, c->value); break; case CPIA2_CID_LIGHTS: retval = cpia2_set_gpio(cam, lights_controls[c->value].value); break; case CPIA2_CID_RESET_CAMERA: cpia2_usb_stream_pause(cam); cpia2_reset_camera(cam); cpia2_usb_stream_resume(cam); break; default: retval = -EINVAL; } return retval; } /****************************************************************************** * * ioctl_g_jpegcomp * * V4L2 get the JPEG compression parameters * *****************************************************************************/ static int cpia2_g_jpegcomp(struct file *file, void *fh, struct v4l2_jpegcompression *parms) { struct camera_data *cam = 
video_drvdata(file); memset(parms, 0, sizeof(*parms)); parms->quality = 80; // TODO: Can this be made meaningful? parms->jpeg_markers = V4L2_JPEG_MARKER_DQT | V4L2_JPEG_MARKER_DRI; if(!cam->params.compression.inhibit_htables) { parms->jpeg_markers |= V4L2_JPEG_MARKER_DHT; } parms->APPn = cam->APPn; parms->APP_len = cam->APP_len; if(cam->APP_len > 0) { memcpy(parms->APP_data, cam->APP_data, cam->APP_len); parms->jpeg_markers |= V4L2_JPEG_MARKER_APP; } parms->COM_len = cam->COM_len; if(cam->COM_len > 0) { memcpy(parms->COM_data, cam->COM_data, cam->COM_len); parms->jpeg_markers |= JPEG_MARKER_COM; } DBG("G_JPEGCOMP APP_len:%d COM_len:%d\n", parms->APP_len, parms->COM_len); return 0; } /****************************************************************************** * * ioctl_s_jpegcomp * * V4L2 set the JPEG compression parameters * NOTE: quality and some jpeg_markers are ignored. * *****************************************************************************/ static int cpia2_s_jpegcomp(struct file *file, void *fh, struct v4l2_jpegcompression *parms) { struct camera_data *cam = video_drvdata(file); DBG("S_JPEGCOMP APP_len:%d COM_len:%d\n", parms->APP_len, parms->COM_len); cam->params.compression.inhibit_htables = !(parms->jpeg_markers & V4L2_JPEG_MARKER_DHT); if(parms->APP_len != 0) { if(parms->APP_len > 0 && parms->APP_len <= sizeof(cam->APP_data) && parms->APPn >= 0 && parms->APPn <= 15) { cam->APPn = parms->APPn; cam->APP_len = parms->APP_len; memcpy(cam->APP_data, parms->APP_data, parms->APP_len); } else { LOG("Bad APPn Params n=%d len=%d\n", parms->APPn, parms->APP_len); return -EINVAL; } } else { cam->APP_len = 0; } if(parms->COM_len != 0) { if(parms->COM_len > 0 && parms->COM_len <= sizeof(cam->COM_data)) { cam->COM_len = parms->COM_len; memcpy(cam->COM_data, parms->COM_data, parms->COM_len); } else { LOG("Bad COM_len=%d\n", parms->COM_len); return -EINVAL; } } return 0; } /****************************************************************************** * * 
ioctl_reqbufs * * V4L2 Initiate memory mapping. * NOTE: The user's request is ignored. For now the buffers are fixed. * *****************************************************************************/ static int cpia2_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *req) { struct camera_data *cam = video_drvdata(file); if(req->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || req->memory != V4L2_MEMORY_MMAP) return -EINVAL; DBG("REQBUFS requested:%d returning:%d\n", req->count, cam->num_frames); req->count = cam->num_frames; memset(&req->reserved, 0, sizeof(req->reserved)); return 0; } /****************************************************************************** * * ioctl_querybuf * * V4L2 Query memory buffer status. * *****************************************************************************/ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf) { struct camera_data *cam = video_drvdata(file); if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || buf->index > cam->num_frames) return -EINVAL; buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer; buf->length = cam->frame_size; buf->memory = V4L2_MEMORY_MMAP; if(cam->mmapped) buf->flags = V4L2_BUF_FLAG_MAPPED; else buf->flags = 0; switch (cam->buffers[buf->index].status) { case FRAME_EMPTY: case FRAME_ERROR: case FRAME_READING: buf->bytesused = 0; buf->flags = V4L2_BUF_FLAG_QUEUED; break; case FRAME_READY: buf->bytesused = cam->buffers[buf->index].length; buf->timestamp = cam->buffers[buf->index].timestamp; buf->sequence = cam->buffers[buf->index].seq; buf->flags = V4L2_BUF_FLAG_DONE; break; } DBG("QUERYBUF index:%d offset:%d flags:%d seq:%d bytesused:%d\n", buf->index, buf->m.offset, buf->flags, buf->sequence, buf->bytesused); return 0; } /****************************************************************************** * * ioctl_qbuf * * V4L2 User is freeing buffer * *****************************************************************************/ static int cpia2_qbuf(struct file 
*file, void *fh, struct v4l2_buffer *buf) { struct camera_data *cam = video_drvdata(file); if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || buf->memory != V4L2_MEMORY_MMAP || buf->index > cam->num_frames) return -EINVAL; DBG("QBUF #%d\n", buf->index); if(cam->buffers[buf->index].status == FRAME_READY) cam->buffers[buf->index].status = FRAME_EMPTY; return 0; } /****************************************************************************** * * find_earliest_filled_buffer * * Helper for ioctl_dqbuf. Find the next ready buffer. * *****************************************************************************/ static int find_earliest_filled_buffer(struct camera_data *cam) { int i; int found = -1; for (i=0; i<cam->num_frames; i++) { if(cam->buffers[i].status == FRAME_READY) { if(found < 0) { found = i; } else { /* find which buffer is earlier */ struct timeval *tv1, *tv2; tv1 = &cam->buffers[i].timestamp; tv2 = &cam->buffers[found].timestamp; if(tv1->tv_sec < tv2->tv_sec || (tv1->tv_sec == tv2->tv_sec && tv1->tv_usec < tv2->tv_usec)) found = i; } } } return found; } /****************************************************************************** * * ioctl_dqbuf * * V4L2 User is asking for a filled buffer. 
* *****************************************************************************/ static int cpia2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf) { struct camera_data *cam = video_drvdata(file); int frame; if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || buf->memory != V4L2_MEMORY_MMAP) return -EINVAL; frame = find_earliest_filled_buffer(cam); if(frame < 0 && file->f_flags&O_NONBLOCK) return -EAGAIN; if(frame < 0) { /* Wait for a frame to become available */ struct framebuf *cb=cam->curbuff; mutex_unlock(&cam->v4l2_lock); wait_event_interruptible(cam->wq_stream, !cam->present || (cb=cam->curbuff)->status == FRAME_READY); mutex_lock(&cam->v4l2_lock); if (signal_pending(current)) return -ERESTARTSYS; if(!cam->present) return -ENOTTY; frame = cb->num; } buf->index = frame; buf->bytesused = cam->buffers[buf->index].length; buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_DONE; buf->field = V4L2_FIELD_NONE; buf->timestamp = cam->buffers[buf->index].timestamp; buf->sequence = cam->buffers[buf->index].seq; buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer; buf->length = cam->frame_size; buf->input = 0; buf->reserved = 0; memset(&buf->timecode, 0, sizeof(buf->timecode)); DBG("DQBUF #%d status:%d seq:%d length:%d\n", buf->index, cam->buffers[buf->index].status, buf->sequence, buf->bytesused); return 0; } static int cpia2_g_priority(struct file *file, void *_fh, enum v4l2_priority *p) { struct cpia2_fh *fh = _fh; *p = fh->prio; return 0; } static int cpia2_s_priority(struct file *file, void *_fh, enum v4l2_priority prio) { struct camera_data *cam = video_drvdata(file); struct cpia2_fh *fh = _fh; if (cam->streaming && prio != fh->prio && fh->prio == V4L2_PRIORITY_RECORD) /* Can't drop record priority while streaming */ return -EBUSY; if (prio == V4L2_PRIORITY_RECORD && prio != fh->prio && v4l2_prio_max(&cam->prio) == V4L2_PRIORITY_RECORD) /* Only one program can record at a time */ return -EBUSY; return v4l2_prio_change(&cam->prio, &fh->prio, prio); 
} static int cpia2_streamon(struct file *file, void *fh, enum v4l2_buf_type type) { struct camera_data *cam = video_drvdata(file); DBG("VIDIOC_STREAMON, streaming=%d\n", cam->streaming); if (!cam->mmapped || type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (!cam->streaming) return cpia2_usb_stream_start(cam, cam->params.camera_state.stream_mode); return -EINVAL; } static int cpia2_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) { struct camera_data *cam = video_drvdata(file); DBG("VIDIOC_STREAMOFF, streaming=%d\n", cam->streaming); if (!cam->mmapped || type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (cam->streaming) return cpia2_usb_stream_stop(cam); return -EINVAL; } /****************************************************************************** * * cpia2_mmap * *****************************************************************************/ static int cpia2_mmap(struct file *file, struct vm_area_struct *area) { struct camera_data *cam = video_drvdata(file); int retval; /* Priority check */ struct cpia2_fh *fh = file->private_data; if(fh->prio != V4L2_PRIORITY_RECORD) { return -EBUSY; } retval = cpia2_remap_buffer(cam, area); if(!retval) fh->mmapped = 1; return retval; } /****************************************************************************** * * reset_camera_struct_v4l * * Sets all values to the defaults *****************************************************************************/ static void reset_camera_struct_v4l(struct camera_data *cam) { cam->width = cam->params.roi.width; cam->height = cam->params.roi.height; cam->frame_size = buffer_size; cam->num_frames = num_buffers; /* FlickerModes */ cam->params.flicker_control.flicker_mode_req = flicker_mode; cam->params.flicker_control.mains_frequency = flicker_freq; /* streamMode */ cam->params.camera_state.stream_mode = alternate; cam->pixelformat = V4L2_PIX_FMT_JPEG; v4l2_prio_init(&cam->prio); } static const struct v4l2_ioctl_ops cpia2_ioctl_ops = { .vidioc_querycap = 
cpia2_querycap, .vidioc_enum_input = cpia2_enum_input, .vidioc_g_input = cpia2_g_input, .vidioc_s_input = cpia2_s_input, .vidioc_enum_fmt_vid_cap = cpia2_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = cpia2_g_fmt_vid_cap, .vidioc_s_fmt_vid_cap = cpia2_s_fmt_vid_cap, .vidioc_try_fmt_vid_cap = cpia2_try_fmt_vid_cap, .vidioc_queryctrl = cpia2_queryctrl, .vidioc_querymenu = cpia2_querymenu, .vidioc_g_ctrl = cpia2_g_ctrl, .vidioc_s_ctrl = cpia2_s_ctrl, .vidioc_g_jpegcomp = cpia2_g_jpegcomp, .vidioc_s_jpegcomp = cpia2_s_jpegcomp, .vidioc_cropcap = cpia2_cropcap, .vidioc_reqbufs = cpia2_reqbufs, .vidioc_querybuf = cpia2_querybuf, .vidioc_qbuf = cpia2_qbuf, .vidioc_dqbuf = cpia2_dqbuf, .vidioc_streamon = cpia2_streamon, .vidioc_streamoff = cpia2_streamoff, .vidioc_g_priority = cpia2_g_priority, .vidioc_s_priority = cpia2_s_priority, .vidioc_default = cpia2_default, }; /*** * The v4l video device structure initialized for this device ***/ static const struct v4l2_file_operations cpia2_fops = { .owner = THIS_MODULE, .open = cpia2_open, .release = cpia2_close, .read = cpia2_v4l_read, .poll = cpia2_v4l_poll, .unlocked_ioctl = video_ioctl2, .mmap = cpia2_mmap, }; static struct video_device cpia2_template = { /* I could not find any place for the old .initialize initializer?? 
*/ .name = "CPiA2 Camera", .fops = &cpia2_fops, .ioctl_ops = &cpia2_ioctl_ops, .release = video_device_release, }; /****************************************************************************** * * cpia2_register_camera * *****************************************************************************/ int cpia2_register_camera(struct camera_data *cam) { cam->vdev = video_device_alloc(); if(!cam->vdev) return -ENOMEM; memcpy(cam->vdev, &cpia2_template, sizeof(cpia2_template)); video_set_drvdata(cam->vdev, cam); cam->vdev->lock = &cam->v4l2_lock; reset_camera_struct_v4l(cam); /* register v4l device */ if (video_register_device(cam->vdev, VFL_TYPE_GRABBER, video_nr) < 0) { ERR("video_register_device failed\n"); video_device_release(cam->vdev); return -ENODEV; } return 0; } /****************************************************************************** * * cpia2_unregister_camera * *****************************************************************************/ void cpia2_unregister_camera(struct camera_data *cam) { if (!cam->open_count) { video_unregister_device(cam->vdev); } else { LOG("%s removed while open, deferring " "video_unregister_device\n", video_device_node_name(cam->vdev)); } } /****************************************************************************** * * check_parameters * * Make sure that all user-supplied parameters are sensible *****************************************************************************/ static void __init check_parameters(void) { if(buffer_size < PAGE_SIZE) { buffer_size = PAGE_SIZE; LOG("buffer_size too small, setting to %d\n", buffer_size); } else if(buffer_size > 1024*1024) { /* arbitrary upper limiit */ buffer_size = 1024*1024; LOG("buffer_size ridiculously large, setting to %d\n", buffer_size); } else { buffer_size += PAGE_SIZE-1; buffer_size &= ~(PAGE_SIZE-1); } if(num_buffers < 1) { num_buffers = 1; LOG("num_buffers too small, setting to %d\n", num_buffers); } else if(num_buffers > VIDEO_MAX_FRAME) { num_buffers = 
VIDEO_MAX_FRAME; LOG("num_buffers too large, setting to %d\n", num_buffers); } if(alternate < USBIF_ISO_1 || alternate > USBIF_ISO_6) { alternate = DEFAULT_ALT; LOG("alternate specified is invalid, using %d\n", alternate); } if (flicker_mode != NEVER_FLICKER && flicker_mode != ANTI_FLICKER_ON) { flicker_mode = NEVER_FLICKER; LOG("Flicker mode specified is invalid, using %d\n", flicker_mode); } if (flicker_freq != FLICKER_50 && flicker_freq != FLICKER_60) { flicker_freq = FLICKER_60; LOG("Flicker mode specified is invalid, using %d\n", flicker_freq); } if(video_nr < -1 || video_nr > 64) { video_nr = -1; LOG("invalid video_nr specified, must be -1 to 64\n"); } DBG("Using %d buffers, each %d bytes, alternate=%d\n", num_buffers, buffer_size, alternate); } /************ Module Stuff ***************/ /****************************************************************************** * * cpia2_init/module_init * *****************************************************************************/ static int __init cpia2_init(void) { LOG("%s v%s\n", ABOUT, CPIA_VERSION); check_parameters(); cpia2_usb_init(); return 0; } /****************************************************************************** * * cpia2_exit/module_exit * *****************************************************************************/ static void __exit cpia2_exit(void) { cpia2_usb_cleanup(); schedule_timeout(2 * HZ); } module_init(cpia2_init); module_exit(cpia2_exit);
gpl-2.0
lim417dev/android_kernel_nubia_nx505j
drivers/staging/comedi/drivers/addi-data/addi_eeprom.c
8013
35880
/** @verbatim Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module. ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier Tel: +19(0)7223/9493-0 Fax: +49(0)7223/9493-92 http://www.addi-data.com info@addi-data.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA You should also find the complete GPL in the COPYING file accompanying this source code. @endverbatim */ /* +-----------------------------------------------------------------------+ | (C) ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier | +-----------------------------------------------------------------------+ | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com | | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com | +-----------------------------------------------------------------------+ | Project : ADDI DATA | Compiler : GCC | | Modulname : addi_eeprom.c | Version : 2.96 | +-------------------------------+---------------------------------------+ | Project manager: Eric Stolz | Date : 02/12/2002 | +-----------------------------------------------------------------------+ | Description : ADDI EEPROM Module | +-----------------------------------------------------------------------+ | UPDATE'S | +-----------------------------------------------------------------------+ | Date | Author | Description of updates | 
+----------+-----------+------------------------------------------------+ | | | | | | | | +----------+-----------+------------------------------------------------+ */ #define NVCMD_BEGIN_READ (0x7 << 5) /* nvRam begin read command */ #define NVCMD_LOAD_LOW (0x4 << 5) /* nvRam load low command */ #define NVCMD_LOAD_HIGH (0x5 << 5) /* nvRam load high command */ #define EE76_CMD_LEN 13 /* bits in instructions */ #define EE_READ 0x0180 /* 01 1000 0000 read instruction */ #define EEPROM_DIGITALINPUT 0 #define EEPROM_DIGITALOUTPUT 1 #define EEPROM_ANALOGINPUT 2 #define EEPROM_ANALOGOUTPUT 3 #define EEPROM_TIMER 4 #define EEPROM_WATCHDOG 5 #define EEPROM_TIMER_WATCHDOG_COUNTER 10 struct str_Functionality { unsigned char b_Type; unsigned short w_Address; }; struct str_MainHeader { unsigned short w_HeaderSize; unsigned char b_Nfunctions; struct str_Functionality s_Functions[7]; }; struct str_DigitalInputHeader { unsigned short w_Nchannel; unsigned char b_Interruptible; unsigned short w_NinterruptLogic; }; struct str_DigitalOutputHeader { unsigned short w_Nchannel; }; /* used for timer as well as watchdog */ struct str_TimerDetails { unsigned short w_HeaderSize; unsigned char b_Resolution; unsigned char b_Mode; /* in case of Watchdog it is functionality */ unsigned short w_MinTiming; unsigned char b_TimeBase; }; struct str_TimerMainHeader { unsigned short w_Ntimer; struct str_TimerDetails s_TimerDetails[4]; /* supports 4 timers */ }; struct str_AnalogOutputHeader { unsigned short w_Nchannel; unsigned char b_Resolution; }; struct str_AnalogInputHeader { unsigned short w_Nchannel; unsigned short w_MinConvertTiming; unsigned short w_MinDelayTiming; unsigned char b_HasDma; unsigned char b_Resolution; }; /*****************************************/ /* Read Header Functions */ /*****************************************/ int i_EepromReadMainHeader(unsigned short w_PCIBoardEepromAddress, char *pc_PCIChipInformation, struct comedi_device *dev); int 
i_EepromReadDigitalInputHeader(unsigned short w_PCIBoardEepromAddress, char *pc_PCIChipInformation, unsigned short w_Address, struct str_DigitalInputHeader *s_Header); int i_EepromReadDigitalOutputHeader(unsigned short w_PCIBoardEepromAddress, char *pc_PCIChipInformation, unsigned short w_Address, struct str_DigitalOutputHeader *s_Header); int i_EepromReadTimerHeader(unsigned short w_PCIBoardEepromAddress, char *pc_PCIChipInformation, unsigned short w_Address, struct str_TimerMainHeader *s_Header); int i_EepromReadAnlogOutputHeader(unsigned short w_PCIBoardEepromAddress, char *pc_PCIChipInformation, unsigned short w_Address, struct str_AnalogOutputHeader *s_Header); int i_EepromReadAnlogInputHeader(unsigned short w_PCIBoardEepromAddress, char *pc_PCIChipInformation, unsigned short w_Address, struct str_AnalogInputHeader *s_Header); /******************************************/ /* Eeprom Specific Functions */ /******************************************/ unsigned short w_EepromReadWord(unsigned short w_PCIBoardEepromAddress, char *pc_PCIChipInformation, unsigned short w_EepromStartAddress); void v_EepromWaitBusy(unsigned short w_PCIBoardEepromAddress); void v_EepromClock76(unsigned int dw_Address, unsigned int dw_RegisterValue); void v_EepromWaitBusy(unsigned short w_PCIBoardEepromAddress); void v_EepromSendCommand76(unsigned int dw_Address, unsigned int dw_EepromCommand, unsigned char b_DataLengthInBits); void v_EepromCs76Read(unsigned int dw_Address, unsigned short w_offset, unsigned short *pw_Value); /* +----------------------------------------------------------------------------+ | Function Name : unsigned short w_EepromReadWord | | (unsigned short w_PCIBoardEepromAddress, | | char * pc_PCIChipInformation, | | unsigned short w_EepromStartAddress) | +----------------------------------------------------------------------------+ | Task : Read from eepromn a word | +----------------------------------------------------------------------------+ | Input Parameters : 
unsigned short w_PCIBoardEepromAddress : PCI eeprom address | | | | char *pc_PCIChipInformation : PCI Chip Type. | | | | unsigned short w_EepromStartAddress : Selected eeprom address | +----------------------------------------------------------------------------+ | Output Parameters : - | +----------------------------------------------------------------------------+ | Return Value : Read word value from eeprom | +----------------------------------------------------------------------------+ */ unsigned short w_EepromReadWord(unsigned short w_PCIBoardEepromAddress, char *pc_PCIChipInformation, unsigned short w_EepromStartAddress) { unsigned char b_Counter = 0; unsigned char b_ReadByte = 0; unsigned char b_ReadLowByte = 0; unsigned char b_ReadHighByte = 0; unsigned char b_SelectedAddressLow = 0; unsigned char b_SelectedAddressHigh = 0; unsigned short w_ReadWord = 0; /**************************/ /* Test the PCI chip type */ /**************************/ if ((!strcmp(pc_PCIChipInformation, "S5920")) || (!strcmp(pc_PCIChipInformation, "S5933"))) { for (b_Counter = 0; b_Counter < 2; b_Counter++) { b_SelectedAddressLow = (w_EepromStartAddress + b_Counter) % 256; /* Read the low 8 bit part */ b_SelectedAddressHigh = (w_EepromStartAddress + b_Counter) / 256; /* Read the high 8 bit part */ /************************************/ /* Select the load low address mode */ /************************************/ outb(NVCMD_LOAD_LOW, w_PCIBoardEepromAddress + 0x3F); /****************/ /* Wait on busy */ /****************/ v_EepromWaitBusy(w_PCIBoardEepromAddress); /************************/ /* Load the low address */ /************************/ outb(b_SelectedAddressLow, w_PCIBoardEepromAddress + 0x3E); /****************/ /* Wait on busy */ /****************/ v_EepromWaitBusy(w_PCIBoardEepromAddress); /*************************************/ /* Select the load high address mode */ /*************************************/ outb(NVCMD_LOAD_HIGH, w_PCIBoardEepromAddress + 0x3F); 
/****************/ /* Wait on busy */ /****************/ v_EepromWaitBusy(w_PCIBoardEepromAddress); /*************************/ /* Load the high address */ /*************************/ outb(b_SelectedAddressHigh, w_PCIBoardEepromAddress + 0x3E); /****************/ /* Wait on busy */ /****************/ v_EepromWaitBusy(w_PCIBoardEepromAddress); /************************/ /* Select the READ mode */ /************************/ outb(NVCMD_BEGIN_READ, w_PCIBoardEepromAddress + 0x3F); /****************/ /* Wait on busy */ /****************/ v_EepromWaitBusy(w_PCIBoardEepromAddress); /*****************************/ /* Read data into the EEPROM */ /*****************************/ b_ReadByte = inb(w_PCIBoardEepromAddress + 0x3E); /****************/ /* Wait on busy */ /****************/ v_EepromWaitBusy(w_PCIBoardEepromAddress); /*********************************/ /* Select the upper address part */ /*********************************/ if (b_Counter == 0) { b_ReadLowByte = b_ReadByte; } /* if(b_Counter==0) */ else { b_ReadHighByte = b_ReadByte; } /* if(b_Counter==0) */ } /* for (b_Counter=0; b_Counter<2; b_Counter++) */ w_ReadWord = (b_ReadLowByte | (((unsigned short) b_ReadHighByte) * 256)); } /* end of if ((!strcmp(pc_PCIChipInformation, "S5920")) || (!strcmp(pc_PCIChipInformation, "S5933"))) */ if (!strcmp(pc_PCIChipInformation, "93C76")) { /*************************************/ /* Read 16 bit from the EEPROM 93C76 */ /*************************************/ v_EepromCs76Read(w_PCIBoardEepromAddress, w_EepromStartAddress, &w_ReadWord); } return w_ReadWord; } /* +----------------------------------------------------------------------------+ | Function Name : void v_EepromWaitBusy | | (unsigned short w_PCIBoardEepromAddress) | +----------------------------------------------------------------------------+ | Task : Wait the busy flag from PCI controller | +----------------------------------------------------------------------------+ | Input Parameters : unsigned short 
w_PCIBoardEepromAddress : PCI eeprom base address | +----------------------------------------------------------------------------+ | Output Parameters : - | +----------------------------------------------------------------------------+ | Return Value : - | +----------------------------------------------------------------------------+ */ void v_EepromWaitBusy(unsigned short w_PCIBoardEepromAddress) { unsigned char b_EepromBusy = 0; do { /*************/ /* IMPORTANT */ /*************/ /************************************************************************/ /* An error has been written in the AMCC 5933 book at the page B-13 */ /* Ex: if you read a byte and look for the busy statusEEPROM=0x80 and */ /* the operator register is AMCC_OP_REG_MCSR+3 */ /* unsigned short read EEPROM=0x8000 andAMCC_OP_REG_MCSR+2 */ /* unsigned int read EEPROM=0x80000000 and AMCC_OP_REG_MCSR */ /************************************************************************/ b_EepromBusy = inb(w_PCIBoardEepromAddress + 0x3F); b_EepromBusy = b_EepromBusy & 0x80; } while (b_EepromBusy == 0x80); } /* +---------------------------------------------------------------------------------+ | Function Name : void v_EepromClock76(unsigned int dw_Address, | | unsigned int dw_RegisterValue) | +---------------------------------------------------------------------------------+ | Task : This function sends the clocking sequence to the EEPROM. 
| +---------------------------------------------------------------------------------+ | Input Parameters : unsigned int dw_Address : PCI eeprom base address | | unsigned int dw_RegisterValue : PCI eeprom register value to write.| +---------------------------------------------------------------------------------+ | Output Parameters : - | +---------------------------------------------------------------------------------+ | Return Value : - | +---------------------------------------------------------------------------------+ */ void v_EepromClock76(unsigned int dw_Address, unsigned int dw_RegisterValue) { /************************/ /* Set EEPROM clock Low */ /************************/ outl(dw_RegisterValue & 0x6, dw_Address); /***************/ /* Wait 0.1 ms */ /***************/ udelay(100); /*************************/ /* Set EEPROM clock High */ /*************************/ outl(dw_RegisterValue | 0x1, dw_Address); /***************/ /* Wait 0.1 ms */ /***************/ udelay(100); } /* +---------------------------------------------------------------------------------+ | Function Name : void v_EepromSendCommand76(unsigned int dw_Address, | | unsigned int dw_EepromCommand, | | unsigned char b_DataLengthInBits) | +---------------------------------------------------------------------------------+ | Task : This function sends a Command to the EEPROM 93C76. | +---------------------------------------------------------------------------------+ | Input Parameters : unsigned int dw_Address : PCI eeprom base address | | unsigned int dw_EepromCommand : PCI eeprom command to write. | | unsigned char b_DataLengthInBits : PCI eeprom command data length. 
| +---------------------------------------------------------------------------------+ | Output Parameters : - | +---------------------------------------------------------------------------------+ | Return Value : - | +---------------------------------------------------------------------------------+ */ void v_EepromSendCommand76(unsigned int dw_Address, unsigned int dw_EepromCommand, unsigned char b_DataLengthInBits) { char c_BitPos = 0; unsigned int dw_RegisterValue = 0; /*****************************/ /* Enable EEPROM Chip Select */ /*****************************/ dw_RegisterValue = 0x2; /********************************************************************/ /* Toggle EEPROM's Chip select to get it out of Shift Register Mode */ /********************************************************************/ outl(dw_RegisterValue, dw_Address); /***************/ /* Wait 0.1 ms */ /***************/ udelay(100); /*******************************************/ /* Send EEPROM command - one bit at a time */ /*******************************************/ for (c_BitPos = (b_DataLengthInBits - 1); c_BitPos >= 0; c_BitPos--) { /**********************************/ /* Check if current bit is 0 or 1 */ /**********************************/ if (dw_EepromCommand & (1 << c_BitPos)) { /***********/ /* Write 1 */ /***********/ dw_RegisterValue = dw_RegisterValue | 0x4; } else { /***********/ /* Write 0 */ /***********/ dw_RegisterValue = dw_RegisterValue & 0x3; } /*********************/ /* Write the command */ /*********************/ outl(dw_RegisterValue, dw_Address); /***************/ /* Wait 0.1 ms */ /***************/ udelay(100); /****************************/ /* Trigger the EEPROM clock */ /****************************/ v_EepromClock76(dw_Address, dw_RegisterValue); } } /* +---------------------------------------------------------------------------------+ | Function Name : void v_EepromCs76Read(unsigned int dw_Address, | | unsigned short w_offset, | | unsigned short * pw_Value) | 
+---------------------------------------------------------------------------------+ | Task : This function read a value from the EEPROM 93C76. | +---------------------------------------------------------------------------------+ | Input Parameters : unsigned int dw_Address : PCI eeprom base address | | unsigned short w_offset : Offset of the address to read | | unsigned short * pw_Value : PCI eeprom 16 bit read value. | +---------------------------------------------------------------------------------+ | Output Parameters : - | +---------------------------------------------------------------------------------+ | Return Value : - | +---------------------------------------------------------------------------------+ */ void v_EepromCs76Read(unsigned int dw_Address, unsigned short w_offset, unsigned short *pw_Value) { char c_BitPos = 0; unsigned int dw_RegisterValue = 0; unsigned int dw_RegisterValueRead = 0; /*************************************************/ /* Send EEPROM read command and offset to EEPROM */ /*************************************************/ v_EepromSendCommand76(dw_Address, (EE_READ << 4) | (w_offset / 2), EE76_CMD_LEN); /*******************************/ /* Get the last register value */ /*******************************/ dw_RegisterValue = (((w_offset / 2) & 0x1) << 2) | 0x2; /*****************************/ /* Set the 16-bit value of 0 */ /*****************************/ *pw_Value = 0; /************************/ /* Get the 16-bit value */ /************************/ for (c_BitPos = 0; c_BitPos < 16; c_BitPos++) { /****************************/ /* Trigger the EEPROM clock */ /****************************/ v_EepromClock76(dw_Address, dw_RegisterValue); /**********************/ /* Get the result bit */ /**********************/ dw_RegisterValueRead = inl(dw_Address); /***************/ /* Wait 0.1 ms */ /***************/ udelay(100); /***************************************/ /* Get bit value and shift into result */ 
/***************************************/ if (dw_RegisterValueRead & 0x8) { /**********/ /* Read 1 */ /**********/ *pw_Value = (*pw_Value << 1) | 0x1; } else { /**********/ /* Read 0 */ /**********/ *pw_Value = (*pw_Value << 1); } } /*************************/ /* Clear all EEPROM bits */ /*************************/ dw_RegisterValue = 0x0; /********************************************************************/ /* Toggle EEPROM's Chip select to get it out of Shift Register Mode */ /********************************************************************/ outl(dw_RegisterValue, dw_Address); /***************/ /* Wait 0.1 ms */ /***************/ udelay(100); } /******************************************/ /* EEPROM HEADER READ FUNCTIONS */ /******************************************/ /* +----------------------------------------------------------------------------+ | Function Name : int i_EepromReadMainHeader(unsigned short w_PCIBoardEepromAddress, | | char * pc_PCIChipInformation,struct comedi_device *dev) | +----------------------------------------------------------------------------+ | Task : Read from eeprom Main Header | +----------------------------------------------------------------------------+ | Input Parameters : unsigned short w_PCIBoardEepromAddress : PCI eeprom address | | | | char *pc_PCIChipInformation : PCI Chip Type. 
| | | | struct comedi_device *dev : comedi device structure | | pointer | +----------------------------------------------------------------------------+ | Output Parameters : - | +----------------------------------------------------------------------------+ | Return Value : 0 | +----------------------------------------------------------------------------+ */ int i_EepromReadMainHeader(unsigned short w_PCIBoardEepromAddress, char *pc_PCIChipInformation, struct comedi_device *dev) { unsigned short w_Temp, i, w_Count = 0; unsigned int ui_Temp; struct str_MainHeader s_MainHeader; struct str_DigitalInputHeader s_DigitalInputHeader; struct str_DigitalOutputHeader s_DigitalOutputHeader; /* struct str_TimerMainHeader s_TimerMainHeader,s_WatchdogMainHeader; */ struct str_AnalogOutputHeader s_AnalogOutputHeader; struct str_AnalogInputHeader s_AnalogInputHeader; /* Read size */ s_MainHeader.w_HeaderSize = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + 8); /* Read nbr of functionality */ w_Temp = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + 10); s_MainHeader.b_Nfunctions = (unsigned char) w_Temp & 0x00FF; /* Read functionality details */ for (i = 0; i < s_MainHeader.b_Nfunctions; i++) { /* Read Type */ w_Temp = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + 12 + w_Count); s_MainHeader.s_Functions[i].b_Type = (unsigned char) w_Temp & 0x3F; w_Count = w_Count + 2; /* Read Address */ s_MainHeader.s_Functions[i].w_Address = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + 12 + w_Count); w_Count = w_Count + 2; } /* Display main header info */ for (i = 0; i < s_MainHeader.b_Nfunctions; i++) { switch (s_MainHeader.s_Functions[i].b_Type) { case EEPROM_DIGITALINPUT: i_EepromReadDigitalInputHeader(w_PCIBoardEepromAddress, pc_PCIChipInformation, s_MainHeader.s_Functions[i].w_Address, &s_DigitalInputHeader); devpriv->s_EeParameters.i_NbrDiChannel = s_DigitalInputHeader.w_Nchannel; break; 
case EEPROM_DIGITALOUTPUT: i_EepromReadDigitalOutputHeader(w_PCIBoardEepromAddress, pc_PCIChipInformation, s_MainHeader.s_Functions[i].w_Address, &s_DigitalOutputHeader); devpriv->s_EeParameters.i_NbrDoChannel = s_DigitalOutputHeader.w_Nchannel; ui_Temp = 0xffffffff; devpriv->s_EeParameters.i_DoMaxdata = ui_Temp >> (32 - devpriv->s_EeParameters.i_NbrDoChannel); break; case EEPROM_ANALOGINPUT: i_EepromReadAnlogInputHeader(w_PCIBoardEepromAddress, pc_PCIChipInformation, s_MainHeader.s_Functions[i].w_Address, &s_AnalogInputHeader); if (!(strcmp(this_board->pc_DriverName, "apci3200"))) devpriv->s_EeParameters.i_NbrAiChannel = s_AnalogInputHeader.w_Nchannel * 4; else devpriv->s_EeParameters.i_NbrAiChannel = s_AnalogInputHeader.w_Nchannel; devpriv->s_EeParameters.i_Dma = s_AnalogInputHeader.b_HasDma; devpriv->s_EeParameters.ui_MinAcquisitiontimeNs = (unsigned int) s_AnalogInputHeader.w_MinConvertTiming * 1000; devpriv->s_EeParameters.ui_MinDelaytimeNs = (unsigned int) s_AnalogInputHeader.w_MinDelayTiming * 1000; ui_Temp = 0xffff; devpriv->s_EeParameters.i_AiMaxdata = ui_Temp >> (16 - s_AnalogInputHeader.b_Resolution); break; case EEPROM_ANALOGOUTPUT: i_EepromReadAnlogOutputHeader(w_PCIBoardEepromAddress, pc_PCIChipInformation, s_MainHeader.s_Functions[i].w_Address, &s_AnalogOutputHeader); devpriv->s_EeParameters.i_NbrAoChannel = s_AnalogOutputHeader.w_Nchannel; ui_Temp = 0xffff; devpriv->s_EeParameters.i_AoMaxdata = ui_Temp >> (16 - s_AnalogOutputHeader.b_Resolution); break; case EEPROM_TIMER: /* Timer subdevice present */ devpriv->s_EeParameters.i_Timer = 1; break; case EEPROM_WATCHDOG: /* Timer subdevice present */ devpriv->s_EeParameters.i_Timer = 1; break; case EEPROM_TIMER_WATCHDOG_COUNTER: /* Timer subdevice present */ devpriv->s_EeParameters.i_Timer = 1; break; } } return 0; } /* +----------------------------------------------------------------------------+ | Function Name : int i_EepromReadDigitalInputHeader(unsigned short | | w_PCIBoardEepromAddress,char 
*pc_PCIChipInformation, | | unsigned short w_Address,struct str_DigitalInputHeader *s_Header) | | | +----------------------------------------------------------------------------+ | Task : Read Digital Input Header | +----------------------------------------------------------------------------+ | Input Parameters : unsigned short w_PCIBoardEepromAddress : PCI eeprom address | | | | char *pc_PCIChipInformation : PCI Chip Type. | | | | struct str_DigitalInputHeader *s_Header: Digita Input Header | | Pointer | +----------------------------------------------------------------------------+ | Output Parameters : - | +----------------------------------------------------------------------------+ | Return Value : 0 | +----------------------------------------------------------------------------+ */ int i_EepromReadDigitalInputHeader(unsigned short w_PCIBoardEepromAddress, char *pc_PCIChipInformation, unsigned short w_Address, struct str_DigitalInputHeader *s_Header) { unsigned short w_Temp; /* read nbr of channels */ s_Header->w_Nchannel = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + w_Address + 6); /* interruptible or not */ w_Temp = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + w_Address + 8); s_Header->b_Interruptible = (unsigned char) (w_Temp >> 7) & 0x01; /* How many interruptible logic */ s_Header->w_NinterruptLogic = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + w_Address + 10); return 0; } /* +----------------------------------------------------------------------------+ | Function Name : int i_EepromReadDigitalOutputHeader(unsigned short | | w_PCIBoardEepromAddress,char *pc_PCIChipInformation, | | unsigned short w_Address,struct str_DigitalOutputHeader *s_Header) | | | +----------------------------------------------------------------------------+ | Task : Read Digital Output Header | +----------------------------------------------------------------------------+ | Input Parameters : 
unsigned short w_PCIBoardEepromAddress : PCI eeprom address | | | | char *pc_PCIChipInformation : PCI Chip Type. | | | | struct str_DigitalOutputHeader *s_Header: Digital Output Header| | Pointer | +----------------------------------------------------------------------------+ | Output Parameters : - | +----------------------------------------------------------------------------+ | Return Value : 0 | +----------------------------------------------------------------------------+ */ int i_EepromReadDigitalOutputHeader(unsigned short w_PCIBoardEepromAddress, char *pc_PCIChipInformation, unsigned short w_Address, struct str_DigitalOutputHeader *s_Header) { /* Read Nbr channels */ s_Header->w_Nchannel = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + w_Address + 6); return 0; } /* +----------------------------------------------------------------------------+ | Function Name : int i_EepromReadTimerHeader(unsigned short w_PCIBoardEepromAddress, | | char *pc_PCIChipInformation,WORD w_Address, | | struct str_TimerMainHeader *s_Header) | +----------------------------------------------------------------------------+ | Task : Read Timer or Watchdog Header | +----------------------------------------------------------------------------+ | Input Parameters : unsigned short w_PCIBoardEepromAddress : PCI eeprom address | | | | char *pc_PCIChipInformation : PCI Chip Type. 
| | | | struct str_TimerMainHeader *s_Header: Timer Header | | Pointer | +----------------------------------------------------------------------------+ | Output Parameters : - | +----------------------------------------------------------------------------+ | Return Value : 0 | +----------------------------------------------------------------------------+ */ int i_EepromReadTimerHeader(unsigned short w_PCIBoardEepromAddress, char *pc_PCIChipInformation, unsigned short w_Address, struct str_TimerMainHeader *s_Header) { unsigned short i, w_Size = 0, w_Temp; /* Read No of Timer */ s_Header->w_Ntimer = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + w_Address + 6); /* Read header size */ for (i = 0; i < s_Header->w_Ntimer; i++) { s_Header->s_TimerDetails[i].w_HeaderSize = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + w_Address + 8 + w_Size + 0); w_Temp = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + w_Address + 8 + w_Size + 2); /* Read Resolution */ s_Header->s_TimerDetails[i].b_Resolution = (unsigned char) (w_Temp >> 10) & 0x3F; /* Read Mode */ s_Header->s_TimerDetails[i].b_Mode = (unsigned char) (w_Temp >> 4) & 0x3F; w_Temp = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + w_Address + 8 + w_Size + 4); /* Read MinTiming */ s_Header->s_TimerDetails[i].w_MinTiming = (w_Temp >> 6) & 0x3FF; /* Read Timebase */ s_Header->s_TimerDetails[i].b_TimeBase = (unsigned char) (w_Temp) & 0x3F; w_Size += s_Header->s_TimerDetails[i].w_HeaderSize; } return 0; } /* +----------------------------------------------------------------------------+ | Function Name : int i_EepromReadAnlogOutputHeader(unsigned short | | w_PCIBoardEepromAddress,char *pc_PCIChipInformation, | | unsigned short w_Address,str_AnalogOutputHeader *s_Header) | +----------------------------------------------------------------------------+ | Task : Read Nalog Output Header | 
+----------------------------------------------------------------------------+ | Input Parameters : unsigned short w_PCIBoardEepromAddress : PCI eeprom address | | | | char *pc_PCIChipInformation : PCI Chip Type. | | | | str_AnalogOutputHeader *s_Header:Anlog Output Header | | Pointer | +----------------------------------------------------------------------------+ | Output Parameters : - | +----------------------------------------------------------------------------+ | Return Value : 0 | +----------------------------------------------------------------------------+ */ int i_EepromReadAnlogOutputHeader(unsigned short w_PCIBoardEepromAddress, char *pc_PCIChipInformation, unsigned short w_Address, struct str_AnalogOutputHeader *s_Header) { unsigned short w_Temp; /* No of channels for 1st hard component */ w_Temp = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + w_Address + 10); s_Header->w_Nchannel = (w_Temp >> 4) & 0x03FF; /* Resolution for 1st hard component */ w_Temp = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + w_Address + 16); s_Header->b_Resolution = (unsigned char) (w_Temp >> 8) & 0xFF; return 0; } /* +----------------------------------------------------------------------------+ | Function Name : int i_EepromReadAnlogInputHeader(unsigned short | | w_PCIBoardEepromAddress,char *pc_PCIChipInformation, | | unsigned short w_Address,struct str_AnalogInputHeader *s_Header) | +----------------------------------------------------------------------------+ | Task : Read Nalog Output Header | +----------------------------------------------------------------------------+ | Input Parameters : unsigned short w_PCIBoardEepromAddress : PCI eeprom address | | | | char *pc_PCIChipInformation : PCI Chip Type. 
| | | | struct str_AnalogInputHeader *s_Header:Anlog Input Header | | Pointer | +----------------------------------------------------------------------------+ | Output Parameters : - | +----------------------------------------------------------------------------+ | Return Value : 0 | +----------------------------------------------------------------------------+ */ /* Reads only for ONE hardware component */ int i_EepromReadAnlogInputHeader(unsigned short w_PCIBoardEepromAddress, char *pc_PCIChipInformation, unsigned short w_Address, struct str_AnalogInputHeader *s_Header) { unsigned short w_Temp, w_Offset; w_Temp = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + w_Address + 10); s_Header->w_Nchannel = (w_Temp >> 4) & 0x03FF; s_Header->w_MinConvertTiming = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + w_Address + 16); s_Header->w_MinDelayTiming = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + w_Address + 30); w_Temp = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + w_Address + 20); s_Header->b_HasDma = (w_Temp >> 13) & 0x01; /* whether dma present or not */ w_Temp = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + w_Address + 72); /* reading Y */ w_Temp = w_Temp & 0x00FF; if (w_Temp) /* Y>0 */ { w_Offset = 74 + (2 * w_Temp) + (10 * (1 + (w_Temp / 16))); /* offset of first analog input single header */ w_Offset = w_Offset + 2; /* resolution */ } else /* Y=0 */ { w_Offset = 74; w_Offset = w_Offset + 2; /* resolution */ } /* read Resolution */ w_Temp = w_EepromReadWord(w_PCIBoardEepromAddress, pc_PCIChipInformation, 0x100 + w_Address + w_Offset); s_Header->b_Resolution = w_Temp & 0x001F; /* last 5 bits */ return 0; }
gpl-2.0
GenTarkin/SGH-T769-kernel
fs/fscache/operation.c
8013
12096
/* FS-Cache worker operation management routines * * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * See Documentation/filesystems/caching/operations.txt */ #define FSCACHE_DEBUG_LEVEL OPERATION #include <linux/module.h> #include <linux/seq_file.h> #include <linux/slab.h> #include "internal.h" atomic_t fscache_op_debug_id; EXPORT_SYMBOL(fscache_op_debug_id); /** * fscache_enqueue_operation - Enqueue an operation for processing * @op: The operation to enqueue * * Enqueue an operation for processing by the FS-Cache thread pool. * * This will get its own ref on the object. */ void fscache_enqueue_operation(struct fscache_operation *op) { _enter("{OBJ%x OP%x,%u}", op->object->debug_id, op->debug_id, atomic_read(&op->usage)); ASSERT(list_empty(&op->pend_link)); ASSERT(op->processor != NULL); ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE); ASSERTCMP(atomic_read(&op->usage), >, 0); fscache_stat(&fscache_n_op_enqueue); switch (op->flags & FSCACHE_OP_TYPE) { case FSCACHE_OP_ASYNC: _debug("queue async"); atomic_inc(&op->usage); if (!queue_work(fscache_op_wq, &op->work)) fscache_put_operation(op); break; case FSCACHE_OP_MYTHREAD: _debug("queue for caller's attention"); break; default: printk(KERN_ERR "FS-Cache: Unexpected op type %lx", op->flags); BUG(); break; } } EXPORT_SYMBOL(fscache_enqueue_operation); /* * start an op running */ static void fscache_run_op(struct fscache_object *object, struct fscache_operation *op) { object->n_in_progress++; if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) wake_up_bit(&op->flags, FSCACHE_OP_WAITING); if (op->processor) fscache_enqueue_operation(op); fscache_stat(&fscache_n_op_run); } /* * submit an exclusive 
operation for an object * - other ops are excluded from running simultaneously with this one * - this gets any extra refs it needs on an op */ int fscache_submit_exclusive_op(struct fscache_object *object, struct fscache_operation *op) { int ret; _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id); spin_lock(&object->lock); ASSERTCMP(object->n_ops, >=, object->n_in_progress); ASSERTCMP(object->n_ops, >=, object->n_exclusive); ASSERT(list_empty(&op->pend_link)); ret = -ENOBUFS; if (fscache_object_is_active(object)) { op->object = object; object->n_ops++; object->n_exclusive++; /* reads and writes must wait */ if (object->n_ops > 1) { atomic_inc(&op->usage); list_add_tail(&op->pend_link, &object->pending_ops); fscache_stat(&fscache_n_op_pend); } else if (!list_empty(&object->pending_ops)) { atomic_inc(&op->usage); list_add_tail(&op->pend_link, &object->pending_ops); fscache_stat(&fscache_n_op_pend); fscache_start_operations(object); } else { ASSERTCMP(object->n_in_progress, ==, 0); fscache_run_op(object, op); } /* need to issue a new write op after this */ clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); ret = 0; } else if (object->state == FSCACHE_OBJECT_CREATING) { op->object = object; object->n_ops++; object->n_exclusive++; /* reads and writes must wait */ atomic_inc(&op->usage); list_add_tail(&op->pend_link, &object->pending_ops); fscache_stat(&fscache_n_op_pend); ret = 0; } else { /* not allowed to submit ops in any other state */ BUG(); } spin_unlock(&object->lock); return ret; } /* * report an unexpected submission */ static void fscache_report_unexpected_submission(struct fscache_object *object, struct fscache_operation *op, unsigned long ostate) { static bool once_only; struct fscache_operation *p; unsigned n; if (once_only) return; once_only = true; kdebug("unexpected submission OP%x [OBJ%x %s]", op->debug_id, object->debug_id, fscache_object_states[object->state]); kdebug("objstate=%s [%s]", fscache_object_states[object->state], 
fscache_object_states[ostate]); kdebug("objflags=%lx", object->flags); kdebug("objevent=%lx [%lx]", object->events, object->event_mask); kdebug("ops=%u inp=%u exc=%u", object->n_ops, object->n_in_progress, object->n_exclusive); if (!list_empty(&object->pending_ops)) { n = 0; list_for_each_entry(p, &object->pending_ops, pend_link) { ASSERTCMP(p->object, ==, object); kdebug("%p %p", op->processor, op->release); n++; } kdebug("n=%u", n); } dump_stack(); } /* * submit an operation for an object * - objects may be submitted only in the following states: * - during object creation (write ops may be submitted) * - whilst the object is active * - after an I/O error incurred in one of the two above states (op rejected) * - this gets any extra refs it needs on an op */ int fscache_submit_op(struct fscache_object *object, struct fscache_operation *op) { unsigned long ostate; int ret; _enter("{OBJ%x OP%x},{%u}", object->debug_id, op->debug_id, atomic_read(&op->usage)); ASSERTCMP(atomic_read(&op->usage), >, 0); spin_lock(&object->lock); ASSERTCMP(object->n_ops, >=, object->n_in_progress); ASSERTCMP(object->n_ops, >=, object->n_exclusive); ASSERT(list_empty(&op->pend_link)); ostate = object->state; smp_rmb(); if (fscache_object_is_active(object)) { op->object = object; object->n_ops++; if (object->n_exclusive > 0) { atomic_inc(&op->usage); list_add_tail(&op->pend_link, &object->pending_ops); fscache_stat(&fscache_n_op_pend); } else if (!list_empty(&object->pending_ops)) { atomic_inc(&op->usage); list_add_tail(&op->pend_link, &object->pending_ops); fscache_stat(&fscache_n_op_pend); fscache_start_operations(object); } else { ASSERTCMP(object->n_exclusive, ==, 0); fscache_run_op(object, op); } ret = 0; } else if (object->state == FSCACHE_OBJECT_CREATING) { op->object = object; object->n_ops++; atomic_inc(&op->usage); list_add_tail(&op->pend_link, &object->pending_ops); fscache_stat(&fscache_n_op_pend); ret = 0; } else if (object->state == FSCACHE_OBJECT_DYING || object->state == 
FSCACHE_OBJECT_LC_DYING || object->state == FSCACHE_OBJECT_WITHDRAWING) { fscache_stat(&fscache_n_op_rejected); ret = -ENOBUFS; } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) { fscache_report_unexpected_submission(object, op, ostate); ASSERT(!fscache_object_is_active(object)); ret = -ENOBUFS; } else { ret = -ENOBUFS; } spin_unlock(&object->lock); return ret; } /* * queue an object for withdrawal on error, aborting all following asynchronous * operations */ void fscache_abort_object(struct fscache_object *object) { _enter("{OBJ%x}", object->debug_id); fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR); } /* * jump start the operation processing on an object * - caller must hold object->lock */ void fscache_start_operations(struct fscache_object *object) { struct fscache_operation *op; bool stop = false; while (!list_empty(&object->pending_ops) && !stop) { op = list_entry(object->pending_ops.next, struct fscache_operation, pend_link); if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) { if (object->n_in_progress > 0) break; stop = true; } list_del_init(&op->pend_link); fscache_run_op(object, op); /* the pending queue was holding a ref on the object */ fscache_put_operation(op); } ASSERTCMP(object->n_in_progress, <=, object->n_ops); _debug("woke %d ops on OBJ%x", object->n_in_progress, object->debug_id); } /* * cancel an operation that's pending on an object */ int fscache_cancel_op(struct fscache_operation *op) { struct fscache_object *object = op->object; int ret; _enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id); spin_lock(&object->lock); ret = -EBUSY; if (!list_empty(&op->pend_link)) { fscache_stat(&fscache_n_op_cancelled); list_del_init(&op->pend_link); object->n_ops--; if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) object->n_exclusive--; if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) wake_up_bit(&op->flags, FSCACHE_OP_WAITING); fscache_put_operation(op); ret = 0; } spin_unlock(&object->lock); _leave(" = %d", ret); return ret; } /* 
* release an operation * - queues pending ops if this is the last in-progress op */ void fscache_put_operation(struct fscache_operation *op) { struct fscache_object *object; struct fscache_cache *cache; _enter("{OBJ%x OP%x,%d}", op->object->debug_id, op->debug_id, atomic_read(&op->usage)); ASSERTCMP(atomic_read(&op->usage), >, 0); if (!atomic_dec_and_test(&op->usage)) return; _debug("PUT OP"); if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags)) BUG(); fscache_stat(&fscache_n_op_release); if (op->release) { op->release(op); op->release = NULL; } object = op->object; if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) atomic_dec(&object->n_reads); /* now... we may get called with the object spinlock held, so we * complete the cleanup here only if we can immediately acquire the * lock, and defer it otherwise */ if (!spin_trylock(&object->lock)) { _debug("defer put"); fscache_stat(&fscache_n_op_deferred_release); cache = object->cache; spin_lock(&cache->op_gc_list_lock); list_add_tail(&op->pend_link, &cache->op_gc_list); spin_unlock(&cache->op_gc_list_lock); schedule_work(&cache->op_gc); _leave(" [defer]"); return; } if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) { ASSERTCMP(object->n_exclusive, >, 0); object->n_exclusive--; } ASSERTCMP(object->n_in_progress, >, 0); object->n_in_progress--; if (object->n_in_progress == 0) fscache_start_operations(object); ASSERTCMP(object->n_ops, >, 0); object->n_ops--; if (object->n_ops == 0) fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED); spin_unlock(&object->lock); kfree(op); _leave(" [done]"); } EXPORT_SYMBOL(fscache_put_operation); /* * garbage collect operations that have had their release deferred */ void fscache_operation_gc(struct work_struct *work) { struct fscache_operation *op; struct fscache_object *object; struct fscache_cache *cache = container_of(work, struct fscache_cache, op_gc); int count = 0; _enter(""); do { spin_lock(&cache->op_gc_list_lock); if (list_empty(&cache->op_gc_list)) { 
spin_unlock(&cache->op_gc_list_lock); break; } op = list_entry(cache->op_gc_list.next, struct fscache_operation, pend_link); list_del(&op->pend_link); spin_unlock(&cache->op_gc_list_lock); object = op->object; _debug("GC DEFERRED REL OBJ%x OP%x", object->debug_id, op->debug_id); fscache_stat(&fscache_n_op_gc); ASSERTCMP(atomic_read(&op->usage), ==, 0); spin_lock(&object->lock); if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) { ASSERTCMP(object->n_exclusive, >, 0); object->n_exclusive--; } ASSERTCMP(object->n_in_progress, >, 0); object->n_in_progress--; if (object->n_in_progress == 0) fscache_start_operations(object); ASSERTCMP(object->n_ops, >, 0); object->n_ops--; if (object->n_ops == 0) fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED); spin_unlock(&object->lock); } while (count++ < 20); if (!list_empty(&cache->op_gc_list)) schedule_work(&cache->op_gc); _leave(""); } /* * execute an operation using fs_op_wq to provide processing context - * the caller holds a ref to this object, so we don't need to hold one */ void fscache_op_work_func(struct work_struct *work) { struct fscache_operation *op = container_of(work, struct fscache_operation, work); unsigned long start; _enter("{OBJ%x OP%x,%d}", op->object->debug_id, op->debug_id, atomic_read(&op->usage)); ASSERT(op->processor != NULL); start = jiffies; op->processor(op); fscache_hist(fscache_ops_histogram, start); fscache_put_operation(op); _leave(""); }
gpl-2.0
cmvienneau/android_kernel_htc_m4
sound/pci/ice1712/delta.c
8269
25303
/* * ALSA driver for ICEnsemble ICE1712 (Envy24) * * Lowlevel functions for M-Audio Delta 1010, 1010E, 44, 66, 66E, Dio2496, * Audiophile, Digigram VX442 * * Copyright (c) 2000 Jaroslav Kysela <perex@perex.cz> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <asm/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/cs8427.h> #include <sound/asoundef.h> #include "ice1712.h" #include "delta.h" #define SND_CS8403 #include <sound/cs8403.h> /* * CS8427 via SPI mode (for Audiophile), emulated I2C */ /* send 8 bits */ static void ap_cs8427_write_byte(struct snd_ice1712 *ice, unsigned char data, unsigned char tmp) { int idx; for (idx = 7; idx >= 0; idx--) { tmp &= ~(ICE1712_DELTA_AP_DOUT|ICE1712_DELTA_AP_CCLK); if (data & (1 << idx)) tmp |= ICE1712_DELTA_AP_DOUT; snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, tmp); udelay(5); tmp |= ICE1712_DELTA_AP_CCLK; snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, tmp); udelay(5); } } /* read 8 bits */ static unsigned char ap_cs8427_read_byte(struct snd_ice1712 *ice, unsigned char tmp) { unsigned char data = 0; int idx; for (idx = 7; idx >= 0; idx--) { tmp &= ~ICE1712_DELTA_AP_CCLK; snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, tmp); udelay(5); if 
(snd_ice1712_read(ice, ICE1712_IREG_GPIO_DATA) & ICE1712_DELTA_AP_DIN) data |= 1 << idx; tmp |= ICE1712_DELTA_AP_CCLK; snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, tmp); udelay(5); } return data; } /* assert chip select */ static unsigned char ap_cs8427_codec_select(struct snd_ice1712 *ice) { unsigned char tmp; tmp = snd_ice1712_read(ice, ICE1712_IREG_GPIO_DATA); switch (ice->eeprom.subvendor) { case ICE1712_SUBDEVICE_DELTA1010E: case ICE1712_SUBDEVICE_DELTA1010LT: tmp &= ~ICE1712_DELTA_1010LT_CS; tmp |= ICE1712_DELTA_1010LT_CCLK | ICE1712_DELTA_1010LT_CS_CS8427; break; case ICE1712_SUBDEVICE_AUDIOPHILE: case ICE1712_SUBDEVICE_DELTA410: tmp |= ICE1712_DELTA_AP_CCLK | ICE1712_DELTA_AP_CS_CODEC; tmp &= ~ICE1712_DELTA_AP_CS_DIGITAL; break; case ICE1712_SUBDEVICE_DELTA66E: tmp |= ICE1712_DELTA_66E_CCLK | ICE1712_DELTA_66E_CS_CHIP_A | ICE1712_DELTA_66E_CS_CHIP_B; tmp &= ~ICE1712_DELTA_66E_CS_CS8427; break; case ICE1712_SUBDEVICE_VX442: tmp |= ICE1712_VX442_CCLK | ICE1712_VX442_CODEC_CHIP_A | ICE1712_VX442_CODEC_CHIP_B; tmp &= ~ICE1712_VX442_CS_DIGITAL; break; } snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, tmp); udelay(5); return tmp; } /* deassert chip select */ static void ap_cs8427_codec_deassert(struct snd_ice1712 *ice, unsigned char tmp) { switch (ice->eeprom.subvendor) { case ICE1712_SUBDEVICE_DELTA1010E: case ICE1712_SUBDEVICE_DELTA1010LT: tmp &= ~ICE1712_DELTA_1010LT_CS; tmp |= ICE1712_DELTA_1010LT_CS_NONE; break; case ICE1712_SUBDEVICE_AUDIOPHILE: case ICE1712_SUBDEVICE_DELTA410: tmp |= ICE1712_DELTA_AP_CS_DIGITAL; break; case ICE1712_SUBDEVICE_DELTA66E: tmp |= ICE1712_DELTA_66E_CS_CS8427; break; case ICE1712_SUBDEVICE_VX442: tmp |= ICE1712_VX442_CS_DIGITAL; break; } snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, tmp); } /* sequential write */ static int ap_cs8427_sendbytes(struct snd_i2c_device *device, unsigned char *bytes, int count) { struct snd_ice1712 *ice = device->bus->private_data; int res = count; unsigned char tmp; mutex_lock(&ice->gpio_mutex); 
tmp = ap_cs8427_codec_select(ice); ap_cs8427_write_byte(ice, (device->addr << 1) | 0, tmp); /* address + write mode */ while (count-- > 0) ap_cs8427_write_byte(ice, *bytes++, tmp); ap_cs8427_codec_deassert(ice, tmp); mutex_unlock(&ice->gpio_mutex); return res; } /* sequential read */ static int ap_cs8427_readbytes(struct snd_i2c_device *device, unsigned char *bytes, int count) { struct snd_ice1712 *ice = device->bus->private_data; int res = count; unsigned char tmp; mutex_lock(&ice->gpio_mutex); tmp = ap_cs8427_codec_select(ice); ap_cs8427_write_byte(ice, (device->addr << 1) | 1, tmp); /* address + read mode */ while (count-- > 0) *bytes++ = ap_cs8427_read_byte(ice, tmp); ap_cs8427_codec_deassert(ice, tmp); mutex_unlock(&ice->gpio_mutex); return res; } static int ap_cs8427_probeaddr(struct snd_i2c_bus *bus, unsigned short addr) { if (addr == 0x10) return 1; return -ENOENT; } static struct snd_i2c_ops ap_cs8427_i2c_ops = { .sendbytes = ap_cs8427_sendbytes, .readbytes = ap_cs8427_readbytes, .probeaddr = ap_cs8427_probeaddr, }; /* */ static void snd_ice1712_delta_cs8403_spdif_write(struct snd_ice1712 *ice, unsigned char bits) { unsigned char tmp, mask1, mask2; int idx; /* send byte to transmitter */ mask1 = ICE1712_DELTA_SPDIF_OUT_STAT_CLOCK; mask2 = ICE1712_DELTA_SPDIF_OUT_STAT_DATA; mutex_lock(&ice->gpio_mutex); tmp = snd_ice1712_read(ice, ICE1712_IREG_GPIO_DATA); for (idx = 7; idx >= 0; idx--) { tmp &= ~(mask1 | mask2); if (bits & (1 << idx)) tmp |= mask2; snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, tmp); udelay(100); tmp |= mask1; snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, tmp); udelay(100); } tmp &= ~mask1; snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, tmp); mutex_unlock(&ice->gpio_mutex); } static void delta_spdif_default_get(struct snd_ice1712 *ice, struct snd_ctl_elem_value *ucontrol) { snd_cs8403_decode_spdif_bits(&ucontrol->value.iec958, ice->spdif.cs8403_bits); } static int delta_spdif_default_put(struct snd_ice1712 *ice, struct snd_ctl_elem_value 
*ucontrol) { unsigned int val; int change; val = snd_cs8403_encode_spdif_bits(&ucontrol->value.iec958); spin_lock_irq(&ice->reg_lock); change = ice->spdif.cs8403_bits != val; ice->spdif.cs8403_bits = val; if (change && ice->playback_pro_substream == NULL) { spin_unlock_irq(&ice->reg_lock); snd_ice1712_delta_cs8403_spdif_write(ice, val); } else { spin_unlock_irq(&ice->reg_lock); } return change; } static void delta_spdif_stream_get(struct snd_ice1712 *ice, struct snd_ctl_elem_value *ucontrol) { snd_cs8403_decode_spdif_bits(&ucontrol->value.iec958, ice->spdif.cs8403_stream_bits); } static int delta_spdif_stream_put(struct snd_ice1712 *ice, struct snd_ctl_elem_value *ucontrol) { unsigned int val; int change; val = snd_cs8403_encode_spdif_bits(&ucontrol->value.iec958); spin_lock_irq(&ice->reg_lock); change = ice->spdif.cs8403_stream_bits != val; ice->spdif.cs8403_stream_bits = val; if (change && ice->playback_pro_substream != NULL) { spin_unlock_irq(&ice->reg_lock); snd_ice1712_delta_cs8403_spdif_write(ice, val); } else { spin_unlock_irq(&ice->reg_lock); } return change; } /* * AK4524 on Delta 44 and 66 to choose the chip mask */ static void delta_ak4524_lock(struct snd_akm4xxx *ak, int chip) { struct snd_ak4xxx_private *priv = (void *)ak->private_value[0]; struct snd_ice1712 *ice = ak->private_data[0]; snd_ice1712_save_gpio_status(ice); priv->cs_mask = priv->cs_addr = chip == 0 ? 
ICE1712_DELTA_CODEC_CHIP_A : ICE1712_DELTA_CODEC_CHIP_B; } /* * AK4524 on Delta1010LT to choose the chip address */ static void delta1010lt_ak4524_lock(struct snd_akm4xxx *ak, int chip) { struct snd_ak4xxx_private *priv = (void *)ak->private_value[0]; struct snd_ice1712 *ice = ak->private_data[0]; snd_ice1712_save_gpio_status(ice); priv->cs_mask = ICE1712_DELTA_1010LT_CS; priv->cs_addr = chip << 4; } /* * AK4524 on Delta66 rev E to choose the chip address */ static void delta66e_ak4524_lock(struct snd_akm4xxx *ak, int chip) { struct snd_ak4xxx_private *priv = (void *)ak->private_value[0]; struct snd_ice1712 *ice = ak->private_data[0]; snd_ice1712_save_gpio_status(ice); priv->cs_mask = priv->cs_addr = chip == 0 ? ICE1712_DELTA_66E_CS_CHIP_A : ICE1712_DELTA_66E_CS_CHIP_B; } /* * AK4528 on VX442 to choose the chip mask */ static void vx442_ak4524_lock(struct snd_akm4xxx *ak, int chip) { struct snd_ak4xxx_private *priv = (void *)ak->private_value[0]; struct snd_ice1712 *ice = ak->private_data[0]; snd_ice1712_save_gpio_status(ice); priv->cs_mask = priv->cs_addr = chip == 0 ? 
ICE1712_VX442_CODEC_CHIP_A : ICE1712_VX442_CODEC_CHIP_B; } /* * change the DFS bit according rate for Delta1010 */ static void delta_1010_set_rate_val(struct snd_ice1712 *ice, unsigned int rate) { unsigned char tmp, tmp2; if (rate == 0) /* no hint - S/PDIF input is master, simply return */ return; mutex_lock(&ice->gpio_mutex); tmp = snd_ice1712_read(ice, ICE1712_IREG_GPIO_DATA); tmp2 = tmp & ~ICE1712_DELTA_DFS; if (rate > 48000) tmp2 |= ICE1712_DELTA_DFS; if (tmp != tmp2) snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, tmp2); mutex_unlock(&ice->gpio_mutex); } /* * change the rate of AK4524 on Delta 44/66, AP, 1010LT */ static void delta_ak4524_set_rate_val(struct snd_akm4xxx *ak, unsigned int rate) { unsigned char tmp, tmp2; struct snd_ice1712 *ice = ak->private_data[0]; if (rate == 0) /* no hint - S/PDIF input is master, simply return */ return; /* check before reset ak4524 to avoid unnecessary clicks */ mutex_lock(&ice->gpio_mutex); tmp = snd_ice1712_read(ice, ICE1712_IREG_GPIO_DATA); mutex_unlock(&ice->gpio_mutex); tmp2 = tmp & ~ICE1712_DELTA_DFS; if (rate > 48000) tmp2 |= ICE1712_DELTA_DFS; if (tmp == tmp2) return; /* do it again */ snd_akm4xxx_reset(ak, 1); mutex_lock(&ice->gpio_mutex); tmp = snd_ice1712_read(ice, ICE1712_IREG_GPIO_DATA) & ~ICE1712_DELTA_DFS; if (rate > 48000) tmp |= ICE1712_DELTA_DFS; snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, tmp); mutex_unlock(&ice->gpio_mutex); snd_akm4xxx_reset(ak, 0); } /* * change the rate of AK4524 on VX442 */ static void vx442_ak4524_set_rate_val(struct snd_akm4xxx *ak, unsigned int rate) { unsigned char val; val = (rate > 48000) ? 
0x65 : 0x60; if (snd_akm4xxx_get(ak, 0, 0x02) != val || snd_akm4xxx_get(ak, 1, 0x02) != val) { snd_akm4xxx_reset(ak, 1); snd_akm4xxx_write(ak, 0, 0x02, val); snd_akm4xxx_write(ak, 1, 0x02, val); snd_akm4xxx_reset(ak, 0); } } /* * SPDIF ops for Delta 1010, Dio, 66 */ /* open callback */ static void delta_open_spdif(struct snd_ice1712 *ice, struct snd_pcm_substream *substream) { ice->spdif.cs8403_stream_bits = ice->spdif.cs8403_bits; } /* set up */ static void delta_setup_spdif(struct snd_ice1712 *ice, int rate) { unsigned long flags; unsigned int tmp; int change; spin_lock_irqsave(&ice->reg_lock, flags); tmp = ice->spdif.cs8403_stream_bits; if (tmp & 0x01) /* consumer */ tmp &= (tmp & 0x01) ? ~0x06 : ~0x18; switch (rate) { case 32000: tmp |= (tmp & 0x01) ? 0x04 : 0x00; break; case 44100: tmp |= (tmp & 0x01) ? 0x00 : 0x10; break; case 48000: tmp |= (tmp & 0x01) ? 0x02 : 0x08; break; default: tmp |= (tmp & 0x01) ? 0x00 : 0x18; break; } change = ice->spdif.cs8403_stream_bits != tmp; ice->spdif.cs8403_stream_bits = tmp; spin_unlock_irqrestore(&ice->reg_lock, flags); if (change) snd_ctl_notify(ice->card, SNDRV_CTL_EVENT_MASK_VALUE, &ice->spdif.stream_ctl->id); snd_ice1712_delta_cs8403_spdif_write(ice, tmp); } #define snd_ice1712_delta1010lt_wordclock_status_info \ snd_ctl_boolean_mono_info static int snd_ice1712_delta1010lt_wordclock_status_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { char reg = 0x10; /* CS8427 receiver error register */ struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); if (snd_i2c_sendbytes(ice->cs8427, &reg, 1) != 1) snd_printk(KERN_ERR "unable to send register 0x%x byte to CS8427\n", reg); snd_i2c_readbytes(ice->cs8427, &reg, 1); ucontrol->value.integer.value[0] = (reg & CS8427_UNLOCK) ? 
1 : 0; return 0; } static struct snd_kcontrol_new snd_ice1712_delta1010lt_wordclock_status __devinitdata = { .access = (SNDRV_CTL_ELEM_ACCESS_READ), .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Word Clock Status", .info = snd_ice1712_delta1010lt_wordclock_status_info, .get = snd_ice1712_delta1010lt_wordclock_status_get, }; /* * initialize the chips on M-Audio cards */ static struct snd_akm4xxx akm_audiophile __devinitdata = { .type = SND_AK4528, .num_adcs = 2, .num_dacs = 2, .ops = { .set_rate_val = delta_ak4524_set_rate_val } }; static struct snd_ak4xxx_private akm_audiophile_priv __devinitdata = { .caddr = 2, .cif = 0, .data_mask = ICE1712_DELTA_AP_DOUT, .clk_mask = ICE1712_DELTA_AP_CCLK, .cs_mask = ICE1712_DELTA_AP_CS_CODEC, .cs_addr = ICE1712_DELTA_AP_CS_CODEC, .cs_none = 0, .add_flags = ICE1712_DELTA_AP_CS_DIGITAL, .mask_flags = 0, }; static struct snd_akm4xxx akm_delta410 __devinitdata = { .type = SND_AK4529, .num_adcs = 2, .num_dacs = 8, .ops = { .set_rate_val = delta_ak4524_set_rate_val } }; static struct snd_ak4xxx_private akm_delta410_priv __devinitdata = { .caddr = 0, .cif = 0, .data_mask = ICE1712_DELTA_AP_DOUT, .clk_mask = ICE1712_DELTA_AP_CCLK, .cs_mask = ICE1712_DELTA_AP_CS_CODEC, .cs_addr = ICE1712_DELTA_AP_CS_CODEC, .cs_none = 0, .add_flags = ICE1712_DELTA_AP_CS_DIGITAL, .mask_flags = 0, }; static struct snd_akm4xxx akm_delta1010lt __devinitdata = { .type = SND_AK4524, .num_adcs = 8, .num_dacs = 8, .ops = { .lock = delta1010lt_ak4524_lock, .set_rate_val = delta_ak4524_set_rate_val } }; static struct snd_ak4xxx_private akm_delta1010lt_priv __devinitdata = { .caddr = 2, .cif = 0, /* the default level of the CIF pin from AK4524 */ .data_mask = ICE1712_DELTA_1010LT_DOUT, .clk_mask = ICE1712_DELTA_1010LT_CCLK, .cs_mask = 0, .cs_addr = 0, /* set later */ .cs_none = ICE1712_DELTA_1010LT_CS_NONE, .add_flags = 0, .mask_flags = 0, }; static struct snd_akm4xxx akm_delta66e __devinitdata = { .type = SND_AK4524, .num_adcs = 4, .num_dacs = 4, .ops = { .lock = 
delta66e_ak4524_lock, .set_rate_val = delta_ak4524_set_rate_val } }; static struct snd_ak4xxx_private akm_delta66e_priv __devinitdata = { .caddr = 2, .cif = 0, /* the default level of the CIF pin from AK4524 */ .data_mask = ICE1712_DELTA_66E_DOUT, .clk_mask = ICE1712_DELTA_66E_CCLK, .cs_mask = 0, .cs_addr = 0, /* set later */ .cs_none = 0, .add_flags = 0, .mask_flags = 0, }; static struct snd_akm4xxx akm_delta44 __devinitdata = { .type = SND_AK4524, .num_adcs = 4, .num_dacs = 4, .ops = { .lock = delta_ak4524_lock, .set_rate_val = delta_ak4524_set_rate_val } }; static struct snd_ak4xxx_private akm_delta44_priv __devinitdata = { .caddr = 2, .cif = 0, /* the default level of the CIF pin from AK4524 */ .data_mask = ICE1712_DELTA_CODEC_SERIAL_DATA, .clk_mask = ICE1712_DELTA_CODEC_SERIAL_CLOCK, .cs_mask = 0, .cs_addr = 0, /* set later */ .cs_none = 0, .add_flags = 0, .mask_flags = 0, }; static struct snd_akm4xxx akm_vx442 __devinitdata = { .type = SND_AK4524, .num_adcs = 4, .num_dacs = 4, .ops = { .lock = vx442_ak4524_lock, .set_rate_val = vx442_ak4524_set_rate_val } }; static struct snd_ak4xxx_private akm_vx442_priv __devinitdata = { .caddr = 2, .cif = 0, .data_mask = ICE1712_VX442_DOUT, .clk_mask = ICE1712_VX442_CCLK, .cs_mask = 0, .cs_addr = 0, /* set later */ .cs_none = 0, .add_flags = 0, .mask_flags = 0, }; static int __devinit snd_ice1712_delta_init(struct snd_ice1712 *ice) { int err; struct snd_akm4xxx *ak; unsigned char tmp; if (ice->eeprom.subvendor == ICE1712_SUBDEVICE_DELTA1010 && ice->eeprom.gpiodir == 0x7b) ice->eeprom.subvendor = ICE1712_SUBDEVICE_DELTA1010E; if (ice->eeprom.subvendor == ICE1712_SUBDEVICE_DELTA66 && ice->eeprom.gpiodir == 0xfb) ice->eeprom.subvendor = ICE1712_SUBDEVICE_DELTA66E; /* determine I2C, DACs and ADCs */ switch (ice->eeprom.subvendor) { case ICE1712_SUBDEVICE_AUDIOPHILE: ice->num_total_dacs = 2; ice->num_total_adcs = 2; break; case ICE1712_SUBDEVICE_DELTA410: ice->num_total_dacs = 8; ice->num_total_adcs = 2; break; case 
ICE1712_SUBDEVICE_DELTA44: case ICE1712_SUBDEVICE_DELTA66: ice->num_total_dacs = ice->omni ? 8 : 4; ice->num_total_adcs = ice->omni ? 8 : 4; break; case ICE1712_SUBDEVICE_DELTA1010: case ICE1712_SUBDEVICE_DELTA1010E: case ICE1712_SUBDEVICE_DELTA1010LT: case ICE1712_SUBDEVICE_MEDIASTATION: case ICE1712_SUBDEVICE_EDIROLDA2496: ice->num_total_dacs = 8; ice->num_total_adcs = 8; break; case ICE1712_SUBDEVICE_DELTADIO2496: ice->num_total_dacs = 4; /* two AK4324 codecs */ break; case ICE1712_SUBDEVICE_VX442: case ICE1712_SUBDEVICE_DELTA66E: /* omni not suported yet */ ice->num_total_dacs = 4; ice->num_total_adcs = 4; break; } /* initialize the SPI clock to high */ tmp = snd_ice1712_read(ice, ICE1712_IREG_GPIO_DATA); tmp |= ICE1712_DELTA_AP_CCLK; snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, tmp); udelay(5); /* initialize spdif */ switch (ice->eeprom.subvendor) { case ICE1712_SUBDEVICE_AUDIOPHILE: case ICE1712_SUBDEVICE_DELTA410: case ICE1712_SUBDEVICE_DELTA1010E: case ICE1712_SUBDEVICE_DELTA1010LT: case ICE1712_SUBDEVICE_VX442: case ICE1712_SUBDEVICE_DELTA66E: if ((err = snd_i2c_bus_create(ice->card, "ICE1712 GPIO 1", NULL, &ice->i2c)) < 0) { snd_printk(KERN_ERR "unable to create I2C bus\n"); return err; } ice->i2c->private_data = ice; ice->i2c->ops = &ap_cs8427_i2c_ops; if ((err = snd_ice1712_init_cs8427(ice, CS8427_BASE_ADDR)) < 0) return err; break; case ICE1712_SUBDEVICE_DELTA1010: case ICE1712_SUBDEVICE_MEDIASTATION: ice->gpio.set_pro_rate = delta_1010_set_rate_val; break; case ICE1712_SUBDEVICE_DELTADIO2496: ice->gpio.set_pro_rate = delta_1010_set_rate_val; /* fall thru */ case ICE1712_SUBDEVICE_DELTA66: ice->spdif.ops.open = delta_open_spdif; ice->spdif.ops.setup_rate = delta_setup_spdif; ice->spdif.ops.default_get = delta_spdif_default_get; ice->spdif.ops.default_put = delta_spdif_default_put; ice->spdif.ops.stream_get = delta_spdif_stream_get; ice->spdif.ops.stream_put = delta_spdif_stream_put; /* Set spdif defaults */ snd_ice1712_delta_cs8403_spdif_write(ice, 
ice->spdif.cs8403_bits); break; } /* no analog? */ switch (ice->eeprom.subvendor) { case ICE1712_SUBDEVICE_DELTA1010: case ICE1712_SUBDEVICE_DELTA1010E: case ICE1712_SUBDEVICE_DELTADIO2496: case ICE1712_SUBDEVICE_MEDIASTATION: return 0; } /* second stage of initialization, analog parts and others */ ak = ice->akm = kmalloc(sizeof(struct snd_akm4xxx), GFP_KERNEL); if (! ak) return -ENOMEM; ice->akm_codecs = 1; switch (ice->eeprom.subvendor) { case ICE1712_SUBDEVICE_AUDIOPHILE: err = snd_ice1712_akm4xxx_init(ak, &akm_audiophile, &akm_audiophile_priv, ice); break; case ICE1712_SUBDEVICE_DELTA410: err = snd_ice1712_akm4xxx_init(ak, &akm_delta410, &akm_delta410_priv, ice); break; case ICE1712_SUBDEVICE_DELTA1010LT: case ICE1712_SUBDEVICE_EDIROLDA2496: err = snd_ice1712_akm4xxx_init(ak, &akm_delta1010lt, &akm_delta1010lt_priv, ice); break; case ICE1712_SUBDEVICE_DELTA66: case ICE1712_SUBDEVICE_DELTA44: err = snd_ice1712_akm4xxx_init(ak, &akm_delta44, &akm_delta44_priv, ice); break; case ICE1712_SUBDEVICE_VX442: err = snd_ice1712_akm4xxx_init(ak, &akm_vx442, &akm_vx442_priv, ice); break; case ICE1712_SUBDEVICE_DELTA66E: err = snd_ice1712_akm4xxx_init(ak, &akm_delta66e, &akm_delta66e_priv, ice); break; default: snd_BUG(); return -EINVAL; } return err; } /* * additional controls for M-Audio cards */ static struct snd_kcontrol_new snd_ice1712_delta1010_wordclock_select __devinitdata = ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "Word Clock Sync", 0, ICE1712_DELTA_WORD_CLOCK_SELECT, 1, 0); static struct snd_kcontrol_new snd_ice1712_delta1010lt_wordclock_select __devinitdata = ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "Word Clock Sync", 0, ICE1712_DELTA_1010LT_WORDCLOCK, 0, 0); static struct snd_kcontrol_new snd_ice1712_delta1010_wordclock_status __devinitdata = ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "Word Clock Status", 0, ICE1712_DELTA_WORD_CLOCK_STATUS, 1, SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE); static struct snd_kcontrol_new 
snd_ice1712_deltadio2496_spdif_in_select __devinitdata = ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "IEC958 Input Optical", 0, ICE1712_DELTA_SPDIF_INPUT_SELECT, 0, 0); static struct snd_kcontrol_new snd_ice1712_delta_spdif_in_status __devinitdata = ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "Delta IEC958 Input Status", 0, ICE1712_DELTA_SPDIF_IN_STAT, 1, SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE); static int __devinit snd_ice1712_delta_add_controls(struct snd_ice1712 *ice) { int err; /* 1010 and dio specific controls */ switch (ice->eeprom.subvendor) { case ICE1712_SUBDEVICE_DELTA1010: case ICE1712_SUBDEVICE_MEDIASTATION: err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_ice1712_delta1010_wordclock_select, ice)); if (err < 0) return err; err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_ice1712_delta1010_wordclock_status, ice)); if (err < 0) return err; break; case ICE1712_SUBDEVICE_DELTADIO2496: err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_ice1712_deltadio2496_spdif_in_select, ice)); if (err < 0) return err; break; case ICE1712_SUBDEVICE_DELTA1010E: case ICE1712_SUBDEVICE_DELTA1010LT: err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_ice1712_delta1010lt_wordclock_select, ice)); if (err < 0) return err; err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_ice1712_delta1010lt_wordclock_status, ice)); if (err < 0) return err; break; } /* normal spdif controls */ switch (ice->eeprom.subvendor) { case ICE1712_SUBDEVICE_DELTA1010: case ICE1712_SUBDEVICE_DELTADIO2496: case ICE1712_SUBDEVICE_DELTA66: case ICE1712_SUBDEVICE_MEDIASTATION: err = snd_ice1712_spdif_build_controls(ice); if (err < 0) return err; break; } /* spdif status in */ switch (ice->eeprom.subvendor) { case ICE1712_SUBDEVICE_DELTA1010: case ICE1712_SUBDEVICE_DELTADIO2496: case ICE1712_SUBDEVICE_DELTA66: case ICE1712_SUBDEVICE_MEDIASTATION: err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_ice1712_delta_spdif_in_status, ice)); if (err < 0) return err; break; } /* ak4524 controls */ switch 
(ice->eeprom.subvendor) { case ICE1712_SUBDEVICE_DELTA1010LT: case ICE1712_SUBDEVICE_AUDIOPHILE: case ICE1712_SUBDEVICE_DELTA410: case ICE1712_SUBDEVICE_DELTA44: case ICE1712_SUBDEVICE_DELTA66: case ICE1712_SUBDEVICE_VX442: case ICE1712_SUBDEVICE_DELTA66E: case ICE1712_SUBDEVICE_EDIROLDA2496: err = snd_ice1712_akm4xxx_build_controls(ice); if (err < 0) return err; break; } return 0; } /* entry point */ struct snd_ice1712_card_info snd_ice1712_delta_cards[] __devinitdata = { { .subvendor = ICE1712_SUBDEVICE_DELTA1010, .name = "M Audio Delta 1010", .model = "delta1010", .chip_init = snd_ice1712_delta_init, .build_controls = snd_ice1712_delta_add_controls, }, { .subvendor = ICE1712_SUBDEVICE_DELTADIO2496, .name = "M Audio Delta DiO 2496", .model = "dio2496", .chip_init = snd_ice1712_delta_init, .build_controls = snd_ice1712_delta_add_controls, .no_mpu401 = 1, }, { .subvendor = ICE1712_SUBDEVICE_DELTA66, .name = "M Audio Delta 66", .model = "delta66", .chip_init = snd_ice1712_delta_init, .build_controls = snd_ice1712_delta_add_controls, .no_mpu401 = 1, }, { .subvendor = ICE1712_SUBDEVICE_DELTA44, .name = "M Audio Delta 44", .model = "delta44", .chip_init = snd_ice1712_delta_init, .build_controls = snd_ice1712_delta_add_controls, .no_mpu401 = 1, }, { .subvendor = ICE1712_SUBDEVICE_AUDIOPHILE, .name = "M Audio Audiophile 24/96", .model = "audiophile", .chip_init = snd_ice1712_delta_init, .build_controls = snd_ice1712_delta_add_controls, }, { .subvendor = ICE1712_SUBDEVICE_DELTA410, .name = "M Audio Delta 410", .model = "delta410", .chip_init = snd_ice1712_delta_init, .build_controls = snd_ice1712_delta_add_controls, }, { .subvendor = ICE1712_SUBDEVICE_DELTA1010LT, .name = "M Audio Delta 1010LT", .model = "delta1010lt", .chip_init = snd_ice1712_delta_init, .build_controls = snd_ice1712_delta_add_controls, }, { .subvendor = ICE1712_SUBDEVICE_VX442, .name = "Digigram VX442", .model = "vx442", .chip_init = snd_ice1712_delta_init, .build_controls = 
snd_ice1712_delta_add_controls, .no_mpu401 = 1, }, { .subvendor = ICE1712_SUBDEVICE_MEDIASTATION, .name = "Lionstracs Mediastation", .model = "mediastation", .chip_init = snd_ice1712_delta_init, .build_controls = snd_ice1712_delta_add_controls, }, { .subvendor = ICE1712_SUBDEVICE_EDIROLDA2496, .name = "Edirol DA2496", .model = "da2496", .chip_init = snd_ice1712_delta_init, .build_controls = snd_ice1712_delta_add_controls, }, { } /* terminator */ };
gpl-2.0
aicjofs/android_kernel_lge_v500_20d_f2fs
arch/x86/lib/msr-smp.c
11853
4060
#include <linux/module.h> #include <linux/preempt.h> #include <linux/smp.h> #include <asm/msr.h> static void __rdmsr_on_cpu(void *info) { struct msr_info *rv = info; struct msr *reg; int this_cpu = raw_smp_processor_id(); if (rv->msrs) reg = per_cpu_ptr(rv->msrs, this_cpu); else reg = &rv->reg; rdmsr(rv->msr_no, reg->l, reg->h); } static void __wrmsr_on_cpu(void *info) { struct msr_info *rv = info; struct msr *reg; int this_cpu = raw_smp_processor_id(); if (rv->msrs) reg = per_cpu_ptr(rv->msrs, this_cpu); else reg = &rv->reg; wrmsr(rv->msr_no, reg->l, reg->h); } int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) { int err; struct msr_info rv; memset(&rv, 0, sizeof(rv)); rv.msr_no = msr_no; err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); *l = rv.reg.l; *h = rv.reg.h; return err; } EXPORT_SYMBOL(rdmsr_on_cpu); int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) { int err; struct msr_info rv; memset(&rv, 0, sizeof(rv)); rv.msr_no = msr_no; rv.reg.l = l; rv.reg.h = h; err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); return err; } EXPORT_SYMBOL(wrmsr_on_cpu); static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs, void (*msr_func) (void *info)) { struct msr_info rv; int this_cpu; memset(&rv, 0, sizeof(rv)); rv.msrs = msrs; rv.msr_no = msr_no; this_cpu = get_cpu(); if (cpumask_test_cpu(this_cpu, mask)) msr_func(&rv); smp_call_function_many(mask, msr_func, &rv, 1); put_cpu(); } /* rdmsr on a bunch of CPUs * * @mask: which CPUs * @msr_no: which MSR * @msrs: array of MSR values * */ void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs) { __rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu); } EXPORT_SYMBOL(rdmsr_on_cpus); /* * wrmsr on a bunch of CPUs * * @mask: which CPUs * @msr_no: which MSR * @msrs: array of MSR values * */ void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs) { __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu); } 
EXPORT_SYMBOL(wrmsr_on_cpus); /* These "safe" variants are slower and should be used when the target MSR may not actually exist. */ static void __rdmsr_safe_on_cpu(void *info) { struct msr_info *rv = info; rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h); } static void __wrmsr_safe_on_cpu(void *info) { struct msr_info *rv = info; rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h); } int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) { int err; struct msr_info rv; memset(&rv, 0, sizeof(rv)); rv.msr_no = msr_no; err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1); *l = rv.reg.l; *h = rv.reg.h; return err ? err : rv.err; } EXPORT_SYMBOL(rdmsr_safe_on_cpu); int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) { int err; struct msr_info rv; memset(&rv, 0, sizeof(rv)); rv.msr_no = msr_no; rv.reg.l = l; rv.reg.h = h; err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1); return err ? err : rv.err; } EXPORT_SYMBOL(wrmsr_safe_on_cpu); /* * These variants are significantly slower, but allows control over * the entire 32-bit GPR set. */ static void __rdmsr_safe_regs_on_cpu(void *info) { struct msr_regs_info *rv = info; rv->err = rdmsr_safe_regs(rv->regs); } static void __wrmsr_safe_regs_on_cpu(void *info) { struct msr_regs_info *rv = info; rv->err = wrmsr_safe_regs(rv->regs); } int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs) { int err; struct msr_regs_info rv; rv.regs = regs; rv.err = -EIO; err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1); return err ? err : rv.err; } EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu); int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs) { int err; struct msr_regs_info rv; rv.regs = regs; rv.err = -EIO; err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1); return err ? err : rv.err; } EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
gpl-2.0
k2wl/5282
lib/bch.c
12877
36404
/* * Generic binary BCH encoding/decoding library * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Copyright © 2011 Parrot S.A. * * Author: Ivan Djelic <ivan.djelic@parrot.com> * * Description: * * This library provides runtime configurable encoding/decoding of binary * Bose-Chaudhuri-Hocquenghem (BCH) codes. * * Call init_bch to get a pointer to a newly allocated bch_control structure for * the given m (Galois field order), t (error correction capability) and * (optional) primitive polynomial parameters. * * Call encode_bch to compute and store ecc parity bytes to a given buffer. * Call decode_bch to detect and locate errors in received data. * * On systems supporting hw BCH features, intermediate results may be provided * to decode_bch in order to skip certain steps. See decode_bch() documentation * for details. * * Option CONFIG_BCH_CONST_PARAMS can be used to force fixed values of * parameters m and t; thus allowing extra compiler optimizations and providing * better (up to 2x) encoding performance. Using this option makes sense when * (m,t) are fixed and known in advance, e.g. when using BCH error correction * on a particular NAND flash device. * * Algorithmic details: * * Encoding is performed by processing 32 input bits in parallel, using 4 * remainder lookup tables. * * The final stage of decoding involves the following internal steps: * a. Syndrome computation * b. 
Error locator polynomial computation using Berlekamp-Massey algorithm * c. Error locator root finding (by far the most expensive step) * * In this implementation, step c is not performed using the usual Chien search. * Instead, an alternative approach described in [1] is used. It consists in * factoring the error locator polynomial using the Berlekamp Trace algorithm * (BTA) down to a certain degree (4), after which ad hoc low-degree polynomial * solving techniques [2] are used. The resulting algorithm, called BTZ, yields * much better performance than Chien search for usual (m,t) values (typically * m >= 13, t < 32, see [1]). * * [1] B. Biswas, V. Herbert. Efficient root finding of polynomials over fields * of characteristic 2, in: Western European Workshop on Research in Cryptology * - WEWoRC 2009, Graz, Austria, LNCS, Springer, July 2009, to appear. * [2] [Zin96] V.A. Zinoviev. On the solution of equations of degree 10 over * finite fields GF(2^q). In Rapport de recherche INRIA no 2829, 1996. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/bitops.h> #include <asm/byteorder.h> #include <linux/bch.h> #if defined(CONFIG_BCH_CONST_PARAMS) #define GF_M(_p) (CONFIG_BCH_CONST_M) #define GF_T(_p) (CONFIG_BCH_CONST_T) #define GF_N(_p) ((1 << (CONFIG_BCH_CONST_M))-1) #else #define GF_M(_p) ((_p)->m) #define GF_T(_p) ((_p)->t) #define GF_N(_p) ((_p)->n) #endif #define BCH_ECC_WORDS(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 32) #define BCH_ECC_BYTES(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 8) #ifndef dbg #define dbg(_fmt, args...) 
do {} while (0) #endif /* * represent a polynomial over GF(2^m) */ struct gf_poly { unsigned int deg; /* polynomial degree */ unsigned int c[0]; /* polynomial terms */ }; /* given its degree, compute a polynomial size in bytes */ #define GF_POLY_SZ(_d) (sizeof(struct gf_poly)+((_d)+1)*sizeof(unsigned int)) /* polynomial of degree 1 */ struct gf_poly_deg1 { struct gf_poly poly; unsigned int c[2]; }; /* * same as encode_bch(), but process input data one byte at a time */ static void encode_bch_unaligned(struct bch_control *bch, const unsigned char *data, unsigned int len, uint32_t *ecc) { int i; const uint32_t *p; const int l = BCH_ECC_WORDS(bch)-1; while (len--) { p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(*data++)) & 0xff); for (i = 0; i < l; i++) ecc[i] = ((ecc[i] << 8)|(ecc[i+1] >> 24))^(*p++); ecc[l] = (ecc[l] << 8)^(*p); } } /* * convert ecc bytes to aligned, zero-padded 32-bit ecc words */ static void load_ecc8(struct bch_control *bch, uint32_t *dst, const uint8_t *src) { uint8_t pad[4] = {0, 0, 0, 0}; unsigned int i, nwords = BCH_ECC_WORDS(bch)-1; for (i = 0; i < nwords; i++, src += 4) dst[i] = (src[0] << 24)|(src[1] << 16)|(src[2] << 8)|src[3]; memcpy(pad, src, BCH_ECC_BYTES(bch)-4*nwords); dst[nwords] = (pad[0] << 24)|(pad[1] << 16)|(pad[2] << 8)|pad[3]; } /* * convert 32-bit ecc words to ecc bytes */ static void store_ecc8(struct bch_control *bch, uint8_t *dst, const uint32_t *src) { uint8_t pad[4]; unsigned int i, nwords = BCH_ECC_WORDS(bch)-1; for (i = 0; i < nwords; i++) { *dst++ = (src[i] >> 24); *dst++ = (src[i] >> 16) & 0xff; *dst++ = (src[i] >> 8) & 0xff; *dst++ = (src[i] >> 0) & 0xff; } pad[0] = (src[nwords] >> 24); pad[1] = (src[nwords] >> 16) & 0xff; pad[2] = (src[nwords] >> 8) & 0xff; pad[3] = (src[nwords] >> 0) & 0xff; memcpy(dst, pad, BCH_ECC_BYTES(bch)-4*nwords); } /** * encode_bch - calculate BCH ecc parity of data * @bch: BCH control structure * @data: data to encode * @len: data length in bytes * @ecc: ecc parity data, must be initialized 
by caller * * The @ecc parity array is used both as input and output parameter, in order to * allow incremental computations. It should be of the size indicated by member * @ecc_bytes of @bch, and should be initialized to 0 before the first call. * * The exact number of computed ecc parity bits is given by member @ecc_bits of * @bch; it may be less than m*t for large values of t. */ void encode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, uint8_t *ecc) { const unsigned int l = BCH_ECC_WORDS(bch)-1; unsigned int i, mlen; unsigned long m; uint32_t w, r[l+1]; const uint32_t * const tab0 = bch->mod8_tab; const uint32_t * const tab1 = tab0 + 256*(l+1); const uint32_t * const tab2 = tab1 + 256*(l+1); const uint32_t * const tab3 = tab2 + 256*(l+1); const uint32_t *pdata, *p0, *p1, *p2, *p3; if (ecc) { /* load ecc parity bytes into internal 32-bit buffer */ load_ecc8(bch, bch->ecc_buf, ecc); } else { memset(bch->ecc_buf, 0, sizeof(r)); } /* process first unaligned data bytes */ m = ((unsigned long)data) & 3; if (m) { mlen = (len < (4-m)) ? len : 4-m; encode_bch_unaligned(bch, data, mlen, bch->ecc_buf); data += mlen; len -= mlen; } /* process 32-bit aligned data words */ pdata = (uint32_t *)data; mlen = len/4; data += 4*mlen; len -= 4*mlen; memcpy(r, bch->ecc_buf, sizeof(r)); /* * split each 32-bit word into 4 polynomials of weight 8 as follows: * * 31 ...24 23 ...16 15 ... 8 7 ... 
0 * xxxxxxxx yyyyyyyy zzzzzzzz tttttttt * tttttttt mod g = r0 (precomputed) * zzzzzzzz 00000000 mod g = r1 (precomputed) * yyyyyyyy 00000000 00000000 mod g = r2 (precomputed) * xxxxxxxx 00000000 00000000 00000000 mod g = r3 (precomputed) * xxxxxxxx yyyyyyyy zzzzzzzz tttttttt mod g = r0^r1^r2^r3 */ while (mlen--) { /* input data is read in big-endian format */ w = r[0]^cpu_to_be32(*pdata++); p0 = tab0 + (l+1)*((w >> 0) & 0xff); p1 = tab1 + (l+1)*((w >> 8) & 0xff); p2 = tab2 + (l+1)*((w >> 16) & 0xff); p3 = tab3 + (l+1)*((w >> 24) & 0xff); for (i = 0; i < l; i++) r[i] = r[i+1]^p0[i]^p1[i]^p2[i]^p3[i]; r[l] = p0[l]^p1[l]^p2[l]^p3[l]; } memcpy(bch->ecc_buf, r, sizeof(r)); /* process last unaligned bytes */ if (len) encode_bch_unaligned(bch, data, len, bch->ecc_buf); /* store ecc parity bytes into original parity buffer */ if (ecc) store_ecc8(bch, ecc, bch->ecc_buf); } EXPORT_SYMBOL_GPL(encode_bch); static inline int modulo(struct bch_control *bch, unsigned int v) { const unsigned int n = GF_N(bch); while (v >= n) { v -= n; v = (v & n) + (v >> GF_M(bch)); } return v; } /* * shorter and faster modulo function, only works when v < 2N. */ static inline int mod_s(struct bch_control *bch, unsigned int v) { const unsigned int n = GF_N(bch); return (v < n) ? v : v-n; } static inline int deg(unsigned int poly) { /* polynomial degree is the most-significant bit index */ return fls(poly)-1; } static inline int parity(unsigned int x) { /* * public domain code snippet, lifted from * http://www-graphics.stanford.edu/~seander/bithacks.html */ x ^= x >> 1; x ^= x >> 2; x = (x & 0x11111111U) * 0x11111111U; return (x >> 28) & 1; } /* Galois field basic operations: multiply, divide, inverse, etc. */ static inline unsigned int gf_mul(struct bch_control *bch, unsigned int a, unsigned int b) { return (a && b) ? bch->a_pow_tab[mod_s(bch, bch->a_log_tab[a]+ bch->a_log_tab[b])] : 0; } static inline unsigned int gf_sqr(struct bch_control *bch, unsigned int a) { return a ? 
bch->a_pow_tab[mod_s(bch, 2*bch->a_log_tab[a])] : 0; } static inline unsigned int gf_div(struct bch_control *bch, unsigned int a, unsigned int b) { return a ? bch->a_pow_tab[mod_s(bch, bch->a_log_tab[a]+ GF_N(bch)-bch->a_log_tab[b])] : 0; } static inline unsigned int gf_inv(struct bch_control *bch, unsigned int a) { return bch->a_pow_tab[GF_N(bch)-bch->a_log_tab[a]]; } static inline unsigned int a_pow(struct bch_control *bch, int i) { return bch->a_pow_tab[modulo(bch, i)]; } static inline int a_log(struct bch_control *bch, unsigned int x) { return bch->a_log_tab[x]; } static inline int a_ilog(struct bch_control *bch, unsigned int x) { return mod_s(bch, GF_N(bch)-bch->a_log_tab[x]); } /* * compute 2t syndromes of ecc polynomial, i.e. ecc(a^j) for j=1..2t */ static void compute_syndromes(struct bch_control *bch, uint32_t *ecc, unsigned int *syn) { int i, j, s; unsigned int m; uint32_t poly; const int t = GF_T(bch); s = bch->ecc_bits; /* make sure extra bits in last ecc word are cleared */ m = ((unsigned int)s) & 31; if (m) ecc[s/32] &= ~((1u << (32-m))-1); memset(syn, 0, 2*t*sizeof(*syn)); /* compute v(a^j) for j=1 .. 
2t-1 */ do { poly = *ecc++; s -= 32; while (poly) { i = deg(poly); for (j = 0; j < 2*t; j += 2) syn[j] ^= a_pow(bch, (j+1)*(i+s)); poly ^= (1 << i); } } while (s > 0); /* v(a^(2j)) = v(a^j)^2 */ for (j = 0; j < t; j++) syn[2*j+1] = gf_sqr(bch, syn[j]); } static void gf_poly_copy(struct gf_poly *dst, struct gf_poly *src) { memcpy(dst, src, GF_POLY_SZ(src->deg)); } static int compute_error_locator_polynomial(struct bch_control *bch, const unsigned int *syn) { const unsigned int t = GF_T(bch); const unsigned int n = GF_N(bch); unsigned int i, j, tmp, l, pd = 1, d = syn[0]; struct gf_poly *elp = bch->elp; struct gf_poly *pelp = bch->poly_2t[0]; struct gf_poly *elp_copy = bch->poly_2t[1]; int k, pp = -1; memset(pelp, 0, GF_POLY_SZ(2*t)); memset(elp, 0, GF_POLY_SZ(2*t)); pelp->deg = 0; pelp->c[0] = 1; elp->deg = 0; elp->c[0] = 1; /* use simplified binary Berlekamp-Massey algorithm */ for (i = 0; (i < t) && (elp->deg <= t); i++) { if (d) { k = 2*i-pp; gf_poly_copy(elp_copy, elp); /* e[i+1](X) = e[i](X)+di*dp^-1*X^2(i-p)*e[p](X) */ tmp = a_log(bch, d)+n-a_log(bch, pd); for (j = 0; j <= pelp->deg; j++) { if (pelp->c[j]) { l = a_log(bch, pelp->c[j]); elp->c[j+k] ^= a_pow(bch, tmp+l); } } /* compute l[i+1] = max(l[i]->c[l[p]+2*(i-p]) */ tmp = pelp->deg+k; if (tmp > elp->deg) { elp->deg = tmp; gf_poly_copy(pelp, elp_copy); pd = d; pp = 2*i; } } /* di+1 = S(2i+3)+elp[i+1].1*S(2i+2)+...+elp[i+1].lS(2i+3-l) */ if (i < t-1) { d = syn[2*i+2]; for (j = 1; j <= elp->deg; j++) d ^= gf_mul(bch, elp->c[j], syn[2*i+2-j]); } } dbg("elp=%s\n", gf_poly_str(elp)); return (elp->deg > t) ? 
-1 : (int)elp->deg; } /* * solve a m x m linear system in GF(2) with an expected number of solutions, * and return the number of found solutions */ static int solve_linear_system(struct bch_control *bch, unsigned int *rows, unsigned int *sol, int nsol) { const int m = GF_M(bch); unsigned int tmp, mask; int rem, c, r, p, k, param[m]; k = 0; mask = 1 << m; /* Gaussian elimination */ for (c = 0; c < m; c++) { rem = 0; p = c-k; /* find suitable row for elimination */ for (r = p; r < m; r++) { if (rows[r] & mask) { if (r != p) { tmp = rows[r]; rows[r] = rows[p]; rows[p] = tmp; } rem = r+1; break; } } if (rem) { /* perform elimination on remaining rows */ tmp = rows[p]; for (r = rem; r < m; r++) { if (rows[r] & mask) rows[r] ^= tmp; } } else { /* elimination not needed, store defective row index */ param[k++] = c; } mask >>= 1; } /* rewrite system, inserting fake parameter rows */ if (k > 0) { p = k; for (r = m-1; r >= 0; r--) { if ((r > m-1-k) && rows[r]) /* system has no solution */ return 0; rows[r] = (p && (r == param[p-1])) ? p--, 1u << (m-r) : rows[r-p]; } } if (nsol != (1 << k)) /* unexpected number of solutions */ return 0; for (p = 0; p < nsol; p++) { /* set parameters for p-th solution */ for (c = 0; c < k; c++) rows[param[c]] = (rows[param[c]] & ~1)|((p >> c) & 1); /* compute unique solution */ tmp = 0; for (r = m-1; r >= 0; r--) { mask = rows[r] & (tmp|1); tmp |= parity(mask) << (m-r); } sol[p] = tmp >> 1; } return nsol; } /* * this function builds and solves a linear system for finding roots of a degree * 4 affine monic polynomial X^4+aX^2+bX+c over GF(2^m). */ static int find_affine4_roots(struct bch_control *bch, unsigned int a, unsigned int b, unsigned int c, unsigned int *roots) { int i, j, k; const int m = GF_M(bch); unsigned int mask = 0xff, t, rows[16] = {0,}; j = a_log(bch, b); k = a_log(bch, a); rows[0] = c; /* buid linear system to solve X^4+aX^2+bX+c = 0 */ for (i = 0; i < m; i++) { rows[i+1] = bch->a_pow_tab[4*i]^ (a ? 
bch->a_pow_tab[mod_s(bch, k)] : 0)^ (b ? bch->a_pow_tab[mod_s(bch, j)] : 0); j++; k += 2; } /* * transpose 16x16 matrix before passing it to linear solver * warning: this code assumes m < 16 */ for (j = 8; j != 0; j >>= 1, mask ^= (mask << j)) { for (k = 0; k < 16; k = (k+j+1) & ~j) { t = ((rows[k] >> j)^rows[k+j]) & mask; rows[k] ^= (t << j); rows[k+j] ^= t; } } return solve_linear_system(bch, rows, roots, 4); } /* * compute root r of a degree 1 polynomial over GF(2^m) (returned as log(1/r)) */ static int find_poly_deg1_roots(struct bch_control *bch, struct gf_poly *poly, unsigned int *roots) { int n = 0; if (poly->c[0]) /* poly[X] = bX+c with c!=0, root=c/b */ roots[n++] = mod_s(bch, GF_N(bch)-bch->a_log_tab[poly->c[0]]+ bch->a_log_tab[poly->c[1]]); return n; } /* * compute roots of a degree 2 polynomial over GF(2^m) */ static int find_poly_deg2_roots(struct bch_control *bch, struct gf_poly *poly, unsigned int *roots) { int n = 0, i, l0, l1, l2; unsigned int u, v, r; if (poly->c[0] && poly->c[1]) { l0 = bch->a_log_tab[poly->c[0]]; l1 = bch->a_log_tab[poly->c[1]]; l2 = bch->a_log_tab[poly->c[2]]; /* using z=a/bX, transform aX^2+bX+c into z^2+z+u (u=ac/b^2) */ u = a_pow(bch, l0+l2+2*(GF_N(bch)-l1)); /* * let u = sum(li.a^i) i=0..m-1; then compute r = sum(li.xi): * r^2+r = sum(li.(xi^2+xi)) = sum(li.(a^i+Tr(a^i).a^k)) = * u + sum(li.Tr(a^i).a^k) = u+a^k.Tr(sum(li.a^i)) = u+a^k.Tr(u) * i.e. 
r and r+1 are roots iff Tr(u)=0 */ r = 0; v = u; while (v) { i = deg(v); r ^= bch->xi_tab[i]; v ^= (1 << i); } /* verify root */ if ((gf_sqr(bch, r)^r) == u) { /* reverse z=a/bX transformation and compute log(1/r) */ roots[n++] = modulo(bch, 2*GF_N(bch)-l1- bch->a_log_tab[r]+l2); roots[n++] = modulo(bch, 2*GF_N(bch)-l1- bch->a_log_tab[r^1]+l2); } } return n; } /* * compute roots of a degree 3 polynomial over GF(2^m) */ static int find_poly_deg3_roots(struct bch_control *bch, struct gf_poly *poly, unsigned int *roots) { int i, n = 0; unsigned int a, b, c, a2, b2, c2, e3, tmp[4]; if (poly->c[0]) { /* transform polynomial into monic X^3 + a2X^2 + b2X + c2 */ e3 = poly->c[3]; c2 = gf_div(bch, poly->c[0], e3); b2 = gf_div(bch, poly->c[1], e3); a2 = gf_div(bch, poly->c[2], e3); /* (X+a2)(X^3+a2X^2+b2X+c2) = X^4+aX^2+bX+c (affine) */ c = gf_mul(bch, a2, c2); /* c = a2c2 */ b = gf_mul(bch, a2, b2)^c2; /* b = a2b2 + c2 */ a = gf_sqr(bch, a2)^b2; /* a = a2^2 + b2 */ /* find the 4 roots of this affine polynomial */ if (find_affine4_roots(bch, a, b, c, tmp) == 4) { /* remove a2 from final list of roots */ for (i = 0; i < 4; i++) { if (tmp[i] != a2) roots[n++] = a_ilog(bch, tmp[i]); } } } return n; } /* * compute roots of a degree 4 polynomial over GF(2^m) */ static int find_poly_deg4_roots(struct bch_control *bch, struct gf_poly *poly, unsigned int *roots) { int i, l, n = 0; unsigned int a, b, c, d, e = 0, f, a2, b2, c2, e4; if (poly->c[0] == 0) return 0; /* transform polynomial into monic X^4 + aX^3 + bX^2 + cX + d */ e4 = poly->c[4]; d = gf_div(bch, poly->c[0], e4); c = gf_div(bch, poly->c[1], e4); b = gf_div(bch, poly->c[2], e4); a = gf_div(bch, poly->c[3], e4); /* use Y=1/X transformation to get an affine polynomial */ if (a) { /* first, eliminate cX by using z=X+e with ae^2+c=0 */ if (c) { /* compute e such that e^2 = c/a */ f = gf_div(bch, c, a); l = a_log(bch, f); l += (l & 1) ? 
GF_N(bch) : 0; e = a_pow(bch, l/2); /* * use transformation z=X+e: * z^4+e^4 + a(z^3+ez^2+e^2z+e^3) + b(z^2+e^2) +cz+ce+d * z^4 + az^3 + (ae+b)z^2 + (ae^2+c)z+e^4+be^2+ae^3+ce+d * z^4 + az^3 + (ae+b)z^2 + e^4+be^2+d * z^4 + az^3 + b'z^2 + d' */ d = a_pow(bch, 2*l)^gf_mul(bch, b, f)^d; b = gf_mul(bch, a, e)^b; } /* now, use Y=1/X to get Y^4 + b/dY^2 + a/dY + 1/d */ if (d == 0) /* assume all roots have multiplicity 1 */ return 0; c2 = gf_inv(bch, d); b2 = gf_div(bch, a, d); a2 = gf_div(bch, b, d); } else { /* polynomial is already affine */ c2 = d; b2 = c; a2 = b; } /* find the 4 roots of this affine polynomial */ if (find_affine4_roots(bch, a2, b2, c2, roots) == 4) { for (i = 0; i < 4; i++) { /* post-process roots (reverse transformations) */ f = a ? gf_inv(bch, roots[i]) : roots[i]; roots[i] = a_ilog(bch, f^e); } n = 4; } return n; } /* * build monic, log-based representation of a polynomial */ static void gf_poly_logrep(struct bch_control *bch, const struct gf_poly *a, int *rep) { int i, d = a->deg, l = GF_N(bch)-a_log(bch, a->c[a->deg]); /* represent 0 values with -1; warning, rep[d] is not set to 1 */ for (i = 0; i < d; i++) rep[i] = a->c[i] ? 
mod_s(bch, a_log(bch, a->c[i])+l) : -1; } /* * compute polynomial Euclidean division remainder in GF(2^m)[X] */ static void gf_poly_mod(struct bch_control *bch, struct gf_poly *a, const struct gf_poly *b, int *rep) { int la, p, m; unsigned int i, j, *c = a->c; const unsigned int d = b->deg; if (a->deg < d) return; /* reuse or compute log representation of denominator */ if (!rep) { rep = bch->cache; gf_poly_logrep(bch, b, rep); } for (j = a->deg; j >= d; j--) { if (c[j]) { la = a_log(bch, c[j]); p = j-d; for (i = 0; i < d; i++, p++) { m = rep[i]; if (m >= 0) c[p] ^= bch->a_pow_tab[mod_s(bch, m+la)]; } } } a->deg = d-1; while (!c[a->deg] && a->deg) a->deg--; } /* * compute polynomial Euclidean division quotient in GF(2^m)[X] */ static void gf_poly_div(struct bch_control *bch, struct gf_poly *a, const struct gf_poly *b, struct gf_poly *q) { if (a->deg >= b->deg) { q->deg = a->deg-b->deg; /* compute a mod b (modifies a) */ gf_poly_mod(bch, a, b, NULL); /* quotient is stored in upper part of polynomial a */ memcpy(q->c, &a->c[b->deg], (1+q->deg)*sizeof(unsigned int)); } else { q->deg = 0; q->c[0] = 0; } } /* * compute polynomial GCD (Greatest Common Divisor) in GF(2^m)[X] */ static struct gf_poly *gf_poly_gcd(struct bch_control *bch, struct gf_poly *a, struct gf_poly *b) { struct gf_poly *tmp; dbg("gcd(%s,%s)=", gf_poly_str(a), gf_poly_str(b)); if (a->deg < b->deg) { tmp = b; b = a; a = tmp; } while (b->deg > 0) { gf_poly_mod(bch, a, b, NULL); tmp = b; b = a; a = tmp; } dbg("%s\n", gf_poly_str(a)); return a; } /* * Given a polynomial f and an integer k, compute Tr(a^kX) mod f * This is used in Berlekamp Trace algorithm for splitting polynomials */ static void compute_trace_bk_mod(struct bch_control *bch, int k, const struct gf_poly *f, struct gf_poly *z, struct gf_poly *out) { const int m = GF_M(bch); int i, j; /* z contains z^2j mod f */ z->deg = 1; z->c[0] = 0; z->c[1] = bch->a_pow_tab[k]; out->deg = 0; memset(out, 0, GF_POLY_SZ(f->deg)); /* compute f log 
representation only once */ gf_poly_logrep(bch, f, bch->cache); for (i = 0; i < m; i++) { /* add a^(k*2^i)(z^(2^i) mod f) and compute (z^(2^i) mod f)^2 */ for (j = z->deg; j >= 0; j--) { out->c[j] ^= z->c[j]; z->c[2*j] = gf_sqr(bch, z->c[j]); z->c[2*j+1] = 0; } if (z->deg > out->deg) out->deg = z->deg; if (i < m-1) { z->deg *= 2; /* z^(2(i+1)) mod f = (z^(2^i) mod f)^2 mod f */ gf_poly_mod(bch, z, f, bch->cache); } } while (!out->c[out->deg] && out->deg) out->deg--; dbg("Tr(a^%d.X) mod f = %s\n", k, gf_poly_str(out)); } /* * factor a polynomial using Berlekamp Trace algorithm (BTA) */ static void factor_polynomial(struct bch_control *bch, int k, struct gf_poly *f, struct gf_poly **g, struct gf_poly **h) { struct gf_poly *f2 = bch->poly_2t[0]; struct gf_poly *q = bch->poly_2t[1]; struct gf_poly *tk = bch->poly_2t[2]; struct gf_poly *z = bch->poly_2t[3]; struct gf_poly *gcd; dbg("factoring %s...\n", gf_poly_str(f)); *g = f; *h = NULL; /* tk = Tr(a^k.X) mod f */ compute_trace_bk_mod(bch, k, f, z, tk); if (tk->deg > 0) { /* compute g = gcd(f, tk) (destructive operation) */ gf_poly_copy(f2, f); gcd = gf_poly_gcd(bch, f2, tk); if (gcd->deg < f->deg) { /* compute h=f/gcd(f,tk); this will modify f and q */ gf_poly_div(bch, f, gcd, q); /* store g and h in-place (clobbering f) */ *h = &((struct gf_poly_deg1 *)f)[gcd->deg].poly; gf_poly_copy(*g, gcd); gf_poly_copy(*h, q); } } } /* * find roots of a polynomial, using BTZ algorithm; see the beginning of this * file for details */ static int find_poly_roots(struct bch_control *bch, unsigned int k, struct gf_poly *poly, unsigned int *roots) { int cnt; struct gf_poly *f1, *f2; switch (poly->deg) { /* handle low degree polynomials with ad hoc techniques */ case 1: cnt = find_poly_deg1_roots(bch, poly, roots); break; case 2: cnt = find_poly_deg2_roots(bch, poly, roots); break; case 3: cnt = find_poly_deg3_roots(bch, poly, roots); break; case 4: cnt = find_poly_deg4_roots(bch, poly, roots); break; default: /* factor polynomial using 
Berlekamp Trace Algorithm (BTA) */ cnt = 0; if (poly->deg && (k <= GF_M(bch))) { factor_polynomial(bch, k, poly, &f1, &f2); if (f1) cnt += find_poly_roots(bch, k+1, f1, roots); if (f2) cnt += find_poly_roots(bch, k+1, f2, roots+cnt); } break; } return cnt; } #if defined(USE_CHIEN_SEARCH) /* * exhaustive root search (Chien) implementation - not used, included only for * reference/comparison tests */ static int chien_search(struct bch_control *bch, unsigned int len, struct gf_poly *p, unsigned int *roots) { int m; unsigned int i, j, syn, syn0, count = 0; const unsigned int k = 8*len+bch->ecc_bits; /* use a log-based representation of polynomial */ gf_poly_logrep(bch, p, bch->cache); bch->cache[p->deg] = 0; syn0 = gf_div(bch, p->c[0], p->c[p->deg]); for (i = GF_N(bch)-k+1; i <= GF_N(bch); i++) { /* compute elp(a^i) */ for (j = 1, syn = syn0; j <= p->deg; j++) { m = bch->cache[j]; if (m >= 0) syn ^= a_pow(bch, m+j*i); } if (syn == 0) { roots[count++] = GF_N(bch)-i; if (count == p->deg) break; } } return (count == p->deg) ? 
count : 0; } #define find_poly_roots(_p, _k, _elp, _loc) chien_search(_p, len, _elp, _loc) #endif /* USE_CHIEN_SEARCH */ /** * decode_bch - decode received codeword and find bit error locations * @bch: BCH control structure * @data: received data, ignored if @calc_ecc is provided * @len: data length in bytes, must always be provided * @recv_ecc: received ecc, if NULL then assume it was XORed in @calc_ecc * @calc_ecc: calculated ecc, if NULL then calc_ecc is computed from @data * @syn: hw computed syndrome data (if NULL, syndrome is calculated) * @errloc: output array of error locations * * Returns: * The number of errors found, or -EBADMSG if decoding failed, or -EINVAL if * invalid parameters were provided * * Depending on the available hw BCH support and the need to compute @calc_ecc * separately (using encode_bch()), this function should be called with one of * the following parameter configurations - * * by providing @data and @recv_ecc only: * decode_bch(@bch, @data, @len, @recv_ecc, NULL, NULL, @errloc) * * by providing @recv_ecc and @calc_ecc: * decode_bch(@bch, NULL, @len, @recv_ecc, @calc_ecc, NULL, @errloc) * * by providing ecc = recv_ecc XOR calc_ecc: * decode_bch(@bch, NULL, @len, NULL, ecc, NULL, @errloc) * * by providing syndrome results @syn: * decode_bch(@bch, NULL, @len, NULL, NULL, @syn, @errloc) * * Once decode_bch() has successfully returned with a positive value, error * locations returned in array @errloc should be interpreted as follows - * * if (errloc[n] >= 8*len), then n-th error is located in ecc (no need for * data correction) * * if (errloc[n] < 8*len), then n-th error is located in data and can be * corrected with statement data[errloc[n]/8] ^= 1 << (errloc[n] % 8); * * Note that this function does not perform any data correction by itself, it * merely indicates error locations. 
*/ int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, const uint8_t *recv_ecc, const uint8_t *calc_ecc, const unsigned int *syn, unsigned int *errloc) { const unsigned int ecc_words = BCH_ECC_WORDS(bch); unsigned int nbits; int i, err, nroots; uint32_t sum; /* sanity check: make sure data length can be handled */ if (8*len > (bch->n-bch->ecc_bits)) return -EINVAL; /* if caller does not provide syndromes, compute them */ if (!syn) { if (!calc_ecc) { /* compute received data ecc into an internal buffer */ if (!data || !recv_ecc) return -EINVAL; encode_bch(bch, data, len, NULL); } else { /* load provided calculated ecc */ load_ecc8(bch, bch->ecc_buf, calc_ecc); } /* load received ecc or assume it was XORed in calc_ecc */ if (recv_ecc) { load_ecc8(bch, bch->ecc_buf2, recv_ecc); /* XOR received and calculated ecc */ for (i = 0, sum = 0; i < (int)ecc_words; i++) { bch->ecc_buf[i] ^= bch->ecc_buf2[i]; sum |= bch->ecc_buf[i]; } if (!sum) /* no error found */ return 0; } compute_syndromes(bch, bch->ecc_buf, bch->syn); syn = bch->syn; } err = compute_error_locator_polynomial(bch, syn); if (err > 0) { nroots = find_poly_roots(bch, 1, bch->elp, errloc); if (err != nroots) err = -1; } if (err > 0) { /* post-process raw error locations for easier correction */ nbits = (len*8)+bch->ecc_bits; for (i = 0; i < err; i++) { if (errloc[i] >= nbits) { err = -1; break; } errloc[i] = nbits-1-errloc[i]; errloc[i] = (errloc[i] & ~7)|(7-(errloc[i] & 7)); } } return (err >= 0) ? 
err : -EBADMSG; } EXPORT_SYMBOL_GPL(decode_bch); /* * generate Galois field lookup tables */ static int build_gf_tables(struct bch_control *bch, unsigned int poly) { unsigned int i, x = 1; const unsigned int k = 1 << deg(poly); /* primitive polynomial must be of degree m */ if (k != (1u << GF_M(bch))) return -1; for (i = 0; i < GF_N(bch); i++) { bch->a_pow_tab[i] = x; bch->a_log_tab[x] = i; if (i && (x == 1)) /* polynomial is not primitive (a^i=1 with 0<i<2^m-1) */ return -1; x <<= 1; if (x & k) x ^= poly; } bch->a_pow_tab[GF_N(bch)] = 1; bch->a_log_tab[0] = 0; return 0; } /* * compute generator polynomial remainder tables for fast encoding */ static void build_mod8_tables(struct bch_control *bch, const uint32_t *g) { int i, j, b, d; uint32_t data, hi, lo, *tab; const int l = BCH_ECC_WORDS(bch); const int plen = DIV_ROUND_UP(bch->ecc_bits+1, 32); const int ecclen = DIV_ROUND_UP(bch->ecc_bits, 32); memset(bch->mod8_tab, 0, 4*256*l*sizeof(*bch->mod8_tab)); for (i = 0; i < 256; i++) { /* p(X)=i is a small polynomial of weight <= 8 */ for (b = 0; b < 4; b++) { /* we want to compute (p(X).X^(8*b+deg(g))) mod g(X) */ tab = bch->mod8_tab + (b*256+i)*l; data = i << (8*b); while (data) { d = deg(data); /* subtract X^d.g(X) from p(X).X^(8*b+deg(g)) */ data ^= g[0] >> (31-d); for (j = 0; j < ecclen; j++) { hi = (d < 31) ? g[j] << (d+1) : 0; lo = (j+1 < plen) ? g[j+1] >> (31-d) : 0; tab[j] ^= hi|lo; } } } } } /* * build a base for factoring degree 2 polynomials */ static int build_deg2_base(struct bch_control *bch) { const int m = GF_M(bch); int i, j, r; unsigned int sum, x, y, remaining, ak = 0, xi[m]; /* find k s.t. 
Tr(a^k) = 1 and 0 <= k < m */ for (i = 0; i < m; i++) { for (j = 0, sum = 0; j < m; j++) sum ^= a_pow(bch, i*(1 << j)); if (sum) { ak = bch->a_pow_tab[i]; break; } } /* find xi, i=0..m-1 such that xi^2+xi = a^i+Tr(a^i).a^k */ remaining = m; memset(xi, 0, sizeof(xi)); for (x = 0; (x <= GF_N(bch)) && remaining; x++) { y = gf_sqr(bch, x)^x; for (i = 0; i < 2; i++) { r = a_log(bch, y); if (y && (r < m) && !xi[r]) { bch->xi_tab[r] = x; xi[r] = 1; remaining--; dbg("x%d = %x\n", r, x); break; } y ^= ak; } } /* should not happen but check anyway */ return remaining ? -1 : 0; } static void *bch_alloc(size_t size, int *err) { void *ptr; ptr = kmalloc(size, GFP_KERNEL); if (ptr == NULL) *err = 1; return ptr; } /* * compute generator polynomial for given (m,t) parameters. */ static uint32_t *compute_generator_polynomial(struct bch_control *bch) { const unsigned int m = GF_M(bch); const unsigned int t = GF_T(bch); int n, err = 0; unsigned int i, j, nbits, r, word, *roots; struct gf_poly *g; uint32_t *genpoly; g = bch_alloc(GF_POLY_SZ(m*t), &err); roots = bch_alloc((bch->n+1)*sizeof(*roots), &err); genpoly = bch_alloc(DIV_ROUND_UP(m*t+1, 32)*sizeof(*genpoly), &err); if (err) { kfree(genpoly); genpoly = NULL; goto finish; } /* enumerate all roots of g(X) */ memset(roots , 0, (bch->n+1)*sizeof(*roots)); for (i = 0; i < t; i++) { for (j = 0, r = 2*i+1; j < m; j++) { roots[r] = 1; r = mod_s(bch, 2*r); } } /* build generator polynomial g(X) */ g->deg = 0; g->c[0] = 1; for (i = 0; i < GF_N(bch); i++) { if (roots[i]) { /* multiply g(X) by (X+root) */ r = bch->a_pow_tab[i]; g->c[g->deg+1] = 1; for (j = g->deg; j > 0; j--) g->c[j] = gf_mul(bch, g->c[j], r)^g->c[j-1]; g->c[0] = gf_mul(bch, g->c[0], r); g->deg++; } } /* store left-justified binary representation of g(X) */ n = g->deg+1; i = 0; while (n > 0) { nbits = (n > 32) ? 
32 : n; for (j = 0, word = 0; j < nbits; j++) { if (g->c[n-1-j]) word |= 1u << (31-j); } genpoly[i++] = word; n -= nbits; } bch->ecc_bits = g->deg; finish: kfree(g); kfree(roots); return genpoly; } /** * init_bch - initialize a BCH encoder/decoder * @m: Galois field order, should be in the range 5-15 * @t: maximum error correction capability, in bits * @prim_poly: user-provided primitive polynomial (or 0 to use default) * * Returns: * a newly allocated BCH control structure if successful, NULL otherwise * * This initialization can take some time, as lookup tables are built for fast * encoding/decoding; make sure not to call this function from a time critical * path. Usually, init_bch() should be called on module/driver init and * free_bch() should be called to release memory on exit. * * You may provide your own primitive polynomial of degree @m in argument * @prim_poly, or let init_bch() use its default polynomial. * * Once init_bch() has successfully returned a pointer to a newly allocated * BCH control structure, ecc length in bytes is given by member @ecc_bytes of * the structure. 
*/ struct bch_control *init_bch(int m, int t, unsigned int prim_poly) { int err = 0; unsigned int i, words; uint32_t *genpoly; struct bch_control *bch = NULL; const int min_m = 5; const int max_m = 15; /* default primitive polynomials */ static const unsigned int prim_poly_tab[] = { 0x25, 0x43, 0x83, 0x11d, 0x211, 0x409, 0x805, 0x1053, 0x201b, 0x402b, 0x8003, }; #if defined(CONFIG_BCH_CONST_PARAMS) if ((m != (CONFIG_BCH_CONST_M)) || (t != (CONFIG_BCH_CONST_T))) { printk(KERN_ERR "bch encoder/decoder was configured to support " "parameters m=%d, t=%d only!\n", CONFIG_BCH_CONST_M, CONFIG_BCH_CONST_T); goto fail; } #endif if ((m < min_m) || (m > max_m)) /* * values of m greater than 15 are not currently supported; * supporting m > 15 would require changing table base type * (uint16_t) and a small patch in matrix transposition */ goto fail; /* sanity checks */ if ((t < 1) || (m*t >= ((1 << m)-1))) /* invalid t value */ goto fail; /* select a primitive polynomial for generating GF(2^m) */ if (prim_poly == 0) prim_poly = prim_poly_tab[m-min_m]; bch = kzalloc(sizeof(*bch), GFP_KERNEL); if (bch == NULL) goto fail; bch->m = m; bch->t = t; bch->n = (1 << m)-1; words = DIV_ROUND_UP(m*t, 32); bch->ecc_bytes = DIV_ROUND_UP(m*t, 8); bch->a_pow_tab = bch_alloc((1+bch->n)*sizeof(*bch->a_pow_tab), &err); bch->a_log_tab = bch_alloc((1+bch->n)*sizeof(*bch->a_log_tab), &err); bch->mod8_tab = bch_alloc(words*1024*sizeof(*bch->mod8_tab), &err); bch->ecc_buf = bch_alloc(words*sizeof(*bch->ecc_buf), &err); bch->ecc_buf2 = bch_alloc(words*sizeof(*bch->ecc_buf2), &err); bch->xi_tab = bch_alloc(m*sizeof(*bch->xi_tab), &err); bch->syn = bch_alloc(2*t*sizeof(*bch->syn), &err); bch->cache = bch_alloc(2*t*sizeof(*bch->cache), &err); bch->elp = bch_alloc((t+1)*sizeof(struct gf_poly_deg1), &err); for (i = 0; i < ARRAY_SIZE(bch->poly_2t); i++) bch->poly_2t[i] = bch_alloc(GF_POLY_SZ(2*t), &err); if (err) goto fail; err = build_gf_tables(bch, prim_poly); if (err) goto fail; /* use generator 
polynomial for computing encoding tables */ genpoly = compute_generator_polynomial(bch); if (genpoly == NULL) goto fail; build_mod8_tables(bch, genpoly); kfree(genpoly); err = build_deg2_base(bch); if (err) goto fail; return bch; fail: free_bch(bch); return NULL; } EXPORT_SYMBOL_GPL(init_bch); /** * free_bch - free the BCH control structure * @bch: BCH control structure to release */ void free_bch(struct bch_control *bch) { unsigned int i; if (bch) { kfree(bch->a_pow_tab); kfree(bch->a_log_tab); kfree(bch->mod8_tab); kfree(bch->ecc_buf); kfree(bch->ecc_buf2); kfree(bch->xi_tab); kfree(bch->syn); kfree(bch->cache); kfree(bch->elp); for (i = 0; i < ARRAY_SIZE(bch->poly_2t); i++) kfree(bch->poly_2t[i]); kfree(bch); } } EXPORT_SYMBOL_GPL(free_bch); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>"); MODULE_DESCRIPTION("Binary BCH encoder/decoder");
gpl-2.0
identisoft-rashid/ec3_kernel_pre_4.1
lib/bch.c
12877
36404
/* * Generic binary BCH encoding/decoding library * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Copyright © 2011 Parrot S.A. * * Author: Ivan Djelic <ivan.djelic@parrot.com> * * Description: * * This library provides runtime configurable encoding/decoding of binary * Bose-Chaudhuri-Hocquenghem (BCH) codes. * * Call init_bch to get a pointer to a newly allocated bch_control structure for * the given m (Galois field order), t (error correction capability) and * (optional) primitive polynomial parameters. * * Call encode_bch to compute and store ecc parity bytes to a given buffer. * Call decode_bch to detect and locate errors in received data. * * On systems supporting hw BCH features, intermediate results may be provided * to decode_bch in order to skip certain steps. See decode_bch() documentation * for details. * * Option CONFIG_BCH_CONST_PARAMS can be used to force fixed values of * parameters m and t; thus allowing extra compiler optimizations and providing * better (up to 2x) encoding performance. Using this option makes sense when * (m,t) are fixed and known in advance, e.g. when using BCH error correction * on a particular NAND flash device. * * Algorithmic details: * * Encoding is performed by processing 32 input bits in parallel, using 4 * remainder lookup tables. * * The final stage of decoding involves the following internal steps: * a. Syndrome computation * b. 
Error locator polynomial computation using Berlekamp-Massey algorithm * c. Error locator root finding (by far the most expensive step) * * In this implementation, step c is not performed using the usual Chien search. * Instead, an alternative approach described in [1] is used. It consists in * factoring the error locator polynomial using the Berlekamp Trace algorithm * (BTA) down to a certain degree (4), after which ad hoc low-degree polynomial * solving techniques [2] are used. The resulting algorithm, called BTZ, yields * much better performance than Chien search for usual (m,t) values (typically * m >= 13, t < 32, see [1]). * * [1] B. Biswas, V. Herbert. Efficient root finding of polynomials over fields * of characteristic 2, in: Western European Workshop on Research in Cryptology * - WEWoRC 2009, Graz, Austria, LNCS, Springer, July 2009, to appear. * [2] [Zin96] V.A. Zinoviev. On the solution of equations of degree 10 over * finite fields GF(2^q). In Rapport de recherche INRIA no 2829, 1996. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/bitops.h> #include <asm/byteorder.h> #include <linux/bch.h> #if defined(CONFIG_BCH_CONST_PARAMS) #define GF_M(_p) (CONFIG_BCH_CONST_M) #define GF_T(_p) (CONFIG_BCH_CONST_T) #define GF_N(_p) ((1 << (CONFIG_BCH_CONST_M))-1) #else #define GF_M(_p) ((_p)->m) #define GF_T(_p) ((_p)->t) #define GF_N(_p) ((_p)->n) #endif #define BCH_ECC_WORDS(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 32) #define BCH_ECC_BYTES(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 8) #ifndef dbg #define dbg(_fmt, args...) 
do {} while (0) #endif /* * represent a polynomial over GF(2^m) */ struct gf_poly { unsigned int deg; /* polynomial degree */ unsigned int c[0]; /* polynomial terms */ }; /* given its degree, compute a polynomial size in bytes */ #define GF_POLY_SZ(_d) (sizeof(struct gf_poly)+((_d)+1)*sizeof(unsigned int)) /* polynomial of degree 1 */ struct gf_poly_deg1 { struct gf_poly poly; unsigned int c[2]; }; /* * same as encode_bch(), but process input data one byte at a time */ static void encode_bch_unaligned(struct bch_control *bch, const unsigned char *data, unsigned int len, uint32_t *ecc) { int i; const uint32_t *p; const int l = BCH_ECC_WORDS(bch)-1; while (len--) { p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(*data++)) & 0xff); for (i = 0; i < l; i++) ecc[i] = ((ecc[i] << 8)|(ecc[i+1] >> 24))^(*p++); ecc[l] = (ecc[l] << 8)^(*p); } } /* * convert ecc bytes to aligned, zero-padded 32-bit ecc words */ static void load_ecc8(struct bch_control *bch, uint32_t *dst, const uint8_t *src) { uint8_t pad[4] = {0, 0, 0, 0}; unsigned int i, nwords = BCH_ECC_WORDS(bch)-1; for (i = 0; i < nwords; i++, src += 4) dst[i] = (src[0] << 24)|(src[1] << 16)|(src[2] << 8)|src[3]; memcpy(pad, src, BCH_ECC_BYTES(bch)-4*nwords); dst[nwords] = (pad[0] << 24)|(pad[1] << 16)|(pad[2] << 8)|pad[3]; } /* * convert 32-bit ecc words to ecc bytes */ static void store_ecc8(struct bch_control *bch, uint8_t *dst, const uint32_t *src) { uint8_t pad[4]; unsigned int i, nwords = BCH_ECC_WORDS(bch)-1; for (i = 0; i < nwords; i++) { *dst++ = (src[i] >> 24); *dst++ = (src[i] >> 16) & 0xff; *dst++ = (src[i] >> 8) & 0xff; *dst++ = (src[i] >> 0) & 0xff; } pad[0] = (src[nwords] >> 24); pad[1] = (src[nwords] >> 16) & 0xff; pad[2] = (src[nwords] >> 8) & 0xff; pad[3] = (src[nwords] >> 0) & 0xff; memcpy(dst, pad, BCH_ECC_BYTES(bch)-4*nwords); } /** * encode_bch - calculate BCH ecc parity of data * @bch: BCH control structure * @data: data to encode * @len: data length in bytes * @ecc: ecc parity data, must be initialized 
by caller * * The @ecc parity array is used both as input and output parameter, in order to * allow incremental computations. It should be of the size indicated by member * @ecc_bytes of @bch, and should be initialized to 0 before the first call. * * The exact number of computed ecc parity bits is given by member @ecc_bits of * @bch; it may be less than m*t for large values of t. */ void encode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, uint8_t *ecc) { const unsigned int l = BCH_ECC_WORDS(bch)-1; unsigned int i, mlen; unsigned long m; uint32_t w, r[l+1]; const uint32_t * const tab0 = bch->mod8_tab; const uint32_t * const tab1 = tab0 + 256*(l+1); const uint32_t * const tab2 = tab1 + 256*(l+1); const uint32_t * const tab3 = tab2 + 256*(l+1); const uint32_t *pdata, *p0, *p1, *p2, *p3; if (ecc) { /* load ecc parity bytes into internal 32-bit buffer */ load_ecc8(bch, bch->ecc_buf, ecc); } else { memset(bch->ecc_buf, 0, sizeof(r)); } /* process first unaligned data bytes */ m = ((unsigned long)data) & 3; if (m) { mlen = (len < (4-m)) ? len : 4-m; encode_bch_unaligned(bch, data, mlen, bch->ecc_buf); data += mlen; len -= mlen; } /* process 32-bit aligned data words */ pdata = (uint32_t *)data; mlen = len/4; data += 4*mlen; len -= 4*mlen; memcpy(r, bch->ecc_buf, sizeof(r)); /* * split each 32-bit word into 4 polynomials of weight 8 as follows: * * 31 ...24 23 ...16 15 ... 8 7 ... 
0 * xxxxxxxx yyyyyyyy zzzzzzzz tttttttt * tttttttt mod g = r0 (precomputed) * zzzzzzzz 00000000 mod g = r1 (precomputed) * yyyyyyyy 00000000 00000000 mod g = r2 (precomputed) * xxxxxxxx 00000000 00000000 00000000 mod g = r3 (precomputed) * xxxxxxxx yyyyyyyy zzzzzzzz tttttttt mod g = r0^r1^r2^r3 */ while (mlen--) { /* input data is read in big-endian format */ w = r[0]^cpu_to_be32(*pdata++); p0 = tab0 + (l+1)*((w >> 0) & 0xff); p1 = tab1 + (l+1)*((w >> 8) & 0xff); p2 = tab2 + (l+1)*((w >> 16) & 0xff); p3 = tab3 + (l+1)*((w >> 24) & 0xff); for (i = 0; i < l; i++) r[i] = r[i+1]^p0[i]^p1[i]^p2[i]^p3[i]; r[l] = p0[l]^p1[l]^p2[l]^p3[l]; } memcpy(bch->ecc_buf, r, sizeof(r)); /* process last unaligned bytes */ if (len) encode_bch_unaligned(bch, data, len, bch->ecc_buf); /* store ecc parity bytes into original parity buffer */ if (ecc) store_ecc8(bch, ecc, bch->ecc_buf); } EXPORT_SYMBOL_GPL(encode_bch); static inline int modulo(struct bch_control *bch, unsigned int v) { const unsigned int n = GF_N(bch); while (v >= n) { v -= n; v = (v & n) + (v >> GF_M(bch)); } return v; } /* * shorter and faster modulo function, only works when v < 2N. */ static inline int mod_s(struct bch_control *bch, unsigned int v) { const unsigned int n = GF_N(bch); return (v < n) ? v : v-n; } static inline int deg(unsigned int poly) { /* polynomial degree is the most-significant bit index */ return fls(poly)-1; } static inline int parity(unsigned int x) { /* * public domain code snippet, lifted from * http://www-graphics.stanford.edu/~seander/bithacks.html */ x ^= x >> 1; x ^= x >> 2; x = (x & 0x11111111U) * 0x11111111U; return (x >> 28) & 1; } /* Galois field basic operations: multiply, divide, inverse, etc. */ static inline unsigned int gf_mul(struct bch_control *bch, unsigned int a, unsigned int b) { return (a && b) ? bch->a_pow_tab[mod_s(bch, bch->a_log_tab[a]+ bch->a_log_tab[b])] : 0; } static inline unsigned int gf_sqr(struct bch_control *bch, unsigned int a) { return a ? 
bch->a_pow_tab[mod_s(bch, 2*bch->a_log_tab[a])] : 0; } static inline unsigned int gf_div(struct bch_control *bch, unsigned int a, unsigned int b) { return a ? bch->a_pow_tab[mod_s(bch, bch->a_log_tab[a]+ GF_N(bch)-bch->a_log_tab[b])] : 0; } static inline unsigned int gf_inv(struct bch_control *bch, unsigned int a) { return bch->a_pow_tab[GF_N(bch)-bch->a_log_tab[a]]; } static inline unsigned int a_pow(struct bch_control *bch, int i) { return bch->a_pow_tab[modulo(bch, i)]; } static inline int a_log(struct bch_control *bch, unsigned int x) { return bch->a_log_tab[x]; } static inline int a_ilog(struct bch_control *bch, unsigned int x) { return mod_s(bch, GF_N(bch)-bch->a_log_tab[x]); } /* * compute 2t syndromes of ecc polynomial, i.e. ecc(a^j) for j=1..2t */ static void compute_syndromes(struct bch_control *bch, uint32_t *ecc, unsigned int *syn) { int i, j, s; unsigned int m; uint32_t poly; const int t = GF_T(bch); s = bch->ecc_bits; /* make sure extra bits in last ecc word are cleared */ m = ((unsigned int)s) & 31; if (m) ecc[s/32] &= ~((1u << (32-m))-1); memset(syn, 0, 2*t*sizeof(*syn)); /* compute v(a^j) for j=1 .. 
2t-1 */ do { poly = *ecc++; s -= 32; while (poly) { i = deg(poly); for (j = 0; j < 2*t; j += 2) syn[j] ^= a_pow(bch, (j+1)*(i+s)); poly ^= (1 << i); } } while (s > 0); /* v(a^(2j)) = v(a^j)^2 */ for (j = 0; j < t; j++) syn[2*j+1] = gf_sqr(bch, syn[j]); } static void gf_poly_copy(struct gf_poly *dst, struct gf_poly *src) { memcpy(dst, src, GF_POLY_SZ(src->deg)); } static int compute_error_locator_polynomial(struct bch_control *bch, const unsigned int *syn) { const unsigned int t = GF_T(bch); const unsigned int n = GF_N(bch); unsigned int i, j, tmp, l, pd = 1, d = syn[0]; struct gf_poly *elp = bch->elp; struct gf_poly *pelp = bch->poly_2t[0]; struct gf_poly *elp_copy = bch->poly_2t[1]; int k, pp = -1; memset(pelp, 0, GF_POLY_SZ(2*t)); memset(elp, 0, GF_POLY_SZ(2*t)); pelp->deg = 0; pelp->c[0] = 1; elp->deg = 0; elp->c[0] = 1; /* use simplified binary Berlekamp-Massey algorithm */ for (i = 0; (i < t) && (elp->deg <= t); i++) { if (d) { k = 2*i-pp; gf_poly_copy(elp_copy, elp); /* e[i+1](X) = e[i](X)+di*dp^-1*X^2(i-p)*e[p](X) */ tmp = a_log(bch, d)+n-a_log(bch, pd); for (j = 0; j <= pelp->deg; j++) { if (pelp->c[j]) { l = a_log(bch, pelp->c[j]); elp->c[j+k] ^= a_pow(bch, tmp+l); } } /* compute l[i+1] = max(l[i]->c[l[p]+2*(i-p]) */ tmp = pelp->deg+k; if (tmp > elp->deg) { elp->deg = tmp; gf_poly_copy(pelp, elp_copy); pd = d; pp = 2*i; } } /* di+1 = S(2i+3)+elp[i+1].1*S(2i+2)+...+elp[i+1].lS(2i+3-l) */ if (i < t-1) { d = syn[2*i+2]; for (j = 1; j <= elp->deg; j++) d ^= gf_mul(bch, elp->c[j], syn[2*i+2-j]); } } dbg("elp=%s\n", gf_poly_str(elp)); return (elp->deg > t) ? 
-1 : (int)elp->deg; } /* * solve a m x m linear system in GF(2) with an expected number of solutions, * and return the number of found solutions */ static int solve_linear_system(struct bch_control *bch, unsigned int *rows, unsigned int *sol, int nsol) { const int m = GF_M(bch); unsigned int tmp, mask; int rem, c, r, p, k, param[m]; k = 0; mask = 1 << m; /* Gaussian elimination */ for (c = 0; c < m; c++) { rem = 0; p = c-k; /* find suitable row for elimination */ for (r = p; r < m; r++) { if (rows[r] & mask) { if (r != p) { tmp = rows[r]; rows[r] = rows[p]; rows[p] = tmp; } rem = r+1; break; } } if (rem) { /* perform elimination on remaining rows */ tmp = rows[p]; for (r = rem; r < m; r++) { if (rows[r] & mask) rows[r] ^= tmp; } } else { /* elimination not needed, store defective row index */ param[k++] = c; } mask >>= 1; } /* rewrite system, inserting fake parameter rows */ if (k > 0) { p = k; for (r = m-1; r >= 0; r--) { if ((r > m-1-k) && rows[r]) /* system has no solution */ return 0; rows[r] = (p && (r == param[p-1])) ? p--, 1u << (m-r) : rows[r-p]; } } if (nsol != (1 << k)) /* unexpected number of solutions */ return 0; for (p = 0; p < nsol; p++) { /* set parameters for p-th solution */ for (c = 0; c < k; c++) rows[param[c]] = (rows[param[c]] & ~1)|((p >> c) & 1); /* compute unique solution */ tmp = 0; for (r = m-1; r >= 0; r--) { mask = rows[r] & (tmp|1); tmp |= parity(mask) << (m-r); } sol[p] = tmp >> 1; } return nsol; } /* * this function builds and solves a linear system for finding roots of a degree * 4 affine monic polynomial X^4+aX^2+bX+c over GF(2^m). */ static int find_affine4_roots(struct bch_control *bch, unsigned int a, unsigned int b, unsigned int c, unsigned int *roots) { int i, j, k; const int m = GF_M(bch); unsigned int mask = 0xff, t, rows[16] = {0,}; j = a_log(bch, b); k = a_log(bch, a); rows[0] = c; /* buid linear system to solve X^4+aX^2+bX+c = 0 */ for (i = 0; i < m; i++) { rows[i+1] = bch->a_pow_tab[4*i]^ (a ? 
bch->a_pow_tab[mod_s(bch, k)] : 0)^ (b ? bch->a_pow_tab[mod_s(bch, j)] : 0); j++; k += 2; } /* * transpose 16x16 matrix before passing it to linear solver * warning: this code assumes m < 16 */ for (j = 8; j != 0; j >>= 1, mask ^= (mask << j)) { for (k = 0; k < 16; k = (k+j+1) & ~j) { t = ((rows[k] >> j)^rows[k+j]) & mask; rows[k] ^= (t << j); rows[k+j] ^= t; } } return solve_linear_system(bch, rows, roots, 4); } /* * compute root r of a degree 1 polynomial over GF(2^m) (returned as log(1/r)) */ static int find_poly_deg1_roots(struct bch_control *bch, struct gf_poly *poly, unsigned int *roots) { int n = 0; if (poly->c[0]) /* poly[X] = bX+c with c!=0, root=c/b */ roots[n++] = mod_s(bch, GF_N(bch)-bch->a_log_tab[poly->c[0]]+ bch->a_log_tab[poly->c[1]]); return n; } /* * compute roots of a degree 2 polynomial over GF(2^m) */ static int find_poly_deg2_roots(struct bch_control *bch, struct gf_poly *poly, unsigned int *roots) { int n = 0, i, l0, l1, l2; unsigned int u, v, r; if (poly->c[0] && poly->c[1]) { l0 = bch->a_log_tab[poly->c[0]]; l1 = bch->a_log_tab[poly->c[1]]; l2 = bch->a_log_tab[poly->c[2]]; /* using z=a/bX, transform aX^2+bX+c into z^2+z+u (u=ac/b^2) */ u = a_pow(bch, l0+l2+2*(GF_N(bch)-l1)); /* * let u = sum(li.a^i) i=0..m-1; then compute r = sum(li.xi): * r^2+r = sum(li.(xi^2+xi)) = sum(li.(a^i+Tr(a^i).a^k)) = * u + sum(li.Tr(a^i).a^k) = u+a^k.Tr(sum(li.a^i)) = u+a^k.Tr(u) * i.e. 
r and r+1 are roots iff Tr(u)=0 */ r = 0; v = u; while (v) { i = deg(v); r ^= bch->xi_tab[i]; v ^= (1 << i); } /* verify root */ if ((gf_sqr(bch, r)^r) == u) { /* reverse z=a/bX transformation and compute log(1/r) */ roots[n++] = modulo(bch, 2*GF_N(bch)-l1- bch->a_log_tab[r]+l2); roots[n++] = modulo(bch, 2*GF_N(bch)-l1- bch->a_log_tab[r^1]+l2); } } return n; } /* * compute roots of a degree 3 polynomial over GF(2^m) */ static int find_poly_deg3_roots(struct bch_control *bch, struct gf_poly *poly, unsigned int *roots) { int i, n = 0; unsigned int a, b, c, a2, b2, c2, e3, tmp[4]; if (poly->c[0]) { /* transform polynomial into monic X^3 + a2X^2 + b2X + c2 */ e3 = poly->c[3]; c2 = gf_div(bch, poly->c[0], e3); b2 = gf_div(bch, poly->c[1], e3); a2 = gf_div(bch, poly->c[2], e3); /* (X+a2)(X^3+a2X^2+b2X+c2) = X^4+aX^2+bX+c (affine) */ c = gf_mul(bch, a2, c2); /* c = a2c2 */ b = gf_mul(bch, a2, b2)^c2; /* b = a2b2 + c2 */ a = gf_sqr(bch, a2)^b2; /* a = a2^2 + b2 */ /* find the 4 roots of this affine polynomial */ if (find_affine4_roots(bch, a, b, c, tmp) == 4) { /* remove a2 from final list of roots */ for (i = 0; i < 4; i++) { if (tmp[i] != a2) roots[n++] = a_ilog(bch, tmp[i]); } } } return n; } /* * compute roots of a degree 4 polynomial over GF(2^m) */ static int find_poly_deg4_roots(struct bch_control *bch, struct gf_poly *poly, unsigned int *roots) { int i, l, n = 0; unsigned int a, b, c, d, e = 0, f, a2, b2, c2, e4; if (poly->c[0] == 0) return 0; /* transform polynomial into monic X^4 + aX^3 + bX^2 + cX + d */ e4 = poly->c[4]; d = gf_div(bch, poly->c[0], e4); c = gf_div(bch, poly->c[1], e4); b = gf_div(bch, poly->c[2], e4); a = gf_div(bch, poly->c[3], e4); /* use Y=1/X transformation to get an affine polynomial */ if (a) { /* first, eliminate cX by using z=X+e with ae^2+c=0 */ if (c) { /* compute e such that e^2 = c/a */ f = gf_div(bch, c, a); l = a_log(bch, f); l += (l & 1) ? 
GF_N(bch) : 0; e = a_pow(bch, l/2); /* * use transformation z=X+e: * z^4+e^4 + a(z^3+ez^2+e^2z+e^3) + b(z^2+e^2) +cz+ce+d * z^4 + az^3 + (ae+b)z^2 + (ae^2+c)z+e^4+be^2+ae^3+ce+d * z^4 + az^3 + (ae+b)z^2 + e^4+be^2+d * z^4 + az^3 + b'z^2 + d' */ d = a_pow(bch, 2*l)^gf_mul(bch, b, f)^d; b = gf_mul(bch, a, e)^b; } /* now, use Y=1/X to get Y^4 + b/dY^2 + a/dY + 1/d */ if (d == 0) /* assume all roots have multiplicity 1 */ return 0; c2 = gf_inv(bch, d); b2 = gf_div(bch, a, d); a2 = gf_div(bch, b, d); } else { /* polynomial is already affine */ c2 = d; b2 = c; a2 = b; } /* find the 4 roots of this affine polynomial */ if (find_affine4_roots(bch, a2, b2, c2, roots) == 4) { for (i = 0; i < 4; i++) { /* post-process roots (reverse transformations) */ f = a ? gf_inv(bch, roots[i]) : roots[i]; roots[i] = a_ilog(bch, f^e); } n = 4; } return n; } /* * build monic, log-based representation of a polynomial */ static void gf_poly_logrep(struct bch_control *bch, const struct gf_poly *a, int *rep) { int i, d = a->deg, l = GF_N(bch)-a_log(bch, a->c[a->deg]); /* represent 0 values with -1; warning, rep[d] is not set to 1 */ for (i = 0; i < d; i++) rep[i] = a->c[i] ? 
mod_s(bch, a_log(bch, a->c[i])+l) : -1; } /* * compute polynomial Euclidean division remainder in GF(2^m)[X] */ static void gf_poly_mod(struct bch_control *bch, struct gf_poly *a, const struct gf_poly *b, int *rep) { int la, p, m; unsigned int i, j, *c = a->c; const unsigned int d = b->deg; if (a->deg < d) return; /* reuse or compute log representation of denominator */ if (!rep) { rep = bch->cache; gf_poly_logrep(bch, b, rep); } for (j = a->deg; j >= d; j--) { if (c[j]) { la = a_log(bch, c[j]); p = j-d; for (i = 0; i < d; i++, p++) { m = rep[i]; if (m >= 0) c[p] ^= bch->a_pow_tab[mod_s(bch, m+la)]; } } } a->deg = d-1; while (!c[a->deg] && a->deg) a->deg--; } /* * compute polynomial Euclidean division quotient in GF(2^m)[X] */ static void gf_poly_div(struct bch_control *bch, struct gf_poly *a, const struct gf_poly *b, struct gf_poly *q) { if (a->deg >= b->deg) { q->deg = a->deg-b->deg; /* compute a mod b (modifies a) */ gf_poly_mod(bch, a, b, NULL); /* quotient is stored in upper part of polynomial a */ memcpy(q->c, &a->c[b->deg], (1+q->deg)*sizeof(unsigned int)); } else { q->deg = 0; q->c[0] = 0; } } /* * compute polynomial GCD (Greatest Common Divisor) in GF(2^m)[X] */ static struct gf_poly *gf_poly_gcd(struct bch_control *bch, struct gf_poly *a, struct gf_poly *b) { struct gf_poly *tmp; dbg("gcd(%s,%s)=", gf_poly_str(a), gf_poly_str(b)); if (a->deg < b->deg) { tmp = b; b = a; a = tmp; } while (b->deg > 0) { gf_poly_mod(bch, a, b, NULL); tmp = b; b = a; a = tmp; } dbg("%s\n", gf_poly_str(a)); return a; } /* * Given a polynomial f and an integer k, compute Tr(a^kX) mod f * This is used in Berlekamp Trace algorithm for splitting polynomials */ static void compute_trace_bk_mod(struct bch_control *bch, int k, const struct gf_poly *f, struct gf_poly *z, struct gf_poly *out) { const int m = GF_M(bch); int i, j; /* z contains z^2j mod f */ z->deg = 1; z->c[0] = 0; z->c[1] = bch->a_pow_tab[k]; out->deg = 0; memset(out, 0, GF_POLY_SZ(f->deg)); /* compute f log 
representation only once */ gf_poly_logrep(bch, f, bch->cache); for (i = 0; i < m; i++) { /* add a^(k*2^i)(z^(2^i) mod f) and compute (z^(2^i) mod f)^2 */ for (j = z->deg; j >= 0; j--) { out->c[j] ^= z->c[j]; z->c[2*j] = gf_sqr(bch, z->c[j]); z->c[2*j+1] = 0; } if (z->deg > out->deg) out->deg = z->deg; if (i < m-1) { z->deg *= 2; /* z^(2(i+1)) mod f = (z^(2^i) mod f)^2 mod f */ gf_poly_mod(bch, z, f, bch->cache); } } while (!out->c[out->deg] && out->deg) out->deg--; dbg("Tr(a^%d.X) mod f = %s\n", k, gf_poly_str(out)); } /* * factor a polynomial using Berlekamp Trace algorithm (BTA) */ static void factor_polynomial(struct bch_control *bch, int k, struct gf_poly *f, struct gf_poly **g, struct gf_poly **h) { struct gf_poly *f2 = bch->poly_2t[0]; struct gf_poly *q = bch->poly_2t[1]; struct gf_poly *tk = bch->poly_2t[2]; struct gf_poly *z = bch->poly_2t[3]; struct gf_poly *gcd; dbg("factoring %s...\n", gf_poly_str(f)); *g = f; *h = NULL; /* tk = Tr(a^k.X) mod f */ compute_trace_bk_mod(bch, k, f, z, tk); if (tk->deg > 0) { /* compute g = gcd(f, tk) (destructive operation) */ gf_poly_copy(f2, f); gcd = gf_poly_gcd(bch, f2, tk); if (gcd->deg < f->deg) { /* compute h=f/gcd(f,tk); this will modify f and q */ gf_poly_div(bch, f, gcd, q); /* store g and h in-place (clobbering f) */ *h = &((struct gf_poly_deg1 *)f)[gcd->deg].poly; gf_poly_copy(*g, gcd); gf_poly_copy(*h, q); } } } /* * find roots of a polynomial, using BTZ algorithm; see the beginning of this * file for details */ static int find_poly_roots(struct bch_control *bch, unsigned int k, struct gf_poly *poly, unsigned int *roots) { int cnt; struct gf_poly *f1, *f2; switch (poly->deg) { /* handle low degree polynomials with ad hoc techniques */ case 1: cnt = find_poly_deg1_roots(bch, poly, roots); break; case 2: cnt = find_poly_deg2_roots(bch, poly, roots); break; case 3: cnt = find_poly_deg3_roots(bch, poly, roots); break; case 4: cnt = find_poly_deg4_roots(bch, poly, roots); break; default: /* factor polynomial using 
Berlekamp Trace Algorithm (BTA) */ cnt = 0; if (poly->deg && (k <= GF_M(bch))) { factor_polynomial(bch, k, poly, &f1, &f2); if (f1) cnt += find_poly_roots(bch, k+1, f1, roots); if (f2) cnt += find_poly_roots(bch, k+1, f2, roots+cnt); } break; } return cnt; } #if defined(USE_CHIEN_SEARCH) /* * exhaustive root search (Chien) implementation - not used, included only for * reference/comparison tests */ static int chien_search(struct bch_control *bch, unsigned int len, struct gf_poly *p, unsigned int *roots) { int m; unsigned int i, j, syn, syn0, count = 0; const unsigned int k = 8*len+bch->ecc_bits; /* use a log-based representation of polynomial */ gf_poly_logrep(bch, p, bch->cache); bch->cache[p->deg] = 0; syn0 = gf_div(bch, p->c[0], p->c[p->deg]); for (i = GF_N(bch)-k+1; i <= GF_N(bch); i++) { /* compute elp(a^i) */ for (j = 1, syn = syn0; j <= p->deg; j++) { m = bch->cache[j]; if (m >= 0) syn ^= a_pow(bch, m+j*i); } if (syn == 0) { roots[count++] = GF_N(bch)-i; if (count == p->deg) break; } } return (count == p->deg) ? 
count : 0; } #define find_poly_roots(_p, _k, _elp, _loc) chien_search(_p, len, _elp, _loc) #endif /* USE_CHIEN_SEARCH */ /** * decode_bch - decode received codeword and find bit error locations * @bch: BCH control structure * @data: received data, ignored if @calc_ecc is provided * @len: data length in bytes, must always be provided * @recv_ecc: received ecc, if NULL then assume it was XORed in @calc_ecc * @calc_ecc: calculated ecc, if NULL then calc_ecc is computed from @data * @syn: hw computed syndrome data (if NULL, syndrome is calculated) * @errloc: output array of error locations * * Returns: * The number of errors found, or -EBADMSG if decoding failed, or -EINVAL if * invalid parameters were provided * * Depending on the available hw BCH support and the need to compute @calc_ecc * separately (using encode_bch()), this function should be called with one of * the following parameter configurations - * * by providing @data and @recv_ecc only: * decode_bch(@bch, @data, @len, @recv_ecc, NULL, NULL, @errloc) * * by providing @recv_ecc and @calc_ecc: * decode_bch(@bch, NULL, @len, @recv_ecc, @calc_ecc, NULL, @errloc) * * by providing ecc = recv_ecc XOR calc_ecc: * decode_bch(@bch, NULL, @len, NULL, ecc, NULL, @errloc) * * by providing syndrome results @syn: * decode_bch(@bch, NULL, @len, NULL, NULL, @syn, @errloc) * * Once decode_bch() has successfully returned with a positive value, error * locations returned in array @errloc should be interpreted as follows - * * if (errloc[n] >= 8*len), then n-th error is located in ecc (no need for * data correction) * * if (errloc[n] < 8*len), then n-th error is located in data and can be * corrected with statement data[errloc[n]/8] ^= 1 << (errloc[n] % 8); * * Note that this function does not perform any data correction by itself, it * merely indicates error locations. 
*/ int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, const uint8_t *recv_ecc, const uint8_t *calc_ecc, const unsigned int *syn, unsigned int *errloc) { const unsigned int ecc_words = BCH_ECC_WORDS(bch); unsigned int nbits; int i, err, nroots; uint32_t sum; /* sanity check: make sure data length can be handled */ if (8*len > (bch->n-bch->ecc_bits)) return -EINVAL; /* if caller does not provide syndromes, compute them */ if (!syn) { if (!calc_ecc) { /* compute received data ecc into an internal buffer */ if (!data || !recv_ecc) return -EINVAL; encode_bch(bch, data, len, NULL); } else { /* load provided calculated ecc */ load_ecc8(bch, bch->ecc_buf, calc_ecc); } /* load received ecc or assume it was XORed in calc_ecc */ if (recv_ecc) { load_ecc8(bch, bch->ecc_buf2, recv_ecc); /* XOR received and calculated ecc */ for (i = 0, sum = 0; i < (int)ecc_words; i++) { bch->ecc_buf[i] ^= bch->ecc_buf2[i]; sum |= bch->ecc_buf[i]; } if (!sum) /* no error found */ return 0; } compute_syndromes(bch, bch->ecc_buf, bch->syn); syn = bch->syn; } err = compute_error_locator_polynomial(bch, syn); if (err > 0) { nroots = find_poly_roots(bch, 1, bch->elp, errloc); if (err != nroots) err = -1; } if (err > 0) { /* post-process raw error locations for easier correction */ nbits = (len*8)+bch->ecc_bits; for (i = 0; i < err; i++) { if (errloc[i] >= nbits) { err = -1; break; } errloc[i] = nbits-1-errloc[i]; errloc[i] = (errloc[i] & ~7)|(7-(errloc[i] & 7)); } } return (err >= 0) ? 
err : -EBADMSG; } EXPORT_SYMBOL_GPL(decode_bch); /* * generate Galois field lookup tables */ static int build_gf_tables(struct bch_control *bch, unsigned int poly) { unsigned int i, x = 1; const unsigned int k = 1 << deg(poly); /* primitive polynomial must be of degree m */ if (k != (1u << GF_M(bch))) return -1; for (i = 0; i < GF_N(bch); i++) { bch->a_pow_tab[i] = x; bch->a_log_tab[x] = i; if (i && (x == 1)) /* polynomial is not primitive (a^i=1 with 0<i<2^m-1) */ return -1; x <<= 1; if (x & k) x ^= poly; } bch->a_pow_tab[GF_N(bch)] = 1; bch->a_log_tab[0] = 0; return 0; } /* * compute generator polynomial remainder tables for fast encoding */ static void build_mod8_tables(struct bch_control *bch, const uint32_t *g) { int i, j, b, d; uint32_t data, hi, lo, *tab; const int l = BCH_ECC_WORDS(bch); const int plen = DIV_ROUND_UP(bch->ecc_bits+1, 32); const int ecclen = DIV_ROUND_UP(bch->ecc_bits, 32); memset(bch->mod8_tab, 0, 4*256*l*sizeof(*bch->mod8_tab)); for (i = 0; i < 256; i++) { /* p(X)=i is a small polynomial of weight <= 8 */ for (b = 0; b < 4; b++) { /* we want to compute (p(X).X^(8*b+deg(g))) mod g(X) */ tab = bch->mod8_tab + (b*256+i)*l; data = i << (8*b); while (data) { d = deg(data); /* subtract X^d.g(X) from p(X).X^(8*b+deg(g)) */ data ^= g[0] >> (31-d); for (j = 0; j < ecclen; j++) { hi = (d < 31) ? g[j] << (d+1) : 0; lo = (j+1 < plen) ? g[j+1] >> (31-d) : 0; tab[j] ^= hi|lo; } } } } } /* * build a base for factoring degree 2 polynomials */ static int build_deg2_base(struct bch_control *bch) { const int m = GF_M(bch); int i, j, r; unsigned int sum, x, y, remaining, ak = 0, xi[m]; /* find k s.t. 
Tr(a^k) = 1 and 0 <= k < m */ for (i = 0; i < m; i++) { for (j = 0, sum = 0; j < m; j++) sum ^= a_pow(bch, i*(1 << j)); if (sum) { ak = bch->a_pow_tab[i]; break; } } /* find xi, i=0..m-1 such that xi^2+xi = a^i+Tr(a^i).a^k */ remaining = m; memset(xi, 0, sizeof(xi)); for (x = 0; (x <= GF_N(bch)) && remaining; x++) { y = gf_sqr(bch, x)^x; for (i = 0; i < 2; i++) { r = a_log(bch, y); if (y && (r < m) && !xi[r]) { bch->xi_tab[r] = x; xi[r] = 1; remaining--; dbg("x%d = %x\n", r, x); break; } y ^= ak; } } /* should not happen but check anyway */ return remaining ? -1 : 0; } static void *bch_alloc(size_t size, int *err) { void *ptr; ptr = kmalloc(size, GFP_KERNEL); if (ptr == NULL) *err = 1; return ptr; } /* * compute generator polynomial for given (m,t) parameters. */ static uint32_t *compute_generator_polynomial(struct bch_control *bch) { const unsigned int m = GF_M(bch); const unsigned int t = GF_T(bch); int n, err = 0; unsigned int i, j, nbits, r, word, *roots; struct gf_poly *g; uint32_t *genpoly; g = bch_alloc(GF_POLY_SZ(m*t), &err); roots = bch_alloc((bch->n+1)*sizeof(*roots), &err); genpoly = bch_alloc(DIV_ROUND_UP(m*t+1, 32)*sizeof(*genpoly), &err); if (err) { kfree(genpoly); genpoly = NULL; goto finish; } /* enumerate all roots of g(X) */ memset(roots , 0, (bch->n+1)*sizeof(*roots)); for (i = 0; i < t; i++) { for (j = 0, r = 2*i+1; j < m; j++) { roots[r] = 1; r = mod_s(bch, 2*r); } } /* build generator polynomial g(X) */ g->deg = 0; g->c[0] = 1; for (i = 0; i < GF_N(bch); i++) { if (roots[i]) { /* multiply g(X) by (X+root) */ r = bch->a_pow_tab[i]; g->c[g->deg+1] = 1; for (j = g->deg; j > 0; j--) g->c[j] = gf_mul(bch, g->c[j], r)^g->c[j-1]; g->c[0] = gf_mul(bch, g->c[0], r); g->deg++; } } /* store left-justified binary representation of g(X) */ n = g->deg+1; i = 0; while (n > 0) { nbits = (n > 32) ? 
32 : n; for (j = 0, word = 0; j < nbits; j++) { if (g->c[n-1-j]) word |= 1u << (31-j); } genpoly[i++] = word; n -= nbits; } bch->ecc_bits = g->deg; finish: kfree(g); kfree(roots); return genpoly; } /** * init_bch - initialize a BCH encoder/decoder * @m: Galois field order, should be in the range 5-15 * @t: maximum error correction capability, in bits * @prim_poly: user-provided primitive polynomial (or 0 to use default) * * Returns: * a newly allocated BCH control structure if successful, NULL otherwise * * This initialization can take some time, as lookup tables are built for fast * encoding/decoding; make sure not to call this function from a time critical * path. Usually, init_bch() should be called on module/driver init and * free_bch() should be called to release memory on exit. * * You may provide your own primitive polynomial of degree @m in argument * @prim_poly, or let init_bch() use its default polynomial. * * Once init_bch() has successfully returned a pointer to a newly allocated * BCH control structure, ecc length in bytes is given by member @ecc_bytes of * the structure. 
*/
struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
{
	int err = 0;
	unsigned int i, words;
	uint32_t *genpoly;
	struct bch_control *bch = NULL;

	const int min_m = 5;
	const int max_m = 15;

	/* default primitive polynomials, one per supported field order m
	 * (index m - min_m) */
	static const unsigned int prim_poly_tab[] = {
		0x25, 0x43, 0x83, 0x11d, 0x211, 0x409, 0x805, 0x1053, 0x201b,
		0x402b, 0x8003,
	};

#if defined(CONFIG_BCH_CONST_PARAMS)
	/* when built with compile-time constant parameters, only the
	 * configured (m, t) pair is usable */
	if ((m != (CONFIG_BCH_CONST_M)) || (t != (CONFIG_BCH_CONST_T))) {
		printk(KERN_ERR "bch encoder/decoder was configured to support "
		       "parameters m=%d, t=%d only!\n",
		       CONFIG_BCH_CONST_M, CONFIG_BCH_CONST_T);
		goto fail;
	}
#endif
	if ((m < min_m) || (m > max_m))
		/*
		 * values of m greater than 15 are not currently supported;
		 * supporting m > 15 would require changing table base type
		 * (uint16_t) and a small patch in matrix transposition
		 */
		goto fail;

	/* sanity checks: need at least 1 correctable bit, and m*t parity
	 * bits must fit inside the codeword length 2^m - 1 */
	if ((t < 1) || (m*t >= ((1 << m)-1)))
		/* invalid t value */
		goto fail;

	/* select a primitive polynomial for generating GF(2^m) */
	if (prim_poly == 0)
		prim_poly = prim_poly_tab[m-min_m];

	bch = kzalloc(sizeof(*bch), GFP_KERNEL);
	if (bch == NULL)
		goto fail;

	bch->m = m;
	bch->t = t;
	bch->n = (1 << m)-1;
	words = DIV_ROUND_UP(m*t, 32);
	bch->ecc_bytes = DIV_ROUND_UP(m*t, 8);
	/* all lookup tables and scratch buffers are allocated up front;
	 * bch_alloc presumably records any failure in err so a single
	 * check below suffices — TODO confirm against bch_alloc() */
	bch->a_pow_tab = bch_alloc((1+bch->n)*sizeof(*bch->a_pow_tab), &err);
	bch->a_log_tab = bch_alloc((1+bch->n)*sizeof(*bch->a_log_tab), &err);
	bch->mod8_tab = bch_alloc(words*1024*sizeof(*bch->mod8_tab), &err);
	bch->ecc_buf = bch_alloc(words*sizeof(*bch->ecc_buf), &err);
	bch->ecc_buf2 = bch_alloc(words*sizeof(*bch->ecc_buf2), &err);
	bch->xi_tab = bch_alloc(m*sizeof(*bch->xi_tab), &err);
	bch->syn = bch_alloc(2*t*sizeof(*bch->syn), &err);
	bch->cache = bch_alloc(2*t*sizeof(*bch->cache), &err);
	bch->elp = bch_alloc((t+1)*sizeof(struct gf_poly_deg1), &err);

	for (i = 0; i < ARRAY_SIZE(bch->poly_2t); i++)
		bch->poly_2t[i] = bch_alloc(GF_POLY_SZ(2*t), &err);

	if (err)
		goto fail;

	err = build_gf_tables(bch, prim_poly);
	if (err)
		goto fail;

	/* use generator polynomial for computing encoding tables */
	genpoly = compute_generator_polynomial(bch);
	if (genpoly == NULL)
		goto fail;

	build_mod8_tables(bch, genpoly);
	kfree(genpoly);

	err = build_deg2_base(bch);
	if (err)
		goto fail;

	return bch;

fail:
	/* free_bch(NULL) is safe, and it frees any subset of the tables
	 * allocated above, so all error paths funnel here */
	free_bch(bch);
	return NULL;
}
EXPORT_SYMBOL_GPL(init_bch);

/**
 * free_bch - free the BCH control structure
 * @bch: BCH control structure to release
 *
 * Releases every table allocated by init_bch(); accepts NULL and
 * partially-initialized structures (unset pointers are NULL thanks to
 * kzalloc, and kfree(NULL) is a no-op).
 */
void free_bch(struct bch_control *bch)
{
	unsigned int i;

	if (bch) {
		kfree(bch->a_pow_tab);
		kfree(bch->a_log_tab);
		kfree(bch->mod8_tab);
		kfree(bch->ecc_buf);
		kfree(bch->ecc_buf2);
		kfree(bch->xi_tab);
		kfree(bch->syn);
		kfree(bch->cache);
		kfree(bch->elp);

		for (i = 0; i < ARRAY_SIZE(bch->poly_2t); i++)
			kfree(bch->poly_2t[i]);

		kfree(bch);
	}
}
EXPORT_SYMBOL_GPL(free_bch);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>");
MODULE_DESCRIPTION("Binary BCH encoder/decoder");
gpl-2.0
faux123/Endeavoru
drivers/net/skfp/queue.c
13133
4083
/******************************************************************************
 *
 *	(C)Copyright 1998,1999 SysKonnect,
 *	a business unit of Schneider & Koch & Co. Datensysteme GmbH.
 *
 *	See the file "skfddi.c" for further information.
 *
 *	This program is free software; you can redistribute it and/or modify
 *	it under the terms of the GNU General Public License as published by
 *	the Free Software Foundation; either version 2 of the License, or
 *	(at your option) any later version.
 *
 *	The information in this file is provided "AS IS" without warranty.
 *
 ******************************************************************************/

/*
	SMT Event Queue Management

	A fixed-size ring buffer of (class, event) pairs feeding the SMT
	state machines. Producers are queue_event()/timer_event(); the
	single consumer is ev_dispatcher().
*/

#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"

#ifndef lint
static const char ID_sccs[] = "@(#)queue.c 2.9 97/08/04 (C) SK " ;
#endif

/* debug tracing is compiled out; the macro swallows its arguments */
#define PRINTF(a,b,c)

/*
 * init event queue management
 *
 * Resets the ring to empty: put == get == start of queue array.
 */
void ev_init(struct s_smc *smc)
{
	smc->q.ev_put = smc->q.ev_get = smc->q.ev_queue ;
}

/*
 * add event to queue
 *
 * Stores (class, event) at the put slot and advances it with wrap-around.
 * If put catches up with get afterwards, the ring has overrun and an
 * error is logged (the oldest entry is effectively lost).
 */
void queue_event(struct s_smc *smc, int class, int event)
{
	PRINTF("queue class %d event %d\n",class,event) ;
	smc->q.ev_put->class = class ;
	smc->q.ev_put->event = event ;
	if (++smc->q.ev_put == &smc->q.ev_queue[MAX_EVENT])
		smc->q.ev_put = smc->q.ev_queue ;

	if (smc->q.ev_put == smc->q.ev_get) {
		/* overrun: dispatcher did not drain fast enough */
		SMT_ERR_LOG(smc,SMT_E0137, SMT_E0137_MSG) ;
	}
}

/*
 * timer_event is called from HW timer package.
 *
 * The timer token encodes both class and event; unpack and enqueue.
 */
void timer_event(struct s_smc *smc, u_long token)
{
	PRINTF("timer event class %d token %d\n",
		EV_T_CLASS(token),
		EV_T_EVENT(token)) ;
	queue_event(smc,EV_T_CLASS(token),EV_T_EVENT(token));
}

/*
 * event dispatcher
 *	while event queue is not empty
 *		get event from queue
 *		send command to state machine
 *	end
 */
void ev_dispatcher(struct s_smc *smc)
{
	struct event_queue	*ev ;	/* pointer into queue */
	int			class ;

	ev = smc->q.ev_get ;
	PRINTF("dispatch get %x put %x\n",ev,smc->q.ev_put) ;
	while (ev != smc->q.ev_put) {
		PRINTF("dispatch class %d event %d\n",ev->class,ev->event) ;
		switch(class = ev->class) {
		case EVENT_ECM :		/* Entity Corordination Man. */
			ecm(smc,(int)ev->event) ;
			break ;
		case EVENT_CFM :		/* Configuration Man. */
			cfm(smc,(int)ev->event) ;
			break ;
		case EVENT_RMT :		/* Ring Man. */
			rmt(smc,(int)ev->event) ;
			break ;
		case EVENT_SMT :
			smt_event(smc,(int)ev->event) ;
			break ;
#ifdef CONCENTRATOR
		case 99 :
			timer_test_event(smc,(int)ev->event) ;
			break ;
#endif
		case EVENT_PCMA :		/* PHY A */
		case EVENT_PCMB :		/* PHY B */
		default :
			/* per-PHY PCM events occupy a contiguous class
			 * range starting at EVENT_PCMA */
			if (class >= EVENT_PCMA &&
			    class < EVENT_PCMA + NUMPHYS) {
				pcm(smc,class - EVENT_PCMA,(int)ev->event) ;
				break ;
			}
			SMT_PANIC(smc,SMT_E0121, SMT_E0121_MSG) ;
			return ;
		}

		if (++ev == &smc->q.ev_queue[MAX_EVENT])
			ev = smc->q.ev_queue ;

		/* Renew get: it is used in queue_events to detect overruns */
		smc->q.ev_get = ev;
	}
}

/*
 * smt_online connects to or disconnects from the ring
 * MUST be called to initiate connection establishment
 *
 *	on	0	disconnect
 *	on	1	connect
 *
 * Returns the resulting SMT configuration state (fddiSMTCF_State) after
 * the queued ECM event has been dispatched synchronously.
 */
u_short smt_online(struct s_smc *smc, int on)
{
	queue_event(smc,EVENT_ECM,on ? EC_CONNECT : EC_DISCONNECT) ;
	ev_dispatcher(smc) ;
	return smc->mib.fddiSMTCF_State;
}

/*
 * set SMT flag to value
 *	flag		flag name
 *	value		flag value
 * dump current flag setting
 */
#ifdef CONCENTRATOR
void do_smt_flag(struct s_smc *smc, char *flag, int value)
{
#ifdef DEBUG
	struct smt_debug	*deb;

	SK_UNUSED(smc) ;

#ifdef DEBUG_BRD
	/* per-board debug state */
	deb = &smc->debug;
#else
	/* single global debug state */
	deb = &debug;
#endif
	if (!strcmp(flag,"smt"))
		deb->d_smt = value ;
	else if (!strcmp(flag,"smtf"))
		deb->d_smtf = value ;
	else if (!strcmp(flag,"pcm"))
		deb->d_pcm = value ;
	else if (!strcmp(flag,"rmt"))
		deb->d_rmt = value ;
	else if (!strcmp(flag,"cfm"))
		deb->d_cfm = value ;
	else if (!strcmp(flag,"ecm"))
		deb->d_ecm = value ;
	/* always dump the full current flag set, even on unknown names */
	printf("smt %d\n",deb->d_smt) ;
	printf("smtf %d\n",deb->d_smtf) ;
	printf("pcm %d\n",deb->d_pcm) ;
	printf("rmt %d\n",deb->d_rmt) ;
	printf("cfm %d\n",deb->d_cfm) ;
	printf("ecm %d\n",deb->d_ecm) ;
#endif	/* DEBUG */
}
#endif
gpl-2.0
friedrich420/Sprint-Note-4-Android-5.1.1-Kernel
drivers/video/atafb_iplan2p8.c
14925
8377
/*
 *  linux/drivers/video/iplan2p8.c -- Low level frame buffer operations for
 *				      interleaved bitplanes a la Atari (8
 *				      planes, 2 bytes interleave)
 *
 *	Created 5 Apr 1997 by Geert Uytterhoeven
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of this archive for
 *  more details.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>

#include <asm/setup.h>

#include "atafb.h"

#define BPL	8
#include "atafb_utils.h"


/* Copies a 8 plane column from 's', height 'h', to 'd'. */

/* This expands a 8 bit color into two longs for two movepl (8 plane)
 * operations.
 */

void atafb_iplan2p8_copyarea(struct fb_info *info, u_long next_line,
			     int sy, int sx, int dy, int dx,
			     int height, int width)
{
	/*  bmove() has to distinguish two major cases: If both, source and
	 *  destination, start at even addresses or both are at odd
	 *  addresses, just the first odd and last even column (if present)
	 *  require special treatment (memmove_col()). The rest between
	 *  then can be copied by normal operations, because all adjacent
	 *  bytes are affected and are to be stored in the same order.
	 *    The pathological case is when the move should go from an odd
	 *  address to an even or vice versa. Since the bytes in the plane
	 *  words must be assembled in new order, it seems wisest to make
	 *  all movements by memmove_col().
	 */
	u8 *src, *dst;
	u32 *s, *d;
	int w, l , i, j;
	u_int colsize;
	u_int upwards = (dy < sy) || (dy == sy && dx < sx);

	colsize = height;
	if (!((sx ^ dx) & 15)) {
		/* odd->odd or even->even: same 16-pixel phase on both sides,
		 * so only the ragged leading/trailing columns need
		 * memmove32_col(); the middle copies word-for-word */

		if (upwards) {
			src = (u8 *)info->screen_base + sy * next_line + (sx & ~15) / (8 / BPL);
			dst = (u8 *)info->screen_base + dy * next_line + (dx & ~15) / (8 / BPL);
			if (sx & 15) {
				/* partial leading column */
				memmove32_col(dst, src, 0xff00ff, height, next_line - BPL * 2);
				src += BPL * 2;
				dst += BPL * 2;
				width -= 8;
			}
			w = width >> 4;
			if (w) {
				/* bulk middle: straight forward u32 copy */
				s = (u32 *)src;
				d = (u32 *)dst;
				w *= BPL / 2;
				l = next_line - w * 4;
				for (j = height; j > 0; j--) {
					for (i = w; i > 0; i--)
						*d++ = *s++;
					s = (u32 *)((u8 *)s + l);
					d = (u32 *)((u8 *)d + l);
				}
			}
			if (width & 15)
				/* partial trailing column */
				memmove32_col(dst + width / (8 / BPL),
					      src + width / (8 / BPL),
					      0xff00ff00, height, next_line - BPL * 2);
		} else {
			/* downwards copy: walk from the end so overlapping
			 * regions are handled correctly */
			src = (u8 *)info->screen_base + (sy - 1) * next_line + ((sx + width + 8) & ~15) / (8 / BPL);
			dst = (u8 *)info->screen_base + (dy - 1) * next_line + ((dx + width + 8) & ~15) / (8 / BPL);

			if ((sx + width) & 15) {
				src -= BPL * 2;
				dst -= BPL * 2;
				memmove32_col(dst, src, 0xff00ff00, colsize, -next_line - BPL * 2);
				width -= 8;
			}
			w = width >> 4;
			if (w) {
				s = (u32 *)src;
				d = (u32 *)dst;
				w *= BPL / 2;
				l = next_line - w * 4;
				for (j = height; j > 0; j--) {
					for (i = w; i > 0; i--)
						*--d = *--s;
					s = (u32 *)((u8 *)s - l);
					d = (u32 *)((u8 *)d - l);
				}
			}
			if (sx & 15)
				memmove32_col(dst - (width - 16) / (8 / BPL),
					      src - (width - 16) / (8 / BPL),
					      0xff00ff, colsize, -next_line - BPL * 2);
		}
	} else {
		/* odd->even or even->odd: every plane word must be
		 * re-assembled, carrying half of each 16-pixel group into
		 * the neighbouring destination word via pval[] */
		if (upwards) {
			u32 *src32, *dst32;
			u32 pval[4], v, v1, mask;
			int i, j, w, f;

			src = (u8 *)info->screen_base + sy * next_line + (sx & ~15) / (8 / BPL);
			dst = (u8 *)info->screen_base + dy * next_line + (dx & ~15) / (8 / BPL);

			mask = 0xff00ff00;
			f = 0;
			w = width;
			if (sx & 15) {
				f = 1;	/* ragged leading column */
				w += 8;
			}
			if ((sx + width) & 15)
				f |= 2;	/* ragged trailing column */
			w >>= 4;
			for (i = height; i; i--) {
				src32 = (u32 *)src;
				dst32 = (u32 *)dst;
				if (f & 1) {
					/* prime the carry from the source's
					 * odd half-column */
					pval[0] = (*src32++ << 8) & mask;
					pval[1] = (*src32++ << 8) & mask;
					pval[2] = (*src32++ << 8) & mask;
					pval[3] = (*src32++ << 8) & mask;
				} else {
					/* prime the carry from existing
					 * destination contents */
					pval[0] = dst32[0] & mask;
					pval[1] = dst32[1] & mask;
					pval[2] = dst32[2] & mask;
					pval[3] = dst32[3] & mask;
				}

				for (j = w; j > 0; j--) {
					/* shift each 32-bit plane pair one
					 * byte, folding in the carry */
					v = *src32++;
					v1 = v & mask;
					*dst32++ = pval[0] | (v1 >> 8);
					pval[0] = (v ^ v1) << 8;
					v = *src32++;
					v1 = v & mask;
					*dst32++ = pval[1] | (v1 >> 8);
					pval[1] = (v ^ v1) << 8;
					v = *src32++;
					v1 = v & mask;
					*dst32++ = pval[2] | (v1 >> 8);
					pval[2] = (v ^ v1) << 8;
					v = *src32++;
					v1 = v & mask;
					*dst32++ = pval[3] | (v1 >> 8);
					pval[3] = (v ^ v1) << 8;
				}

				if (f & 2) {
					/* flush the final carry into the
					 * trailing half-column */
					dst32[0] = (dst32[0] & mask) | pval[0];
					dst32[1] = (dst32[1] & mask) | pval[1];
					dst32[2] = (dst32[2] & mask) | pval[2];
					dst32[3] = (dst32[3] & mask) | pval[3];
				}

				src += next_line;
				dst += next_line;
			}
		} else {
			/* same re-assembly, but scanning backwards for
			 * overlapping downward moves */
			u32 *src32, *dst32;
			u32 pval[4], v, v1, mask;
			int i, j, w, f;

			src = (u8 *)info->screen_base + (sy - 1) * next_line + ((sx + width + 8) & ~15) / (8 / BPL);
			dst = (u8 *)info->screen_base + (dy - 1) * next_line + ((dx + width + 8) & ~15) / (8 / BPL);

			mask = 0xff00ff;
			f = 0;
			w = width;
			if ((dx + width) & 15)
				f = 1;
			if (sx & 15) {
				f |= 2;
				w += 8;
			}
			w >>= 4;
			for (i = height; i; i--) {
				src32 = (u32 *)src;
				dst32 = (u32 *)dst;
				if (f & 1) {
					pval[0] = dst32[-1] & mask;
					pval[1] = dst32[-2] & mask;
					pval[2] = dst32[-3] & mask;
					pval[3] = dst32[-4] & mask;
				} else {
					pval[0] = (*--src32 >> 8) & mask;
					pval[1] = (*--src32 >> 8) & mask;
					pval[2] = (*--src32 >> 8) & mask;
					pval[3] = (*--src32 >> 8) & mask;
				}

				for (j = w; j > 0; j--) {
					v = *--src32;
					v1 = v & mask;
					*--dst32 = pval[0] | (v1 << 8);
					pval[0] = (v ^ v1) >> 8;
					v = *--src32;
					v1 = v & mask;
					*--dst32 = pval[1] | (v1 << 8);
					pval[1] = (v ^ v1) >> 8;
					v = *--src32;
					v1 = v & mask;
					*--dst32 = pval[2] | (v1 << 8);
					pval[2] = (v ^ v1) >> 8;
					v = *--src32;
					v1 = v & mask;
					*--dst32 = pval[3] | (v1 << 8);
					pval[3] = (v ^ v1) >> 8;
				}
				if (!(f & 2)) {
					dst32[-1] = (dst32[-1] & mask) | pval[0];
					dst32[-2] = (dst32[-2] & mask) | pval[1];
					dst32[-3] = (dst32[-3] & mask) | pval[2];
					dst32[-4] = (dst32[-4] & mask) | pval[3];
				}
				src -= next_line;
				dst -= next_line;
			}
		}
	}
}

/*
 * Fill a rectangle with a solid color: an unaligned leading byte-column,
 * a run of full 16-pixel columns, then an unaligned trailing byte-column.
 */
void atafb_iplan2p8_fillrect(struct fb_info *info, u_long next_line, u32 color,
			     int sy, int sx, int height, int width)
{
	u32 *dest;
	int rows, i;
	u32 cval[4];

	dest = (u32 *)(info->screen_base + sy * next_line + (sx & ~15) / (8 / BPL));
	if (sx & 15) {
		/* leading partial column, one byte per plane word */
		u8 *dest8 = (u8 *)dest + 1;

		expand8_col2mask(color, cval);
		for (i = height; i; i--) {
			fill8_col(dest8, cval);
			dest8 += next_line;
		}
		dest += BPL / 2;
		width -= 8;
	}
	expand16_col2mask(color, cval);
	rows = width >> 4;
	if (rows) {
		/* bulk of full 16-pixel columns */
		u32 *d = dest;
		u32 off = next_line - rows * BPL * 2;
		for (i = height; i; i--) {
			d = fill16_col(d, rows, cval);
			d = (u32 *)((long)d + off);
		}
		dest += rows * BPL / 2;
		width &= 15;
	}
	if (width) {
		/* trailing partial column */
		u8 *dest8 = (u8 *)dest;

		expand8_col2mask(color, cval);
		for (i = height; i; i--) {
			fill8_col(dest8, cval);
			dest8 += next_line;
		}
	}
}

/*
 * Expand a 1-bit-per-pixel line of 'width' pixels into the 8 interleaved
 * planes at (dx, dy), using fgcolor for set bits and bgcolor for clear
 * bits; 'data' supplies the mask bits (presumably MSB-first, 16 pixels
 * per u16 in the bulk loop — TODO confirm against atafb_utils.h helpers).
 */
void atafb_iplan2p8_linefill(struct fb_info *info, u_long next_line,
			     int dy, int dx, u32 width,
			     const u8 *data, u32 bgcolor, u32 fgcolor)
{
	u32 *dest;
	const u16 *data16;
	int rows;
	u32 fgm[4], bgm[4], m;

	dest = (u32 *)(info->screen_base + dy * next_line + (dx & ~15) / (8 / BPL));
	if (dx & 15) {
		/* unaligned leading 8 pixels */
		fill8_2col((u8 *)dest + 1, fgcolor, bgcolor, *data++);
		dest += BPL / 2;
		width -= 8;
	}
	if (width >= 16) {
		data16 = (const u16 *)data;
		expand16_2col2mask(fgcolor, bgcolor, fgm, bgm);
		for (rows = width / 16; rows; rows--) {
			u16 d = *data16++;
			/* duplicate the 16 mask bits into both halves so
			 * one AND/XOR handles two interleaved planes */
			m = d | ((u32)d << 16);
			*dest++ = (m & fgm[0]) ^ bgm[0];
			*dest++ = (m & fgm[1]) ^ bgm[1];
			*dest++ = (m & fgm[2]) ^ bgm[2];
			*dest++ = (m & fgm[3]) ^ bgm[3];
		}
		data = (const u8 *)data16;
		width &= 15;
	}
	if (width)
		/* trailing partial 8 pixels */
		fill8_2col((u8 *)dest, fgcolor, bgcolor, *data);
}

#ifdef MODULE
MODULE_LICENSE("GPL");

int init_module(void)
{
	return 0;
}

void cleanup_module(void)
{
}
#endif /* MODULE */

	/*
	 *  Visible symbols for modules
	 */

EXPORT_SYMBOL(atafb_iplan2p8_copyarea);
EXPORT_SYMBOL(atafb_iplan2p8_fillrect);
EXPORT_SYMBOL(atafb_iplan2p8_linefill);
gpl-2.0
linux-shield/kernel
kernel/trace/trace_events.c
78
61785
/* * event tracer * * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> * * - Added format output of fields of the trace point. * This was based off of work by Tom Zanussi <tzanussi@gmail.com>. * */ #define pr_fmt(fmt) fmt #include <linux/workqueue.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/ctype.h> #include <linux/slab.h> #include <linux/delay.h> #include <asm/setup.h> #include "trace_output.h" #undef TRACE_SYSTEM #define TRACE_SYSTEM "TRACE_SYSTEM" DEFINE_MUTEX(event_mutex); LIST_HEAD(ftrace_events); static LIST_HEAD(ftrace_common_fields); #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO) static struct kmem_cache *field_cachep; static struct kmem_cache *file_cachep; #define SYSTEM_FL_FREE_NAME (1 << 31) static inline int system_refcount(struct event_subsystem *system) { return system->ref_count & ~SYSTEM_FL_FREE_NAME; } static int system_refcount_inc(struct event_subsystem *system) { return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME; } static int system_refcount_dec(struct event_subsystem *system) { return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME; } /* Double loops, do not use break, only goto's work */ #define do_for_each_event_file(tr, file) \ list_for_each_entry(tr, &ftrace_trace_arrays, list) { \ list_for_each_entry(file, &tr->events, list) #define do_for_each_event_file_safe(tr, file) \ list_for_each_entry(tr, &ftrace_trace_arrays, list) { \ struct ftrace_event_file *___n; \ list_for_each_entry_safe(file, ___n, &tr->events, list) #define while_for_each_event_file() \ } static struct list_head * trace_get_fields(struct ftrace_event_call *event_call) { if (!event_call->class->get_fields) return &event_call->class->fields; return event_call->class->get_fields(event_call); } static struct ftrace_event_field * __find_event_field(struct list_head *head, char *name) { struct ftrace_event_field *field; list_for_each_entry(field, head, 
link) { if (!strcmp(field->name, name)) return field; } return NULL; } struct ftrace_event_field * trace_find_event_field(struct ftrace_event_call *call, char *name) { struct ftrace_event_field *field; struct list_head *head; field = __find_event_field(&ftrace_common_fields, name); if (field) return field; head = trace_get_fields(call); return __find_event_field(head, name); } static int __trace_define_field(struct list_head *head, const char *type, const char *name, int offset, int size, int is_signed, int filter_type) { struct ftrace_event_field *field; field = kmem_cache_alloc(field_cachep, GFP_TRACE); if (!field) return -ENOMEM; field->name = name; field->type = type; if (filter_type == FILTER_OTHER) field->filter_type = filter_assign_type(type); else field->filter_type = filter_type; field->offset = offset; field->size = size; field->is_signed = is_signed; list_add(&field->link, head); return 0; } int trace_define_field(struct ftrace_event_call *call, const char *type, const char *name, int offset, int size, int is_signed, int filter_type) { struct list_head *head; if (WARN_ON(!call->class)) return 0; head = trace_get_fields(call); return __trace_define_field(head, type, name, offset, size, is_signed, filter_type); } EXPORT_SYMBOL_GPL(trace_define_field); #define __common_field(type, item) \ ret = __trace_define_field(&ftrace_common_fields, #type, \ "common_" #item, \ offsetof(typeof(ent), item), \ sizeof(ent.item), \ is_signed_type(type), FILTER_OTHER); \ if (ret) \ return ret; static int trace_define_common_fields(void) { int ret; struct trace_entry ent; __common_field(unsigned short, type); __common_field(unsigned char, flags); __common_field(unsigned char, preempt_count); __common_field(int, pid); return ret; } static void trace_destroy_fields(struct ftrace_event_call *call) { struct ftrace_event_field *field, *next; struct list_head *head; head = trace_get_fields(call); list_for_each_entry_safe(field, next, head, link) { list_del(&field->link); 
kmem_cache_free(field_cachep, field); } } int trace_event_raw_init(struct ftrace_event_call *call) { int id; id = register_ftrace_event(&call->event); if (!id) return -ENODEV; return 0; } EXPORT_SYMBOL_GPL(trace_event_raw_init); void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer, struct ftrace_event_file *ftrace_file, unsigned long len) { struct ftrace_event_call *event_call = ftrace_file->event_call; local_save_flags(fbuffer->flags); fbuffer->pc = preempt_count(); fbuffer->ftrace_file = ftrace_file; fbuffer->event = trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file, event_call->event.type, len, fbuffer->flags, fbuffer->pc); if (!fbuffer->event) return NULL; fbuffer->entry = ring_buffer_event_data(fbuffer->event); return fbuffer->entry; } EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve); void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer) { event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer, fbuffer->event, fbuffer->entry, fbuffer->flags, fbuffer->pc); } EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit); int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type, void *data) { struct ftrace_event_file *file = data; WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT)); switch (type) { case TRACE_REG_REGISTER: return tracepoint_probe_register(call->tp, call->class->probe, file); case TRACE_REG_UNREGISTER: tracepoint_probe_unregister(call->tp, call->class->probe, file); return 0; #ifdef CONFIG_PERF_EVENTS case TRACE_REG_PERF_REGISTER: return tracepoint_probe_register(call->tp, call->class->perf_probe, call); case TRACE_REG_PERF_UNREGISTER: tracepoint_probe_unregister(call->tp, call->class->perf_probe, call); return 0; case TRACE_REG_PERF_OPEN: case TRACE_REG_PERF_CLOSE: case TRACE_REG_PERF_ADD: case TRACE_REG_PERF_DEL: return 0; #endif } return 0; } EXPORT_SYMBOL_GPL(ftrace_event_reg); void trace_event_enable_cmd_record(bool enable) { struct ftrace_event_file *file; struct trace_array *tr; 
mutex_lock(&event_mutex); do_for_each_event_file(tr, file) { if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) continue; if (enable) { tracing_start_cmdline_record(); set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); } else { tracing_stop_cmdline_record(); clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); } } while_for_each_event_file(); mutex_unlock(&event_mutex); } static int __ftrace_event_enable_disable(struct ftrace_event_file *file, int enable, int soft_disable) { struct ftrace_event_call *call = file->event_call; int ret = 0; int disable; switch (enable) { case 0: /* * When soft_disable is set and enable is cleared, the sm_ref * reference counter is decremented. If it reaches 0, we want * to clear the SOFT_DISABLED flag but leave the event in the * state that it was. That is, if the event was enabled and * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED * is set we do not want the event to be enabled before we * clear the bit. * * When soft_disable is not set but the SOFT_MODE flag is, * we do nothing. Do not disable the tracepoint, otherwise * "soft enable"s (clearing the SOFT_DISABLED bit) wont work. 
*/ if (soft_disable) { if (atomic_dec_return(&file->sm_ref) > 0) break; disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED; clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags); } else disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE); if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) { clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) { tracing_stop_cmdline_record(); clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); } call->class->reg(call, TRACE_REG_UNREGISTER, file); } /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */ if (file->flags & FTRACE_EVENT_FL_SOFT_MODE) set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); else clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); break; case 1: /* * When soft_disable is set and enable is set, we want to * register the tracepoint for the event, but leave the event * as is. That means, if the event was already enabled, we do * nothing (but set SOFT_MODE). If the event is disabled, we * set SOFT_DISABLED before enabling the event tracepoint, so * it still seems to be disabled. */ if (!soft_disable) clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); else { if (atomic_inc_return(&file->sm_ref) > 1) break; set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags); } if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) { /* Keep the event disabled, when going to SOFT_MODE. */ if (soft_disable) set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); if (trace_flags & TRACE_ITER_RECORD_CMD) { tracing_start_cmdline_record(); set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); } ret = call->class->reg(call, TRACE_REG_REGISTER, file); if (ret) { tracing_stop_cmdline_record(); pr_info("event trace: Could not enable event " "%s\n", ftrace_event_name(call)); break; } set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); /* WAS_ENABLED gets set but never cleared. 
*/ call->flags |= TRACE_EVENT_FL_WAS_ENABLED; } break; } return ret; } int trace_event_enable_disable(struct ftrace_event_file *file, int enable, int soft_disable) { return __ftrace_event_enable_disable(file, enable, soft_disable); } static int ftrace_event_enable_disable(struct ftrace_event_file *file, int enable) { return __ftrace_event_enable_disable(file, enable, 0); } static void ftrace_clear_events(struct trace_array *tr) { struct ftrace_event_file *file; mutex_lock(&event_mutex); list_for_each_entry(file, &tr->events, list) { ftrace_event_enable_disable(file, 0); } mutex_unlock(&event_mutex); } static void __put_system(struct event_subsystem *system) { struct event_filter *filter = system->filter; WARN_ON_ONCE(system_refcount(system) == 0); if (system_refcount_dec(system)) return; list_del(&system->list); if (filter) { kfree(filter->filter_string); kfree(filter); } if (system->ref_count & SYSTEM_FL_FREE_NAME) kfree(system->name); kfree(system); } static void __get_system(struct event_subsystem *system) { WARN_ON_ONCE(system_refcount(system) == 0); system_refcount_inc(system); } static void __get_system_dir(struct ftrace_subsystem_dir *dir) { WARN_ON_ONCE(dir->ref_count == 0); dir->ref_count++; __get_system(dir->subsystem); } static void __put_system_dir(struct ftrace_subsystem_dir *dir) { WARN_ON_ONCE(dir->ref_count == 0); /* If the subsystem is about to be freed, the dir must be too */ WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1); __put_system(dir->subsystem); if (!--dir->ref_count) kfree(dir); } static void put_system(struct ftrace_subsystem_dir *dir) { mutex_lock(&event_mutex); __put_system_dir(dir); mutex_unlock(&event_mutex); } static void remove_subsystem(struct ftrace_subsystem_dir *dir) { if (!dir) return; if (!--dir->nr_events) { debugfs_remove_recursive(dir->entry); list_del(&dir->list); __put_system_dir(dir); } } static void remove_event_file_dir(struct ftrace_event_file *file) { struct dentry *dir = file->dir; struct 
dentry *child; if (dir) { spin_lock(&dir->d_lock); /* probably unneeded */ list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) { if (child->d_inode) /* probably unneeded */ child->d_inode->i_private = NULL; } spin_unlock(&dir->d_lock); debugfs_remove_recursive(dir); } list_del(&file->list); remove_subsystem(file->system); free_event_filter(file->filter); kmem_cache_free(file_cachep, file); } /* * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events. */ static int __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match, const char *sub, const char *event, int set) { struct ftrace_event_file *file; struct ftrace_event_call *call; const char *name; int ret = -EINVAL; list_for_each_entry(file, &tr->events, list) { call = file->event_call; name = ftrace_event_name(call); if (!name || !call->class || !call->class->reg) continue; if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) continue; if (match && strcmp(match, name) != 0 && strcmp(match, call->class->system) != 0) continue; if (sub && strcmp(sub, call->class->system) != 0) continue; if (event && strcmp(event, name) != 0) continue; ftrace_event_enable_disable(file, set); ret = 0; } return ret; } static int __ftrace_set_clr_event(struct trace_array *tr, const char *match, const char *sub, const char *event, int set) { int ret; mutex_lock(&event_mutex); ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set); mutex_unlock(&event_mutex); return ret; } static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) { char *event = NULL, *sub = NULL, *match; /* * The buf format can be <subsystem>:<event-name> * *:<event-name> means any event by that name. * :<event-name> is the same. * * <subsystem>:* means all events in that subsystem * <subsystem>: means the same. 
* * <name> (no ':') means all events in a subsystem with * the name <name> or any event that matches <name> */ match = strsep(&buf, ":"); if (buf) { sub = match; event = buf; match = NULL; if (!strlen(sub) || strcmp(sub, "*") == 0) sub = NULL; if (!strlen(event) || strcmp(event, "*") == 0) event = NULL; } return __ftrace_set_clr_event(tr, match, sub, event, set); } /** * trace_set_clr_event - enable or disable an event * @system: system name to match (NULL for any system) * @event: event name to match (NULL for all events, within system) * @set: 1 to enable, 0 to disable * * This is a way for other parts of the kernel to enable or disable * event recording. * * Returns 0 on success, -EINVAL if the parameters do not match any * registered events. */ int trace_set_clr_event(const char *system, const char *event, int set) { struct trace_array *tr = top_trace_array(); if (!tr) return -ENODEV; return __ftrace_set_clr_event(tr, NULL, system, event, set); } EXPORT_SYMBOL_GPL(trace_set_clr_event); /* 128 should be much more than enough */ #define EVENT_BUF_SIZE 127 static ssize_t ftrace_event_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_parser parser; struct seq_file *m = file->private_data; struct trace_array *tr = m->private; ssize_t read, ret; if (!cnt) return 0; ret = tracing_update_buffers(); if (ret < 0) return ret; if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1)) return -ENOMEM; read = trace_get_user(&parser, ubuf, cnt, ppos); if (read >= 0 && trace_parser_loaded((&parser))) { int set = 1; if (*parser.buffer == '!') set = 0; parser.buffer[parser.idx] = 0; ret = ftrace_set_clr_event(tr, parser.buffer + !set, set); if (ret) goto out_put; } ret = read; out_put: trace_parser_put(&parser); return ret; } static void * t_next(struct seq_file *m, void *v, loff_t *pos) { struct ftrace_event_file *file = v; struct ftrace_event_call *call; struct trace_array *tr = m->private; (*pos)++; list_for_each_entry_continue(file, 
&tr->events, list) { call = file->event_call; /* * The ftrace subsystem is for showing formats only. * They can not be enabled or disabled via the event files. */ if (call->class && call->class->reg) return file; } return NULL; } static void *t_start(struct seq_file *m, loff_t *pos) { struct ftrace_event_file *file; struct trace_array *tr = m->private; loff_t l; mutex_lock(&event_mutex); file = list_entry(&tr->events, struct ftrace_event_file, list); for (l = 0; l <= *pos; ) { file = t_next(m, file, &l); if (!file) break; } return file; } static void * s_next(struct seq_file *m, void *v, loff_t *pos) { struct ftrace_event_file *file = v; struct trace_array *tr = m->private; (*pos)++; list_for_each_entry_continue(file, &tr->events, list) { if (file->flags & FTRACE_EVENT_FL_ENABLED) return file; } return NULL; } static void *s_start(struct seq_file *m, loff_t *pos) { struct ftrace_event_file *file; struct trace_array *tr = m->private; loff_t l; mutex_lock(&event_mutex); file = list_entry(&tr->events, struct ftrace_event_file, list); for (l = 0; l <= *pos; ) { file = s_next(m, file, &l); if (!file) break; } return file; } static int t_show(struct seq_file *m, void *v) { struct ftrace_event_file *file = v; struct ftrace_event_call *call = file->event_call; if (strcmp(call->class->system, TRACE_SYSTEM) != 0) seq_printf(m, "%s:", call->class->system); seq_printf(m, "%s\n", ftrace_event_name(call)); return 0; } static void t_stop(struct seq_file *m, void *p) { mutex_unlock(&event_mutex); } static ssize_t event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct ftrace_event_file *file; unsigned long flags; char buf[4] = "0"; mutex_lock(&event_mutex); file = event_file_data(filp); if (likely(file)) flags = file->flags; mutex_unlock(&event_mutex); if (!file) return -ENODEV; if (flags & FTRACE_EVENT_FL_ENABLED && !(flags & FTRACE_EVENT_FL_SOFT_DISABLED)) strcpy(buf, "1"); if (flags & FTRACE_EVENT_FL_SOFT_DISABLED || flags & 
FTRACE_EVENT_FL_SOFT_MODE) strcat(buf, "*"); strcat(buf, "\n"); return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf)); } static ssize_t event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct ftrace_event_file *file; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; ret = tracing_update_buffers(); if (ret < 0) return ret; switch (val) { case 0: case 1: ret = -ENODEV; mutex_lock(&event_mutex); file = event_file_data(filp); if (likely(file)) ret = ftrace_event_enable_disable(file, val); mutex_unlock(&event_mutex); break; default: return -EINVAL; } *ppos += cnt; return ret ? ret : cnt; } static ssize_t system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { const char set_to_char[4] = { '?', '0', '1', 'X' }; struct ftrace_subsystem_dir *dir = filp->private_data; struct event_subsystem *system = dir->subsystem; struct ftrace_event_call *call; struct ftrace_event_file *file; struct trace_array *tr = dir->tr; char buf[2]; int set = 0; int ret; mutex_lock(&event_mutex); list_for_each_entry(file, &tr->events, list) { call = file->event_call; if (!ftrace_event_name(call) || !call->class || !call->class->reg) continue; if (system && strcmp(call->class->system, system->name) != 0) continue; /* * We need to find out if all the events are set * or if all events or cleared, or if we have * a mixture. */ set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED)); /* * If we have a mixture, no need to look further. 
*/ if (set == 3) break; } mutex_unlock(&event_mutex); buf[0] = set_to_char[set]; buf[1] = '\n'; ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); return ret; } static ssize_t system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct ftrace_subsystem_dir *dir = filp->private_data; struct event_subsystem *system = dir->subsystem; const char *name = NULL; unsigned long val; ssize_t ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; ret = tracing_update_buffers(); if (ret < 0) return ret; if (val != 0 && val != 1) return -EINVAL; /* * Opening of "enable" adds a ref count to system, * so the name is safe to use. */ if (system) name = system->name; ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val); if (ret) goto out; ret = cnt; out: *ppos += cnt; return ret; } enum { FORMAT_HEADER = 1, FORMAT_FIELD_SEPERATOR = 2, FORMAT_PRINTFMT = 3, }; static void *f_next(struct seq_file *m, void *v, loff_t *pos) { struct ftrace_event_call *call = event_file_data(m->private); struct list_head *common_head = &ftrace_common_fields; struct list_head *head = trace_get_fields(call); struct list_head *node = v; (*pos)++; switch ((unsigned long)v) { case FORMAT_HEADER: node = common_head; break; case FORMAT_FIELD_SEPERATOR: node = head; break; case FORMAT_PRINTFMT: /* all done */ return NULL; } node = node->prev; if (node == common_head) return (void *)FORMAT_FIELD_SEPERATOR; else if (node == head) return (void *)FORMAT_PRINTFMT; else return node; } static int f_show(struct seq_file *m, void *v) { struct ftrace_event_call *call = event_file_data(m->private); struct ftrace_event_field *field; const char *array_descriptor; switch ((unsigned long)v) { case FORMAT_HEADER: seq_printf(m, "name: %s\n", ftrace_event_name(call)); seq_printf(m, "ID: %d\n", call->event.type); seq_printf(m, "format:\n"); return 0; case FORMAT_FIELD_SEPERATOR: seq_putc(m, '\n'); return 0; case FORMAT_PRINTFMT: seq_printf(m, "\nprint fmt: %s\n", 
call->print_fmt); return 0; } field = list_entry(v, struct ftrace_event_field, link); /* * Smartly shows the array type(except dynamic array). * Normal: * field:TYPE VAR * If TYPE := TYPE[LEN], it is shown: * field:TYPE VAR[LEN] */ array_descriptor = strchr(field->type, '['); if (!strncmp(field->type, "__data_loc", 10)) array_descriptor = NULL; if (!array_descriptor) seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n", field->type, field->name, field->offset, field->size, !!field->is_signed); else seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n", (int)(array_descriptor - field->type), field->type, field->name, array_descriptor, field->offset, field->size, !!field->is_signed); return 0; } static void *f_start(struct seq_file *m, loff_t *pos) { void *p = (void *)FORMAT_HEADER; loff_t l = 0; /* ->stop() is called even if ->start() fails */ mutex_lock(&event_mutex); if (!event_file_data(m->private)) return ERR_PTR(-ENODEV); while (l < *pos && p) p = f_next(m, p, &l); return p; } static void f_stop(struct seq_file *m, void *p) { mutex_unlock(&event_mutex); } static const struct seq_operations trace_format_seq_ops = { .start = f_start, .next = f_next, .stop = f_stop, .show = f_show, }; static int trace_format_open(struct inode *inode, struct file *file) { struct seq_file *m; int ret; ret = seq_open(file, &trace_format_seq_ops); if (ret < 0) return ret; m = file->private_data; m->private = file; return 0; } static ssize_t event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { int id = (long)event_file_data(filp); char buf[32]; int len; if (*ppos) return 0; if (unlikely(!id)) return -ENODEV; len = sprintf(buf, "%d\n", id); return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); } static ssize_t event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct ftrace_event_file *file; struct trace_seq *s; int r = -ENODEV; if (*ppos) return 0; s = kmalloc(sizeof(*s), GFP_KERNEL); if 
(!s) return -ENOMEM; trace_seq_init(s); mutex_lock(&event_mutex); file = event_file_data(filp); if (file) print_event_filter(file, s); mutex_unlock(&event_mutex); if (file) r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); kfree(s); return r; } static ssize_t event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct ftrace_event_file *file; char *buf; int err = -ENODEV; if (cnt >= PAGE_SIZE) return -EINVAL; buf = (char *)__get_free_page(GFP_TEMPORARY); if (!buf) return -ENOMEM; if (copy_from_user(buf, ubuf, cnt)) { free_page((unsigned long) buf); return -EFAULT; } buf[cnt] = '\0'; mutex_lock(&event_mutex); file = event_file_data(filp); if (file) err = apply_event_filter(file, buf); mutex_unlock(&event_mutex); free_page((unsigned long) buf); if (err < 0) return err; *ppos += cnt; return cnt; } static LIST_HEAD(event_subsystems); static int subsystem_open(struct inode *inode, struct file *filp) { struct event_subsystem *system = NULL; struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */ struct trace_array *tr; int ret; if (tracing_is_disabled()) return -ENODEV; /* Make sure the system still exists */ mutex_lock(&trace_types_lock); mutex_lock(&event_mutex); list_for_each_entry(tr, &ftrace_trace_arrays, list) { list_for_each_entry(dir, &tr->systems, list) { if (dir == inode->i_private) { /* Don't open systems with no events */ if (dir->nr_events) { __get_system_dir(dir); system = dir->subsystem; } goto exit_loop; } } } exit_loop: mutex_unlock(&event_mutex); mutex_unlock(&trace_types_lock); if (!system) return -ENODEV; /* Some versions of gcc think dir can be uninitialized here */ WARN_ON(!dir); /* Still need to increment the ref count of the system */ if (trace_array_get(tr) < 0) { put_system(dir); return -ENODEV; } ret = tracing_open_generic(inode, filp); if (ret < 0) { trace_array_put(tr); put_system(dir); } return ret; } static int system_tr_open(struct inode *inode, struct file *filp) { struct 
ftrace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		/* Undo the ref and allocation taken above on failure. */
		trace_array_put(tr);
		kfree(dir);
		return ret;
	}

	filp->private_data = dir;

	return 0;
}

/*
 * ->release for subsystem files (and the tr-wide "enable" file).  Drops
 * the trace_array reference taken at open time, then either drops the
 * system reference or frees the temporary system-less descriptor made
 * by system_tr_open().
 */
static int subsystem_release(struct inode *inode, struct file *file)
{
	struct ftrace_subsystem_dir *dir = file->private_data;

	trace_array_put(dir->tr);

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

/*
 * Read handler for a subsystem "filter" file: render the subsystem-wide
 * event filter into a trace_seq and copy it to userspace.
 */
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	/* Whole filter is produced in one read; EOF afterwards. */
	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

/*
 * Write handler for a subsystem "filter" file: copy the user's filter
 * string (at most one page) and apply it to every event in the
 * subsystem.
 */
static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(dir, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

/*
 * Read handler for "header_page"/"header_event": the file's private_data
 * is a printer function that fills a trace_seq with the header text.
 */
static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;
if (*ppos) return 0; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; trace_seq_init(s); func(s); r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); kfree(s); return r; } static int ftrace_event_avail_open(struct inode *inode, struct file *file); static int ftrace_event_set_open(struct inode *inode, struct file *file); static int ftrace_event_release(struct inode *inode, struct file *file); static const struct seq_operations show_event_seq_ops = { .start = t_start, .next = t_next, .show = t_show, .stop = t_stop, }; static const struct seq_operations show_set_event_seq_ops = { .start = s_start, .next = s_next, .show = t_show, .stop = t_stop, }; static const struct file_operations ftrace_avail_fops = { .open = ftrace_event_avail_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct file_operations ftrace_set_event_fops = { .open = ftrace_event_set_open, .read = seq_read, .write = ftrace_event_write, .llseek = seq_lseek, .release = ftrace_event_release, }; static const struct file_operations ftrace_enable_fops = { .open = tracing_open_generic, .read = event_enable_read, .write = event_enable_write, .llseek = default_llseek, }; static const struct file_operations ftrace_event_format_fops = { .open = trace_format_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct file_operations ftrace_event_id_fops = { .read = event_id_read, .llseek = default_llseek, }; static const struct file_operations ftrace_event_filter_fops = { .open = tracing_open_generic, .read = event_filter_read, .write = event_filter_write, .llseek = default_llseek, }; static const struct file_operations ftrace_subsystem_filter_fops = { .open = subsystem_open, .read = subsystem_filter_read, .write = subsystem_filter_write, .llseek = default_llseek, .release = subsystem_release, }; static const struct file_operations ftrace_system_enable_fops = { .open = subsystem_open, .read = system_enable_read, .write = 
system_enable_write, .llseek = default_llseek, .release = subsystem_release, }; static const struct file_operations ftrace_tr_enable_fops = { .open = system_tr_open, .read = system_enable_read, .write = system_enable_write, .llseek = default_llseek, .release = subsystem_release, }; static const struct file_operations ftrace_show_header_fops = { .open = tracing_open_generic, .read = show_header, .llseek = default_llseek, }; static int ftrace_event_open(struct inode *inode, struct file *file, const struct seq_operations *seq_ops) { struct seq_file *m; int ret; ret = seq_open(file, seq_ops); if (ret < 0) return ret; m = file->private_data; /* copy tr over to seq ops */ m->private = inode->i_private; return ret; } static int ftrace_event_release(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; trace_array_put(tr); return seq_release(inode, file); } static int ftrace_event_avail_open(struct inode *inode, struct file *file) { const struct seq_operations *seq_ops = &show_event_seq_ops; return ftrace_event_open(inode, file, seq_ops); } static int ftrace_event_set_open(struct inode *inode, struct file *file) { const struct seq_operations *seq_ops = &show_set_event_seq_ops; struct trace_array *tr = inode->i_private; int ret; if (trace_array_get(tr) < 0) return -ENODEV; if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) ftrace_clear_events(tr); ret = ftrace_event_open(inode, file, seq_ops); if (ret < 0) trace_array_put(tr); return ret; } static struct event_subsystem * create_new_subsystem(const char *name) { struct event_subsystem *system; /* need to create new entry */ system = kmalloc(sizeof(*system), GFP_KERNEL); if (!system) return NULL; system->ref_count = 1; /* Only allocate if dynamic (kprobes and modules) */ if (!core_kernel_data((unsigned long)name)) { system->ref_count |= SYSTEM_FL_FREE_NAME; system->name = kstrdup(name, GFP_KERNEL); if (!system->name) goto out_free; } else system->name = name; system->filter = NULL; 
system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL); if (!system->filter) goto out_free; list_add(&system->list, &event_subsystems); return system; out_free: if (system->ref_count & SYSTEM_FL_FREE_NAME) kfree(system->name); kfree(system); return NULL; } static struct dentry * event_subsystem_dir(struct trace_array *tr, const char *name, struct ftrace_event_file *file, struct dentry *parent) { struct ftrace_subsystem_dir *dir; struct event_subsystem *system; struct dentry *entry; /* First see if we did not already create this dir */ list_for_each_entry(dir, &tr->systems, list) { system = dir->subsystem; if (strcmp(system->name, name) == 0) { dir->nr_events++; file->system = dir; return dir->entry; } } /* Now see if the system itself exists. */ list_for_each_entry(system, &event_subsystems, list) { if (strcmp(system->name, name) == 0) break; } /* Reset system variable when not found */ if (&system->list == &event_subsystems) system = NULL; dir = kmalloc(sizeof(*dir), GFP_KERNEL); if (!dir) goto out_fail; if (!system) { system = create_new_subsystem(name); if (!system) goto out_free; } else __get_system(system); dir->entry = debugfs_create_dir(name, parent); if (!dir->entry) { pr_warn("Failed to create system directory %s\n", name); __put_system(system); goto out_free; } dir->tr = tr; dir->ref_count = 1; dir->nr_events = 1; dir->subsystem = system; file->system = dir; entry = debugfs_create_file("filter", 0644, dir->entry, dir, &ftrace_subsystem_filter_fops); if (!entry) { kfree(system->filter); system->filter = NULL; pr_warn("Could not create debugfs '%s/filter' entry\n", name); } trace_create_file("enable", 0644, dir->entry, dir, &ftrace_system_enable_fops); list_add(&dir->list, &tr->systems); return dir->entry; out_free: kfree(dir); out_fail: /* Only print this message if failed on memory allocation */ if (!dir || !system) pr_warn("No memory to create event subsystem %s\n", name); return NULL; } static int event_create_dir(struct dentry *parent, 
struct ftrace_event_file *file) { struct ftrace_event_call *call = file->event_call; struct trace_array *tr = file->tr; struct list_head *head; struct dentry *d_events; const char *name; int ret; /* * If the trace point header did not define TRACE_SYSTEM * then the system would be called "TRACE_SYSTEM". */ if (strcmp(call->class->system, TRACE_SYSTEM) != 0) { d_events = event_subsystem_dir(tr, call->class->system, file, parent); if (!d_events) return -ENOMEM; } else d_events = parent; name = ftrace_event_name(call); file->dir = debugfs_create_dir(name, d_events); if (!file->dir) { pr_warn("Could not create debugfs '%s' directory\n", name); return -1; } if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) trace_create_file("enable", 0644, file->dir, file, &ftrace_enable_fops); #ifdef CONFIG_PERF_EVENTS if (call->event.type && call->class->reg) trace_create_file("id", 0444, file->dir, (void *)(long)call->event.type, &ftrace_event_id_fops); #endif /* * Other events may have the same class. Only update * the fields if they are not already defined. */ head = trace_get_fields(call); if (list_empty(head)) { ret = call->class->define_fields(call); if (ret < 0) { pr_warn("Could not initialize trace point events/%s\n", name); return -1; } } trace_create_file("filter", 0644, file->dir, file, &ftrace_event_filter_fops); trace_create_file("trigger", 0644, file->dir, file, &event_trigger_fops); trace_create_file("format", 0444, file->dir, call, &ftrace_event_format_fops); return 0; } static void remove_event_from_tracers(struct ftrace_event_call *call) { struct ftrace_event_file *file; struct trace_array *tr; do_for_each_event_file_safe(tr, file) { if (file->event_call != call) continue; remove_event_file_dir(file); /* * The do_for_each_event_file_safe() is * a double loop. After finding the call for this * trace_array, we use break to jump to the next * trace_array. 
	 */
		break;
	} while_for_each_event_file();
}

/*
 * Tear down an event call: disable it in every trace_array, unregister
 * its output event (if one was registered), remove its per-instance
 * files, and unlink it from the global ftrace_events list.
 */
static void event_remove(struct ftrace_event_call *call)
{
	struct trace_array *tr;
	struct ftrace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		ftrace_event_enable_disable(file, 0);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
}

/*
 * Run an event call's raw_init hook, if any.  A missing name is a bug
 * (WARN + -EINVAL); -ENOSYS from the hook means "nothing to do" and is
 * not warned about.
 */
static int event_init(struct ftrace_event_call *call)
{
	int ret = 0;
	const char *name;

	name = ftrace_event_name(call);
	if (WARN_ON(!name))
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0 && ret != -ENOSYS)
			pr_warn("Could not initialize trace events/%s\n",
				name);
	}

	return ret;
}

/*
 * Initialize @call, record its owning module (NULL for built-in events)
 * and add it to the global ftrace_events list.
 */
static int
__register_event(struct ftrace_event_call *call, struct module *mod)
{
	int ret;

	ret = event_init(call);
	if (ret < 0)
		return ret;

	list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return 0;
}

/*
 * Allocate the per-instance file that binds @call to trace array @tr
 * and add it to the instance's event list.  Returns NULL on allocation
 * failure.
 */
static struct ftrace_event_file *
trace_create_new_event(struct ftrace_event_call *call,
		       struct trace_array *tr)
{
	struct ftrace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
	if (!file)
		return NULL;

	file->event_call = call;
	file->tr = tr;
	atomic_set(&file->sm_ref, 0);
	atomic_set(&file->tm_ref, 0);
	INIT_LIST_HEAD(&file->triggers);
	list_add(&file->list, &tr->events);

	return file;
}

/* Add an event to a trace directory */
static int
__trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
{
	struct ftrace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	/* The debugfs files for the event are created here as well. */
	return event_create_dir(tr->event_dir, file);
}

/*
 * Just create a descriptor for early init. A descriptor is required
 * for enabling events at boot. We want to enable events before
 * the filesystem is initialized.
*/ static __init int __trace_early_add_new_event(struct ftrace_event_call *call, struct trace_array *tr) { struct ftrace_event_file *file; file = trace_create_new_event(call, tr); if (!file) return -ENOMEM; return 0; } struct ftrace_module_file_ops; static void __add_event_to_tracers(struct ftrace_event_call *call); /* Add an additional event_call dynamically */ int trace_add_event_call(struct ftrace_event_call *call) { int ret; mutex_lock(&trace_types_lock); mutex_lock(&event_mutex); ret = __register_event(call, NULL); if (ret >= 0) __add_event_to_tracers(call); mutex_unlock(&event_mutex); mutex_unlock(&trace_types_lock); return ret; } /* * Must be called under locking of trace_types_lock, event_mutex and * trace_event_sem. */ static void __trace_remove_event_call(struct ftrace_event_call *call) { event_remove(call); trace_destroy_fields(call); free_event_filter(call->filter); call->filter = NULL; } static int probe_remove_event_call(struct ftrace_event_call *call) { struct trace_array *tr; struct ftrace_event_file *file; #ifdef CONFIG_PERF_EVENTS if (call->perf_refcount) return -EBUSY; #endif do_for_each_event_file(tr, file) { if (file->event_call != call) continue; /* * We can't rely on ftrace_event_enable_disable(enable => 0) * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress * TRACE_REG_UNREGISTER. */ if (file->flags & FTRACE_EVENT_FL_ENABLED) return -EBUSY; /* * The do_for_each_event_file_safe() is * a double loop. After finding the call for this * trace_array, we use break to jump to the next * trace_array. 
*/ break; } while_for_each_event_file(); __trace_remove_event_call(call); return 0; } /* Remove an event_call */ int trace_remove_event_call(struct ftrace_event_call *call) { int ret; mutex_lock(&trace_types_lock); mutex_lock(&event_mutex); down_write(&trace_event_sem); ret = probe_remove_event_call(call); up_write(&trace_event_sem); mutex_unlock(&event_mutex); mutex_unlock(&trace_types_lock); return ret; } #define for_each_event(event, start, end) \ for (event = start; \ (unsigned long)event < (unsigned long)end; \ event++) #ifdef CONFIG_MODULES static void trace_module_add_events(struct module *mod) { struct ftrace_event_call **call, **start, **end; if (!mod->num_trace_events) return; /* Don't add infrastructure for mods without tracepoints */ if (trace_module_has_bad_taint(mod)) { pr_err("%s: module has bad taint, not creating trace events\n", mod->name); return; } start = mod->trace_events; end = mod->trace_events + mod->num_trace_events; for_each_event(call, start, end) { __register_event(*call, mod); __add_event_to_tracers(*call); } } static void trace_module_remove_events(struct module *mod) { struct ftrace_event_call *call, *p; bool clear_trace = false; down_write(&trace_event_sem); list_for_each_entry_safe(call, p, &ftrace_events, list) { if (call->mod == mod) { if (call->flags & TRACE_EVENT_FL_WAS_ENABLED) clear_trace = true; __trace_remove_event_call(call); } } up_write(&trace_event_sem); /* * It is safest to reset the ring buffer if the module being unloaded * registered any events that were used. The only worry is if * a new module gets loaded, and takes on the same id as the events * of this module. When printing out the buffer, traced events left * over from this module may be passed to the new module events and * unexpected results may occur. 
	 */
	if (clear_trace)
		tracing_reset_all_online_cpus();
}

/*
 * Module notifier: register a module's trace events when it is loaded
 * and tear them down when it is unloaded.  Takes trace_types_lock then
 * event_mutex, matching the locking order used elsewhere in this file.
 */
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

/* Create a new event directory structure for a trace directory. */
static void
__trace_add_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		ret = __trace_add_new_event(call, tr);
		if (ret < 0)
			pr_warn("Could not create directory for event %s\n",
				ftrace_event_name(call));
	}
}

/*
 * Look up the event file for @system:@event in trace array @tr.
 * Events with no registration hook or flagged IGNORE_ENABLE are
 * skipped.  Returns NULL when no match is found.
 */
struct ftrace_event_file *
find_event_file(struct trace_array *tr, const char *system,  const char *event)
{
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	const char *name;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = ftrace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (strcmp(event, name) == 0 &&
		    strcmp(system, call->class->system) == 0)
			return file;
	}
	return NULL;
}

#ifdef CONFIG_DYNAMIC_FTRACE

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"

/* State carried by an enable_event/disable_event function probe. */
struct event_probe_data {
	struct ftrace_event_file	*file;	/* event to toggle */
	unsigned long			count;	/* remaining triggers; -1 = unlimited */
	int				ref;	/* probe reference count */
	bool				enable;	/* true = enable the event on hit */
};

/*
 * Function probe callback: soft-enable or soft-disable the target event
 * by flipping its SOFT_DISABLED bit.
 */
static void
event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (!data)
		return;

	if (data->enable)
		clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
	else
set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags); } static void event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data) { struct event_probe_data **pdata = (struct event_probe_data **)_data; struct event_probe_data *data = *pdata; if (!data) return; if (!data->count) return; /* Skip if the event is in a state we want to switch to */ if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)) return; if (data->count != -1) (data->count)--; event_enable_probe(ip, parent_ip, _data); } static int event_enable_print(struct seq_file *m, unsigned long ip, struct ftrace_probe_ops *ops, void *_data) { struct event_probe_data *data = _data; seq_printf(m, "%ps:", (void *)ip); seq_printf(m, "%s:%s:%s", data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, data->file->event_call->class->system, ftrace_event_name(data->file->event_call)); if (data->count == -1) seq_printf(m, ":unlimited\n"); else seq_printf(m, ":count=%ld\n", data->count); return 0; } static int event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip, void **_data) { struct event_probe_data **pdata = (struct event_probe_data **)_data; struct event_probe_data *data = *pdata; data->ref++; return 0; } static void event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip, void **_data) { struct event_probe_data **pdata = (struct event_probe_data **)_data; struct event_probe_data *data = *pdata; if (WARN_ON_ONCE(data->ref <= 0)) return; data->ref--; if (!data->ref) { /* Remove the SOFT_MODE flag */ __ftrace_event_enable_disable(data->file, 0, 1); module_put(data->file->event_call->mod); kfree(data); } *pdata = NULL; } static struct ftrace_probe_ops event_enable_probe_ops = { .func = event_enable_probe, .print = event_enable_print, .init = event_enable_init, .free = event_enable_free, }; static struct ftrace_probe_ops event_enable_count_probe_ops = { .func = event_enable_count_probe, .print = event_enable_print, .init = event_enable_init, .free = 
event_enable_free, }; static struct ftrace_probe_ops event_disable_probe_ops = { .func = event_enable_probe, .print = event_enable_print, .init = event_enable_init, .free = event_enable_free, }; static struct ftrace_probe_ops event_disable_count_probe_ops = { .func = event_enable_count_probe, .print = event_enable_print, .init = event_enable_init, .free = event_enable_free, }; static int event_enable_func(struct ftrace_hash *hash, char *glob, char *cmd, char *param, int enabled) { struct trace_array *tr = top_trace_array(); struct ftrace_event_file *file; struct ftrace_probe_ops *ops; struct event_probe_data *data; const char *system; const char *event; char *number; bool enable; int ret; if (!tr) return -ENODEV; /* hash funcs only work with set_ftrace_filter */ if (!enabled || !param) return -EINVAL; system = strsep(&param, ":"); if (!param) return -EINVAL; event = strsep(&param, ":"); mutex_lock(&event_mutex); ret = -EINVAL; file = find_event_file(tr, system, event); if (!file) goto out; enable = strcmp(cmd, ENABLE_EVENT_STR) == 0; if (enable) ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops; else ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops; if (glob[0] == '!') { unregister_ftrace_function_probe_func(glob+1, ops); ret = 0; goto out; } ret = -ENOMEM; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) goto out; data->enable = enable; data->count = -1; data->file = file; if (!param) goto out_reg; number = strsep(&param, ":"); ret = -EINVAL; if (!strlen(number)) goto out_free; /* * We use the callback data field (which is a pointer) * as our counter. 
*/ ret = kstrtoul(number, 0, &data->count); if (ret) goto out_free; out_reg: /* Don't let event modules unload while probe registered */ ret = try_module_get(file->event_call->mod); if (!ret) { ret = -EBUSY; goto out_free; } ret = __ftrace_event_enable_disable(file, 1, 1); if (ret < 0) goto out_put; ret = register_ftrace_function_probe(glob, ops, data); /* * The above returns on success the # of functions enabled, * but if it didn't find any functions it returns zero. * Consider no functions a failure too. */ if (!ret) { ret = -ENOENT; goto out_disable; } else if (ret < 0) goto out_disable; /* Just return zero, not the number of enabled functions */ ret = 0; out: mutex_unlock(&event_mutex); return ret; out_disable: __ftrace_event_enable_disable(file, 0, 1); out_put: module_put(file->event_call->mod); out_free: kfree(data); goto out; } static struct ftrace_func_command event_enable_cmd = { .name = ENABLE_EVENT_STR, .func = event_enable_func, }; static struct ftrace_func_command event_disable_cmd = { .name = DISABLE_EVENT_STR, .func = event_enable_func, }; static __init int register_event_cmds(void) { int ret; ret = register_ftrace_command(&event_enable_cmd); if (WARN_ON(ret < 0)) return ret; ret = register_ftrace_command(&event_disable_cmd); if (WARN_ON(ret < 0)) unregister_ftrace_command(&event_enable_cmd); return ret; } #else static inline int register_event_cmds(void) { return 0; } #endif /* CONFIG_DYNAMIC_FTRACE */ /* * The top level array has already had its ftrace_event_file * descriptors created in order to allow for early events to * be recorded. This function is called after the debugfs has been * initialized, and we now have to create the files associated * to the events. 
 */
static __init void
__trace_early_add_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_file *file;
	int ret;

	list_for_each_entry(file, &tr->events, list) {
		ret = event_create_dir(tr->event_dir, file);
		if (ret < 0)
			pr_warn("Could not create directory for event %s\n",
				ftrace_event_name(file->event_call));
	}
}

/*
 * For early boot up, the top trace array requires to have
 * a list of events that can be enabled. This must be done before
 * the filesystem is set up in order to allow events to be traced
 * early.
 */
static __init void
__trace_early_add_events(struct trace_array *tr)
{
	struct ftrace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {

		/* Early boot up should not have any modules loaded */
		if (WARN_ON_ONCE(call->mod))
			continue;

		ret = __trace_early_add_new_event(call, tr);
		if (ret < 0)
			pr_warn("Could not create early event %s\n",
				ftrace_event_name(call));
	}
}

/* Remove the event directory structure for a trace directory. */
static void
__trace_remove_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_file *file, *next;

	list_for_each_entry_safe(file, next, &tr->events, list)
		remove_event_file_dir(file);
}

/* Create files for @call in every existing trace array. */
static void
__add_event_to_tracers(struct ftrace_event_call *call)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list)
		__trace_add_new_event(call, tr);
}

/* Linker-placed section markers bounding the built-in event calls. */
extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

/* Events requested on the kernel command line via trace_event=. */
static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

/*
 * Parse the trace_event= boot parameter: stash the list for
 * event_trace_enable() and make sure the ring buffer gets expanded.
 */
static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = true;
	tracing_selftest_disabled = true;

	return 1;
}
__setup("trace_event=", setup_trace_event);

/* Expects to have event_mutex held when called */
static int
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
{
	struct dentry *d_events;
	struct dentry *entry;

	entry = debugfs_create_file("set_event", 0644, parent, tr,
&ftrace_set_event_fops); if (!entry) { pr_warn("Could not create debugfs 'set_event' entry\n"); return -ENOMEM; } d_events = debugfs_create_dir("events", parent); if (!d_events) { pr_warn("Could not create debugfs 'events' directory\n"); return -ENOMEM; } /* ring buffer internal formats */ trace_create_file("header_page", 0444, d_events, ring_buffer_print_page_header, &ftrace_show_header_fops); trace_create_file("header_event", 0444, d_events, ring_buffer_print_entry_header, &ftrace_show_header_fops); trace_create_file("enable", 0644, d_events, tr, &ftrace_tr_enable_fops); tr->event_dir = d_events; return 0; } /** * event_trace_add_tracer - add a instance of a trace_array to events * @parent: The parent dentry to place the files/directories for events in * @tr: The trace array associated with these events * * When a new instance is created, it needs to set up its events * directory, as well as other files associated with events. It also * creates the event hierachry in the @parent/events directory. * * Returns 0 on success. */ int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr) { int ret; mutex_lock(&event_mutex); ret = create_event_toplevel_files(parent, tr); if (ret) goto out_unlock; down_write(&trace_event_sem); __trace_add_event_dirs(tr); up_write(&trace_event_sem); out_unlock: mutex_unlock(&event_mutex); return ret; } /* * The top trace array already had its file descriptors created. * Now the files themselves need to be created. 
static __init int
early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out_unlock;

	down_write(&trace_event_sem);
	__trace_early_add_event_dirs(tr);
	up_write(&trace_event_sem);

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}

/*
 * Detach a trace_array instance from the event machinery: disable its
 * triggers and running events, wait for in-flight sched-RCU readers,
 * then remove its event directory tree.
 */
int event_trace_del_tracer(struct trace_array *tr)
{
	mutex_lock(&event_mutex);

	/* Disable any event triggers and associated soft-disabled events */
	clear_event_triggers(tr);

	/* Disable any running events */
	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);

	/* Access to events are within rcu_read_lock_sched() */
	synchronize_sched();

	down_write(&trace_event_sem);
	__trace_remove_event_dirs(tr);
	debugfs_remove_recursive(tr->event_dir);
	up_write(&trace_event_sem);

	tr->event_dir = NULL;

	mutex_unlock(&event_mutex);

	return 0;
}

/* Set up the slab caches used for event fields and event files. */
static __init int event_trace_memsetup(void)
{
	field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
	file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
	return 0;
}

/*
 * Early (core_initcall) event setup: initialize every built-in event
 * call, attach them to the top trace array, and honor the trace_event=
 * boot parameter.
 */
static __init int event_trace_enable(void)
{
	struct trace_array *tr = top_trace_array();
	struct ftrace_event_call **iter, *call;
	char *buf = bootup_event_buf;
	char *token;
	int ret;

	if (!tr)
		return -ENODEV;

	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {

		call = *iter;
		ret = event_init(call);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	/*
	 * We need the top trace array to have a working set of trace
	 * points at early init, before the debug files and directories
	 * are created. Create the file entries now, and attach them
	 * to the actual file dentries later.
*/ __trace_early_add_events(tr); while (true) { token = strsep(&buf, ","); if (!token) break; if (!*token) continue; ret = ftrace_set_clr_event(tr, token, 1); if (ret) pr_warn("Failed to enable trace event: %s\n", token); } trace_printk_start_comm(); register_event_cmds(); register_trigger_cmds(); return 0; } static __init int event_trace_init(void) { struct trace_array *tr; struct dentry *d_tracer; struct dentry *entry; int ret; tr = top_trace_array(); if (!tr) return -ENODEV; d_tracer = tracing_init_dentry(); if (!d_tracer) return 0; entry = debugfs_create_file("available_events", 0444, d_tracer, tr, &ftrace_avail_fops); if (!entry) pr_warn("Could not create debugfs 'available_events' entry\n"); if (trace_define_common_fields()) pr_warn("tracing: Failed to allocate common fields"); ret = early_event_add_tracer(d_tracer, tr); if (ret) return ret; #ifdef CONFIG_MODULES ret = register_module_notifier(&trace_module_nb); if (ret) pr_warn("Failed to register trace events module notifier\n"); #endif return 0; } early_initcall(event_trace_memsetup); core_initcall(event_trace_enable); fs_initcall(event_trace_init); #ifdef CONFIG_FTRACE_STARTUP_TEST static DEFINE_SPINLOCK(test_spinlock); static DEFINE_SPINLOCK(test_spinlock_irq); static DEFINE_MUTEX(test_mutex); static __init void test_work(struct work_struct *dummy) { spin_lock(&test_spinlock); spin_lock_irq(&test_spinlock_irq); udelay(1); spin_unlock_irq(&test_spinlock_irq); spin_unlock(&test_spinlock); mutex_lock(&test_mutex); msleep(1); mutex_unlock(&test_mutex); } static __init int event_test_thread(void *unused) { void *test_malloc; test_malloc = kmalloc(1234, GFP_KERNEL); if (!test_malloc) pr_info("failed to kmalloc\n"); schedule_on_each_cpu(test_work); kfree(test_malloc); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { schedule(); set_current_state(TASK_INTERRUPTIBLE); } __set_current_state(TASK_RUNNING); return 0; } /* * Do various things that may trigger events. 
*/ static __init void event_test_stuff(void) { struct task_struct *test_thread; test_thread = kthread_run(event_test_thread, NULL, "test-events"); msleep(1); kthread_stop(test_thread); } /* * For every trace event defined, we will test each trace point separately, * and then by groups, and finally all trace points. */ static __init void event_trace_self_tests(void) { struct ftrace_subsystem_dir *dir; struct ftrace_event_file *file; struct ftrace_event_call *call; struct event_subsystem *system; struct trace_array *tr; int ret; tr = top_trace_array(); if (!tr) return; pr_info("Running tests on trace events:\n"); list_for_each_entry(file, &tr->events, list) { call = file->event_call; /* Only test those that have a probe */ if (!call->class || !call->class->probe) continue; /* * Testing syscall events here is pretty useless, but * we still do it if configured. But this is time consuming. * What we really need is a user thread to perform the * syscalls as we test. */ #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS if (call->class->system && strcmp(call->class->system, "syscalls") == 0) continue; #endif pr_info("Testing event %s: ", ftrace_event_name(call)); /* * If an event is already enabled, someone is using * it and the self test should not be on. 
*/ if (file->flags & FTRACE_EVENT_FL_ENABLED) { pr_warn("Enabled event during self test!\n"); WARN_ON_ONCE(1); continue; } ftrace_event_enable_disable(file, 1); event_test_stuff(); ftrace_event_enable_disable(file, 0); pr_cont("OK\n"); } /* Now test at the sub system level */ pr_info("Running tests on trace event systems:\n"); list_for_each_entry(dir, &tr->systems, list) { system = dir->subsystem; /* the ftrace system is special, skip it */ if (strcmp(system->name, "ftrace") == 0) continue; pr_info("Testing event system %s: ", system->name); ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1); if (WARN_ON_ONCE(ret)) { pr_warn("error enabling system %s\n", system->name); continue; } event_test_stuff(); ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0); if (WARN_ON_ONCE(ret)) { pr_warn("error disabling system %s\n", system->name); continue; } pr_cont("OK\n"); } /* Test with all events enabled */ pr_info("Running tests on all trace events:\n"); pr_info("Testing all events: "); ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1); if (WARN_ON_ONCE(ret)) { pr_warn("error enabling all events\n"); return; } event_test_stuff(); /* reset sysname */ ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0); if (WARN_ON_ONCE(ret)) { pr_warn("error disabling all events\n"); return; } pr_cont("OK\n"); } #ifdef CONFIG_FUNCTION_TRACER static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); static void function_test_events_call(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct pt_regs *pt_regs) { struct ring_buffer_event *event; struct ring_buffer *buffer; struct ftrace_entry *entry; unsigned long flags; long disabled; int cpu; int pc; pc = preempt_count(); preempt_disable_notrace(); cpu = raw_smp_processor_id(); disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); if (disabled != 1) goto out; local_save_flags(flags); event = trace_current_buffer_lock_reserve(&buffer, TRACE_FN, sizeof(*entry), flags, pc); if (!event) 
goto out; entry = ring_buffer_event_data(event); entry->ip = ip; entry->parent_ip = parent_ip; trace_buffer_unlock_commit(buffer, event, flags, pc); out: atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); preempt_enable_notrace(); } static struct ftrace_ops trace_ops __initdata = { .func = function_test_events_call, .flags = FTRACE_OPS_FL_RECURSION_SAFE, }; static __init void event_trace_self_test_with_function(void) { int ret; ret = register_ftrace_function(&trace_ops); if (WARN_ON(ret < 0)) { pr_info("Failed to enable function tracer for event tests\n"); return; } pr_info("Running tests again, along with the function tracer\n"); event_trace_self_tests(); unregister_ftrace_function(&trace_ops); } #else static __init void event_trace_self_test_with_function(void) { } #endif static __init int event_trace_self_tests_init(void) { if (!tracing_selftest_disabled) { event_trace_self_tests(); event_trace_self_test_with_function(); } return 0; } late_initcall(event_trace_self_tests_init); #endif
gpl-2.0
ignacio28/android_kernel_lge_msm8610-2
arch/arm/mach-msm/lge/board-8610-w5-open-eu-gpiomux.c
78
14897
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/init.h> #include <linux/ioport.h> #include <mach/board.h> #include <mach/gpio.h> #include <mach/gpiomux.h> #include <mach/socinfo.h> #include <mach/board_lge.h> static struct gpiomux_setting gpio_i2c_config = { .func = GPIOMUX_FUNC_3, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; /* GPIO I2C */ static struct gpiomux_setting gpio_common_i2c_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting gpio_cam_i2c_config = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting wcnss_5wire_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting wcnss_5wire_active_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_6MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting lcd_en_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_HIGH, }; static struct gpiomux_setting lcd_en_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting lcd_te_act_config = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, .dir = GPIOMUX_IN, }; static struct gpiomux_setting lcd_te_sus_config = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, .dir = GPIOMUX_IN, }; static struct gpiomux_setting 
gpio_keys_active = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting gpio_keys_suspend = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; /* define gpio used as interrupt input */ static struct gpiomux_setting gpio_int_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_IN, }; static struct gpiomux_setting gpio_int_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_IN, }; static struct msm_gpiomux_config msm_gpio_int_configs[] __initdata = { { .gpio = 84, .settings = { [GPIOMUX_ACTIVE] = &gpio_int_act_cfg, [GPIOMUX_SUSPENDED] = &gpio_int_sus_cfg, }, }, }; static struct msm_gpiomux_config msm_lcd_configs[] __initdata = { { .gpio = 41, .settings = { [GPIOMUX_ACTIVE] = &lcd_en_act_cfg, [GPIOMUX_SUSPENDED] = &lcd_en_sus_cfg, }, }, { .gpio = 12, .settings = { [GPIOMUX_ACTIVE] = &lcd_te_act_config, [GPIOMUX_SUSPENDED] = &lcd_te_sus_config, }, }, }; static struct msm_gpiomux_config msm_blsp_configs[] __initdata = { { .gpio = 2, /* BLSP1 QUP1 I2C_SDA */ .settings = { [GPIOMUX_ACTIVE] = &gpio_i2c_config, [GPIOMUX_SUSPENDED] = &gpio_i2c_config, }, }, { .gpio = 3, /* BLSP1 QUP1 I2C_SCL */ .settings = { [GPIOMUX_ACTIVE] = &gpio_i2c_config, [GPIOMUX_SUSPENDED] = &gpio_i2c_config, }, }, { .gpio = 4, /* GPIO I2C_SDA */ .settings = { [GPIOMUX_ACTIVE] = &gpio_common_i2c_config, [GPIOMUX_SUSPENDED] = &gpio_common_i2c_config, }, }, { .gpio = 5, /* GPIO I2C_SCL */ .settings = { [GPIOMUX_ACTIVE] = &gpio_common_i2c_config, [GPIOMUX_SUSPENDED] = &gpio_common_i2c_config, }, }, { .gpio = 10, /* BLSP1 QUP3 I2C_SDA */ .settings = { [GPIOMUX_ACTIVE] = &gpio_i2c_config, [GPIOMUX_SUSPENDED] = &gpio_i2c_config, }, }, { .gpio = 11, /* BLSP1 QUP3 I2C_SCL */ .settings = { [GPIOMUX_ACTIVE] = &gpio_i2c_config, [GPIOMUX_SUSPENDED] = &gpio_i2c_config, }, }, { .gpio = 16, /* BLSP1 QUP6 I2C_SDA */ .settings = 
{ [GPIOMUX_ACTIVE] = &gpio_cam_i2c_config, [GPIOMUX_SUSPENDED] = &gpio_cam_i2c_config, }, }, { .gpio = 17, /* BLSP1 QUP6 I2C_SCL */ .settings = { [GPIOMUX_ACTIVE] = &gpio_cam_i2c_config, [GPIOMUX_SUSPENDED] = &gpio_cam_i2c_config, }, }, }; static struct msm_gpiomux_config wcnss_5wire_interface[] = { { .gpio = 23, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg, }, }, { .gpio = 24, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg, }, }, { .gpio = 25, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg, }, }, { .gpio = 26, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg, }, }, { .gpio = 27, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg, }, }, }; static struct gpiomux_setting gpio_suspend_config[] = { { .func = GPIOMUX_FUNC_GPIO, /* IN-NP */ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_GPIO, /* O-LOW */ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_LOW, }, }; static struct gpiomux_setting cam_settings[] = { { .func = GPIOMUX_FUNC_1, /*active 1*/ /* 0 */ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_1, /*suspend*/ /* 1 */ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }, { .func = GPIOMUX_FUNC_1, /*i2c suspend*/ /* 2 */ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_KEEPER, }, { .func = GPIOMUX_FUNC_GPIO, /*active 0*/ /* 3 */ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_GPIO, /*suspend 0*/ /* 4 */ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }, }; /* */ static struct msm_gpiomux_config msm_sensor_configs[] __initdata = { { .gpio = 13, /* CAM_MCLK0 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &cam_settings[1], }, }, { .gpio = 14, /* 
CAM_MCLK1 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &cam_settings[1], }, }, { .gpio = 15, /* VT_CAM_RESET_N */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, { .gpio = 16, /* CAM_I2C_SDA */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[0], }, }, { .gpio = 17, /* CAM_I2C_SCL */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[0], }, }, { .gpio = 18, /* FLASH_LED_EN */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, { .gpio = 20, /* CAM1_STANDBY_N */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, { .gpio = 21, /* CAM1_RESET_N */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, { .gpio = 91, /* VANA_LDO_EN */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, }; static struct msm_gpiomux_config msm_sensor_configs_rev_b[] __initdata = { { .gpio = 13, /* CAM_MCLK0 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &cam_settings[1], }, }, { .gpio = 14, /* CAM_MCLK1 */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &cam_settings[1], }, }, { .gpio = 15, /* VT_CAM_RESET_N */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, { .gpio = 16, /* CAM_I2C_SDA */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[0], }, }, { .gpio = 17, /* CAM_I2C_SCL */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[0], }, }, { .gpio = 18, /* FLASH_LED_EN */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[0], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, { .gpio = 20, /* CAM1_STANDBY_N */ .settings = { 
[GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, { .gpio = 21, /* CAM1_RESET_N */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, { .gpio = 85, /* VANA_LDO_EN */ .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &gpio_suspend_config[1], }, }, }; static struct msm_gpiomux_config msm_keypad_configs[] __initdata = { { .gpio = 72, /* Volume UP */ .settings = { [GPIOMUX_ACTIVE] = &gpio_keys_active, [GPIOMUX_SUSPENDED] = &gpio_keys_suspend, }, }, { .gpio = 73, /* Volume Down */ .settings = { [GPIOMUX_ACTIVE] = &gpio_keys_active, [GPIOMUX_SUSPENDED] = &gpio_keys_suspend, }, }, { .gpio = 74, /* Home */ .settings = { [GPIOMUX_ACTIVE] = &gpio_keys_active, [GPIOMUX_SUSPENDED] = &gpio_keys_suspend, }, }, { .gpio = 75, /* Q-Memo */ .settings = { [GPIOMUX_ACTIVE] = &gpio_keys_active, [GPIOMUX_SUSPENDED] = &gpio_keys_suspend, }, }, }; static struct gpiomux_setting sd_card_det_active_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_IN, }; #ifdef CONFIG_MACH_LGE static struct gpiomux_setting sd_card_det_suspend_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_IN, }; #else static struct gpiomux_setting sd_card_det_suspend_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_IN, }; #endif static struct msm_gpiomux_config sd_card_det[] __initdata = { { .gpio = 42, .settings = { [GPIOMUX_ACTIVE] = &sd_card_det_active_config, [GPIOMUX_SUSPENDED] = &sd_card_det_suspend_config, }, }, }; static struct gpiomux_setting mms100s_ts_int_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_IN, }; static struct gpiomux_setting mms100s_ts_int_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_IN, }; static struct 
gpiomux_setting mms100s_ts_ldo_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_LOW, }; static struct gpiomux_setting mms100s_ts_ldo_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_LOW, }; static struct msm_gpiomux_config mms100s_ts_configs[] __initdata = { { .gpio = 0, /* MMS100s INT */ .settings = { [GPIOMUX_ACTIVE] = &mms100s_ts_int_act_cfg, [GPIOMUX_SUSPENDED] = &mms100s_ts_int_sus_cfg, }, }, { .gpio = 62, /* GPIO LDO */ .settings = { [GPIOMUX_ACTIVE] = &mms100s_ts_ldo_act_cfg, [GPIOMUX_SUSPENDED] = &mms100s_ts_ldo_sus_cfg, }, }, }; static struct gpiomux_setting aps_ts_int_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_IN, }; static struct gpiomux_setting aps_ts_int_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_IN, }; static struct gpiomux_setting aps_ts_reset_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_HIGH, }; static struct gpiomux_setting aps_ts_reset_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_LOW, }; static struct msm_gpiomux_config aps_ts_configs[] __initdata = { { .gpio = 0, /* APS_TS RESET */ .settings = { [GPIOMUX_ACTIVE] = &aps_ts_reset_act_cfg, [GPIOMUX_SUSPENDED] = &aps_ts_reset_sus_cfg, }, }, { .gpio = 1, /* APS_TS INT */ .settings = { [GPIOMUX_ACTIVE] = &aps_ts_int_act_cfg, [GPIOMUX_SUSPENDED] = &aps_ts_int_sus_cfg, }, }, }; static struct gpiomux_setting ags04_int_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_IN, }; static struct gpiomux_setting ags04_int_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_IN, }; static struct msm_gpiomux_config ags04_ts_configs[] __initdata = { { 
.gpio = 82, .settings = { [GPIOMUX_ACTIVE] = &ags04_int_act_cfg, [GPIOMUX_SUSPENDED] = &ags04_int_sus_cfg, }, }, }; void __init msm8610_init_gpiomux(void) { int rc; hw_rev_type revision = lge_get_board_revno(); rc = msm_gpiomux_init_dt(); if (rc) { pr_err("%s failed %d\n", __func__, rc); return; } msm_gpiomux_install(msm_blsp_configs, ARRAY_SIZE(msm_blsp_configs)); if (revision > HW_REV_A) { msm_gpiomux_install(mms100s_ts_configs, ARRAY_SIZE(mms100s_ts_configs)); } else { msm_gpiomux_install(aps_ts_configs, ARRAY_SIZE(aps_ts_configs)); msm_gpiomux_install(ags04_ts_configs, ARRAY_SIZE(ags04_ts_configs)); } msm_gpiomux_install(wcnss_5wire_interface, ARRAY_SIZE(wcnss_5wire_interface)); msm_gpiomux_install_nowrite(msm_lcd_configs, ARRAY_SIZE(msm_lcd_configs)); msm_gpiomux_install(msm_keypad_configs, ARRAY_SIZE(msm_keypad_configs)); msm_gpiomux_install(sd_card_det, ARRAY_SIZE(sd_card_det)); /* */ if(revision == HW_REV_0) { msm_gpiomux_install(msm_sensor_configs, ARRAY_SIZE(msm_sensor_configs)); printk(KERN_ERR " [Camera] below HW_REV_0 is using power source from PM\n"); } else if(revision == HW_REV_A) { msm_gpiomux_install(msm_sensor_configs, ARRAY_SIZE(msm_sensor_configs)); printk(KERN_ERR " [Camera] greater than HW_REV_A is using power source from Ex-LDO used GPIO\n"); } else { msm_gpiomux_install(msm_sensor_configs_rev_b, ARRAY_SIZE(msm_sensor_configs_rev_b)); printk(KERN_ERR " [Camera] In greater than HW_REV_B, MAIN_CAM0_RESET_N has been changed from GPIO_98 to GPIO_114\n"); } /* */ msm_gpiomux_install(msm_gpio_int_configs, ARRAY_SIZE(msm_gpio_int_configs)); }
gpl-2.0
alexey6600/kernel_sony_tetra_2
drivers/net/virtio_net.c
78
44166
/* A network driver using virtio. * * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ //#define DEBUG #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/module.h> #include <linux/virtio.h> #include <linux/virtio_net.h> #include <linux/scatterlist.h> #include <linux/if_vlan.h> #include <linux/slab.h> #include <linux/cpu.h> static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); static bool csum = true, gso = true; module_param(csum, bool, 0444); module_param(gso, bool, 0444); /* FIXME: MTU in config. 
*/ #define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) #define GOOD_COPY_LEN 128 #define VIRTNET_DRIVER_VERSION "1.0.0" struct virtnet_stats { struct u64_stats_sync tx_syncp; struct u64_stats_sync rx_syncp; u64 tx_bytes; u64 tx_packets; u64 rx_bytes; u64 rx_packets; }; /* Internal representation of a send virtqueue */ struct send_queue { /* Virtqueue associated with this send _queue */ struct virtqueue *vq; /* TX: fragments + linear part + virtio header */ struct scatterlist sg[MAX_SKB_FRAGS + 2]; /* Name of the send queue: output.$index */ char name[40]; }; /* Internal representation of a receive virtqueue */ struct receive_queue { /* Virtqueue associated with this receive_queue */ struct virtqueue *vq; struct napi_struct napi; /* Number of input buffers, and max we've ever had. */ unsigned int num, max; /* Chain pages by the private ptr. */ struct page *pages; /* RX: fragments + linear part + virtio header */ struct scatterlist sg[MAX_SKB_FRAGS + 2]; /* Name of this receive queue: input.$index */ char name[40]; }; struct virtnet_info { struct virtio_device *vdev; struct virtqueue *cvq; struct net_device *dev; struct send_queue *sq; struct receive_queue *rq; unsigned int status; /* Max # of queue pairs supported by the device */ u16 max_queue_pairs; /* # of queue pairs currently used by the driver */ u16 curr_queue_pairs; /* I like... big packets and I cannot lie! */ bool big_packets; /* Host will merge rx buffers for big packets (shake it! shake it!) */ bool mergeable_rx_bufs; /* Has control virtqueue */ bool has_cvq; /* enable config space updates */ bool config_enable; /* Active statistics */ struct virtnet_stats __percpu *stats; /* Work struct for refilling if we run low on memory. */ struct delayed_work refill; /* Work struct for config space updates */ struct work_struct config_work; /* Lock for config space updates */ struct mutex config_lock; /* Does the affinity hint is set for virtqueues? 
*/ bool affinity_hint_set; /* Per-cpu variable to show the mapping from CPU to virtqueue */ int __percpu *vq_index; /* CPU hot plug notifier */ struct notifier_block nb; }; struct skb_vnet_hdr { union { struct virtio_net_hdr hdr; struct virtio_net_hdr_mrg_rxbuf mhdr; }; }; struct padded_vnet_hdr { struct virtio_net_hdr hdr; /* * virtio_net_hdr should be in a separated sg buffer because of a * QEMU bug, and data sg buffer shares same page with this header sg. * This padding makes next sg 16 byte aligned after virtio_net_hdr. */ char padding[6]; }; /* Converting between virtqueue no. and kernel tx/rx queue no. * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq */ static int vq2txq(struct virtqueue *vq) { return (vq->index - 1) / 2; } static int txq2vq(int txq) { return txq * 2 + 1; } static int vq2rxq(struct virtqueue *vq) { return vq->index / 2; } static int rxq2vq(int rxq) { return rxq * 2; } static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb) { return (struct skb_vnet_hdr *)skb->cb; } /* * private is used to chain pages for big packets, put the whole * most recent used list in the beginning for reuse */ static void give_pages(struct receive_queue *rq, struct page *page) { struct page *end; /* Find end of list, sew whole thing into vi->rq.pages. */ for (end = page; end->private; end = (struct page *)end->private); end->private = (unsigned long)rq->pages; rq->pages = page; } static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) { struct page *p = rq->pages; if (p) { rq->pages = (struct page *)p->private; /* clear private here, it is used to chain pages */ p->private = 0; } else p = alloc_page(gfp_mask); return p; } static void skb_xmit_done(struct virtqueue *vq) { struct virtnet_info *vi = vq->vdev->priv; /* Suppress further interrupts. */ virtqueue_disable_cb(vq); /* We were probably waiting for more output buffers. 
*/ netif_wake_subqueue(vi->dev, vq2txq(vq)); } static void set_skb_frag(struct sk_buff *skb, struct page *page, unsigned int offset, unsigned int *len) { int size = min((unsigned)PAGE_SIZE - offset, *len); int i = skb_shinfo(skb)->nr_frags; __skb_fill_page_desc(skb, i, page, offset, size); skb->data_len += size; skb->len += size; skb->truesize += PAGE_SIZE; skb_shinfo(skb)->nr_frags++; skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; *len -= size; } /* Called from bottom half context */ static struct sk_buff *page_to_skb(struct receive_queue *rq, struct page *page, unsigned int len) { struct virtnet_info *vi = rq->vq->vdev->priv; struct sk_buff *skb; struct skb_vnet_hdr *hdr; unsigned int copy, hdr_len, offset; char *p; p = page_address(page); /* copy small packet so we can reuse these pages for small data */ skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN); if (unlikely(!skb)) return NULL; hdr = skb_vnet_hdr(skb); if (vi->mergeable_rx_bufs) { hdr_len = sizeof hdr->mhdr; offset = hdr_len; } else { hdr_len = sizeof hdr->hdr; offset = sizeof(struct padded_vnet_hdr); } memcpy(hdr, p, hdr_len); len -= hdr_len; p += offset; copy = len; if (copy > skb_tailroom(skb)) copy = skb_tailroom(skb); memcpy(skb_put(skb, copy), p, copy); len -= copy; offset += copy; /* * Verify that we can indeed put this data into a skb. * This is here to handle cases when the device erroneously * tries to receive more than is possible. This is usually * the case of a broken device. 
*/ if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { net_dbg_ratelimited("%s: too much data\n", skb->dev->name); dev_kfree_skb(skb); return NULL; } while (len) { set_skb_frag(skb, page, offset, &len); page = (struct page *)page->private; offset = 0; } if (page) give_pages(rq, page); return skb; } static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb) { struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); struct page *page; int num_buf, i, len; num_buf = hdr->mhdr.num_buffers; while (--num_buf) { i = skb_shinfo(skb)->nr_frags; if (i >= MAX_SKB_FRAGS) { pr_debug("%s: packet too long\n", skb->dev->name); skb->dev->stats.rx_length_errors++; return -EINVAL; } page = virtqueue_get_buf(rq->vq, &len); if (!page) { pr_debug("%s: rx error: %d buffers missing\n", skb->dev->name, hdr->mhdr.num_buffers); skb->dev->stats.rx_length_errors++; return -EINVAL; } if (len > PAGE_SIZE) len = PAGE_SIZE; set_skb_frag(skb, page, 0, &len); --rq->num; } return 0; } static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) { struct virtnet_info *vi = rq->vq->vdev->priv; struct net_device *dev = vi->dev; struct virtnet_stats *stats = this_cpu_ptr(vi->stats); struct sk_buff *skb; struct page *page; struct skb_vnet_hdr *hdr; if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); dev->stats.rx_length_errors++; if (vi->mergeable_rx_bufs || vi->big_packets) give_pages(rq, buf); else dev_kfree_skb(buf); return; } if (!vi->mergeable_rx_bufs && !vi->big_packets) { skb = buf; len -= sizeof(struct virtio_net_hdr); skb_trim(skb, len); } else { page = buf; skb = page_to_skb(rq, page, len); if (unlikely(!skb)) { dev->stats.rx_dropped++; give_pages(rq, page); return; } if (vi->mergeable_rx_bufs) if (receive_mergeable(rq, skb)) { dev_kfree_skb(skb); return; } } hdr = skb_vnet_hdr(skb); u64_stats_update_begin(&stats->rx_syncp); stats->rx_bytes += skb->len; stats->rx_packets++; u64_stats_update_end(&stats->rx_syncp); 
if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { pr_debug("Needs csum!\n"); if (!skb_partial_csum_set(skb, hdr->hdr.csum_start, hdr->hdr.csum_offset)) goto frame_err; } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) { skb->ip_summed = CHECKSUM_UNNECESSARY; } skb->protocol = eth_type_trans(skb, dev); pr_debug("Receiving skb proto 0x%04x len %i type %i\n", ntohs(skb->protocol), skb->len, skb->pkt_type); if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { pr_debug("GSO!\n"); switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case VIRTIO_NET_HDR_GSO_TCPV4: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; break; case VIRTIO_NET_HDR_GSO_UDP: skb_shinfo(skb)->gso_type = SKB_GSO_UDP; break; case VIRTIO_NET_HDR_GSO_TCPV6: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; break; default: net_warn_ratelimited("%s: bad gso type %u.\n", dev->name, hdr->hdr.gso_type); goto frame_err; } if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN) skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; skb_shinfo(skb)->gso_size = hdr->hdr.gso_size; if (skb_shinfo(skb)->gso_size == 0) { net_warn_ratelimited("%s: zero gso size.\n", dev->name); goto frame_err; } /* Header must be checked, and gso_segs computed. 
*/ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; skb_shinfo(skb)->gso_segs = 0; } netif_receive_skb(skb); return; frame_err: dev->stats.rx_frame_errors++; dev_kfree_skb(skb); } static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp) { struct virtnet_info *vi = rq->vq->vdev->priv; struct sk_buff *skb; struct skb_vnet_hdr *hdr; int err; skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp); if (unlikely(!skb)) return -ENOMEM; skb_put(skb, MAX_PACKET_LEN); hdr = skb_vnet_hdr(skb); sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr); skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); if (err < 0) dev_kfree_skb(skb); return err; } static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp) { struct page *first, *list = NULL; char *p; int i, err, offset; /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */ for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { first = get_a_page(rq, gfp); if (!first) { if (list) give_pages(rq, list); return -ENOMEM; } sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); /* chain new page in list head to match sg */ first->private = (unsigned long)list; list = first; } first = get_a_page(rq, gfp); if (!first) { give_pages(rq, list); return -ENOMEM; } p = page_address(first); /* rq->sg[0], rq->sg[1] share the same page */ /* a separated rq->sg[0] for virtio_net_hdr only due to QEMU bug */ sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr)); /* rq->sg[1] for data packet, from offset */ offset = sizeof(struct padded_vnet_hdr); sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); /* chain first in list head */ first->private = (unsigned long)list; err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, first, gfp); if (err < 0) give_pages(rq, first); return err; } static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) { struct page *page; int err; page = get_a_page(rq, gfp); if (!page) return -ENOMEM; sg_init_one(rq->sg, page_address(page), PAGE_SIZE); err = 
virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp); if (err < 0) give_pages(rq, page); return err; } /* * Returns false if we couldn't fill entirely (OOM). * * Normally run in the receive path, but can also be run from ndo_open * before we're receiving packets, or from refill_work which is * careful to disable receiving (using napi_disable). */ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp) { struct virtnet_info *vi = rq->vq->vdev->priv; int err; bool oom; do { if (vi->mergeable_rx_bufs) err = add_recvbuf_mergeable(rq, gfp); else if (vi->big_packets) err = add_recvbuf_big(rq, gfp); else err = add_recvbuf_small(rq, gfp); oom = err == -ENOMEM; if (err) break; ++rq->num; } while (rq->vq->num_free); if (unlikely(rq->num > rq->max)) rq->max = rq->num; virtqueue_kick(rq->vq); return !oom; } static void skb_recv_done(struct virtqueue *rvq) { struct virtnet_info *vi = rvq->vdev->priv; struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; /* Schedule NAPI, Suppress further interrupts if successful. */ if (napi_schedule_prep(&rq->napi)) { virtqueue_disable_cb(rvq); __napi_schedule(&rq->napi); } } static void virtnet_napi_enable(struct receive_queue *rq) { napi_enable(&rq->napi); /* If all buffers were filled by other side before we napi_enabled, we * won't get another interrupt, so process any outstanding packets * now. virtnet_poll wants re-enable the queue, so we disable here. 
* We synchronize against interrupts via NAPI_STATE_SCHED */ if (napi_schedule_prep(&rq->napi)) { virtqueue_disable_cb(rq->vq); local_bh_disable(); __napi_schedule(&rq->napi); local_bh_enable(); } } static void refill_work(struct work_struct *work) { struct virtnet_info *vi = container_of(work, struct virtnet_info, refill.work); bool still_empty; int i; for (i = 0; i < vi->curr_queue_pairs; i++) { struct receive_queue *rq = &vi->rq[i]; napi_disable(&rq->napi); still_empty = !try_fill_recv(rq, GFP_KERNEL); virtnet_napi_enable(rq); /* In theory, this can happen: if we don't get any buffers in * we will *never* try to fill again. */ if (still_empty) schedule_delayed_work(&vi->refill, HZ/2); } } static int virtnet_poll(struct napi_struct *napi, int budget) { struct receive_queue *rq = container_of(napi, struct receive_queue, napi); struct virtnet_info *vi = rq->vq->vdev->priv; void *buf; unsigned int r, len, received = 0; again: while (received < budget && (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { receive_buf(rq, buf, len); --rq->num; received++; } if (rq->num < rq->max / 2) { if (!try_fill_recv(rq, GFP_ATOMIC)) schedule_delayed_work(&vi->refill, 0); } /* Out of packets? */ if (received < budget) { r = virtqueue_enable_cb_prepare(rq->vq); napi_complete(napi); if (unlikely(virtqueue_poll(rq->vq, r)) && napi_schedule_prep(napi)) { virtqueue_disable_cb(rq->vq); __napi_schedule(napi); goto again; } } return received; } static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int i; for (i = 0; i < vi->max_queue_pairs; i++) { if (i < vi->curr_queue_pairs) /* Make sure we have some buffers: if oom use wq. 
*/ if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); virtnet_napi_enable(&vi->rq[i]); } return 0; } static void free_old_xmit_skbs(struct send_queue *sq) { struct sk_buff *skb; unsigned int len; struct virtnet_info *vi = sq->vq->vdev->priv; struct virtnet_stats *stats = this_cpu_ptr(vi->stats); while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { pr_debug("Sent skb %p\n", skb); u64_stats_update_begin(&stats->tx_syncp); stats->tx_bytes += skb->len; stats->tx_packets++; u64_stats_update_end(&stats->tx_syncp); dev_kfree_skb_any(skb); } } static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) { struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; struct virtnet_info *vi = sq->vq->vdev->priv; unsigned num_sg; pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); if (skb->ip_summed == CHECKSUM_PARTIAL) { hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; hdr->hdr.csum_start = skb_checksum_start_offset(skb); hdr->hdr.csum_offset = skb->csum_offset; } else { hdr->hdr.flags = 0; hdr->hdr.csum_offset = hdr->hdr.csum_start = 0; } if (skb_is_gso(skb)) { hdr->hdr.hdr_len = skb_headlen(skb); hdr->hdr.gso_size = skb_shinfo(skb)->gso_size; if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; else BUG(); if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; } else { hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; hdr->hdr.gso_size = hdr->hdr.hdr_len = 0; } hdr->mhdr.num_buffers = 0; /* Encode metadata header at front. 
*/ if (vi->mergeable_rx_bufs) sg_set_buf(sq->sg, &hdr->mhdr, sizeof hdr->mhdr); else sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr); num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); } static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int qnum = skb_get_queue_mapping(skb); struct send_queue *sq = &vi->sq[qnum]; int err; /* Free up any pending old buffers before queueing new ones. */ free_old_xmit_skbs(sq); /* Try to transmit */ err = xmit_skb(sq, skb); /* This should not happen! */ if (unlikely(err)) { dev->stats.tx_fifo_errors++; if (net_ratelimit()) dev_warn(&dev->dev, "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); dev->stats.tx_dropped++; kfree_skb(skb); return NETDEV_TX_OK; } virtqueue_kick(sq->vq); /* Don't wait up for transmitted skbs to be freed. */ skb_orphan(skb); nf_reset(skb); /* Apparently nice girls don't return TX_BUSY; stop the queue * before it gets out of hand. Naturally, this wastes entries. */ if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { netif_stop_subqueue(dev, qnum); if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { /* More just got used, free them then recheck. */ free_old_xmit_skbs(sq); if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { netif_start_subqueue(dev, qnum); virtqueue_disable_cb(sq->vq); } } } return NETDEV_TX_OK; } /* * Send command via the control virtqueue and check status. Commands * supported by the hypervisor, as indicated by feature bits, should * never fail unless improperly formated. 
*/ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, struct scatterlist *out, struct scatterlist *in) { struct scatterlist *sgs[4], hdr, stat; struct virtio_net_ctrl_hdr ctrl; virtio_net_ctrl_ack status = ~0; unsigned out_num = 0, in_num = 0, tmp; /* Caller should know better */ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); ctrl.class = class; ctrl.cmd = cmd; /* Add header */ sg_init_one(&hdr, &ctrl, sizeof(ctrl)); sgs[out_num++] = &hdr; if (out) sgs[out_num++] = out; if (in) sgs[out_num + in_num++] = in; /* Add return status. */ sg_init_one(&stat, &status, sizeof(status)); sgs[out_num + in_num++] = &stat; BUG_ON(out_num + in_num > ARRAY_SIZE(sgs)); BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC) < 0); virtqueue_kick(vi->cvq); /* Spin for a response, the kick causes an ioport write, trapping * into the hypervisor, so the request should be handled immediately. */ while (!virtqueue_get_buf(vi->cvq, &tmp)) cpu_relax(); return status == VIRTIO_NET_OK; } static int virtnet_set_mac_address(struct net_device *dev, void *p) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; int ret; struct sockaddr *addr = p; struct scatterlist sg; ret = eth_prepare_mac_addr_change(dev, p); if (ret) return ret; if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { sg_init_one(&sg, addr->sa_data, dev->addr_len); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg, NULL)) { dev_warn(&vdev->dev, "Failed to set mac address by vq command.\n"); return -EINVAL; } } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { vdev->config->set(vdev, offsetof(struct virtio_net_config, mac), addr->sa_data, dev->addr_len); } eth_commit_mac_addr_change(dev, p); return 0; } static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, struct rtnl_link_stats64 *tot) { struct virtnet_info *vi = netdev_priv(dev); int cpu; unsigned int start; 
for_each_possible_cpu(cpu) { struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); u64 tpackets, tbytes, rpackets, rbytes; do { start = u64_stats_fetch_begin_bh(&stats->tx_syncp); tpackets = stats->tx_packets; tbytes = stats->tx_bytes; } while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start)); do { start = u64_stats_fetch_begin_bh(&stats->rx_syncp); rpackets = stats->rx_packets; rbytes = stats->rx_bytes; } while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start)); tot->rx_packets += rpackets; tot->tx_packets += tpackets; tot->rx_bytes += rbytes; tot->tx_bytes += tbytes; } tot->tx_dropped = dev->stats.tx_dropped; tot->tx_fifo_errors = dev->stats.tx_fifo_errors; tot->rx_dropped = dev->stats.rx_dropped; tot->rx_length_errors = dev->stats.rx_length_errors; tot->rx_frame_errors = dev->stats.rx_frame_errors; return tot; } #ifdef CONFIG_NET_POLL_CONTROLLER static void virtnet_netpoll(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int i; for (i = 0; i < vi->curr_queue_pairs; i++) napi_schedule(&vi->rq[i].napi); } #endif static void virtnet_ack_link_announce(struct virtnet_info *vi) { rtnl_lock(); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL)) dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); rtnl_unlock(); } static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) { struct scatterlist sg; struct virtio_net_ctrl_mq s; struct net_device *dev = vi->dev; if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) return 0; s.virtqueue_pairs = queue_pairs; sg_init_one(&sg, &s, sizeof(s)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) { dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", queue_pairs); return -EINVAL; } else { vi->curr_queue_pairs = queue_pairs; /* virtnet_open() will refill when device is going to up. 
*/ if (dev->flags & IFF_UP) schedule_delayed_work(&vi->refill, 0); } return 0; } static int virtnet_close(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int i; /* Make sure refill_work doesn't re-enable napi! */ cancel_delayed_work_sync(&vi->refill); for (i = 0; i < vi->max_queue_pairs; i++) napi_disable(&vi->rq[i].napi); return 0; } static void virtnet_set_rx_mode(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg[2]; u8 promisc, allmulti; struct virtio_net_ctrl_mac *mac_data; struct netdev_hw_addr *ha; int uc_count; int mc_count; void *buf; int i; /* We can't dynamicaly set ndo_set_rx_mode, so return gracefully */ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) return; promisc = ((dev->flags & IFF_PROMISC) != 0); allmulti = ((dev->flags & IFF_ALLMULTI) != 0); sg_init_one(sg, &promisc, sizeof(promisc)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC, sg, NULL)) dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", promisc ? "en" : "dis"); sg_init_one(sg, &allmulti, sizeof(allmulti)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_ALLMULTI, sg, NULL)) dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", allmulti ? 
"en" : "dis"); uc_count = netdev_uc_count(dev); mc_count = netdev_mc_count(dev); /* MAC filter - use one buffer for both lists */ buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + (2 * sizeof(mac_data->entries)), GFP_ATOMIC); mac_data = buf; if (!buf) return; sg_init_table(sg, 2); /* Store the unicast list and count in the front of the buffer */ mac_data->entries = uc_count; i = 0; netdev_for_each_uc_addr(ha, dev) memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); sg_set_buf(&sg[0], mac_data, sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); /* multicast list and count fill the end */ mac_data = (void *)&mac_data->macs[uc_count][0]; mac_data->entries = mc_count; i = 0; netdev_for_each_mc_addr(ha, dev) memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); sg_set_buf(&sg[1], mac_data, sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET, sg, NULL)) dev_warn(&dev->dev, "Failed to set MAC fitler table.\n"); kfree(buf); } static int virtnet_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; sg_init_one(&sg, &vid, sizeof(vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL)) dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); return 0; } static int virtnet_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; sg_init_one(&sg, &vid, sizeof(vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL)) dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); return 0; } static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) { int i; int cpu; if (vi->affinity_hint_set) { for (i = 0; i < vi->max_queue_pairs; i++) { virtqueue_set_affinity(vi->rq[i].vq, -1); virtqueue_set_affinity(vi->sq[i].vq, -1); } vi->affinity_hint_set = false; } i = 0; 
for_each_online_cpu(cpu) { if (cpu == hcpu) { *per_cpu_ptr(vi->vq_index, cpu) = -1; } else { *per_cpu_ptr(vi->vq_index, cpu) = ++i % vi->curr_queue_pairs; } } } static void virtnet_set_affinity(struct virtnet_info *vi) { int i; int cpu; /* In multiqueue mode, when the number of cpu is equal to the number of * queue pairs, we let the queue pairs to be private to one cpu by * setting the affinity hint to eliminate the contention. */ if (vi->curr_queue_pairs == 1 || vi->max_queue_pairs != num_online_cpus()) { virtnet_clean_affinity(vi, -1); return; } i = 0; for_each_online_cpu(cpu) { virtqueue_set_affinity(vi->rq[i].vq, cpu); virtqueue_set_affinity(vi->sq[i].vq, cpu); *per_cpu_ptr(vi->vq_index, cpu) = i; i++; } vi->affinity_hint_set = true; } static int virtnet_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); switch(action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: case CPU_DOWN_FAILED: case CPU_DEAD: virtnet_set_affinity(vi); break; case CPU_DOWN_PREPARE: virtnet_clean_affinity(vi, (long)hcpu); break; default: break; } return NOTIFY_OK; } static void virtnet_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) { struct virtnet_info *vi = netdev_priv(dev); ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); ring->rx_pending = ring->rx_max_pending; ring->tx_pending = ring->tx_max_pending; } static void virtnet_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); } /* TODO: Eliminate OOO packets during switching */ static int virtnet_set_channels(struct net_device *dev, struct 
ethtool_channels *channels) { struct virtnet_info *vi = netdev_priv(dev); u16 queue_pairs = channels->combined_count; int err; /* We don't support separate rx/tx channels. * We don't allow setting 'other' channels. */ if (channels->rx_count || channels->tx_count || channels->other_count) return -EINVAL; if (queue_pairs > vi->max_queue_pairs) return -EINVAL; get_online_cpus(); err = virtnet_set_queues(vi, queue_pairs); if (!err) { netif_set_real_num_tx_queues(dev, queue_pairs); netif_set_real_num_rx_queues(dev, queue_pairs); virtnet_set_affinity(vi); } put_online_cpus(); return err; } static void virtnet_get_channels(struct net_device *dev, struct ethtool_channels *channels) { struct virtnet_info *vi = netdev_priv(dev); channels->combined_count = vi->curr_queue_pairs; channels->max_combined = vi->max_queue_pairs; channels->max_other = 0; channels->rx_count = 0; channels->tx_count = 0; channels->other_count = 0; } static const struct ethtool_ops virtnet_ethtool_ops = { .get_drvinfo = virtnet_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = virtnet_get_ringparam, .set_channels = virtnet_set_channels, .get_channels = virtnet_get_channels, }; #define MIN_MTU 68 #define MAX_MTU 65535 static int virtnet_change_mtu(struct net_device *dev, int new_mtu) { if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) return -EINVAL; dev->mtu = new_mtu; return 0; } /* To avoid contending a lock hold by a vcpu who would exit to host, select the * txq based on the processor id. 
*/ static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb) { int txq; struct virtnet_info *vi = netdev_priv(dev); if (skb_rx_queue_recorded(skb)) { txq = skb_get_rx_queue(skb); } else { txq = *__this_cpu_ptr(vi->vq_index); if (txq == -1) txq = 0; } while (unlikely(txq >= dev->real_num_tx_queues)) txq -= dev->real_num_tx_queues; return txq; } static const struct net_device_ops virtnet_netdev = { .ndo_open = virtnet_open, .ndo_stop = virtnet_close, .ndo_start_xmit = start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = virtnet_set_mac_address, .ndo_set_rx_mode = virtnet_set_rx_mode, .ndo_change_mtu = virtnet_change_mtu, .ndo_get_stats64 = virtnet_stats, .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, .ndo_select_queue = virtnet_select_queue, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = virtnet_netpoll, #endif }; static void virtnet_config_changed_work(struct work_struct *work) { struct virtnet_info *vi = container_of(work, struct virtnet_info, config_work); u16 v; mutex_lock(&vi->config_lock); if (!vi->config_enable) goto done; if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS, offsetof(struct virtio_net_config, status), &v) < 0) goto done; if (v & VIRTIO_NET_S_ANNOUNCE) { netdev_notify_peers(vi->dev); virtnet_ack_link_announce(vi); } /* Ignore unknown (future) status bits */ v &= VIRTIO_NET_S_LINK_UP; if (vi->status == v) goto done; vi->status = v; if (vi->status & VIRTIO_NET_S_LINK_UP) { netif_carrier_on(vi->dev); netif_tx_wake_all_queues(vi->dev); } else { netif_carrier_off(vi->dev); netif_tx_stop_all_queues(vi->dev); } done: mutex_unlock(&vi->config_lock); } static void virtnet_config_changed(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; schedule_work(&vi->config_work); } static void virtnet_free_queues(struct virtnet_info *vi) { kfree(vi->rq); kfree(vi->sq); } static void free_receive_bufs(struct virtnet_info *vi) { int i; for (i = 0; i 
< vi->max_queue_pairs; i++) { while (vi->rq[i].pages) __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); } } static void free_unused_bufs(struct virtnet_info *vi) { void *buf; int i; for (i = 0; i < vi->max_queue_pairs; i++) { struct virtqueue *vq = vi->sq[i].vq; while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) dev_kfree_skb(buf); } for (i = 0; i < vi->max_queue_pairs; i++) { struct virtqueue *vq = vi->rq[i].vq; while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { if (vi->mergeable_rx_bufs || vi->big_packets) give_pages(&vi->rq[i], buf); else dev_kfree_skb(buf); --vi->rq[i].num; } BUG_ON(vi->rq[i].num != 0); } } static void virtnet_del_vqs(struct virtnet_info *vi) { struct virtio_device *vdev = vi->vdev; virtnet_clean_affinity(vi, -1); vdev->config->del_vqs(vdev); virtnet_free_queues(vi); } static int virtnet_find_vqs(struct virtnet_info *vi) { vq_callback_t **callbacks; struct virtqueue **vqs; int ret = -ENOMEM; int i, total_vqs; const char **names; /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by * possible control vq. 
*/ total_vqs = vi->max_queue_pairs * 2 + virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); /* Allocate space for find_vqs parameters */ vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); if (!vqs) goto err_vq; callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL); if (!callbacks) goto err_callback; names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); if (!names) goto err_names; /* Parameters for control virtqueue, if any */ if (vi->has_cvq) { callbacks[total_vqs - 1] = NULL; names[total_vqs - 1] = "control"; } /* Allocate/initialize parameters for send/receive virtqueues */ for (i = 0; i < vi->max_queue_pairs; i++) { callbacks[rxq2vq(i)] = skb_recv_done; callbacks[txq2vq(i)] = skb_xmit_done; sprintf(vi->rq[i].name, "input.%d", i); sprintf(vi->sq[i].name, "output.%d", i); names[rxq2vq(i)] = vi->rq[i].name; names[txq2vq(i)] = vi->sq[i].name; } ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, names); if (ret) goto err_find; if (vi->has_cvq) { vi->cvq = vqs[total_vqs - 1]; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; } for (i = 0; i < vi->max_queue_pairs; i++) { vi->rq[i].vq = vqs[rxq2vq(i)]; vi->sq[i].vq = vqs[txq2vq(i)]; } kfree(names); kfree(callbacks); kfree(vqs); return 0; err_find: kfree(names); err_names: kfree(callbacks); err_callback: kfree(vqs); err_vq: return ret; } static int virtnet_alloc_queues(struct virtnet_info *vi) { int i; vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); if (!vi->sq) goto err_sq; vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); if (!vi->rq) goto err_rq; INIT_DELAYED_WORK(&vi->refill, refill_work); for (i = 0; i < vi->max_queue_pairs; i++) { vi->rq[i].pages = NULL; netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, napi_weight); sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); } return 0; err_rq: kfree(vi->sq); err_sq: return 
-ENOMEM; } static int init_vqs(struct virtnet_info *vi) { int ret; /* Allocate send & receive queues */ ret = virtnet_alloc_queues(vi); if (ret) goto err; ret = virtnet_find_vqs(vi); if (ret) goto err_free; get_online_cpus(); virtnet_set_affinity(vi); put_online_cpus(); return 0; err_free: virtnet_free_queues(vi); err: return ret; } static int virtnet_probe(struct virtio_device *vdev) { int i, err; struct net_device *dev; struct virtnet_info *vi; u16 max_queue_pairs; /* Find if host supports multiqueue virtio_net device */ err = virtio_config_val(vdev, VIRTIO_NET_F_MQ, offsetof(struct virtio_net_config, max_virtqueue_pairs), &max_queue_pairs); /* We need at least 2 queue's */ if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) max_queue_pairs = 1; /* Allocate ourselves a network device with room for our info */ dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); if (!dev) return -ENOMEM; /* Set up network device as normal. */ dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; dev->netdev_ops = &virtnet_netdev; dev->features = NETIF_F_HIGHDMA; SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops); SET_NETDEV_DEV(dev, &vdev->dev); /* Do we support "hardware" checksums? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { /* This opens up the world of extra features. */ dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (csum) dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO | NETIF_F_TSO_ECN | NETIF_F_TSO6; } /* Individual feature bits: what can host handle? 
*/ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) dev->hw_features |= NETIF_F_TSO; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) dev->hw_features |= NETIF_F_TSO6; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) dev->hw_features |= NETIF_F_TSO_ECN; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) dev->hw_features |= NETIF_F_UFO; if (gso) dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); /* (!csum && gso) case will be fixed by register_netdev() */ } dev->vlan_features = dev->features; /* Configuration may specify what MAC to use. Otherwise random. */ if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC, offsetof(struct virtio_net_config, mac), dev->dev_addr, dev->addr_len) < 0) eth_hw_addr_random(dev); /* Set up our device-specific information */ vi = netdev_priv(dev); vi->dev = dev; vi->vdev = vdev; vdev->priv = vi; vi->stats = alloc_percpu(struct virtnet_stats); err = -ENOMEM; if (vi->stats == NULL) goto free; vi->vq_index = alloc_percpu(int); if (vi->vq_index == NULL) goto free_stats; mutex_init(&vi->config_lock); vi->config_enable = true; INIT_WORK(&vi->config_work, virtnet_config_changed_work); /* If we can receive ANY GSO packets, we must allocate large ones. 
*/ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) vi->big_packets = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) vi->mergeable_rx_bufs = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) vi->has_cvq = true; /* Use single tx/rx queue pair as default */ vi->curr_queue_pairs = 1; vi->max_queue_pairs = max_queue_pairs; /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ err = init_vqs(vi); if (err) goto free_index; netif_set_real_num_tx_queues(dev, 1); netif_set_real_num_rx_queues(dev, 1); err = register_netdev(dev); if (err) { pr_debug("virtio_net: registering device failed\n"); goto free_vqs; } /* Last of all, set up some receive buffers. */ for (i = 0; i < vi->curr_queue_pairs; i++) { try_fill_recv(&vi->rq[i], GFP_KERNEL); /* If we didn't even get one input buffer, we're useless. */ if (vi->rq[i].num == 0) { free_unused_bufs(vi); err = -ENOMEM; goto free_recv_bufs; } } vi->nb.notifier_call = &virtnet_cpu_callback; err = register_hotcpu_notifier(&vi->nb); if (err) { pr_debug("virtio_net: registering cpu notifier failed\n"); goto free_recv_bufs; } /* Assume link up if device can't report link status, otherwise get link status from config. */ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { netif_carrier_off(dev); schedule_work(&vi->config_work); } else { vi->status = VIRTIO_NET_S_LINK_UP; netif_carrier_on(dev); } pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", dev->name, max_queue_pairs); return 0; free_recv_bufs: free_receive_bufs(vi); unregister_netdev(dev); free_vqs: cancel_delayed_work_sync(&vi->refill); virtnet_del_vqs(vi); free_index: free_percpu(vi->vq_index); free_stats: free_percpu(vi->stats); free: free_netdev(dev); return err; } static void remove_vq_common(struct virtnet_info *vi) { vi->vdev->config->reset(vi->vdev); /* Free unused buffers in both send and recv, if any. 
*/ free_unused_bufs(vi); free_receive_bufs(vi); virtnet_del_vqs(vi); } static void virtnet_remove(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; unregister_hotcpu_notifier(&vi->nb); /* Prevent config work handler from accessing the device. */ mutex_lock(&vi->config_lock); vi->config_enable = false; mutex_unlock(&vi->config_lock); unregister_netdev(vi->dev); remove_vq_common(vi); flush_work(&vi->config_work); free_percpu(vi->vq_index); free_percpu(vi->stats); free_netdev(vi->dev); } #ifdef CONFIG_PM static int virtnet_freeze(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; int i; unregister_hotcpu_notifier(&vi->nb); /* Prevent config work handler from accessing the device */ mutex_lock(&vi->config_lock); vi->config_enable = false; mutex_unlock(&vi->config_lock); netif_device_detach(vi->dev); cancel_delayed_work_sync(&vi->refill); if (netif_running(vi->dev)) for (i = 0; i < vi->max_queue_pairs; i++) { napi_disable(&vi->rq[i].napi); netif_napi_del(&vi->rq[i].napi); } remove_vq_common(vi); flush_work(&vi->config_work); return 0; } static int virtnet_restore(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; int err, i; err = init_vqs(vi); if (err) return err; if (netif_running(vi->dev)) for (i = 0; i < vi->max_queue_pairs; i++) virtnet_napi_enable(&vi->rq[i]); netif_device_attach(vi->dev); for (i = 0; i < vi->curr_queue_pairs; i++) if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); mutex_lock(&vi->config_lock); vi->config_enable = true; mutex_unlock(&vi->config_lock); rtnl_lock(); virtnet_set_queues(vi, vi->curr_queue_pairs); rtnl_unlock(); err = register_hotcpu_notifier(&vi->nb); if (err) return err; return 0; } #endif static struct virtio_device_id id_table[] = { { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, { 0 }, }; static unsigned int features[] = { VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, 
VIRTIO_NET_F_HOST_TSO6, VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, VIRTIO_NET_F_CTRL_MAC_ADDR, }; static struct virtio_driver virtio_net_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtnet_probe, .remove = virtnet_remove, .config_changed = virtnet_config_changed, #ifdef CONFIG_PM .freeze = virtnet_freeze, .restore = virtnet_restore, #endif }; module_virtio_driver(virtio_net_driver); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio network driver"); MODULE_LICENSE("GPL");
gpl-2.0
ballock/kernel-xenial-bbr
drivers/media/dvb-core/dvb_ca_en50221.c
334
46231
/* * dvb_ca.c: generic DVB functions for EN50221 CAM interfaces * * Copyright (C) 2004 Andrew de Quincey * * Parts of this file were based on sources as follows: * * Copyright (C) 2003 Ralph Metzler <rjkm@metzlerbros.de> * * based on code: * * Copyright (C) 1999-2002 Ralph Metzler * & Marcus Metzler for convergence integrated media GmbH * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
* Or, point your browser to http://www.gnu.org/copyleft/gpl.html */ #include <linux/errno.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/kthread.h> #include "dvb_ca_en50221.h" #include "dvb_ringbuffer.h" static int dvb_ca_en50221_debug; module_param_named(cam_debug, dvb_ca_en50221_debug, int, 0644); MODULE_PARM_DESC(cam_debug, "enable verbose debug messages"); #define dprintk if (dvb_ca_en50221_debug) printk #define INIT_TIMEOUT_SECS 10 #define HOST_LINK_BUF_SIZE 0x200 #define RX_BUFFER_SIZE 65535 #define MAX_RX_PACKETS_PER_ITERATION 10 #define CTRLIF_DATA 0 #define CTRLIF_COMMAND 1 #define CTRLIF_STATUS 1 #define CTRLIF_SIZE_LOW 2 #define CTRLIF_SIZE_HIGH 3 #define CMDREG_HC 1 /* Host control */ #define CMDREG_SW 2 /* Size write */ #define CMDREG_SR 4 /* Size read */ #define CMDREG_RS 8 /* Reset interface */ #define CMDREG_FRIE 0x40 /* Enable FR interrupt */ #define CMDREG_DAIE 0x80 /* Enable DA interrupt */ #define IRQEN (CMDREG_DAIE) #define STATUSREG_RE 1 /* read error */ #define STATUSREG_WE 2 /* write error */ #define STATUSREG_FR 0x40 /* module free */ #define STATUSREG_DA 0x80 /* data available */ #define STATUSREG_TXERR (STATUSREG_RE|STATUSREG_WE) /* general transfer error */ #define DVB_CA_SLOTSTATE_NONE 0 #define DVB_CA_SLOTSTATE_UNINITIALISED 1 #define DVB_CA_SLOTSTATE_RUNNING 2 #define DVB_CA_SLOTSTATE_INVALID 3 #define DVB_CA_SLOTSTATE_WAITREADY 4 #define DVB_CA_SLOTSTATE_VALIDATE 5 #define DVB_CA_SLOTSTATE_WAITFR 6 #define DVB_CA_SLOTSTATE_LINKINIT 7 /* Information on a CA slot */ struct dvb_ca_slot { /* current state of the CAM */ int slot_state; /* mutex used for serializing access to one CI slot */ struct mutex slot_lock; /* Number of CAMCHANGES that have occurred since last processing */ atomic_t camchange_count; /* Type of last CAMCHANGE */ int camchange_type; /* base address of CAM config */ u32 
config_base; /* value to write into Config Control register */ u8 config_option; /* if 1, the CAM supports DA IRQs */ u8 da_irq_supported:1; /* size of the buffer to use when talking to the CAM */ int link_buf_size; /* buffer for incoming packets */ struct dvb_ringbuffer rx_buffer; /* timer used during various states of the slot */ unsigned long timeout; }; /* Private CA-interface information */ struct dvb_ca_private { /* pointer back to the public data structure */ struct dvb_ca_en50221 *pub; /* the DVB device */ struct dvb_device *dvbdev; /* Flags describing the interface (DVB_CA_FLAG_*) */ u32 flags; /* number of slots supported by this CA interface */ unsigned int slot_count; /* information on each slot */ struct dvb_ca_slot *slot_info; /* wait queues for read() and write() operations */ wait_queue_head_t wait_queue; /* PID of the monitoring thread */ struct task_struct *thread; /* Flag indicating if the CA device is open */ unsigned int open:1; /* Flag indicating the thread should wake up now */ unsigned int wakeup:1; /* Delay the main thread should use */ unsigned long delay; /* Slot to start looking for data to read from in the next user-space read operation */ int next_read_slot; /* mutex serializing ioctls */ struct mutex ioctl_mutex; }; static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca); static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * ebuf, int ecount); static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, u8 * ebuf, int ecount); /** * Safely find needle in haystack. * * @haystack: Buffer to look in. * @hlen: Number of bytes in haystack. * @needle: Buffer to find. * @nlen: Number of bytes in needle. * @return Pointer into haystack needle was found at, or NULL if not found. 
*/ static char *findstr(char * haystack, int hlen, char * needle, int nlen) { int i; if (hlen < nlen) return NULL; for (i = 0; i <= hlen - nlen; i++) { if (!strncmp(haystack + i, needle, nlen)) return haystack + i; } return NULL; } /* ******************************************************************************** */ /* EN50221 physical interface functions */ /** * dvb_ca_en50221_check_camstatus - Check CAM status. */ static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot) { int slot_status; int cam_present_now; int cam_changed; /* IRQ mode */ if (ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE) { return (atomic_read(&ca->slot_info[slot].camchange_count) != 0); } /* poll mode */ slot_status = ca->pub->poll_slot_status(ca->pub, slot, ca->open); cam_present_now = (slot_status & DVB_CA_EN50221_POLL_CAM_PRESENT) ? 1 : 0; cam_changed = (slot_status & DVB_CA_EN50221_POLL_CAM_CHANGED) ? 1 : 0; if (!cam_changed) { int cam_present_old = (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_NONE); cam_changed = (cam_present_now != cam_present_old); } if (cam_changed) { if (!cam_present_now) { ca->slot_info[slot].camchange_type = DVB_CA_EN50221_CAMCHANGE_REMOVED; } else { ca->slot_info[slot].camchange_type = DVB_CA_EN50221_CAMCHANGE_INSERTED; } atomic_set(&ca->slot_info[slot].camchange_count, 1); } else { if ((ca->slot_info[slot].slot_state == DVB_CA_SLOTSTATE_WAITREADY) && (slot_status & DVB_CA_EN50221_POLL_CAM_READY)) { // move to validate state if reset is completed ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_VALIDATE; } } return cam_changed; } /** * dvb_ca_en50221_wait_if_status - Wait for flags to become set on the STATUS * register on a CAM interface, checking for errors and timeout. * * @ca: CA instance. * @slot: Slot on interface. * @waitfor: Flags to wait for. * @timeout_ms: Timeout in milliseconds. * * @return 0 on success, nonzero on error. 
*/ static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot, u8 waitfor, int timeout_hz) { unsigned long timeout; unsigned long start; dprintk("%s\n", __func__); /* loop until timeout elapsed */ start = jiffies; timeout = jiffies + timeout_hz; while (1) { /* read the status and check for error */ int res = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS); if (res < 0) return -EIO; /* if we got the flags, it was successful! */ if (res & waitfor) { dprintk("%s succeeded timeout:%lu\n", __func__, jiffies - start); return 0; } /* check for timeout */ if (time_after(jiffies, timeout)) { break; } /* wait for a bit */ msleep(1); } dprintk("%s failed timeout:%lu\n", __func__, jiffies - start); /* if we get here, we've timed out */ return -ETIMEDOUT; } /** * dvb_ca_en50221_link_init - Initialise the link layer connection to a CAM. * * @ca: CA instance. * @slot: Slot id. * * @return 0 on success, nonzero on failure. */ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot) { int ret; int buf_size; u8 buf[2]; dprintk("%s\n", __func__); /* we'll be determining these during this function */ ca->slot_info[slot].da_irq_supported = 0; /* set the host link buffer size temporarily. it will be overwritten with the * real negotiated size later. 
*/ ca->slot_info[slot].link_buf_size = 2; /* read the buffer size from the CAM */ if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN | CMDREG_SR)) != 0) return ret; if ((ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_DA, HZ / 10)) != 0) return ret; if ((ret = dvb_ca_en50221_read_data(ca, slot, buf, 2)) != 2) return -EIO; if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN)) != 0) return ret; /* store it, and choose the minimum of our buffer and the CAM's buffer size */ buf_size = (buf[0] << 8) | buf[1]; if (buf_size > HOST_LINK_BUF_SIZE) buf_size = HOST_LINK_BUF_SIZE; ca->slot_info[slot].link_buf_size = buf_size; buf[0] = buf_size >> 8; buf[1] = buf_size & 0xff; dprintk("Chosen link buffer size of %i\n", buf_size); /* write the buffer size to the CAM */ if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN | CMDREG_SW)) != 0) return ret; if ((ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_FR, HZ / 10)) != 0) return ret; if ((ret = dvb_ca_en50221_write_data(ca, slot, buf, 2)) != 2) return -EIO; if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN)) != 0) return ret; /* success */ return 0; } /** * dvb_ca_en50221_read_tuple - Read a tuple from attribute memory. * * @ca: CA instance. * @slot: Slot id. * @address: Address to read from. Updated. * @tupleType: Tuple id byte. Updated. * @tupleLength: Tuple length. Updated. * @tuple: Dest buffer for tuple (must be 256 bytes). Updated. * * @return 0 on success, nonzero on error. 
*/ static int dvb_ca_en50221_read_tuple(struct dvb_ca_private *ca, int slot, int *address, int *tupleType, int *tupleLength, u8 * tuple) { int i; int _tupleType; int _tupleLength; int _address = *address; /* grab the next tuple length and type */ if ((_tupleType = ca->pub->read_attribute_mem(ca->pub, slot, _address)) < 0) return _tupleType; if (_tupleType == 0xff) { dprintk("END OF CHAIN TUPLE type:0x%x\n", _tupleType); *address += 2; *tupleType = _tupleType; *tupleLength = 0; return 0; } if ((_tupleLength = ca->pub->read_attribute_mem(ca->pub, slot, _address + 2)) < 0) return _tupleLength; _address += 4; dprintk("TUPLE type:0x%x length:%i\n", _tupleType, _tupleLength); /* read in the whole tuple */ for (i = 0; i < _tupleLength; i++) { tuple[i] = ca->pub->read_attribute_mem(ca->pub, slot, _address + (i * 2)); dprintk(" 0x%02x: 0x%02x %c\n", i, tuple[i] & 0xff, ((tuple[i] > 31) && (tuple[i] < 127)) ? tuple[i] : '.'); } _address += (_tupleLength * 2); // success *tupleType = _tupleType; *tupleLength = _tupleLength; *address = _address; return 0; } /** * dvb_ca_en50221_parse_attributes - Parse attribute memory of a CAM module, * extracting Config register, and checking it is a DVB CAM module. * * @ca: CA instance. * @slot: Slot id. * * @return 0 on success, <0 on failure. 
 */
static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
{
	int address = 0;
	int tupleLength;
	int tupleType;
	u8 tuple[257];
	char *dvb_str;
	int rasz;
	int status;
	int got_cftableentry = 0;
	int end_chain = 0;
	int i;
	u16 manfid = 0;
	u16 devid = 0;

	/* walk the PCMCIA CIS tuple chain; the first five tuples must appear
	 * in this exact order for a valid DVB CAM */

	// CISTPL_DEVICE_0A
	if ((status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, &tupleLength, tuple)) < 0)
		return status;
	if (tupleType != 0x1D)
		return -EINVAL;

	// CISTPL_DEVICE_0C
	if ((status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, &tupleLength, tuple)) < 0)
		return status;
	if (tupleType != 0x1C)
		return -EINVAL;

	// CISTPL_VERS_1
	if ((status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, &tupleLength, tuple)) < 0)
		return status;
	if (tupleType != 0x15)
		return -EINVAL;

	// CISTPL_MANFID
	if ((status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, &tupleLength, tuple)) < 0)
		return status;
	if (tupleType != 0x20)
		return -EINVAL;
	if (tupleLength != 4)
		return -EINVAL;
	/* manufacturer/device ids are little-endian in the tuple */
	manfid = (tuple[1] << 8) | tuple[0];
	devid = (tuple[3] << 8) | tuple[2];

	// CISTPL_CONFIG
	if ((status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, &tupleLength, tuple)) < 0)
		return status;
	if (tupleType != 0x1A)
		return -EINVAL;
	if (tupleLength < 3)
		return -EINVAL;

	/* extract the configbase: low 2 bits of tuple[0] give the number of
	 * config-base address bytes minus one */
	rasz = tuple[0] & 3;
	if (tupleLength < (3 + rasz + 14))
		return -EINVAL;
	ca->slot_info[slot].config_base = 0;
	for (i = 0; i < rasz + 1; i++) {
		ca->slot_info[slot].config_base |= (tuple[2 + i] << (8 * i));
	}

	/* check it contains the correct DVB string */
	dvb_str = findstr((char *)tuple, tupleLength, "DVB_CI_V", 8);
	if (dvb_str == NULL)
		return -EINVAL;
	if (tupleLength < ((dvb_str - (char *) tuple) + 12))
		return -EINVAL;

	/* is it a version we support? */
	if (strncmp(dvb_str + 8, "1.00", 4)) {
		printk("dvb_ca adapter %d: Unsupported DVB CAM module version %c%c%c%c\n",
		       ca->dvbdev->adapter->num, dvb_str[8], dvb_str[9], dvb_str[10], dvb_str[11]);
		return -EINVAL;
	}

	/* process the CFTABLE_ENTRY tuples, and any after those
	 * (0x1000 caps the scan so a broken CIS cannot loop forever) */
	while ((!end_chain) && (address < 0x1000)) {
		if ((status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, &tupleLength, tuple)) < 0)
			return status;
		switch (tupleType) {
		case 0x1B:	// CISTPL_CFTABLE_ENTRY
			if (tupleLength < (2 + 11 + 17))
				break;

			/* if we've already parsed one, just use it */
			if (got_cftableentry)
				break;

			/* get the config option */
			ca->slot_info[slot].config_option = tuple[0] & 0x3f;

			/* OK, check it contains the correct strings */
			if ((findstr((char *)tuple, tupleLength, "DVB_HOST", 8) == NULL) ||
			    (findstr((char *)tuple, tupleLength, "DVB_CI_MODULE", 13) == NULL))
				break;

			got_cftableentry = 1;
			break;

		case 0x14:	// CISTPL_NO_LINK
			break;

		case 0xFF:	// CISTPL_END
			end_chain = 1;
			break;

		default:	/* Unknown tuple type - just skip this tuple and move to the next one */
			dprintk("dvb_ca: Skipping unknown tuple type:0x%x length:0x%x\n",
				tupleType, tupleLength);
			break;
		}
	}

	/* the CAM is only valid if the scan stayed in bounds AND a usable
	 * CFTABLE_ENTRY was found */
	if ((address > 0x1000) || (!got_cftableentry))
		return -EINVAL;

	dprintk("Valid DVB CAM detected MANID:%x DEVID:%x CONFIGBASE:0x%x CONFIGOPTION:0x%x\n",
		manfid, devid, ca->slot_info[slot].config_base, ca->slot_info[slot].config_option);

	// success!
	return 0;
}


/**
 * dvb_ca_en50221_set_configoption - Set CAM's configoption correctly.
 *
 * @ca: CA instance.
 * @slot: Slot containing the CAM.
*/ static int dvb_ca_en50221_set_configoption(struct dvb_ca_private *ca, int slot) { int configoption; dprintk("%s\n", __func__); /* set the config option */ ca->pub->write_attribute_mem(ca->pub, slot, ca->slot_info[slot].config_base, ca->slot_info[slot].config_option); /* check it */ configoption = ca->pub->read_attribute_mem(ca->pub, slot, ca->slot_info[slot].config_base); dprintk("Set configoption 0x%x, read configoption 0x%x\n", ca->slot_info[slot].config_option, configoption & 0x3f); /* fine! */ return 0; } /** * dvb_ca_en50221_read_data - This function talks to an EN50221 CAM control * interface. It reads a buffer of data from the CAM. The data can either * be stored in a supplied buffer, or automatically be added to the slot's * rx_buffer. * * @ca: CA instance. * @slot: Slot to read from. * @ebuf: If non-NULL, the data will be written to this buffer. If NULL, * the data will be added into the buffering system as a normal fragment. * @ecount: Size of ebuf. Ignored if ebuf is NULL. 
 *
 * @return Number of bytes read, or < 0 on error
 */
static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * ebuf, int ecount)
{
	int bytes_read;
	int status;
	u8 buf[HOST_LINK_BUF_SIZE];
	int i;

	dprintk("%s\n", __func__);

	/* check if we have space for a link buf in the rx_buffer */
	if (ebuf == NULL) {
		int buf_free;

		if (ca->slot_info[slot].rx_buffer.data == NULL) {
			status = -EIO;
			goto exit;
		}
		buf_free = dvb_ringbuffer_free(&ca->slot_info[slot].rx_buffer);

		/* must fit a full link buffer plus the ringbuffer packet header */
		if (buf_free < (ca->slot_info[slot].link_buf_size + DVB_RINGBUFFER_PKTHDRSIZE)) {
			status = -EAGAIN;
			goto exit;
		}
	}

	/* check if there is data available */
	if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0)
		goto exit;
	if (!(status & STATUSREG_DA)) {
		/* no data */
		status = 0;
		goto exit;
	}

	/* read the amount of data (16 bits, high byte first) */
	if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH)) < 0)
		goto exit;
	bytes_read = status << 8;
	if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_SIZE_LOW)) < 0)
		goto exit;
	bytes_read |= status;

	/* check it will fit; an over- or under-sized buffer means the link is
	 * broken, so fall back to LINKINIT to renegotiate */
	if (ebuf == NULL) {
		if (bytes_read > ca->slot_info[slot].link_buf_size) {
			printk("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n",
			       ca->dvbdev->adapter->num, bytes_read, ca->slot_info[slot].link_buf_size);
			ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
			status = -EIO;
			goto exit;
		}
		if (bytes_read < 2) {
			printk("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n",
			       ca->dvbdev->adapter->num);
			ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
			status = -EIO;
			goto exit;
		}
	} else {
		if (bytes_read > ecount) {
			printk("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n",
			       ca->dvbdev->adapter->num);
			status = -EIO;
			goto exit;
		}
	}

	/* fill the buffer */
	for (i = 0; i < bytes_read; i++) {
		/* read byte and check */
		if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_DATA)) < 0)
			goto exit;

		/* OK, store it in the buffer */
		buf[i] = status;
	}

	/* check for read error (RE should now be 0) */
	if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0)
		goto exit;
	if (status & STATUSREG_RE) {
		ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
		status = -EIO;
		goto exit;
	}

	/* OK, add it to the receive buffer, or copy into external buffer if supplied */
	if (ebuf == NULL) {
		if (ca->slot_info[slot].rx_buffer.data == NULL) {
			status = -EIO;
			goto exit;
		}
		dvb_ringbuffer_pkt_write(&ca->slot_info[slot].rx_buffer, buf, bytes_read);
	} else {
		memcpy(ebuf, buf, bytes_read);
	}

	dprintk("Received CA packet for slot %i connection id 0x%x last_frag:%i size:0x%x\n", slot,
		buf[0], (buf[1] & 0x80) == 0, bytes_read);

	/* wake up readers when a last_fragment is received */
	if ((buf[1] & 0x80) == 0x00) {
		wake_up_interruptible(&ca->wait_queue);
	}

	status = bytes_read;

exit:
	return status;
}


/**
 * dvb_ca_en50221_write_data - This function talks to an EN50221 CAM control
 * interface. It writes a buffer of data to a CAM.
 *
 * @ca: CA instance.
 * @slot: Slot to write to.
 * @buf: The data in this buffer is treated as a complete link-level packet to
 * be written.
 * @bytes_write: Size of buf.
 *
 * @return Number of bytes written, or < 0 on error.
 */
static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, u8 * buf, int bytes_write)
{
	int status;
	int i;

	dprintk("%s\n", __func__);

	/* sanity check: never send more than the negotiated link buffer size */
	if (bytes_write > ca->slot_info[slot].link_buf_size)
		return -EINVAL;

	/* it is possible we are dealing with a single buffer implementation,
	   thus if there is data available for read or if there is even a read
	   already in progress, we do nothing but awake the kernel thread to
	   process the data if necessary. */
	if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0)
		goto exitnowrite;
	if (status & (STATUSREG_DA | STATUSREG_RE)) {
		if (status & STATUSREG_DA)
			dvb_ca_en50221_thread_wakeup(ca);

		status = -EAGAIN;
		goto exitnowrite;
	}

	/* OK, set HC bit (Host Control: claim the interface for writing) */
	if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND,
						 IRQEN | CMDREG_HC)) != 0)
		goto exit;

	/* check if interface is still free */
	if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0)
		goto exit;
	if (!(status & STATUSREG_FR)) {
		/* it wasn't free => try again later */
		status = -EAGAIN;
		goto exit;
	}

	/* send the amount of data (16 bits, high byte first) */
	if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH, bytes_write >> 8)) != 0)
		goto exit;
	if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_LOW,
						 bytes_write & 0xff)) != 0)
		goto exit;

	/* send the buffer */
	for (i = 0; i < bytes_write; i++) {
		if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_DATA, buf[i])) != 0)
			goto exit;
	}

	/* check for write error (WE should now be 0) */
	if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0)
		goto exit;
	if (status & STATUSREG_WE) {
		ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
		status = -EIO;
		goto exit;
	}
	status = bytes_write;

	dprintk("Wrote CA packet for slot %i, connection id 0x%x last_frag:%i size:0x%x\n", slot,
		buf[0], (buf[1] & 0x80) == 0, bytes_write);

exit:
	/* always drop the HC bit again, whether the write succeeded or not */
	ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN);

exitnowrite:
	return status;
}
EXPORT_SYMBOL(dvb_ca_en50221_camchange_irq);


/* ******************************************************************************** */
/* EN50221 higher level functions */


/**
 * dvb_ca_en50221_slot_shutdown - A CAM has been removed => shut it down.
 *
 * @ca: CA instance.
 * @slot: Slot to shut down.
 */
static int dvb_ca_en50221_slot_shutdown(struct dvb_ca_private *ca, int slot)
{
	dprintk("%s\n", __func__);

	ca->pub->slot_shutdown(ca->pub, slot);
	ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE;

	/* need to wake up all processes to check if they're now
	   trying to write to a defunct CAM */
	wake_up_interruptible(&ca->wait_queue);

	dprintk("Slot %i shutdown\n", slot);

	/* success */
	return 0;
}
EXPORT_SYMBOL(dvb_ca_en50221_camready_irq);


/**
 * dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred.
 *
 * @pubca: CA instance.
 * @slot: Slot concerned.
 * @change_type: One of the DVB_CA_CAMCHANGE_* values.
 */
void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221 *pubca, int slot, int change_type)
{
	struct dvb_ca_private *ca = pubca->private;

	dprintk("CAMCHANGE IRQ slot:%i change_type:%i\n", slot, change_type);

	/* ignore anything other than the two known change types */
	switch (change_type) {
	case DVB_CA_EN50221_CAMCHANGE_REMOVED:
	case DVB_CA_EN50221_CAMCHANGE_INSERTED:
		break;

	default:
		return;
	}

	ca->slot_info[slot].camchange_type = change_type;
	atomic_inc(&ca->slot_info[slot].camchange_count);
	dvb_ca_en50221_thread_wakeup(ca);
}
EXPORT_SYMBOL(dvb_ca_en50221_frda_irq);


/**
 * dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred.
 *
 * @pubca: CA instance.
 * @slot: Slot concerned.
 */
void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot)
{
	struct dvb_ca_private *ca = pubca->private;

	dprintk("CAMREADY IRQ slot:%i\n", slot);

	if (ca->slot_info[slot].slot_state == DVB_CA_SLOTSTATE_WAITREADY) {
		ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_VALIDATE;
		dvb_ca_en50221_thread_wakeup(ca);
	}
}


/**
 * An FR or DA IRQ has occurred.
 *
 * @pubca: CA instance.
 * @slot: Slot concerned.
 */
void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *pubca, int slot)
{
	struct dvb_ca_private *ca = pubca->private;
	int flags;

	dprintk("FR/DA IRQ slot:%i\n", slot);

	switch (ca->slot_info[slot].slot_state) {
	case DVB_CA_SLOTSTATE_LINKINIT:
		/* a DA during link init proves the CAM can raise DA IRQs */
		flags = ca->pub->read_cam_control(pubca, slot, CTRLIF_STATUS);
		if (flags & STATUSREG_DA) {
			dprintk("CAM supports DA IRQ\n");
			ca->slot_info[slot].da_irq_supported = 1;
		}
		break;

	case DVB_CA_SLOTSTATE_RUNNING:
		if (ca->open)
			dvb_ca_en50221_thread_wakeup(ca);
		break;
	}
}


/* ******************************************************************************** */
/* EN50221 thread functions */


/**
 * Wake up the DVB CA thread
 *
 * @ca: CA instance.
 */
static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca)
{
	dprintk("%s\n", __func__);

	ca->wakeup = 1;
	/* barrier so the thread sees wakeup=1 before it is woken */
	mb();
	wake_up_process(ca->thread);
}


/**
 * Update the delay used by the thread.
 *
 * Picks the smallest polling delay required by any slot's current state.
 *
 * @ca: CA instance.
 */
static void dvb_ca_en50221_thread_update_delay(struct dvb_ca_private *ca)
{
	int delay;
	int curdelay = 100000000;
	int slot;

	/* Beware of too high polling frequency, because one polling
	 * call might take several hundred milliseconds until timeout! */
	for (slot = 0; slot < ca->slot_count; slot++) {
		switch (ca->slot_info[slot].slot_state) {
		default:
		case DVB_CA_SLOTSTATE_NONE:
			delay = HZ * 60;  /* 60s */
			if (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE))
				delay = HZ * 5;  /* 5s */
			break;
		case DVB_CA_SLOTSTATE_INVALID:
			delay = HZ * 60;  /* 60s */
			if (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE))
				delay = HZ / 10;  /* 100ms */
			break;

		case DVB_CA_SLOTSTATE_UNINITIALISED:
		case DVB_CA_SLOTSTATE_WAITREADY:
		case DVB_CA_SLOTSTATE_VALIDATE:
		case DVB_CA_SLOTSTATE_WAITFR:
		case DVB_CA_SLOTSTATE_LINKINIT:
			delay = HZ / 10;  /* 100ms */
			break;

		case DVB_CA_SLOTSTATE_RUNNING:
			delay = HZ * 60;  /* 60s */
			if (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE))
				delay = HZ / 10;  /* 100ms */
			if (ca->open) {
				/* fast polling is only avoidable when DA IRQs work */
				if ((!ca->slot_info[slot].da_irq_supported) ||
				    (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_DA)))
					delay = HZ / 10;  /* 100ms */
			}
			break;
		}

		if (delay < curdelay)
			curdelay = delay;
	}

	ca->delay = curdelay;
}


/**
 * Kernel thread which monitors CA slots for CAM changes, and performs data transfers.
 */
static int dvb_ca_en50221_thread(void *data)
{
	struct dvb_ca_private *ca = data;
	int slot;
	int flags;
	int status;
	int pktcount;
	void *rxbuf;

	dprintk("%s\n", __func__);

	/* choose the correct initial delay */
	dvb_ca_en50221_thread_update_delay(ca);

	/* main loop */
	while (!kthread_should_stop()) {
		/* sleep for a bit, unless a wakeup was requested */
		if (!ca->wakeup) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(ca->delay);
			if (kthread_should_stop())
				return 0;
		}
		ca->wakeup = 0;

		/* go through all the slots processing them */
		for (slot = 0; slot < ca->slot_count; slot++) {

			mutex_lock(&ca->slot_info[slot].slot_lock);

			// check the cam status + deal with CAMCHANGEs
			while (dvb_ca_en50221_check_camstatus(ca, slot)) {
				/* clear down an old CI slot if necessary */
				if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_NONE)
					dvb_ca_en50221_slot_shutdown(ca, slot);

				/* if a CAM is NOW present, initialise it */
				if (ca->slot_info[slot].camchange_type == DVB_CA_EN50221_CAMCHANGE_INSERTED) {
					ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_UNINITIALISED;
				}

				/* we've handled one CAMCHANGE */
				dvb_ca_en50221_thread_update_delay(ca);
				atomic_dec(&ca->slot_info[slot].camchange_count);
			}

			// CAM state machine
			switch (ca->slot_info[slot].slot_state) {
			case DVB_CA_SLOTSTATE_NONE:
			case DVB_CA_SLOTSTATE_INVALID:
				// no action needed
				break;

			case DVB_CA_SLOTSTATE_UNINITIALISED:
				/* reset the CAM and give it INIT_TIMEOUT_SECS to come up */
				ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_WAITREADY;
				ca->pub->slot_reset(ca->pub, slot);
				ca->slot_info[slot].timeout = jiffies + (INIT_TIMEOUT_SECS * HZ);
				break;

			case DVB_CA_SLOTSTATE_WAITREADY:
				if (time_after(jiffies, ca->slot_info[slot].timeout)) {
					printk("dvb_ca adaptor %d: PC card did not respond :(\n",
					       ca->dvbdev->adapter->num);
					ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
					dvb_ca_en50221_thread_update_delay(ca);
					break;
				}
				// no other action needed; will automatically change state when ready
				break;

			case DVB_CA_SLOTSTATE_VALIDATE:
				if (dvb_ca_en50221_parse_attributes(ca, slot) != 0) {
					/* we need this extra check for annoying interfaces like the budget-av */
					if ((!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) &&
					    (ca->pub->poll_slot_status)) {
						status = ca->pub->poll_slot_status(ca->pub, slot, 0);
						if (!(status & DVB_CA_EN50221_POLL_CAM_PRESENT)) {
							ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE;
							dvb_ca_en50221_thread_update_delay(ca);
							break;
						}
					}

					printk("dvb_ca adapter %d: Invalid PC card inserted :(\n",
					       ca->dvbdev->adapter->num);
					ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
					dvb_ca_en50221_thread_update_delay(ca);
					break;
				}
				if (dvb_ca_en50221_set_configoption(ca, slot) != 0) {
					printk("dvb_ca adapter %d: Unable to initialise CAM :(\n",
					       ca->dvbdev->adapter->num);
					ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
					dvb_ca_en50221_thread_update_delay(ca);
					break;
				}
				if (ca->pub->write_cam_control(ca->pub, slot,
							       CTRLIF_COMMAND, CMDREG_RS) != 0) {
					printk("dvb_ca adapter %d: Unable to reset CAM IF\n",
					       ca->dvbdev->adapter->num);
					ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
					dvb_ca_en50221_thread_update_delay(ca);
					break;
				}
				dprintk("DVB CAM validated successfully\n");

				ca->slot_info[slot].timeout = jiffies + (INIT_TIMEOUT_SECS * HZ);
				ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_WAITFR;
				ca->wakeup = 1;
				break;

			case DVB_CA_SLOTSTATE_WAITFR:
				if (time_after(jiffies, ca->slot_info[slot].timeout)) {
					printk("dvb_ca adapter %d: DVB CAM did not respond :(\n",
					       ca->dvbdev->adapter->num);
					ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
					dvb_ca_en50221_thread_update_delay(ca);
					break;
				}

				flags = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS);
				if (flags & STATUSREG_FR) {
					ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
					ca->wakeup = 1;
				}
				break;

			case DVB_CA_SLOTSTATE_LINKINIT:
				if (dvb_ca_en50221_link_init(ca, slot) != 0) {
					/* we need this extra check for annoying interfaces like the budget-av */
					if ((!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) &&
					    (ca->pub->poll_slot_status)) {
						status = ca->pub->poll_slot_status(ca->pub, slot, 0);
						if (!(status & DVB_CA_EN50221_POLL_CAM_PRESENT)) {
							ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE;
							dvb_ca_en50221_thread_update_delay(ca);
							break;
						}
					}

					printk("dvb_ca adapter %d: DVB CAM link initialisation failed :(\n", ca->dvbdev->adapter->num);
					ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
					dvb_ca_en50221_thread_update_delay(ca);
					break;
				}

				/* lazily allocate the rx ringbuffer on first successful link init */
				if (ca->slot_info[slot].rx_buffer.data == NULL) {
					rxbuf = vmalloc(RX_BUFFER_SIZE);
					if (rxbuf == NULL) {
						printk("dvb_ca adapter %d: Unable to allocate CAM rx buffer :(\n", ca->dvbdev->adapter->num);
						ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
						dvb_ca_en50221_thread_update_delay(ca);
						break;
					}
					dvb_ringbuffer_init(&ca->slot_info[slot].rx_buffer, rxbuf, RX_BUFFER_SIZE);
				}

				ca->pub->slot_ts_enable(ca->pub, slot);
				ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_RUNNING;
				dvb_ca_en50221_thread_update_delay(ca);
				printk("dvb_ca adapter %d: DVB CAM detected and initialised successfully\n", ca->dvbdev->adapter->num);
				break;

			case DVB_CA_SLOTSTATE_RUNNING:
				if (!ca->open)
					break;

				// poll slots for data
				pktcount = 0;
				while ((status = dvb_ca_en50221_read_data(ca, slot, NULL, 0)) > 0) {
					if (!ca->open)
						break;

					/* if a CAMCHANGE occurred at some point, do not do any more processing of this slot */
					if (dvb_ca_en50221_check_camstatus(ca, slot)) {
						// we dont want to sleep on the next iteration so we can handle the cam change
						ca->wakeup = 1;
						break;
					}

					/* check if we've hit our limit this time */
					if (++pktcount >= MAX_RX_PACKETS_PER_ITERATION) {
						// dont sleep; there is likely to be more data to read
						ca->wakeup = 1;
						break;
					}
				}
				break;
			}

			mutex_unlock(&ca->slot_info[slot].slot_lock);
		}
	}

	return 0;
}


/* ******************************************************************************** */
/* EN50221 IO interface functions */


/**
 * Real ioctl implementation.
 * NOTE: CA_SEND_MSG/CA_GET_MSG ioctls have userspace buffers passed to them.
 *
 * @inode: Inode concerned.
 * @file: File concerned.
 * @cmd: IOCTL command.
* @arg: Associated argument. * * @return 0 on success, <0 on error. */ static int dvb_ca_en50221_io_do_ioctl(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; struct dvb_ca_private *ca = dvbdev->priv; int err = 0; int slot; dprintk("%s\n", __func__); if (mutex_lock_interruptible(&ca->ioctl_mutex)) return -ERESTARTSYS; switch (cmd) { case CA_RESET: for (slot = 0; slot < ca->slot_count; slot++) { mutex_lock(&ca->slot_info[slot].slot_lock); if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_NONE) { dvb_ca_en50221_slot_shutdown(ca, slot); if (ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE) dvb_ca_en50221_camchange_irq(ca->pub, slot, DVB_CA_EN50221_CAMCHANGE_INSERTED); } mutex_unlock(&ca->slot_info[slot].slot_lock); } ca->next_read_slot = 0; dvb_ca_en50221_thread_wakeup(ca); break; case CA_GET_CAP: { struct ca_caps *caps = parg; caps->slot_num = ca->slot_count; caps->slot_type = CA_CI_LINK; caps->descr_num = 0; caps->descr_type = 0; break; } case CA_GET_SLOT_INFO: { struct ca_slot_info *info = parg; if ((info->num > ca->slot_count) || (info->num < 0)) { err = -EINVAL; goto out_unlock; } info->type = CA_CI_LINK; info->flags = 0; if ((ca->slot_info[info->num].slot_state != DVB_CA_SLOTSTATE_NONE) && (ca->slot_info[info->num].slot_state != DVB_CA_SLOTSTATE_INVALID)) { info->flags = CA_CI_MODULE_PRESENT; } if (ca->slot_info[info->num].slot_state == DVB_CA_SLOTSTATE_RUNNING) { info->flags |= CA_CI_MODULE_READY; } break; } default: err = -EINVAL; break; } out_unlock: mutex_unlock(&ca->ioctl_mutex); return err; } /** * Wrapper for ioctl implementation. * * @inode: Inode concerned. * @file: File concerned. * @cmd: IOCTL command. * @arg: Associated argument. * * @return 0 on success, <0 on error. */ static long dvb_ca_en50221_io_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return dvb_usercopy(file, cmd, arg, dvb_ca_en50221_io_do_ioctl); } /** * Implementation of write() syscall. * * @file: File structure. 
* @buf: Source buffer. * @count: Size of source buffer. * @ppos: Position in file (ignored). * * @return Number of bytes read, or <0 on error. */ static ssize_t dvb_ca_en50221_io_write(struct file *file, const char __user * buf, size_t count, loff_t * ppos) { struct dvb_device *dvbdev = file->private_data; struct dvb_ca_private *ca = dvbdev->priv; u8 slot, connection_id; int status; u8 fragbuf[HOST_LINK_BUF_SIZE]; int fragpos = 0; int fraglen; unsigned long timeout; int written; dprintk("%s\n", __func__); /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */ if (count < 2) return -EINVAL; /* extract slot & connection id */ if (copy_from_user(&slot, buf, 1)) return -EFAULT; if (copy_from_user(&connection_id, buf + 1, 1)) return -EFAULT; buf += 2; count -= 2; /* check if the slot is actually running */ if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_RUNNING) return -EINVAL; /* fragment the packets & store in the buffer */ while (fragpos < count) { fraglen = ca->slot_info[slot].link_buf_size - 2; if (fraglen < 0) break; if (fraglen > HOST_LINK_BUF_SIZE - 2) fraglen = HOST_LINK_BUF_SIZE - 2; if ((count - fragpos) < fraglen) fraglen = count - fragpos; fragbuf[0] = connection_id; fragbuf[1] = ((fragpos + fraglen) < count) ? 
0x80 : 0x00; status = copy_from_user(fragbuf + 2, buf + fragpos, fraglen); if (status) { status = -EFAULT; goto exit; } timeout = jiffies + HZ / 2; written = 0; while (!time_after(jiffies, timeout)) { /* check the CAM hasn't been removed/reset in the meantime */ if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_RUNNING) { status = -EIO; goto exit; } mutex_lock(&ca->slot_info[slot].slot_lock); status = dvb_ca_en50221_write_data(ca, slot, fragbuf, fraglen + 2); mutex_unlock(&ca->slot_info[slot].slot_lock); if (status == (fraglen + 2)) { written = 1; break; } if (status != -EAGAIN) goto exit; msleep(1); } if (!written) { status = -EIO; goto exit; } fragpos += fraglen; } status = count + 2; exit: return status; } /** * Condition for waking up in dvb_ca_en50221_io_read_condition */ static int dvb_ca_en50221_io_read_condition(struct dvb_ca_private *ca, int *result, int *_slot) { int slot; int slot_count = 0; int idx; size_t fraglen; int connection_id = -1; int found = 0; u8 hdr[2]; slot = ca->next_read_slot; while ((slot_count < ca->slot_count) && (!found)) { if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_RUNNING) goto nextslot; if (ca->slot_info[slot].rx_buffer.data == NULL) { return 0; } idx = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer, -1, &fraglen); while (idx != -1) { dvb_ringbuffer_pkt_read(&ca->slot_info[slot].rx_buffer, idx, 0, hdr, 2); if (connection_id == -1) connection_id = hdr[0]; if ((hdr[0] == connection_id) && ((hdr[1] & 0x80) == 0)) { *_slot = slot; found = 1; break; } idx = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer, idx, &fraglen); } nextslot: slot = (slot + 1) % ca->slot_count; slot_count++; } ca->next_read_slot = slot; return found; } /** * Implementation of read() syscall. * * @file: File structure. * @buf: Destination buffer. * @count: Size of destination buffer. * @ppos: Position in file (ignored). * * @return Number of bytes read, or <0 on error. 
 */
static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user * buf,
				      size_t count, loff_t * ppos)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dvb_ca_private *ca = dvbdev->priv;
	int status;
	int result = 0;
	u8 hdr[2];
	int slot;
	int connection_id = -1;
	size_t idx, idx2;
	int last_fragment = 0;
	size_t fraglen;
	int pktlen;
	int dispose = 0;

	dprintk("%s\n", __func__);

	/* Outgoing packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
	if (count < 2)
		return -EINVAL;

	/* wait for some data */
	if ((status = dvb_ca_en50221_io_read_condition(ca, &result, &slot)) == 0) {

		/* if we're in nonblocking mode, exit immediately */
		if (file->f_flags & O_NONBLOCK)
			return -EWOULDBLOCK;

		/* wait for some data */
		status = wait_event_interruptible(ca->wait_queue,
						  dvb_ca_en50221_io_read_condition
						  (ca, &result, &slot));
	}
	if ((status < 0) || (result < 0)) {
		if (result)
			return result;
		return status;
	}

	/* reassemble the packet: walk the fragments in the slot's ringbuffer,
	 * copying matching-connection fragments to userspace (skipping each
	 * fragment's own 2-byte header) and disposing of them as we go */
	idx = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer, -1, &fraglen);
	pktlen = 2;
	do {
		if (idx == -1) {
			printk("dvb_ca adapter %d: BUG: read packet ended before last_fragment encountered\n", ca->dvbdev->adapter->num);
			status = -EIO;
			goto exit;
		}

		dvb_ringbuffer_pkt_read(&ca->slot_info[slot].rx_buffer, idx, 0, hdr, 2);
		if (connection_id == -1)
			connection_id = hdr[0];
		if (hdr[0] == connection_id) {
			if (pktlen < count) {
				/* truncate the fragment if the user buffer is nearly full */
				if ((pktlen + fraglen - 2) > count) {
					fraglen = count - pktlen;
				} else {
					fraglen -= 2;
				}

				if ((status = dvb_ringbuffer_pkt_read_user(&ca->slot_info[slot].rx_buffer, idx, 2, buf + pktlen, fraglen)) < 0) {
					goto exit;
				}
				pktlen += fraglen;
			}

			/* last-fragment bit clear => this completes the packet */
			if ((hdr[1] & 0x80) == 0)
				last_fragment = 1;
			dispose = 1;
		}

		/* fetch the next fragment BEFORE disposing of the current one */
		idx2 = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer, idx, &fraglen);
		if (dispose)
			dvb_ringbuffer_pkt_dispose(&ca->slot_info[slot].rx_buffer, idx);
		idx = idx2;
		dispose = 0;
	} while (!last_fragment);

	/* finally, write the 2-byte packet header at the front of the user buffer */
	hdr[0] = slot;
	hdr[1] = connection_id;
	status = copy_to_user(buf, hdr, 2);
	if (status) {
		status = -EFAULT;
		goto exit;
	}
	status = pktlen;

exit:
	return status;
}


/**
 * Implementation of file open syscall.
 *
 * @inode: Inode concerned.
 * @file: File concerned.
 *
 * @return 0 on success, <0 on failure.
 */
static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dvb_ca_private *ca = dvbdev->priv;
	int err;
	int i;

	dprintk("%s\n", __func__);

	if (!try_module_get(ca->pub->owner))
		return -EIO;

	err = dvb_generic_open(inode, file);
	if (err < 0) {
		module_put(ca->pub->owner);
		return err;
	}

	/* discard any packets that accumulated while the device was closed */
	for (i = 0; i < ca->slot_count; i++) {

		if (ca->slot_info[i].slot_state == DVB_CA_SLOTSTATE_RUNNING) {
			if (ca->slot_info[i].rx_buffer.data != NULL) {
				/* it is safe to call this here without locks because
				 * ca->open == 0. Data is not read in this case */
				dvb_ringbuffer_flush(&ca->slot_info[i].rx_buffer);
			}
		}
	}

	ca->open = 1;
	dvb_ca_en50221_thread_update_delay(ca);
	dvb_ca_en50221_thread_wakeup(ca);

	return 0;
}


/**
 * Implementation of file close syscall.
 *
 * @inode: Inode concerned.
 * @file: File concerned.
 *
 * @return 0 on success, <0 on failure.
 */
static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dvb_ca_private *ca = dvbdev->priv;
	int err;

	dprintk("%s\n", __func__);

	/* mark the CA device as closed */
	ca->open = 0;
	dvb_ca_en50221_thread_update_delay(ca);

	err = dvb_generic_release(inode, file);

	module_put(ca->pub->owner);

	return err;
}


/**
 * Implementation of poll() syscall.
 *
 * @file: File concerned.
 * @wait: poll wait table.
 *
 * @return Standard poll mask.
*/ static unsigned int dvb_ca_en50221_io_poll(struct file *file, poll_table * wait) { struct dvb_device *dvbdev = file->private_data; struct dvb_ca_private *ca = dvbdev->priv; unsigned int mask = 0; int slot; int result = 0; dprintk("%s\n", __func__); if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1) { mask |= POLLIN; } /* if there is something, return now */ if (mask) return mask; /* wait for something to happen */ poll_wait(file, &ca->wait_queue, wait); if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1) { mask |= POLLIN; } return mask; } EXPORT_SYMBOL(dvb_ca_en50221_init); static const struct file_operations dvb_ca_fops = { .owner = THIS_MODULE, .read = dvb_ca_en50221_io_read, .write = dvb_ca_en50221_io_write, .unlocked_ioctl = dvb_ca_en50221_io_ioctl, .open = dvb_ca_en50221_io_open, .release = dvb_ca_en50221_io_release, .poll = dvb_ca_en50221_io_poll, .llseek = noop_llseek, }; static const struct dvb_device dvbdev_ca = { .priv = NULL, .users = 1, .readers = 1, .writers = 1, #if defined(CONFIG_MEDIA_CONTROLLER_DVB) .name = "dvb-ca-en50221", #endif .fops = &dvb_ca_fops, }; /* ******************************************************************************** */ /* Initialisation/shutdown functions */ /** * Initialise a new DVB CA EN50221 interface device. * * @dvb_adapter: DVB adapter to attach the new CA device to. * @ca: The dvb_ca instance. * @flags: Flags describing the CA device (DVB_CA_FLAG_*). * @slot_count: Number of slots supported. 
* * @return 0 on success, nonzero on failure */ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter, struct dvb_ca_en50221 *pubca, int flags, int slot_count) { int ret; struct dvb_ca_private *ca = NULL; int i; dprintk("%s\n", __func__); if (slot_count < 1) return -EINVAL; /* initialise the system data */ if ((ca = kzalloc(sizeof(struct dvb_ca_private), GFP_KERNEL)) == NULL) { ret = -ENOMEM; goto exit; } ca->pub = pubca; ca->flags = flags; ca->slot_count = slot_count; if ((ca->slot_info = kcalloc(slot_count, sizeof(struct dvb_ca_slot), GFP_KERNEL)) == NULL) { ret = -ENOMEM; goto free_ca; } init_waitqueue_head(&ca->wait_queue); ca->open = 0; ca->wakeup = 0; ca->next_read_slot = 0; pubca->private = ca; /* register the DVB device */ ret = dvb_register_device(dvb_adapter, &ca->dvbdev, &dvbdev_ca, ca, DVB_DEVICE_CA); if (ret) goto free_slot_info; /* now initialise each slot */ for (i = 0; i < slot_count; i++) { memset(&ca->slot_info[i], 0, sizeof(struct dvb_ca_slot)); ca->slot_info[i].slot_state = DVB_CA_SLOTSTATE_NONE; atomic_set(&ca->slot_info[i].camchange_count, 0); ca->slot_info[i].camchange_type = DVB_CA_EN50221_CAMCHANGE_REMOVED; mutex_init(&ca->slot_info[i].slot_lock); } mutex_init(&ca->ioctl_mutex); if (signal_pending(current)) { ret = -EINTR; goto unregister_device; } mb(); /* create a kthread for monitoring this CA device */ ca->thread = kthread_run(dvb_ca_en50221_thread, ca, "kdvb-ca-%i:%i", ca->dvbdev->adapter->num, ca->dvbdev->id); if (IS_ERR(ca->thread)) { ret = PTR_ERR(ca->thread); printk("dvb_ca_init: failed to start kernel_thread (%d)\n", ret); goto unregister_device; } return 0; unregister_device: dvb_unregister_device(ca->dvbdev); free_slot_info: kfree(ca->slot_info); free_ca: kfree(ca); exit: pubca->private = NULL; return ret; } EXPORT_SYMBOL(dvb_ca_en50221_release); /** * Release a DVB CA EN50221 interface device. * * @ca_dev: The dvb_device_t instance for the CA device. * @ca: The associated dvb_ca instance. 
*/ void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca) { struct dvb_ca_private *ca = pubca->private; int i; dprintk("%s\n", __func__); /* shutdown the thread if there was one */ kthread_stop(ca->thread); for (i = 0; i < ca->slot_count; i++) { dvb_ca_en50221_slot_shutdown(ca, i); vfree(ca->slot_info[i].rx_buffer.data); } kfree(ca->slot_info); dvb_unregister_device(ca->dvbdev); kfree(ca); pubca->private = NULL; }
gpl-2.0
nickholtus/xperia-2012-kernel-2.6.35
fs/btrfs/extent-tree.c
846
218217
/* * Copyright (C) 2007 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/sched.h> #include <linux/pagemap.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/sort.h> #include <linux/rcupdate.h> #include <linux/kthread.h> #include <linux/slab.h> #include "compat.h" #include "hash.h" #include "ctree.h" #include "disk-io.h" #include "print-tree.h" #include "transaction.h" #include "volumes.h" #include "locking.h" #include "free-space-cache.h" static int update_block_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 num_bytes, int alloc); static int update_reserved_bytes(struct btrfs_block_group_cache *cache, u64 num_bytes, int reserve, int sinfo); static int __btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, u64 owner_objectid, u64 owner_offset, int refs_to_drop, struct btrfs_delayed_extent_op *extra_op); static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op, struct extent_buffer *leaf, struct btrfs_extent_item *ei); static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 parent, u64 root_objectid, u64 flags, u64 owner, u64 offset, struct btrfs_key *ins, int ref_mod); static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, 
struct btrfs_root *root, u64 parent, u64 root_objectid, u64 flags, struct btrfs_disk_key *key, int level, struct btrfs_key *ins); static int do_chunk_alloc(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root, u64 alloc_bytes, u64 flags, int force); static int find_next_key(struct btrfs_path *path, int level, struct btrfs_key *key); static void dump_space_info(struct btrfs_space_info *info, u64 bytes, int dump_block_groups); static noinline int block_group_cache_done(struct btrfs_block_group_cache *cache) { smp_mb(); return cache->cached == BTRFS_CACHE_FINISHED; } static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) { return (cache->flags & bits) == bits; } void btrfs_get_block_group(struct btrfs_block_group_cache *cache) { atomic_inc(&cache->count); } void btrfs_put_block_group(struct btrfs_block_group_cache *cache) { if (atomic_dec_and_test(&cache->count)) { WARN_ON(cache->pinned > 0); WARN_ON(cache->reserved > 0); WARN_ON(cache->reserved_pinned > 0); kfree(cache); } } /* * this adds the block group to the fs_info rb tree for the block group * cache */ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, struct btrfs_block_group_cache *block_group) { struct rb_node **p; struct rb_node *parent = NULL; struct btrfs_block_group_cache *cache; spin_lock(&info->block_group_cache_lock); p = &info->block_group_cache_tree.rb_node; while (*p) { parent = *p; cache = rb_entry(parent, struct btrfs_block_group_cache, cache_node); if (block_group->key.objectid < cache->key.objectid) { p = &(*p)->rb_left; } else if (block_group->key.objectid > cache->key.objectid) { p = &(*p)->rb_right; } else { spin_unlock(&info->block_group_cache_lock); return -EEXIST; } } rb_link_node(&block_group->cache_node, parent, p); rb_insert_color(&block_group->cache_node, &info->block_group_cache_tree); spin_unlock(&info->block_group_cache_lock); return 0; } /* * This will return the block group at or after bytenr if contains is 0, else * it will 
return the block group that contains the bytenr */ static struct btrfs_block_group_cache * block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr, int contains) { struct btrfs_block_group_cache *cache, *ret = NULL; struct rb_node *n; u64 end, start; spin_lock(&info->block_group_cache_lock); n = info->block_group_cache_tree.rb_node; while (n) { cache = rb_entry(n, struct btrfs_block_group_cache, cache_node); end = cache->key.objectid + cache->key.offset - 1; start = cache->key.objectid; if (bytenr < start) { if (!contains && (!ret || start < ret->key.objectid)) ret = cache; n = n->rb_left; } else if (bytenr > start) { if (contains && bytenr <= end) { ret = cache; break; } n = n->rb_right; } else { ret = cache; break; } } if (ret) btrfs_get_block_group(ret); spin_unlock(&info->block_group_cache_lock); return ret; } static int add_excluded_extent(struct btrfs_root *root, u64 start, u64 num_bytes) { u64 end = start + num_bytes - 1; set_extent_bits(&root->fs_info->freed_extents[0], start, end, EXTENT_UPTODATE, GFP_NOFS); set_extent_bits(&root->fs_info->freed_extents[1], start, end, EXTENT_UPTODATE, GFP_NOFS); return 0; } static void free_excluded_extents(struct btrfs_root *root, struct btrfs_block_group_cache *cache) { u64 start, end; start = cache->key.objectid; end = start + cache->key.offset - 1; clear_extent_bits(&root->fs_info->freed_extents[0], start, end, EXTENT_UPTODATE, GFP_NOFS); clear_extent_bits(&root->fs_info->freed_extents[1], start, end, EXTENT_UPTODATE, GFP_NOFS); } static int exclude_super_stripes(struct btrfs_root *root, struct btrfs_block_group_cache *cache) { u64 bytenr; u64 *logical; int stripe_len; int i, nr, ret; if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) { stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid; cache->bytes_super += stripe_len; ret = add_excluded_extent(root, cache->key.objectid, stripe_len); BUG_ON(ret); } for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { bytenr = btrfs_sb_offset(i); ret = 
btrfs_rmap_block(&root->fs_info->mapping_tree, cache->key.objectid, bytenr, 0, &logical, &nr, &stripe_len); BUG_ON(ret); while (nr--) { cache->bytes_super += stripe_len; ret = add_excluded_extent(root, logical[nr], stripe_len); BUG_ON(ret); } kfree(logical); } return 0; } static struct btrfs_caching_control * get_caching_control(struct btrfs_block_group_cache *cache) { struct btrfs_caching_control *ctl; spin_lock(&cache->lock); if (cache->cached != BTRFS_CACHE_STARTED) { spin_unlock(&cache->lock); return NULL; } ctl = cache->caching_ctl; atomic_inc(&ctl->count); spin_unlock(&cache->lock); return ctl; } static void put_caching_control(struct btrfs_caching_control *ctl) { if (atomic_dec_and_test(&ctl->count)) kfree(ctl); } /* * this is only called by cache_block_group, since we could have freed extents * we need to check the pinned_extents for any extents that can't be used yet * since their free space will be released as soon as the transaction commits. */ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group, struct btrfs_fs_info *info, u64 start, u64 end) { u64 extent_start, extent_end, size, total_added = 0; int ret; while (start < end) { ret = find_first_extent_bit(info->pinned_extents, start, &extent_start, &extent_end, EXTENT_DIRTY | EXTENT_UPTODATE); if (ret) break; if (extent_start <= start) { start = extent_end + 1; } else if (extent_start > start && extent_start < end) { size = extent_start - start; total_added += size; ret = btrfs_add_free_space(block_group, start, size); BUG_ON(ret); start = extent_end + 1; } else { break; } } if (start < end) { size = end - start; total_added += size; ret = btrfs_add_free_space(block_group, start, size); BUG_ON(ret); } return total_added; } static int caching_kthread(void *data) { struct btrfs_block_group_cache *block_group = data; struct btrfs_fs_info *fs_info = block_group->fs_info; struct btrfs_caching_control *caching_ctl = block_group->caching_ctl; struct btrfs_root *extent_root = 
fs_info->extent_root; struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_key key; u64 total_found = 0; u64 last = 0; u32 nritems; int ret = 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; exclude_super_stripes(extent_root, block_group); spin_lock(&block_group->space_info->lock); block_group->space_info->bytes_readonly += block_group->bytes_super; spin_unlock(&block_group->space_info->lock); last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); /* * We don't want to deadlock with somebody trying to allocate a new * extent for the extent root while also trying to search the extent * root to add free space. So we skip locking and search the commit * root, since its read-only */ path->skip_locking = 1; path->search_commit_root = 1; path->reada = 2; key.objectid = last; key.offset = 0; key.type = BTRFS_EXTENT_ITEM_KEY; again: mutex_lock(&caching_ctl->mutex); /* need to make sure the commit_root doesn't disappear */ down_read(&fs_info->extent_commit_sem); ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); if (ret < 0) goto err; leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); while (1) { smp_mb(); if (fs_info->closing > 1) { last = (u64)-1; break; } if (path->slots[0] < nritems) { btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); } else { ret = find_next_key(path, 0, &key); if (ret) break; caching_ctl->progress = last; btrfs_release_path(extent_root, path); up_read(&fs_info->extent_commit_sem); mutex_unlock(&caching_ctl->mutex); if (btrfs_transaction_in_commit(fs_info)) schedule_timeout(1); else cond_resched(); goto again; } if (key.objectid < block_group->key.objectid) { path->slots[0]++; continue; } if (key.objectid >= block_group->key.objectid + block_group->key.offset) break; if (key.type == BTRFS_EXTENT_ITEM_KEY) { total_found += add_new_free_space(block_group, fs_info, last, key.objectid); last = key.objectid + key.offset; if (total_found > (1024 * 1024 * 2)) { total_found = 0; 
wake_up(&caching_ctl->wait); } } path->slots[0]++; } ret = 0; total_found += add_new_free_space(block_group, fs_info, last, block_group->key.objectid + block_group->key.offset); caching_ctl->progress = (u64)-1; spin_lock(&block_group->lock); block_group->caching_ctl = NULL; block_group->cached = BTRFS_CACHE_FINISHED; spin_unlock(&block_group->lock); err: btrfs_free_path(path); up_read(&fs_info->extent_commit_sem); free_excluded_extents(extent_root, block_group); mutex_unlock(&caching_ctl->mutex); wake_up(&caching_ctl->wait); put_caching_control(caching_ctl); atomic_dec(&block_group->space_info->caching_threads); btrfs_put_block_group(block_group); return 0; } static int cache_block_group(struct btrfs_block_group_cache *cache) { struct btrfs_fs_info *fs_info = cache->fs_info; struct btrfs_caching_control *caching_ctl; struct task_struct *tsk; int ret = 0; smp_mb(); if (cache->cached != BTRFS_CACHE_NO) return 0; caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL); BUG_ON(!caching_ctl); INIT_LIST_HEAD(&caching_ctl->list); mutex_init(&caching_ctl->mutex); init_waitqueue_head(&caching_ctl->wait); caching_ctl->block_group = cache; caching_ctl->progress = cache->key.objectid; /* one for caching kthread, one for caching block group list */ atomic_set(&caching_ctl->count, 2); spin_lock(&cache->lock); if (cache->cached != BTRFS_CACHE_NO) { spin_unlock(&cache->lock); kfree(caching_ctl); return 0; } cache->caching_ctl = caching_ctl; cache->cached = BTRFS_CACHE_STARTED; spin_unlock(&cache->lock); down_write(&fs_info->extent_commit_sem); list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); up_write(&fs_info->extent_commit_sem); atomic_inc(&cache->space_info->caching_threads); btrfs_get_block_group(cache); tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n", cache->key.objectid); if (IS_ERR(tsk)) { ret = PTR_ERR(tsk); printk(KERN_ERR "error running thread %d\n", ret); BUG(); } return ret; } /* * return the block group that starts at or after bytenr */ 
static struct btrfs_block_group_cache * btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr) { struct btrfs_block_group_cache *cache; cache = block_group_cache_tree_search(info, bytenr, 0); return cache; } /* * return the block group that contains the given bytenr */ struct btrfs_block_group_cache *btrfs_lookup_block_group( struct btrfs_fs_info *info, u64 bytenr) { struct btrfs_block_group_cache *cache; cache = block_group_cache_tree_search(info, bytenr, 1); return cache; } static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info, u64 flags) { struct list_head *head = &info->space_info; struct btrfs_space_info *found; flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA; rcu_read_lock(); list_for_each_entry_rcu(found, head, list) { if (found->flags == flags) { rcu_read_unlock(); return found; } } rcu_read_unlock(); return NULL; } /* * after adding space to the filesystem, we need to clear the full flags * on all the space infos. 
*/ void btrfs_clear_space_info_full(struct btrfs_fs_info *info) { struct list_head *head = &info->space_info; struct btrfs_space_info *found; rcu_read_lock(); list_for_each_entry_rcu(found, head, list) found->full = 0; rcu_read_unlock(); } static u64 div_factor(u64 num, int factor) { if (factor == 10) return num; num *= factor; do_div(num, 10); return num; } u64 btrfs_find_block_group(struct btrfs_root *root, u64 search_start, u64 search_hint, int owner) { struct btrfs_block_group_cache *cache; u64 used; u64 last = max(search_hint, search_start); u64 group_start = 0; int full_search = 0; int factor = 9; int wrapped = 0; again: while (1) { cache = btrfs_lookup_first_block_group(root->fs_info, last); if (!cache) break; spin_lock(&cache->lock); last = cache->key.objectid + cache->key.offset; used = btrfs_block_group_used(&cache->item); if ((full_search || !cache->ro) && block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) { if (used + cache->pinned + cache->reserved < div_factor(cache->key.offset, factor)) { group_start = cache->key.objectid; spin_unlock(&cache->lock); btrfs_put_block_group(cache); goto found; } } spin_unlock(&cache->lock); btrfs_put_block_group(cache); cond_resched(); } if (!wrapped) { last = search_start; wrapped = 1; goto again; } if (!full_search && factor < 10) { last = search_start; full_search = 1; factor = 10; goto again; } found: return group_start; } /* simple helper to search for an existing extent at a given offset */ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len) { int ret; struct btrfs_key key; struct btrfs_path *path; path = btrfs_alloc_path(); BUG_ON(!path); key.objectid = start; key.offset = len; btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path, 0, 0); btrfs_free_path(path); return ret; } /* * helper function to lookup reference count and flags of extent. 
* * the head node for delayed ref is used to store the sum of all the * reference count modifications queued up in the rbtree. the head * node may also store the extent flags to set. This way you can check * to see what the reference count and extent flags would be if all of * the delayed refs are not processed. */ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 num_bytes, u64 *refs, u64 *flags) { struct btrfs_delayed_ref_head *head; struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_path *path; struct btrfs_extent_item *ei; struct extent_buffer *leaf; struct btrfs_key key; u32 item_size; u64 num_refs; u64 extent_flags; int ret; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = bytenr; key.type = BTRFS_EXTENT_ITEM_KEY; key.offset = num_bytes; if (!trans) { path->skip_locking = 1; path->search_commit_root = 1; } again: ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path, 0, 0); if (ret < 0) goto out_free; if (ret == 0) { leaf = path->nodes[0]; item_size = btrfs_item_size_nr(leaf, path->slots[0]); if (item_size >= sizeof(*ei)) { ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); num_refs = btrfs_extent_refs(leaf, ei); extent_flags = btrfs_extent_flags(leaf, ei); } else { #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 struct btrfs_extent_item_v0 *ei0; BUG_ON(item_size != sizeof(*ei0)); ei0 = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item_v0); num_refs = btrfs_extent_refs_v0(leaf, ei0); /* FIXME: this isn't correct for data */ extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF; #else BUG(); #endif } BUG_ON(num_refs == 0); } else { num_refs = 0; extent_flags = 0; ret = 0; } if (!trans) goto out; delayed_refs = &trans->transaction->delayed_refs; spin_lock(&delayed_refs->lock); head = btrfs_find_delayed_ref_head(trans, bytenr); if (head) { if (!mutex_trylock(&head->mutex)) { atomic_inc(&head->node.refs); spin_unlock(&delayed_refs->lock); 
btrfs_release_path(root->fs_info->extent_root, path); mutex_lock(&head->mutex); mutex_unlock(&head->mutex); btrfs_put_delayed_ref(&head->node); goto again; } if (head->extent_op && head->extent_op->update_flags) extent_flags |= head->extent_op->flags_to_set; else BUG_ON(num_refs == 0); num_refs += head->node.ref_mod; mutex_unlock(&head->mutex); } spin_unlock(&delayed_refs->lock); out: WARN_ON(num_refs == 0); if (refs) *refs = num_refs; if (flags) *flags = extent_flags; out_free: btrfs_free_path(path); return ret; } /* * Back reference rules. Back refs have three main goals: * * 1) differentiate between all holders of references to an extent so that * when a reference is dropped we can make sure it was a valid reference * before freeing the extent. * * 2) Provide enough information to quickly find the holders of an extent * if we notice a given block is corrupted or bad. * * 3) Make it easy to migrate blocks for FS shrinking or storage pool * maintenance. This is actually the same as #2, but with a slightly * different use case. * * There are two kinds of back refs. The implicit back refs is optimized * for pointers in non-shared tree blocks. For a given pointer in a block, * back refs of this kind provide information about the block's owner tree * and the pointer's key. These information allow us to find the block by * b-tree searching. The full back refs is for pointers in tree blocks not * referenced by their owner trees. The location of tree block is recorded * in the back refs. Actually the full back refs is generic, and can be * used in all cases the implicit back refs is used. The major shortcoming * of the full back refs is its overhead. Every time a tree block gets * COWed, we have to update back refs entry for all pointers in it. * * For a newly allocated tree block, we use implicit back refs for * pointers in it. This means most tree related operations only involve * implicit back refs. 
For a tree block created in old transaction, the * only way to drop a reference to it is COW it. So we can detect the * event that tree block loses its owner tree's reference and do the * back refs conversion. * * When a tree block is COW'd through a tree, there are four cases: * * The reference count of the block is one and the tree is the block's * owner tree. Nothing to do in this case. * * The reference count of the block is one and the tree is not the * block's owner tree. In this case, full back refs is used for pointers * in the block. Remove these full back refs, add implicit back refs for * every pointers in the new block. * * The reference count of the block is greater than one and the tree is * the block's owner tree. In this case, implicit back refs is used for * pointers in the block. Add full back refs for every pointers in the * block, increase lower level extents' reference counts. The original * implicit back refs are entailed to the new block. * * The reference count of the block is greater than one and the tree is * not the block's owner tree. Add implicit back refs for every pointer in * the new block, increase lower level extents' reference count. * * Back Reference Key composing: * * The key objectid corresponds to the first byte in the extent, * The key type is used to differentiate between types of back refs. * There are different meanings of the key offset for different types * of back refs. * * File extents can be referenced by: * * - multiple snapshots, subvolumes, or different generations in one subvol * - different files inside a single subvolume * - different offsets inside a file (bookend extents in file.c) * * The extent ref structure for the implicit back refs has fields for: * * - Objectid of the subvolume root * - objectid of the file holding the reference * - original offset in the file * - how many bookend extents * * The key offset for the implicit back refs is hash of the first * three fields. 
* * The extent ref structure for the full back refs has field for: * * - number of pointers in the tree leaf * * The key offset for the implicit back refs is the first byte of * the tree leaf * * When a file extent is allocated, The implicit back refs is used. * the fields are filled in: * * (root_key.objectid, inode objectid, offset in file, 1) * * When a file extent is removed file truncation, we find the * corresponding implicit back refs and check the following fields: * * (btrfs_header_owner(leaf), inode objectid, offset in file) * * Btree extents can be referenced by: * * - Different subvolumes * * Both the implicit back refs and the full back refs for tree blocks * only consist of key. The key offset for the implicit back refs is * objectid of block's owner tree. The key offset for the full back refs * is the first byte of parent block. * * When implicit back refs is used, information about the lowest key and * level of the tree block are required. These information are stored in * tree block info structure. 
*/ #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 static int convert_extent_item_v0(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 owner, u32 extra_size) { struct btrfs_extent_item *item; struct btrfs_extent_item_v0 *ei0; struct btrfs_extent_ref_v0 *ref0; struct btrfs_tree_block_info *bi; struct extent_buffer *leaf; struct btrfs_key key; struct btrfs_key found_key; u32 new_size = sizeof(*item); u64 refs; int ret; leaf = path->nodes[0]; BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0)); btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); ei0 = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item_v0); refs = btrfs_extent_refs_v0(leaf, ei0); if (owner == (u64)-1) { while (1) { if (path->slots[0] >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); if (ret < 0) return ret; BUG_ON(ret > 0); leaf = path->nodes[0]; } btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); BUG_ON(key.objectid != found_key.objectid); if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) { path->slots[0]++; continue; } ref0 = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref_v0); owner = btrfs_ref_objectid_v0(leaf, ref0); break; } } btrfs_release_path(root, path); if (owner < BTRFS_FIRST_FREE_OBJECTID) new_size += sizeof(*bi); new_size -= sizeof(*ei0); ret = btrfs_search_slot(trans, root, &key, path, new_size + extra_size, 1); if (ret < 0) return ret; BUG_ON(ret); ret = btrfs_extend_item(trans, root, path, new_size); BUG_ON(ret); leaf = path->nodes[0]; item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); btrfs_set_extent_refs(leaf, item, refs); /* FIXME: get real generation */ btrfs_set_extent_generation(leaf, item, 0); if (owner < BTRFS_FIRST_FREE_OBJECTID) { btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_TREE_BLOCK | BTRFS_BLOCK_FLAG_FULL_BACKREF); bi = (struct btrfs_tree_block_info *)(item + 1); /* FIXME: get first key of the block */ memset_extent_buffer(leaf, 0, (unsigned long)bi, 
sizeof(*bi)); btrfs_set_tree_block_level(leaf, bi, (int)owner); } else { btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA); } btrfs_mark_buffer_dirty(leaf); return 0; } #endif static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset) { u32 high_crc = ~(u32)0; u32 low_crc = ~(u32)0; __le64 lenum; lenum = cpu_to_le64(root_objectid); high_crc = crc32c(high_crc, &lenum, sizeof(lenum)); lenum = cpu_to_le64(owner); low_crc = crc32c(low_crc, &lenum, sizeof(lenum)); lenum = cpu_to_le64(offset); low_crc = crc32c(low_crc, &lenum, sizeof(lenum)); return ((u64)high_crc << 31) ^ (u64)low_crc; } static u64 hash_extent_data_ref_item(struct extent_buffer *leaf, struct btrfs_extent_data_ref *ref) { return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref), btrfs_extent_data_ref_objectid(leaf, ref), btrfs_extent_data_ref_offset(leaf, ref)); } static int match_extent_data_ref(struct extent_buffer *leaf, struct btrfs_extent_data_ref *ref, u64 root_objectid, u64 owner, u64 offset) { if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid || btrfs_extent_data_ref_objectid(leaf, ref) != owner || btrfs_extent_data_ref_offset(leaf, ref) != offset) return 0; return 1; } static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 bytenr, u64 parent, u64 root_objectid, u64 owner, u64 offset) { struct btrfs_key key; struct btrfs_extent_data_ref *ref; struct extent_buffer *leaf; u32 nritems; int ret; int recow; int err = -ENOENT; key.objectid = bytenr; if (parent) { key.type = BTRFS_SHARED_DATA_REF_KEY; key.offset = parent; } else { key.type = BTRFS_EXTENT_DATA_REF_KEY; key.offset = hash_extent_data_ref(root_objectid, owner, offset); } again: recow = 0; ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret < 0) { err = ret; goto fail; } if (parent) { if (!ret) return 0; #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 key.type = BTRFS_EXTENT_REF_V0_KEY; btrfs_release_path(root, path); ret = 
btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret < 0) { err = ret; goto fail; } if (!ret) return 0; #endif goto fail; } leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); while (1) { if (path->slots[0] >= nritems) { ret = btrfs_next_leaf(root, path); if (ret < 0) err = ret; if (ret) goto fail; leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); recow = 1; } btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.objectid != bytenr || key.type != BTRFS_EXTENT_DATA_REF_KEY) goto fail; ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_data_ref); if (match_extent_data_ref(leaf, ref, root_objectid, owner, offset)) { if (recow) { btrfs_release_path(root, path); goto again; } err = 0; break; } path->slots[0]++; } fail: return err; } static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 bytenr, u64 parent, u64 root_objectid, u64 owner, u64 offset, int refs_to_add) { struct btrfs_key key; struct extent_buffer *leaf; u32 size; u32 num_refs; int ret; key.objectid = bytenr; if (parent) { key.type = BTRFS_SHARED_DATA_REF_KEY; key.offset = parent; size = sizeof(struct btrfs_shared_data_ref); } else { key.type = BTRFS_EXTENT_DATA_REF_KEY; key.offset = hash_extent_data_ref(root_objectid, owner, offset); size = sizeof(struct btrfs_extent_data_ref); } ret = btrfs_insert_empty_item(trans, root, path, &key, size); if (ret && ret != -EEXIST) goto fail; leaf = path->nodes[0]; if (parent) { struct btrfs_shared_data_ref *ref; ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_shared_data_ref); if (ret == 0) { btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add); } else { num_refs = btrfs_shared_data_ref_count(leaf, ref); num_refs += refs_to_add; btrfs_set_shared_data_ref_count(leaf, ref, num_refs); } } else { struct btrfs_extent_data_ref *ref; while (ret == -EEXIST) { ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_data_ref); if 
(match_extent_data_ref(leaf, ref, root_objectid, owner, offset))
				break;
			btrfs_release_path(root, path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path,
						      &key, size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			/* freshly inserted item: fill in the whole ref */
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			/* item already existed (-EEXIST): bump its count */
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(root, path);
	return ret;
}

/*
 * Drop refs_to_drop references from the data backref item that @path
 * currently points at (regular, shared, or V0-compat flavour), deleting
 * the item outright when its count reaches zero.
 */
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	/* read the current ref count for whichever backref flavour this is */
	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		/* last reference gone: remove the whole item */
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

/*
 * Return the reference count stored in the data backref that @path (or,
 * when non-NULL, the inline ref @iref) points at.
 */
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		/* inline backref embedded in the extent item */
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

/*
 * Look up the standalone tree block backref item for @bytenr, keyed by
 * @parent (shared ref) when set, otherwise by @root_objectid (tree ref).
 * Returns 0 when found, -ENOENT otherwise.
 */
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	/* retry with the V0 compat key for old-format trees */
	if (ret == -ENOENT && parent) {
		btrfs_release_path(root, path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

/*
 * Insert an (empty) tree block backref item for @bytenr; shared key when
 * @parent is set, tree-ref key otherwise.
 */
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(root, path);
	return ret;
}

/*
 * Map (parent, owner) to a backref key type: owners below
 * BTRFS_FIRST_FREE_OBJECTID are tree blocks, the rest are data extents;
 * a non-zero parent selects the shared variant.
 */
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

/*
 * Find the key immediately after the current position of @path by
 * walking up the held node levels.  Returns 0 and fills @key, or 1 when
 * the end of the tree is reached.
 */
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
*
 * NOTE: inline back refs are ordered in the same way that back ref
 * items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		/* reserve room in case we have to grow the extent item */
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	/* convert old-format extent items in place before touching them */
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		/* tree blocks carry a btrfs_tree_block_info before the refs */
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	/* scan the inline refs, which are sorted by type then by key order */
	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_extent_inline_ref *iref,
				u64 parent, u64 root_objectid,
				u64 owner, u64 offset, int refs_to_add,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;
	int ret;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);
	BUG_ON(ret);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		/* shift the item tail to open a gap for the new inline ref */
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

/*
 * Find any backref (inline or standalone item) for an extent.  On
 * success *ref_ret points at the inline ref, or is NULL when the
 * backref lives in its own item.
 */
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(root, path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret =
lookup_extent_data_ref(trans, root, path, bytenr, parent,
				     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_mod,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	int ret;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		/* tree block refs carry an implicit count of 1 */
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		/* count hit zero: splice the inline ref out of the item */
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
		BUG_ON(ret);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

/*
 * Add refs_to_add references for an extent, preferring the inline form:
 * bump an existing inline ref, or create one when there is room.
 */
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		/* existing inline ref: just bump its count */
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		/* no inline ref yet: create one at the returned slot */
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add,
						  extent_op);
	}
	return ret;
}

/*
 * Insert a standalone backref item: a tree block ref for metadata
 * owners, an extent data ref otherwise.
 */
static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

/*
 * Drop a backref: inline when @iref is set, otherwise the standalone
 * data ref item, or the whole item for a tree block ref.
 */
static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		ret = update_inline_extent_backref(trans, root, path, iref,
						   -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}

/* issue a discard for [start, start+len) on @bdev; >> 9 converts bytes
 * to 512-byte sectors */
static void btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
			     BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
}

static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes)
{
	int ret;
	u64 map_length
= num_bytes;
	struct btrfs_multi_bio *multi = NULL;

	/* discard is opt-in via the DISCARD mount option */
	if (!btrfs_test_opt(root, DISCARD))
		return 0;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
			      bytenr, &map_length, &multi, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = multi->stripes;
		int i;

		if (map_length > num_bytes)
			map_length = num_bytes;

		for (i = 0; i < multi->num_stripes; i++, stripe++) {
			btrfs_issue_discard(stripe->dev->bdev,
					    stripe->physical,
					    map_length);
		}
		kfree(multi);
	}

	return ret;
}

/*
 * Queue a delayed ref-count increment for @bytenr; the actual extent
 * tree update happens later via btrfs_run_delayed_refs().
 */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;
	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL);
	}
	return ret;
}

/*
 * Apply a queued ref-count increment to the extent tree: try the inline
 * form first, fall back to extent-item count + standalone backref item
 * when the inline area is full (-EAGAIN).
 */
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret == 0)
		goto out;

	if (ret != -EAGAIN) {
		err = ret;
		goto out;
	}

	/* -EAGAIN: no inline room; bump the extent item count here and
	 * add a standalone backref item below */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root->fs_info->extent_root, path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	BUG_ON(ret);
out:
	btrfs_free_path(path);
	return err;
}

/* apply one delayed ref operation for a data extent */
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		/* first add of a freshly reserved extent: create the item */
		if (extent_op) {
			BUG_ON(extent_op->update_key);
			flags |= extent_op->flags_to_set;
		}
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}

/* apply a delayed extent op (flag OR and/or key update) to an extent item */
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		/* key updates only make sense for tree block extent items */
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

/* look up the extent item for a delayed op and apply it */
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = node->num_bytes;

	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		/* the extent item must already exist */
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}

/* apply one delayed ref operation for a tree block */
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_tree_ref(node);
	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	/* tree blocks are only ever modified one ref at a time */
	BUG_ON(node->ref_mod !=
1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags ||
		       !extent_op->update_key);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}

/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret;
	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree.  But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		if (insert_reserved) {
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
				BUG_ON(ret);
			}
		}
		mutex_unlock(&head->mutex);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}

static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	int action = BTRFS_ADD_DELAYED_REF;
again:
	/*
	 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents ref count from going down to zero when
	 * there still are pending delayed ref.
	 */
	node = rb_prev(&head->node.rb_node);
	while (1) {
		if (!node)
			break;
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;
		if (ref->action == action)
			return ref;
		node = rb_prev(node);
	}
	if (action == BTRFS_ADD_DELAYED_REF) {
		/* no ADDs left for this bytenr: rescan looking for DROPs */
		action = BTRFS_DROP_DELAYED_REF;
		goto again;
	}
	return NULL;
}

/*
 * Process every delayed ref hanging off the heads on @cluster,
 * returning how many ref operations were consumed.  Called with
 * delayed_refs->lock held; the lock is dropped around tree updates.
 */
static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct list_head *cluster)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	int ret;
	int count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			/* pick a new head ref from the cluster list */
			if (list_empty(cluster))
				break;

			locked_ref = list_entry(cluster->next,
				     struct btrfs_delayed_ref_head, cluster);

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);

			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head.  If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);
		if (!ref) {
			/* All delayed refs have been processed, Go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;

			if (extent_op && must_insert_reserved) {
				kfree(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&delayed_refs->lock);

				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				BUG_ON(ret);
				kfree(extent_op);

				cond_resched();
				spin_lock(&delayed_refs->lock);
				continue;
			}

			list_del_init(&locked_ref->cluster);
			locked_ref = NULL;
		}

		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;

		spin_unlock(&delayed_refs->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);
		BUG_ON(ret);

		btrfs_put_delayed_ref(ref);
		kfree(extent_op);
		count++;

		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	return count;
}

/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far.  count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
*/
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct list_head cluster;
	int ret;
	int run_all = count == (unsigned long)-1;
	int run_most = 0;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	delayed_refs = &trans->transaction->delayed_refs;
	INIT_LIST_HEAD(&cluster);
again:
	spin_lock(&delayed_refs->lock);
	if (count == 0) {
		count = delayed_refs->num_entries * 2;
		run_most = 1;
	}
	while (1) {
		if (!(run_all || run_most) &&
		    delayed_refs->num_heads_ready < 64)
			break;

		/*
		 * go find something we can process in the rbtree.  We start at
		 * the beginning of the tree, and then build a cluster
		 * of refs to process starting at the first one we are able to
		 * lock
		 */
		ret = btrfs_find_ref_cluster(trans, &cluster,
					     delayed_refs->run_delayed_start);
		if (ret)
			break;

		ret = run_clustered_refs(trans, root, &cluster);
		BUG_ON(ret < 0);

		count -= min_t(unsigned long, ret, count);

		if (count == 0)
			break;
	}

	if (run_all) {
		node = rb_first(&delayed_refs->root);
		if (!node)
			goto out;
		count = (unsigned long)-1;

		while (node) {
			ref = rb_entry(node, struct btrfs_delayed_ref_node,
				       rb_node);
			if (btrfs_delayed_ref_is_head(ref)) {
				struct btrfs_delayed_ref_head *head;

				head = btrfs_delayed_node_to_head(ref);
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
				/* lock/unlock waits out any in-flight
				 * processing of this head before rescanning */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
				cond_resched();
				goto again;
			}
			node = rb_next(node);
		}
		spin_unlock(&delayed_refs->lock);
		schedule_timeout(1);
		goto again;
	}
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * Queue a delayed op that ORs @flags into the extent item covering
 * [bytenr, bytenr + num_bytes).
 */
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 flags,
				int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = 1;
	extent_op->update_key = 0;
	extent_op->is_data = is_data ? 1 : 0;

	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
	if (ret)
		kfree(extent_op);
	return ret;
}

/*
 * Check the delayed-ref tree for any ref on @bytenr other than the one
 * held by (objectid, offset) in this root.  Returns 1 when another ref
 * exists, 0 when ours is the only queued ref, -ENOENT when no head is
 * queued, or -EAGAIN when the head was busy and the caller must retry.
 */
static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	int ret = 0;

	ret = -ENOENT;
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	if (!mutex_trylock(&head->mutex)) {
		/* head is being processed: drop locks, wait, ask for retry */
		atomic_inc(&head->node.refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(root->fs_info->extent_root, path);

		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out_unlock;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	if (ref->bytenr != bytenr)
		goto out_unlock;

	ret = 1;
	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
		goto out_unlock;

	data_ref = btrfs_delayed_node_to_data_ref(ref);

	node = rb_prev(node);
	if (node) {
		/* more than one queued ref on this bytenr: shared */
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (ref->bytenr == bytenr)
			goto out_unlock;
	}

	if (data_ref->root != root->root_key.objectid ||
	    data_ref->objectid != objectid || data_ref->offset != offset)
		goto out_unlock;

	ret = 0;
out_unlock:
	mutex_unlock(&head->mutex);
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}

/*
 * Check the committed extent tree for any ref on @bytenr other than a
 * single inline data ref held by (objectid, offset) in this root.
 * Returns 1 when the extent may be shared, 0 when it is exclusive,
 * -ENOENT when no matching extent item is found.
 */
static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		goto out;
	}
#endif
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

	/* exactly one inline data ref and nothing else, or it's shared */
	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;

	/* older than the last snapshot: a snapshot may still reference it */
	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	if (btrfs_extent_inline_ref_type(leaf, iref) !=
	    BTRFS_EXTENT_DATA_REF_KEY)
		goto out;

	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;

	ret = 0;
out:
	return ret;
}

/*
 * Return non-zero when any other root/inode may reference @bytenr,
 * consulting both the committed extent tree and pending delayed refs.
 */
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_path *path;
	int ret;
	int ret2;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOENT;

	do {
		ret = check_committed_ref(trans, root, path, objectid,
					  offset, bytenr);
		if (ret && ret != -ENOENT)
			goto out;

		ret2 = check_delayed_ref(trans, root, path, objectid,
					 offset, bytenr);
	} while (ret2 == -EAGAIN);

	if (ret2 && ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	if (ret != -ENOENT || ret2 != -ENOENT)
		ret = 0;
out:
	btrfs_free_path(path);
	if
(root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) WARN_ON(ret > 0); return ret; } #if 0 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, u32 nr_extents) { struct btrfs_key key; struct btrfs_file_extent_item *fi; u64 root_gen; u32 nritems; int i; int level; int ret = 0; int shared = 0; if (!root->ref_cows) return 0; if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { shared = 0; root_gen = root->root_key.offset; } else { shared = 1; root_gen = trans->transid - 1; } level = btrfs_header_level(buf); nritems = btrfs_header_nritems(buf); if (level == 0) { struct btrfs_leaf_ref *ref; struct btrfs_extent_info *info; ref = btrfs_alloc_leaf_ref(root, nr_extents); if (!ref) { ret = -ENOMEM; goto out; } ref->root_gen = root_gen; ref->bytenr = buf->start; ref->owner = btrfs_header_owner(buf); ref->generation = btrfs_header_generation(buf); ref->nritems = nr_extents; info = ref->extents; for (i = 0; nr_extents > 0 && i < nritems; i++) { u64 disk_bytenr; btrfs_item_key_to_cpu(buf, &key, i); if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) continue; fi = btrfs_item_ptr(buf, i, struct btrfs_file_extent_item); if (btrfs_file_extent_type(buf, fi) == BTRFS_FILE_EXTENT_INLINE) continue; disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi); if (disk_bytenr == 0) continue; info->bytenr = disk_bytenr; info->num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi); info->objectid = key.objectid; info->offset = key.offset; info++; } ret = btrfs_add_leaf_ref(root, ref, shared); if (ret == -EEXIST && shared) { struct btrfs_leaf_ref *old; old = btrfs_lookup_leaf_ref(root, ref->bytenr); BUG_ON(!old); btrfs_remove_leaf_ref(root, old); btrfs_free_leaf_ref(root, old); ret = btrfs_add_leaf_ref(root, ref, shared); } WARN_ON(ret); btrfs_free_leaf_ref(root, ref); } out: return ret; } /* when a block goes through cow, we update the reference counts of * everything that block points to. 
 * The internal pointers of the block can be in just about any order, and it
 * is likely to have clusters of things that are close together and clusters
 * of things that are not.
 *
 * To help reduce the seeks that come with updating all of these reference
 * counts, sort them by byte number before actual updates are done.
 *
 * struct refsort is used to match byte number to slot in the btree block.
 * we sort based on the byte number and then use the slot to actually
 * find the item.
 *
 * struct refsort is smaller than struct btrfs_item and smaller than
 * struct btrfs_key_ptr.  Since we're currently limited to the page size
 * for a btree block, there's no way for a kmalloc of refsorts for a
 * single node to be bigger than a page.
 */
struct refsort {
	u64 bytenr;
	u32 slot;
};

/*
 * for passing into sort()
 */
static int refsort_cmp(const void *a_void, const void *b_void)
{
	const struct refsort *a = a_void;
	const struct refsort *b = b_void;

	if (a->bytenr < b->bytenr)
		return -1;
	if (a->bytenr > b->bytenr)
		return 1;
	return 0;
}
#endif

/*
 * add (inc=1) or drop (inc=0) one reference for every extent the buffer
 * @buf points to: file extents when it is a leaf, child tree blocks when it
 * is a node.  full_backref selects whether refs are keyed by this block's
 * own bytenr (parent) or by the owning root.
 */
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc)
{
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret = 0;
	/* chosen per direction: btrfs_inc_extent_ref or btrfs_free_extent */
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64);

	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	/* non-cow roots never track per-file-extent references */
	if (!root->ref_cows && level == 0)
		return 0;

	if (inc)
		process_func = btrfs_inc_extent_ref;
	else
		process_func = btrfs_free_extent;

	if (full_backref)
		parent = buf->start;
	else
		parent = 0;

	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)	/* a hole, no extent on disk */
				continue;

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			/* backrefs key on the file offset of the extent start */
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,
					   key.offset);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = btrfs_level_size(root, level - 1);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	BUG();	/* ref update failure leaves the trees inconsistent */
	return ret;
}

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
}

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}

/*
 * write one dirty block group item back into the extent tree
 */
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret);	/* the block group item must exist */

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(extent_root, path);
fail:
	if (ret)
		return ret;
	return 0;
}

/*
 * walk the rbtree of cached block groups: drops the reference on @cache and
 * returns the next group (referenced) or NULL at the end.
 */
static struct btrfs_block_group_cache *
next_block_group(struct btrfs_root *root,
		 struct btrfs_block_group_cache *cache)
{
	struct rb_node *node;

	spin_lock(&root->fs_info->block_group_cache_lock);
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group_cache,
				 cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&root->fs_info->block_group_cache_lock);
	return
cache;
}

/*
 * write every dirty block group item back to disk; called during
 * transaction commit.
 */
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache;
	int err = 0;
	struct btrfs_path *path;
	u64 last = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		if (last == 0) {
			/* flush delayed refs so the items we write are final */
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			BUG_ON(err);
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->dirty)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			/* wrap around once to catch groups dirtied meanwhile */
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		cache->dirty = 0;
		last = cache->key.objectid + cache->key.offset;

		err = write_one_cache_group(trans, root, path, cache);
		BUG_ON(err);
		btrfs_put_block_group(cache);
	}

	btrfs_free_path(path);
	return 0;
}

/* returns non-zero when @bytenr lives in a read-only (or unknown) group */
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}

/*
 * account @total_bytes / @bytes_used into the space_info matching @flags,
 * creating it on first use.  *space_info is set to the found/created record.
 */
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int i;
	int factor;

	/* mirrored profiles consume two bytes of disk per byte used */
	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
		     BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	found = __find_space_info(info, flags);
	if (found) {
		spin_lock(&found->lock);
		found->total_bytes += total_bytes;
		found->bytes_used += bytes_used;
		found->disk_used += bytes_used * factor;
		found->full = 0;
		spin_unlock(&found->lock);
		*space_info = found;
		return 0;
	}
	found = kzalloc(sizeof(*found), GFP_NOFS);
	if (!found)
		return -ENOMEM;

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&found->block_groups[i]);
	init_rwsem(&found->groups_sem);
	spin_lock_init(&found->lock);
	found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
				BTRFS_BLOCK_GROUP_SYSTEM |
				BTRFS_BLOCK_GROUP_METADATA);
	found->total_bytes =
		total_bytes;
	found->bytes_used = bytes_used;
	found->disk_used = bytes_used * factor;
	found->bytes_pinned = 0;
	found->bytes_reserved = 0;
	found->bytes_readonly = 0;
	found->bytes_may_use = 0;
	found->full = 0;
	found->force_alloc = 0;
	*space_info = found;
	list_add_rcu(&found->list, &info->space_info);
	atomic_set(&found->caching_threads, 0);
	return 0;
}

/* remember which RAID profiles are in use for each block group type */
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
				   BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_RAID10 |
				   BTRFS_BLOCK_GROUP_DUP);
	if (extra_flags) {
		if (flags & BTRFS_BLOCK_GROUP_DATA)
			fs_info->avail_data_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_METADATA)
			fs_info->avail_metadata_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			fs_info->avail_system_alloc_bits |= extra_flags;
	}
}

/*
 * drop RAID profile bits that cannot be satisfied by the currently
 * writeable devices, and resolve mutually exclusive profile combinations.
 */
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	u64 num_devices = root->fs_info->fs_devices->rw_devices;

	if (num_devices == 1)
		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
	if (num_devices < 4)
		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
		      BTRFS_BLOCK_GROUP_RAID10))) {
		flags &= ~BTRFS_BLOCK_GROUP_DUP;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
	}

	/* bitwise | is deliberate here: any of the three bits disables RAID0 */
	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
	     (flags & BTRFS_BLOCK_GROUP_DUP)))
		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
	return flags;
}

/* fold the per-type available/selected RAID bits into @flags */
static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		flags |= root->fs_info->avail_data_alloc_bits &
			 root->fs_info->data_alloc_profile;
	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		flags |= root->fs_info->avail_system_alloc_bits &
			 root->fs_info->system_alloc_profile;
	else if (flags & BTRFS_BLOCK_GROUP_METADATA)
		flags |= root->fs_info->avail_metadata_alloc_bits &
			 root->fs_info->metadata_alloc_profile;
	return btrfs_reduce_alloc_profile(root, flags);
}

/* pick the allocation profile for data vs system vs metadata */
static u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
{
	u64 flags;

	if (data)
		flags = BTRFS_BLOCK_GROUP_DATA;
	else if (root == root->fs_info->chunk_root)
		flags = BTRFS_BLOCK_GROUP_SYSTEM;
	else
		flags = BTRFS_BLOCK_GROUP_METADATA;

	return get_alloc_profile(root, flags);
}

/* cache the DATA space_info pointer on the inode for fast reservation */
void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
{
	BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
						       BTRFS_BLOCK_GROUP_DATA);
}

/*
 * This will check the space that the inode allocates from to make sure we have
 * enough space for bytes.
 */
int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
{
	struct btrfs_space_info *data_sinfo;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 used;
	int ret = 0, committed = 0;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	data_sinfo = BTRFS_I(inode)->space_info;
	if (!data_sinfo)
		goto alloc;	/* no DATA space_info yet: allocate a chunk first */

again:
	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
		data_sinfo->bytes_may_use;

	if (used + bytes > data_sinfo->total_bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * if we don't have enough free bytes in this space then we need
		 * to alloc a new chunk.
		 */
		if (!data_sinfo->full) {
			u64 alloc_target;

			data_sinfo->force_alloc = 1;
			spin_unlock(&data_sinfo->lock);
alloc:
			alloc_target = btrfs_get_alloc_profile(root, 1);
			trans = btrfs_join_transaction(root, 1);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			/* over-allocate a bit so small writes don't thrash */
			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     bytes + 2 * 1024 * 1024,
					     alloc_target, 0);
			btrfs_end_transaction(trans, root);
			if (ret < 0)
				return ret;

			/* first chunk ever: the space_info now exists, cache it */
			if (!data_sinfo) {
				btrfs_set_inode_space_info(root, inode);
				data_sinfo = BTRFS_I(inode)->space_info;
			}
			goto again;
		}
		spin_unlock(&data_sinfo->lock);

		/* commit the current transaction and try again */
		if (!committed && !root->fs_info->open_ioctl_trans) {
			committed = 1;
			trans = btrfs_join_transaction(root, 1);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				return ret;
			goto again;
		}

#if 0 /* I hope we never need this code again, just in case */
		printk(KERN_ERR "no space left, need %llu, %llu bytes_used, "
		       "%llu bytes_reserved, " "%llu bytes_pinned, "
		       "%llu bytes_readonly, %llu may use %llu total\n",
		       (unsigned long long)bytes,
		       (unsigned long long)data_sinfo->bytes_used,
		       (unsigned long long)data_sinfo->bytes_reserved,
		       (unsigned long long)data_sinfo->bytes_pinned,
		       (unsigned long long)data_sinfo->bytes_readonly,
		       (unsigned long long)data_sinfo->bytes_may_use,
		       (unsigned long long)data_sinfo->total_bytes);
#endif
		return -ENOSPC;
	}
	data_sinfo->bytes_may_use += bytes;
	BTRFS_I(inode)->reserved_bytes += bytes;
	spin_unlock(&data_sinfo->lock);

	return 0;
}

/*
 * called when we are clearing an delalloc extent from the
 * inode's io_tree or there was an error for whatever reason
 * after calling btrfs_check_data_free_space
 */
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_space_info *data_sinfo;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	data_sinfo = BTRFS_I(inode)->space_info;
	spin_lock(&data_sinfo->lock);
	data_sinfo->bytes_may_use -= bytes;
	BTRFS_I(inode)->reserved_bytes -= bytes;
	spin_unlock(&data_sinfo->lock);
}

/* mark every metadata space_info so the next allocation forces a new chunk */
static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = 1;
	}
	rcu_read_unlock();
}

/*
 * heuristic: allocate a new chunk only when we are within 256MB of full,
 * or more than 80%% of the writable space is already used/reserved.
 */
static int should_alloc_chunk(struct btrfs_space_info *sinfo, u64 alloc_bytes)
{
	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;

	if (sinfo->bytes_used + sinfo->bytes_reserved +
	    alloc_bytes + 256 * 1024 * 1024 < num_bytes)
		return 0;

	if (sinfo->bytes_used + sinfo->bytes_reserved +
	    alloc_bytes < div_factor(num_bytes, 8))
		return 0;

	return 1;
}

/*
 * allocate a new chunk of type @flags if needed.  returns 1 when a chunk
 * was allocated, 0 when none was needed, negative errno on failure.
 */
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->chunk_mutex);

	flags = btrfs_reduce_alloc_profile(extent_root, flags);

	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
		BUG_ON(ret);
	}
	BUG_ON(!space_info);

	spin_lock(&space_info->lock);
	if (space_info->force_alloc)
		force = 1;
	if (space_info->full) {
		spin_unlock(&space_info->lock);
		goto out;
	}

	if (!force && !should_alloc_chunk(space_info, alloc_bytes)) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	ret = btrfs_alloc_chunk(trans, extent_root, flags);
	spin_lock(&space_info->lock);
	if (ret)
		space_info->full = 1;	/* alloc failed: remember we're full */
	else
		ret = 1;
	space_info->force_alloc = 0;
	spin_unlock(&space_info->lock);
out:
	mutex_unlock(&extent_root->fs_info->chunk_mutex);
	return ret;
}

/*
 * allocate a chunk for @sinfo if the heuristic says we are close to full.
 * joins (and ends) a transaction itself when the caller has none.
 */
static int maybe_allocate_chunk(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_space_info *sinfo, u64 num_bytes)
{
	int ret;
	int end_trans = 0;

	if (sinfo->full)
		return 0;

	spin_lock(&sinfo->lock);
	ret = should_alloc_chunk(sinfo, num_bytes + 2 * 1024 * 1024);
	spin_unlock(&sinfo->lock);
	if (!ret)
		return 0;

	if (!trans) {
		trans = btrfs_join_transaction(root, 1);
		BUG_ON(IS_ERR(trans));
		end_trans = 1;
	}

	ret = do_chunk_alloc(trans, root->fs_info->extent_root,
			     num_bytes + 2 * 1024 * 1024,
			     get_alloc_profile(root, sinfo->flags), 0);

	if (end_trans)
		btrfs_end_transaction(trans, root);

	return ret == 1 ? 1 : 0;
}

/*
 * shrink metadata reservation for delalloc
 */
static int shrink_delalloc(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 to_reclaim)
{
	struct btrfs_block_rsv *block_rsv;
	u64 reserved;
	u64 max_reclaim;
	u64 reclaimed = 0;
	int pause = 1;
	int ret;

	block_rsv = &root->fs_info->delalloc_block_rsv;
	spin_lock(&block_rsv->lock);
	reserved = block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (reserved == 0)
		return 0;

	max_reclaim = min(reserved, to_reclaim);

	while (1) {
		/* kick writeback of one delalloc inode to release reservation */
		ret = btrfs_start_one_delalloc_inode(root, trans ?
						     1 : 0);
		if (!ret) {
			/* nothing to flush: back off exponentially, cap 100ms */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(pause);
			pause <<= 1;
			if (pause > HZ / 10)
				pause = HZ / 10;
		} else {
			pause = 1;
		}

		spin_lock(&block_rsv->lock);
		if (reserved > block_rsv->reserved)
			reclaimed = reserved - block_rsv->reserved;
		reserved = block_rsv->reserved;
		spin_unlock(&block_rsv->lock);

		if (reserved == 0 || reclaimed >= max_reclaim)
			break;

		if (trans && trans->transaction->blocked)
			return -EAGAIN;
	}
	return reclaimed >= to_reclaim;
}

/*
 * after a failed metadata reservation: try chunk allocation, delalloc
 * shrinking, and finally a transaction commit to free pinned space.
 * returns 1 to retry, -EAGAIN/-ENOSPC to give up, other negatives on error.
 */
static int should_retry_reserve(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_block_rsv *block_rsv,
				u64 num_bytes, int *retries)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	int ret;

	if ((*retries) > 2)
		return -ENOSPC;

	ret = maybe_allocate_chunk(trans, root, space_info, num_bytes);
	if (ret)
		return 1;

	if (trans && trans->transaction->in_commit)
		return -ENOSPC;

	ret = shrink_delalloc(trans, root, num_bytes);
	if (ret)
		return ret;

	/* only commit when enough pinned bytes would be released */
	spin_lock(&space_info->lock);
	if (space_info->bytes_pinned < num_bytes)
		ret = 1;
	spin_unlock(&space_info->lock);
	if (ret)
		return -ENOSPC;

	(*retries)++;

	if (trans)
		return -EAGAIN;

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(IS_ERR(trans));
	ret = btrfs_commit_transaction(trans, root);
	BUG_ON(ret);

	return 1;
}

/*
 * try to take @num_bytes out of the unused space of this rsv's space_info.
 * low-priority reservations only succeed while plenty of slack remains.
 */
static int reserve_metadata_bytes(struct btrfs_block_rsv *block_rsv,
				  u64 num_bytes)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 unused;
	int ret = -ENOSPC;

	spin_lock(&space_info->lock);
	unused = space_info->bytes_used + space_info->bytes_reserved +
		 space_info->bytes_pinned + space_info->bytes_readonly;

	if (unused < space_info->total_bytes)
		unused = space_info->total_bytes - unused;
	else
		unused = 0;

	if (unused >= num_bytes) {
		if (block_rsv->priority >= 10) {
			space_info->bytes_reserved += num_bytes;
			ret = 0;
		} else {
			/* priority scales how much slack we are allowed to eat */
			if ((unused + block_rsv->reserved) *
			    block_rsv->priority >=
			    (num_bytes + block_rsv->reserved) * 10) {
				space_info->bytes_reserved += num_bytes;
				ret = 0;
			}
		}
	}
	spin_unlock(&space_info->lock);
	return ret;
}

/* pick the rsv a transaction should charge for this root */
static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root)
{
	struct btrfs_block_rsv *block_rsv;

	if (root->ref_cows)
		block_rsv = trans->block_rsv;
	else
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &root->fs_info->empty_block_rsv;

	return block_rsv;
}

/* consume @num_bytes from an rsv; -ENOSPC when not enough is reserved */
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes)
{
	int ret = -ENOSPC;
	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = 0;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}

/* add bytes to an rsv, optionally growing its target size too */
static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
				u64 num_bytes, int update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = 1;
	spin_unlock(&block_rsv->lock);
}

/*
 * shrink an rsv by @num_bytes ((u64)-1 means "everything"); any excess over
 * the new size goes to @dest, or straight back to the space_info.
 */
void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
			     struct btrfs_block_rsv *dest, u64 num_bytes)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1)
		num_bytes = block_rsv->size;
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	} else {
		num_bytes = 0;
	}
	spin_unlock(&block_rsv->lock);

	if (num_bytes > 0) {
		if (dest) {
			block_rsv_add_bytes(dest, num_bytes, 0);
		} else {
			spin_lock(&space_info->lock);
			space_info->bytes_reserved -= num_bytes;
			spin_unlock(&space_info->lock);
		}
	}
}

/* move @num_bytes of reservation from @src to @dst (grows dst's size) */
static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
				   struct btrfs_block_rsv *dst, u64 num_bytes)
{
	int ret;

	ret = block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	block_rsv_add_bytes(dst, num_bytes, 1);
	return 0;
}

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
{
	memset(rsv, 0, sizeof(*rsv));
spin_lock_init(&rsv->lock); atomic_set(&rsv->usage, 1); rsv->priority = 6; INIT_LIST_HEAD(&rsv->list); } struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root) { struct btrfs_block_rsv *block_rsv; struct btrfs_fs_info *fs_info = root->fs_info; u64 alloc_target; block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS); if (!block_rsv) return NULL; btrfs_init_block_rsv(block_rsv); alloc_target = btrfs_get_alloc_profile(root, 0); block_rsv->space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); return block_rsv; } void btrfs_free_block_rsv(struct btrfs_root *root, struct btrfs_block_rsv *rsv) { if (rsv && atomic_dec_and_test(&rsv->usage)) { btrfs_block_rsv_release(root, rsv, (u64)-1); if (!rsv->durable) kfree(rsv); } } /* * make the block_rsv struct be able to capture freed space. * the captured space will re-add to the the block_rsv struct * after transaction commit */ void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *block_rsv) { block_rsv->durable = 1; mutex_lock(&fs_info->durable_block_rsv_mutex); list_add_tail(&block_rsv->list, &fs_info->durable_block_rsv_list); mutex_unlock(&fs_info->durable_block_rsv_mutex); } int btrfs_block_rsv_add(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, u64 num_bytes, int *retries) { int ret; if (num_bytes == 0) return 0; again: ret = reserve_metadata_bytes(block_rsv, num_bytes); if (!ret) { block_rsv_add_bytes(block_rsv, num_bytes, 1); return 0; } ret = should_retry_reserve(trans, root, block_rsv, num_bytes, retries); if (ret > 0) goto again; return ret; } int btrfs_block_rsv_check(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, u64 min_reserved, int min_factor) { u64 num_bytes = 0; int commit_trans = 0; int ret = -ENOSPC; if (!block_rsv) return 0; spin_lock(&block_rsv->lock); if (min_factor > 0) num_bytes = div_factor(block_rsv->size, min_factor); if (min_reserved > num_bytes) 
		num_bytes = min_reserved;

	if (block_rsv->reserved >= num_bytes) {
		ret = 0;
	} else {
		num_bytes -= block_rsv->reserved;
		/* a durable rsv may refill from space freed this transaction */
		if (block_rsv->durable &&
		    block_rsv->freed[0] + block_rsv->freed[1] >= num_bytes)
			commit_trans = 1;
	}
	spin_unlock(&block_rsv->lock);
	if (!ret)
		return 0;

	if (block_rsv->refill_used) {
		ret = reserve_metadata_bytes(block_rsv, num_bytes);
		if (!ret) {
			block_rsv_add_bytes(block_rsv, num_bytes, 0);
			return 0;
		}
	}

	if (commit_trans) {
		if (trans)
			return -EAGAIN;

		trans = btrfs_join_transaction(root, 1);
		BUG_ON(IS_ERR(trans));
		/* NOTE(review): commit result is ignored here -- confirm intent */
		ret = btrfs_commit_transaction(trans, root);
		return 0;
	}

	WARN_ON(1);
	printk(KERN_INFO"block_rsv size %llu reserved %llu freed %llu %llu\n",
		block_rsv->size, block_rsv->reserved,
		block_rsv->freed[0], block_rsv->freed[1]);

	return -ENOSPC;
}

int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
			    struct btrfs_block_rsv *dst_rsv,
			    u64 num_bytes)
{
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

/* release into the global rsv when it still needs filling, else to space_info */
void btrfs_block_rsv_release(struct btrfs_root *root,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	if (global_rsv->full || global_rsv == block_rsv ||
	    block_rsv->space_info != global_rsv->space_info)
		global_rsv = NULL;
	block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
}

/*
 * helper to calculate size of global block reservation.
 * the desired value is sum of space used by extent tree,
 * checksum tree and root tree
 */
static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *sinfo;
	u64 num_bytes;
	u64 meta_used;
	u64 data_used;
	int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
#if 0
	/*
	 * per tree used space accounting can be inaccuracy, so we
	 * can't rely on it.
	 */
	spin_lock(&fs_info->extent_root->accounting_lock);
	num_bytes = btrfs_root_used(&fs_info->extent_root->root_item);
	spin_unlock(&fs_info->extent_root->accounting_lock);

	spin_lock(&fs_info->csum_root->accounting_lock);
	num_bytes += btrfs_root_used(&fs_info->csum_root->root_item);
	spin_unlock(&fs_info->csum_root->accounting_lock);

	spin_lock(&fs_info->tree_root->accounting_lock);
	num_bytes += btrfs_root_used(&fs_info->tree_root->root_item);
	spin_unlock(&fs_info->tree_root->accounting_lock);
#endif
	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
	spin_lock(&sinfo->lock);
	data_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	spin_lock(&sinfo->lock);
	meta_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	/* estimate: csum items (x2) plus 2%% of all used space, capped at 1/3 meta */
	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
		    csum_size * 2;
	num_bytes += div64_u64(data_used + meta_used, 50);

	if (num_bytes * 3 > meta_used)
		num_bytes = div64_u64(meta_used, 3);

	return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
}

/* resize the global rsv and top it up from the space_info's slack */
static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	u64 num_bytes;

	num_bytes = calc_global_metadata_size(fs_info);

	spin_lock(&block_rsv->lock);
	spin_lock(&sinfo->lock);

	block_rsv->size = num_bytes;

	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
		    sinfo->bytes_reserved + sinfo->bytes_readonly;

	if (sinfo->total_bytes > num_bytes) {
		num_bytes = sinfo->total_bytes - num_bytes;
		block_rsv->reserved += num_bytes;
		sinfo->bytes_reserved += num_bytes;
	}

	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		sinfo->bytes_reserved -= num_bytes;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	}
#if 0
	printk(KERN_INFO"global block rsv size %llu reserved %llu\n",
		block_rsv->size, block_rsv->reserved);
#endif
	spin_unlock(&sinfo->lock);
	spin_unlock(&block_rsv->lock);
}

/* wire the static per-fs reservations to their space_infos at mount time */
static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;
	fs_info->chunk_block_rsv.priority = 10;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->global_block_rsv.priority = 10;
	fs_info->global_block_rsv.refill_used = 1;
	fs_info->delalloc_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.priority = 10;

	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;

	btrfs_add_durable_block_rsv(fs_info, &fs_info->global_block_rsv);

	btrfs_add_durable_block_rsv(fs_info, &fs_info->delalloc_block_rsv);

	update_global_block_rsv(fs_info);
}

/* unmount-time teardown: every static rsv should already be drained */
static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
}

/*
 * worst-case metadata bytes for @num_items tree items: each may cow one
 * leaf plus a node per level, and each block may live in up to 3 places
 * (data, log, shared) at once.
 */
static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items)
{
	return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
		3 * num_items;
}

/* reserve metadata for @num_items items into the trans rsv */
int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 int num_items, int *retries)
{
	u64 num_bytes;
	int ret;

	if (num_items == 0 || root->fs_info->chunk_root == root)
		return 0;

	num_bytes =
		calc_trans_metadata_size(root, num_items);
	ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
				  num_bytes, retries);
	if (!ret) {
		trans->bytes_reserved += num_bytes;
		trans->block_rsv = &root->fs_info->trans_block_rsv;
	}
	return ret;
}

/* give back whatever the transaction handle still has reserved */
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	if (!trans->bytes_reserved)
		return;

	BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv);
	btrfs_block_rsv_release(root, trans->block_rsv,
				trans->bytes_reserved);
	trans->bytes_reserved = 0;
}

/* reserve metadata for processing one orphan item (see comment below) */
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;

	/*
	 * one for deleting orphan item, one for updating inode and
	 * two for calling btrfs_truncate_inode_items.
	 *
	 * btrfs_truncate_inode_items is a delete operation, it frees
	 * more space than it uses in most cases. So two units of
	 * metadata space should be enough for calling it many times.
	 * If all of the metadata space is used, we can commit
	 * transaction and use space it freed.
	 */
	u64 num_bytes = calc_trans_metadata_size(root, 4);
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_orphan_release_metadata(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	/* must mirror the 4 items reserved in btrfs_orphan_reserve_metadata */
	u64 num_bytes = calc_trans_metadata_size(root, 4);
	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
}

/* pre-reserve the metadata a pending snapshot creation will need */
int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;

	/*
	 * two for root back/forward refs, two for directory entries
	 * and one for root of the snapshot.
	 */
	u64 num_bytes = calc_trans_metadata_size(root, 5);
	dst_rsv->space_info = src_rsv->space_info;
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

/* checksum metadata estimate: 1/8th of the data bytes (@inode unused here) */
static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes)
{
	return num_bytes >>= 3;
}

/* reserve delalloc metadata (extent items + csums) for @num_bytes of data */
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
	u64 to_reserve;
	int nr_extents;
	int retries = 0;
	int ret;

	/* don't pile reservations onto a committing transaction */
	if (btrfs_transaction_in_commit(root->fs_info))
		schedule_timeout(1);

	num_bytes = ALIGN(num_bytes, root->sectorsize);
again:
	spin_lock(&BTRFS_I(inode)->accounting_lock);
	nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
	if (nr_extents > BTRFS_I(inode)->reserved_extents) {
		nr_extents -= BTRFS_I(inode)->reserved_extents;
		to_reserve = calc_trans_metadata_size(root, nr_extents);
	} else {
		nr_extents = 0;
		to_reserve = 0;
	}

	to_reserve += calc_csum_metadata_size(inode, num_bytes);
	ret = reserve_metadata_bytes(block_rsv, to_reserve);
	if (ret) {
		spin_unlock(&BTRFS_I(inode)->accounting_lock);
		ret = should_retry_reserve(NULL, root, block_rsv, to_reserve,
					   &retries);
		if (ret > 0)
			goto again;
		return ret;
	}

	BTRFS_I(inode)->reserved_extents += nr_extents;
	atomic_inc(&BTRFS_I(inode)->outstanding_extents);
	spin_unlock(&BTRFS_I(inode)->accounting_lock);

	block_rsv_add_bytes(block_rsv, to_reserve, 1);

	/* keep the delalloc rsv from growing without bound (512MB cap) */
	if (block_rsv->size > 512 * 1024 * 1024)
		shrink_delalloc(NULL, root, to_reserve);

	return 0;
}

/* release the delalloc metadata reservation taken for @num_bytes of data */
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 to_free;
	int nr_extents;

	num_bytes = ALIGN(num_bytes, root->sectorsize);
	atomic_dec(&BTRFS_I(inode)->outstanding_extents);

	spin_lock(&BTRFS_I(inode)->accounting_lock);
	nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
	if (nr_extents < BTRFS_I(inode)->reserved_extents) {
		nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents;
		BTRFS_I(inode)->reserved_extents -= nr_extents;
	} else {
		nr_extents = 0;
	}
	spin_unlock(&BTRFS_I(inode)->accounting_lock);

	to_free = calc_csum_metadata_size(inode, num_bytes);
	if (nr_extents > 0)
		to_free += calc_trans_metadata_size(root, nr_extents);

	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
				to_free);
}

/* reserve both data space and the matching delalloc metadata */
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
{
	int ret;

	ret = btrfs_check_data_free_space(inode, num_bytes);
	if (ret)
		return ret;

	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
	if (ret) {
		/* undo the data reservation so nothing leaks */
		btrfs_free_reserved_data_space(inode, num_bytes);
		return ret;
	}

	return 0;
}

/* inverse of btrfs_delalloc_reserve_space */
void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
{
	btrfs_delalloc_release_metadata(inode, num_bytes);
	btrfs_free_reserved_data_space(inode, num_bytes);
}

/*
 * account an allocation (alloc=1) or a free->pin (alloc=0) of
 * [bytenr, bytenr+num_bytes) against the super block, the affected block
 * groups and their space_infos.
 */
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	int factor;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;

	/* block accounting for super block */
	spin_lock(&info->delalloc_lock);
	old_val = btrfs_super_bytes_used(&info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(&info->super_copy, old_val);
	spin_unlock(&info->delalloc_lock);

	/* the range may span several block groups */
	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -1;	/* NOTE(review): bare -1, not an errno */
		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
				    BTRFS_BLOCK_GROUP_RAID1 |
				    BTRFS_BLOCK_GROUP_RAID10))
			factor = 2;
		else
			factor = 1;
		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);
		cache->dirty = 1;
		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -=
num_bytes; cache->space_info->bytes_used += num_bytes; cache->space_info->disk_used += num_bytes * factor; spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); } else { old_val -= num_bytes; btrfs_set_block_group_used(&cache->item, old_val); cache->pinned += num_bytes; cache->space_info->bytes_pinned += num_bytes; cache->space_info->bytes_used -= num_bytes; cache->space_info->disk_used -= num_bytes * factor; spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); set_extent_dirty(info->pinned_extents, bytenr, bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); } btrfs_put_block_group(cache); total -= num_bytes; bytenr += num_bytes; } return 0; } static u64 first_logical_byte(struct btrfs_root *root, u64 search_start) { struct btrfs_block_group_cache *cache; u64 bytenr; cache = btrfs_lookup_first_block_group(root->fs_info, search_start); if (!cache) return 0; bytenr = cache->key.objectid; btrfs_put_block_group(cache); return bytenr; } static int pin_down_extent(struct btrfs_root *root, struct btrfs_block_group_cache *cache, u64 bytenr, u64 num_bytes, int reserved) { spin_lock(&cache->space_info->lock); spin_lock(&cache->lock); cache->pinned += num_bytes; cache->space_info->bytes_pinned += num_bytes; if (reserved) { cache->reserved -= num_bytes; cache->space_info->bytes_reserved -= num_bytes; } spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); set_extent_dirty(root->fs_info->pinned_extents, bytenr, bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); return 0; } /* * this function must be called within transaction */ int btrfs_pin_extent(struct btrfs_root *root, u64 bytenr, u64 num_bytes, int reserved) { struct btrfs_block_group_cache *cache; cache = btrfs_lookup_block_group(root->fs_info, bytenr); BUG_ON(!cache); pin_down_extent(root, cache, bytenr, num_bytes, reserved); btrfs_put_block_group(cache); return 0; } /* * update size of reserved extents. this function may return -EAGAIN * if 'reserve' is true or 'sinfo' is false. 
*/
static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
				 u64 num_bytes, int reserve, int sinfo)
{
	int ret = 0;

	if (sinfo) {
		struct btrfs_space_info *space_info = cache->space_info;

		spin_lock(&space_info->lock);
		spin_lock(&cache->lock);
		if (reserve) {
			if (cache->ro) {
				/* read-only group: cannot reserve here */
				ret = -EAGAIN;
			} else {
				cache->reserved += num_bytes;
				space_info->bytes_reserved += num_bytes;
			}
		} else {
			if (cache->ro)
				space_info->bytes_readonly += num_bytes;
			cache->reserved -= num_bytes;
			space_info->bytes_reserved -= num_bytes;
		}
		spin_unlock(&cache->lock);
		spin_unlock(&space_info->lock);
	} else {
		spin_lock(&cache->lock);
		if (cache->ro) {
			ret = -EAGAIN;
		} else {
			if (reserve)
				cache->reserved += num_bytes;
			else
				cache->reserved -= num_bytes;
		}
		spin_unlock(&cache->lock);
	}
	return ret;
}

/*
 * Flip the pinned extent trees for the coming commit and remember, per
 * block group, up to which byte unpinning may re-add free space
 * (last_byte_to_unpin).
 */
int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_caching_control *next;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group_cache *cache;

	down_write(&fs_info->extent_commit_sem);

	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		cache = caching_ctl->block_group;
		if (block_group_cache_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];

	up_write(&fs_info->extent_commit_sem);

	update_global_block_rsv(fs_info);
	return 0;
}

/*
 * Unpin [start, end] and return the space to the free space cache and
 * to the reserved/pinned counters of the block groups it falls in.
 */
static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	u64 len;

	while (start <= end) {
		if (!cache ||
		    start >= cache->key.objectid + cache->key.offset) {
			if (cache)
				btrfs_put_block_group(cache);
			cache = btrfs_lookup_block_group(fs_info, start);
			BUG_ON(!cache);
		}

		len = cache->key.objectid + cache->key.offset - start;
		len = min(len, end + 1 - start);

		if (start < cache->last_byte_to_unpin) {
			len = min(len, cache->last_byte_to_unpin - start);
			btrfs_add_free_space(cache, start, len);
		}

		start += len;

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned -= len;
		cache->space_info->bytes_pinned -= len;
		if (cache->ro) {
			cache->space_info->bytes_readonly += len;
		} else if (cache->reserved_pinned > 0) {
			len = min(len, cache->reserved_pinned);
			cache->reserved_pinned -= len;
			cache->space_info->bytes_reserved += len;
		}
		spin_unlock(&cache->lock);
		spin_unlock(&cache->space_info->lock);
	}

	if (cache)
		btrfs_put_block_group(cache);
	return 0;
}

/*
 * At commit time: discard and unpin everything freed during this
 * transaction, and settle the durable block reserves.
 */
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *unpin;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *next_rsv;
	u64 start;
	u64 end;
	int idx;
	int ret;

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		unpin = &fs_info->freed_extents[1];
	else
		unpin = &fs_info->freed_extents[0];

	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;

		/* discard failures are deliberately ignored here */
		ret = btrfs_discard_extent(root, start, end + 1 - start);

		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		unpin_extent_range(root, start, end);
		cond_resched();
	}

	mutex_lock(&fs_info->durable_block_rsv_mutex);
	list_for_each_entry_safe(block_rsv, next_rsv,
				 &fs_info->durable_block_rsv_list, list) {

		idx = trans->transid & 0x1;
		if (block_rsv->freed[idx] > 0) {
			block_rsv_add_bytes(block_rsv,
					    block_rsv->freed[idx], 0);
			block_rsv->freed[idx] = 0;
		}
		if (atomic_read(&block_rsv->usage) == 0) {
			btrfs_block_rsv_release(root, block_rsv, (u64)-1);

			if (block_rsv->freed[0] == 0 &&
			    block_rsv->freed[1] == 0) {
				list_del_init(&block_rsv->list);
				kfree(block_rsv);
			}
		} else {
			btrfs_block_rsv_release(root, block_rsv, 0);
		}
	}
	mutex_unlock(&fs_info->durable_block_rsv_mutex);

	return 0;
}

/*
 * Drop @refs_to_drop references from the extent at @bytenr.  When the
 * reference count reaches zero the extent item itself is deleted, csum
 * items are removed for data extents, and the space is returned to the
 * block group via update_block_group().
 */
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	u32 item_size;
	u64 refs;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
	BUG_ON(!is_data && refs_to_drop != 1);

	ret = lookup_extent_backref(trans, extent_root, path, &iref,
				    bytenr, num_bytes, parent,
				    root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		/*
		 * the backref was found; the extent item itself may sit a
		 * few slots earlier in the same leaf, so scan backwards
		 */
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
		if (found_extent && item_size < sizeof(*ei))
			found_extent = 0;
#endif
		if (!found_extent) {
			/* drop the backref first, then search for the item */
			BUG_ON(iref);
			ret = remove_extent_backref(trans, extent_root, path,
						    NULL, refs_to_drop,
						    is_data);
			BUG_ON(ret);
			btrfs_release_path(extent_root, path);
			path->leave_spinning = 1;

			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret) {
				printk(KERN_ERR "umm, got %d back from search"
				       ", was looking for %llu\n", ret,
				       (unsigned long long)bytenr);
				btrfs_print_leaf(extent_root, path->nodes[0]);
			}
			BUG_ON(ret);
			extent_slot = path->slots[0];
		}
	} else {
		btrfs_print_leaf(extent_root, path->nodes[0]);
		WARN_ON(1);
		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
		       "parent %llu root %llu owner %llu offset %llu\n",
		       (unsigned long long)bytenr,
		       (unsigned long long)parent,
		       (unsigned long long)root_objectid,
		       (unsigned long long)owner_objectid,
		       (unsigned long long)owner_offset);
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		/* old-format item: convert it in place and re-search */
		BUG_ON(found_extent || extent_slot != path->slots[0]);
		ret = convert_extent_item_v0(trans, extent_root, path,
					     owner_objectid, 0);
		BUG_ON(ret < 0);

		btrfs_release_path(extent_root, path);
		path->leave_spinning = 1;

		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = num_bytes;

		ret = btrfs_search_slot(trans, extent_root, &key, path,
					-1, 1);
		if (ret) {
			printk(KERN_ERR "umm, got %d back from search"
			       ", was looking for %llu\n", ret,
			       (unsigned long long)bytenr);
			btrfs_print_leaf(extent_root, path->nodes[0]);
		}
		BUG_ON(ret);
		extent_slot = path->slots[0];
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, extent_slot);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
	}

	refs = btrfs_extent_refs(leaf, ei);
	BUG_ON(refs < refs_to_drop);
	refs -= refs_to_drop;

	if (refs > 0) {
		if (extent_op)
			__run_delayed_extent_op(extent_op, leaf, ei);
		/*
		 * In the case of inline back ref, reference count will
		 * be updated by remove_extent_backref
		 */
		if (iref) {
			BUG_ON(!found_extent);
		} else {
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);
		}
		if (found_extent) {
			ret = remove_extent_backref(trans, extent_root, path,
						    iref, refs_to_drop,
						    is_data);
			BUG_ON(ret);
		}
	} else {
		/* last reference gone: delete the extent item itself */
		if (found_extent) {
			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(root, path, iref));
			if (iref) {
				BUG_ON(path->slots[0] != extent_slot);
			} else {
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		BUG_ON(ret);
		btrfs_release_path(extent_root, path);

		if (is_data) {
			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
			BUG_ON(ret);
		} else {
			invalidate_mapping_pages(info->btree_inode->i_mapping,
			     bytenr >> PAGE_CACHE_SHIFT,
			     (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
		}

		ret = update_block_group(trans, root, bytenr, num_bytes, 0);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	return ret;
}

/*
 * when we free a block, it is possible (and likely) that we free the last
 * delayed ref for that extent as well.  This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	/* there are still entries for this ref, we can't drop it */
	if (ref->bytenr == bytenr)
		goto out;

	if (head->extent_op) {
		if (!head->must_insert_reserved)
			goto out;
		kfree(head->extent_op);
		head->extent_op = NULL;
	}

	/*
	 * waiting for the lock here would deadlock.  If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries.
Go
	 * ahead and process it.
	 */
	head->node.in_tree = 0;
	rb_erase(&head->node.rb_node, &delayed_refs->root);

	delayed_refs->num_entries--;

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
	delayed_refs->num_heads--;
	if (list_empty(&head->cluster))
		delayed_refs->num_heads_ready--;

	list_del_init(&head->cluster);
	spin_unlock(&delayed_refs->lock);

	BUG_ON(head->extent_op);
	if (head->must_insert_reserved)
		ret = 1;

	mutex_unlock(&head->mutex);
	btrfs_put_delayed_ref(&head->node);
	return ret;
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * Drop the tree ref held by @buf and, when @last_ref says that was the
 * last reference, decide how to dispose of the block: return it straight
 * to the free space cache, refill the block reserve it came from, or pin
 * it until the transaction commits.
 */
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   u64 parent, int last_ref)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_group_cache *cache = NULL;
	int ret;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
						 parent, root->root_key.objectid,
						 btrfs_header_level(buf),
						 BTRFS_DROP_DELAYED_REF, NULL);
		BUG_ON(ret);
	}

	if (!last_ref)
		return;

	block_rsv = get_block_rsv(trans, root);
	cache = btrfs_lookup_block_group(root->fs_info, buf->start);
	if (block_rsv->space_info != cache->space_info)
		goto out;

	if (btrfs_header_generation(buf) == trans->transid) {
		/* block was allocated in this very transaction */
		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			ret = check_ref_cleanup(trans, root, buf->start);
			if (!ret)
				goto pin;
		}

		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			pin_down_extent(root, cache, buf->start, buf->len, 1);
			goto pin;
		}

		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));

		btrfs_add_free_space(cache, buf->start, buf->len);
		ret = update_reserved_bytes(cache, buf->len, 0, 0);
		if (ret == -EAGAIN) {
			/* block group became read-only */
			update_reserved_bytes(cache, buf->len, 0, 1);
			goto out;
		}

		/* try to give the bytes back to our block reserve */
		ret = 1;
		spin_lock(&block_rsv->lock);
		if (block_rsv->reserved < block_rsv->size) {
			block_rsv->reserved += buf->len;
			ret = 0;
		}
		spin_unlock(&block_rsv->lock);

		if (ret) {
			spin_lock(&cache->space_info->lock);
			cache->space_info->bytes_reserved -= buf->len;
			spin_unlock(&cache->space_info->lock);
		}
		goto out;
	}
pin:
	if (block_rsv->durable && !cache->ro) {
		ret = 0;
		spin_lock(&cache->lock);
		if (!cache->ro) {
			cache->reserved_pinned += buf->len;
			ret = 1;
		}
		spin_unlock(&cache->lock);

		if (ret) {
			spin_lock(&block_rsv->lock);
			block_rsv->freed[trans->transid & 0x1] += buf->len;
			spin_unlock(&block_rsv->lock);
		}
	}
out:
	btrfs_put_block_group(cache);
}

/*
 * Queue the drop of one reference on [bytenr, bytenr + num_bytes) as a
 * delayed ref (or pin the range directly for tree log blocks).
 */
int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent,
		      u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 */
	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
		/* unlocks the pinned mutex */
		btrfs_pin_extent(root, bytenr, num_bytes, 1);
		ret = 0;
	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_DROP_DELAYED_REF, NULL);
		BUG_ON(ret);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner,
					offset, BTRFS_DROP_DELAYED_REF, NULL);
		BUG_ON(ret);
	}
	return ret;
}

/* round @val up to the next stripe boundary */
static u64 stripe_align(struct btrfs_root *root, u64 val)
{
	u64 mask = ((u64)root->stripesize - 1);
	u64 ret = (val + mask) & ~mask;
	return ret;
}

/*
 * when we wait for progress in the block group caching, it's because
 * our allocation attempt failed at least once.  So, we must sleep
 * and let some progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to
 * show up, and then it will check the block group free space numbers
 * for our min num_bytes.  Another option is to have it go ahead
 * and look in the rbtree for a free extent of a given size, but this
 * is a good start.
*/ static noinline int wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, u64 num_bytes) { struct btrfs_caching_control *caching_ctl; DEFINE_WAIT(wait); caching_ctl = get_caching_control(cache); if (!caching_ctl) return 0; wait_event(caching_ctl->wait, block_group_cache_done(cache) || (cache->free_space >= num_bytes)); put_caching_control(caching_ctl); return 0; } static noinline int wait_block_group_cache_done(struct btrfs_block_group_cache *cache) { struct btrfs_caching_control *caching_ctl; DEFINE_WAIT(wait); caching_ctl = get_caching_control(cache); if (!caching_ctl) return 0; wait_event(caching_ctl->wait, block_group_cache_done(cache)); put_caching_control(caching_ctl); return 0; } static int get_block_group_index(struct btrfs_block_group_cache *cache) { int index; if (cache->flags & BTRFS_BLOCK_GROUP_RAID10) index = 0; else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1) index = 1; else if (cache->flags & BTRFS_BLOCK_GROUP_DUP) index = 2; else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0) index = 3; else index = 4; return index; } enum btrfs_loop_type { LOOP_FIND_IDEAL = 0, LOOP_CACHING_NOWAIT = 1, LOOP_CACHING_WAIT = 2, LOOP_ALLOC_CHUNK = 3, LOOP_NO_EMPTY_SIZE = 4, }; /* * walks the btree of allocated extents and find a hole of a given size. * The key ins is changed to record the hole: * ins->objectid == block start * ins->flags = BTRFS_EXTENT_ITEM_KEY * ins->offset == number of blocks * Any available blocks before search_start are skipped. 
*/
static noinline int find_free_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
				     u64 search_start, u64 search_end,
				     u64 hint_byte, struct btrfs_key *ins,
				     int data)
{
	int ret = 0;
	struct btrfs_root *root = orig_root->fs_info->extent_root;
	struct btrfs_free_cluster *last_ptr = NULL;
	struct btrfs_block_group_cache *block_group = NULL;
	int empty_cluster = 2 * 1024 * 1024;
	int allowed_chunk_alloc = 0;
	int done_chunk_alloc = 0;
	struct btrfs_space_info *space_info;
	int last_ptr_loop = 0;
	int loop = 0;
	int index = 0;
	bool found_uncached_bg = false;
	bool failed_cluster_refill = false;
	bool failed_alloc = false;
	u64 ideal_cache_percent = 0;
	u64 ideal_cache_offset = 0;

	WARN_ON(num_bytes < root->sectorsize);
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
	ins->objectid = 0;
	ins->offset = 0;

	space_info = __find_space_info(root->fs_info, data);
	if (!space_info) {
		printk(KERN_ERR "No space info for %d\n", data);
		return -ENOSPC;
	}

	if (orig_root->ref_cows || empty_size)
		allowed_chunk_alloc = 1;

	if (data & BTRFS_BLOCK_GROUP_METADATA) {
		last_ptr = &root->fs_info->meta_alloc_cluster;
		if (!btrfs_test_opt(root, SSD))
			empty_cluster = 64 * 1024;
	}

	if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
		last_ptr = &root->fs_info->data_alloc_cluster;
	}

	if (last_ptr) {
		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			hint_byte = last_ptr->window_start;
		spin_unlock(&last_ptr->lock);
	}

	search_start = max(search_start, first_logical_byte(root, 0));
	search_start = max(search_start, hint_byte);

	if (!last_ptr)
		empty_cluster = 0;

	if (search_start == hint_byte) {
ideal_cache:
		block_group = btrfs_lookup_block_group(root->fs_info,
						       search_start);
		/*
		 * we don't want to use the block group if it doesn't match our
		 * allocation bits, or if its not cached.
		 *
		 * However if we are re-searching with an ideal block group
		 * picked out then we don't care that the block group is cached.
		 */
		if (block_group && block_group_bits(block_group, data) &&
		    (block_group->cached != BTRFS_CACHE_NO ||
		     search_start == ideal_cache_offset)) {
			down_read(&space_info->groups_sem);
			if (list_empty(&block_group->list) ||
			    block_group->ro) {
				/*
				 * someone is removing this block group,
				 * we can't jump into the have_block_group
				 * target because our list pointers are not
				 * valid
				 */
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
			} else {
				index = get_block_group_index(block_group);
				goto have_block_group;
			}
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}
search:
	down_read(&space_info->groups_sem);
	list_for_each_entry(block_group, &space_info->block_groups[index],
			    list) {
		u64 offset;
		int cached;

		btrfs_get_block_group(block_group);
		search_start = block_group->key.objectid;

have_block_group:
		if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
			u64 free_percent;

			/* estimate how empty this group is (100 = empty) */
			free_percent = btrfs_block_group_used(&block_group->item);
			free_percent *= 100;
			free_percent = div64_u64(free_percent,
						 block_group->key.offset);
			free_percent = 100 - free_percent;
			if (free_percent > ideal_cache_percent &&
			    likely(!block_group->ro)) {
				ideal_cache_offset = block_group->key.objectid;
				ideal_cache_percent = free_percent;
			}

			/*
			 * We only want to start kthread caching if we are at
			 * the point where we will wait for caching to make
			 * progress, or if our ideal search is over and we've
			 * found somebody to start caching.
			 */
			if (loop > LOOP_CACHING_NOWAIT ||
			    (loop > LOOP_FIND_IDEAL &&
			     atomic_read(&space_info->caching_threads) < 2)) {
				ret = cache_block_group(block_group);
				BUG_ON(ret);
			}
			found_uncached_bg = true;

			/*
			 * If loop is set for cached only, try the next block
			 * group.
			 */
			if (loop == LOOP_FIND_IDEAL)
				goto loop;
		}

		cached = block_group_cache_done(block_group);
		if (unlikely(!cached))
			found_uncached_bg = true;

		if (unlikely(block_group->ro))
			goto loop;

		/*
		 * Ok we want to try and use the cluster allocator, so lets look
		 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
		 * have tried the cluster allocator plenty of times at this
		 * point and not have found anything, so we are likely way too
		 * fragmented for the clustering stuff to find anything, so lets
		 * just skip it and let the allocator find whatever block it can
		 * find
		 */
		if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
			/*
			 * the refill lock keeps out other
			 * people trying to start a new cluster
			 */
			spin_lock(&last_ptr->refill_lock);
			if (last_ptr->block_group &&
			    (last_ptr->block_group->ro ||
			    !block_group_bits(last_ptr->block_group, data))) {
				offset = 0;
				goto refill_cluster;
			}

			offset = btrfs_alloc_from_cluster(block_group, last_ptr,
						 num_bytes, search_start);
			if (offset) {
				/* we have a block, we're done */
				spin_unlock(&last_ptr->refill_lock);
				goto checks;
			}

			spin_lock(&last_ptr->lock);
			/*
			 * whoops, this cluster doesn't actually point to
			 * this block group.  Get a ref on the block
			 * group it does point to and try again
			 */
			if (!last_ptr_loop && last_ptr->block_group &&
			    last_ptr->block_group != block_group) {

				btrfs_put_block_group(block_group);
				block_group = last_ptr->block_group;
				btrfs_get_block_group(block_group);
				spin_unlock(&last_ptr->lock);
				spin_unlock(&last_ptr->refill_lock);

				last_ptr_loop = 1;
				search_start = block_group->key.objectid;
				/*
				 * we know this block group is properly
				 * in the list because
				 * btrfs_remove_block_group, drops the
				 * cluster before it removes the block
				 * group from the list
				 */
				goto have_block_group;
			}
			spin_unlock(&last_ptr->lock);
refill_cluster:
			/*
			 * this cluster didn't work out, free it and
			 * start over
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);

			last_ptr_loop = 0;

			/* allocate a cluster in this block group */
			ret = btrfs_find_space_cluster(trans, root,
					       block_group, last_ptr,
					       offset, num_bytes,
					       empty_cluster + empty_size);
			if (ret == 0) {
				/*
				 * now pull our allocation out of this
				 * cluster
				 */
				offset = btrfs_alloc_from_cluster(block_group,
						  last_ptr, num_bytes,
						  search_start);
				if (offset) {
					/* we found one, proceed */
					spin_unlock(&last_ptr->refill_lock);
					goto checks;
				}
			} else if (!cached && loop > LOOP_CACHING_NOWAIT
				   && !failed_cluster_refill) {
				spin_unlock(&last_ptr->refill_lock);

				failed_cluster_refill = true;
				wait_block_group_cache_progress(block_group,
				       num_bytes + empty_cluster + empty_size);
				goto have_block_group;
			}

			/*
			 * at this point we either didn't find a cluster
			 * or we weren't able to allocate a block from our
			 * cluster.  Free the cluster we've been trying
			 * to use, and go to the next block group
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);
			spin_unlock(&last_ptr->refill_lock);
			goto loop;
		}

		offset = btrfs_find_space_for_alloc(block_group, search_start,
						    num_bytes, empty_size);
		/*
		 * If we didn't find a chunk, and we haven't failed on this
		 * block group before, and this block group is in the middle of
		 * caching and we are ok with waiting, then go ahead and wait
		 * for progress to be made, and set failed_alloc to true.
		 *
		 * If failed_alloc is true then we've already waited on this
		 * block group once and should move on to the next block group.
		 */
		if (!offset && !failed_alloc && !cached &&
		    loop > LOOP_CACHING_NOWAIT) {
			wait_block_group_cache_progress(block_group,
						num_bytes + empty_size);
			failed_alloc = true;
			goto have_block_group;
		} else if (!offset) {
			goto loop;
		}
checks:
		search_start = stripe_align(root, offset);
		/* move on to the next group */
		if (search_start + num_bytes >= search_end) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}

		/* move on to the next group */
		if (search_start + num_bytes >
		    block_group->key.objectid + block_group->key.offset) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}

		/*
		 * NOTE(review): the ins/free-space block below is repeated
		 * after update_reserved_bytes() — looks intentional for this
		 * code's vintage but worth confirming against upstream.
		 */
		ins->objectid = search_start;
		ins->offset = num_bytes;

		if (offset < search_start)
			btrfs_add_free_space(block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);

		ret = update_reserved_bytes(block_group, num_bytes, 1,
					    (data & BTRFS_BLOCK_GROUP_DATA));
		if (ret == -EAGAIN) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}

		/* we are all good, lets return */
		ins->objectid = search_start;
		ins->offset = num_bytes;

		if (offset < search_start)
			btrfs_add_free_space(block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);
		break;
loop:
		failed_cluster_refill = false;
		failed_alloc = false;
		BUG_ON(index != get_block_group_index(block_group));
		btrfs_put_block_group(block_group);
	}
	up_read(&space_info->groups_sem);

	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
		goto search;

	/* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
	 *			for them to make caching progress.  Also
	 *			determine the best possible bg to cache
	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
	 *			caching kthreads as we move along
	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
	 *			again
	 */
	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
	    (found_uncached_bg || empty_size || empty_cluster ||
	     allowed_chunk_alloc)) {
		index = 0;
		if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
			found_uncached_bg = false;
			loop++;
			if (!ideal_cache_percent &&
			    atomic_read(&space_info->caching_threads))
				goto search;

			/*
			 * 1 of the following 2 things have happened so far
			 *
			 * 1) We found an ideal block group for caching that
			 * is mostly full and will cache quickly, so we might
			 * as well wait for it.
			 *
			 * 2) We searched for cached only and we didn't find
			 * anything, and we didn't start any caching kthreads
			 * either, so chances are we will loop through and
			 * start a couple caching kthreads, and then come back
			 * around and just wait for them.  This will be slower
			 * because we will have 2 caching kthreads reading at
			 * the same time when we could have just started one
			 * and waited for it to get far enough to give us an
			 * allocation, so go ahead and go to the wait caching
			 * loop.
			 */
			loop = LOOP_CACHING_WAIT;
			search_start = ideal_cache_offset;
			ideal_cache_percent = 0;
			goto ideal_cache;
		} else if (loop == LOOP_FIND_IDEAL) {
			/*
			 * Didn't find an uncached bg, wait on anything we find
			 * next.
			 */
			loop = LOOP_CACHING_WAIT;
			goto search;
		}

		if (loop < LOOP_CACHING_WAIT) {
			loop++;
			goto search;
		}

		if (loop == LOOP_ALLOC_CHUNK) {
			empty_size = 0;
			empty_cluster = 0;
		}

		if (allowed_chunk_alloc) {
			ret = do_chunk_alloc(trans, root, num_bytes +
					     2 * 1024 * 1024, data, 1);
			allowed_chunk_alloc = 0;
			done_chunk_alloc = 1;
		} else if (!done_chunk_alloc) {
			space_info->force_alloc = 1;
		}

		if (loop < LOOP_NO_EMPTY_SIZE) {
			loop++;
			goto search;
		}
		ret = -ENOSPC;
	} else if (!ins->objectid) {
		ret = -ENOSPC;
	}

	/* we found what we needed */
	if (ins->objectid) {
		if (!(data & BTRFS_BLOCK_GROUP_DATA))
			trans->block_group = block_group->key.objectid;

		btrfs_put_block_group(block_group);
		ret = 0;
	}

	return ret;
}

/* print a space_info summary (and optionally its block groups) to the log */
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups)
{
	struct btrfs_block_group_cache *cache;
	int index = 0;

	spin_lock(&info->lock);
	printk(KERN_INFO "space_info has %llu free, is %sfull\n",
	       (unsigned long long)(info->total_bytes - info->bytes_used -
				    info->bytes_pinned - info->bytes_reserved -
				    info->bytes_readonly),
	       (info->full) ?
"" : "not ");
	printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
	       (unsigned long long)info->total_bytes,
	       (unsigned long long)info->bytes_used,
	       (unsigned long long)info->bytes_pinned,
	       (unsigned long long)info->bytes_reserved,
	       (unsigned long long)info->bytes_may_use,
	       (unsigned long long)info->bytes_readonly);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
		       "%llu pinned %llu reserved\n",
		       (unsigned long long)cache->key.objectid,
		       (unsigned long long)cache->key.offset,
		       (unsigned long long)btrfs_block_group_used(&cache->item),
		       (unsigned long long)cache->pinned,
		       (unsigned long long)cache->reserved);
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}

/*
 * Reserve an extent of at least @min_alloc_size bytes, retrying with
 * half the size (and forcing a chunk allocation) each time the search
 * comes back -ENOSPC.  The result is recorded in @ins.
 */
int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 u64 search_end, struct btrfs_key *ins,
			 u64 data)
{
	int ret;
	u64 search_start = 0;

	data = btrfs_get_alloc_profile(root, data);
again:
	/*
	 * the only place that sets empty_size is btrfs_realloc_node, which
	 * is not called recursively on allocations
	 */
	if (empty_size || root->ref_cows)
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     num_bytes + 2 * 1024 * 1024, data, 0);

	WARN_ON(num_bytes < root->sectorsize);
	ret = find_free_extent(trans, root, num_bytes, empty_size,
			       search_start, search_end, hint_byte,
			       ins, data);

	if (ret == -ENOSPC && num_bytes > min_alloc_size) {
		/* halve the request (sector-aligned) and try again */
		num_bytes = num_bytes >> 1;
		num_bytes = num_bytes & ~(root->sectorsize - 1);
		num_bytes = max(num_bytes, min_alloc_size);
		do_chunk_alloc(trans, root->fs_info->extent_root,
			       num_bytes, data, 1);
		goto again;
	}
	if (ret == -ENOSPC) {
		struct btrfs_space_info *sinfo;

		sinfo = __find_space_info(root->fs_info, data);
		printk(KERN_ERR "btrfs allocation failed flags %llu, "
		       "wanted %llu\n", (unsigned long long)data,
		       (unsigned long long)num_bytes);
		dump_space_info(sinfo, num_bytes, 1);
	}

	return ret;
}

/*
 * Give back a reserved-but-unused extent: discard it, re-add it to the
 * free space cache and drop its reserved-bytes accounting.
 */
int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
{
	struct btrfs_block_group_cache *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(root->fs_info, start);
	if (!cache) {
		printk(KERN_ERR "Unable to find block group for %llu\n",
		       (unsigned long long)start);
		return -ENOSPC;
	}

	ret = btrfs_discard_extent(root, start, len);

	btrfs_add_free_space(cache, start, len);
	update_reserved_bytes(cache, len, 0, 1);
	btrfs_put_block_group(cache);

	return ret;
}

/*
 * Insert the extent item plus its inline back reference for a data
 * extent that was reserved earlier, then update the usage counters.
 */
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int type;
	u32 size;

	if (parent > 0)
		type = BTRFS_SHARED_DATA_REF_KEY;
	else
		type = BTRFS_EXTENT_DATA_REF_KEY;

	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_DATA);

	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (parent > 0) {
		struct btrfs_shared_data_ref *ref;
		ref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
btrfs_set_shared_data_ref_count(leaf, ref, ref_mod); } else { struct btrfs_extent_data_ref *ref; ref = (struct btrfs_extent_data_ref *)(&iref->offset); btrfs_set_extent_data_ref_root(leaf, ref, root_objectid); btrfs_set_extent_data_ref_objectid(leaf, ref, owner); btrfs_set_extent_data_ref_offset(leaf, ref, offset); btrfs_set_extent_data_ref_count(leaf, ref, ref_mod); } btrfs_mark_buffer_dirty(path->nodes[0]); btrfs_free_path(path); ret = update_block_group(trans, root, ins->objectid, ins->offset, 1); if (ret) { printk(KERN_ERR "btrfs update block group failed for %llu " "%llu\n", (unsigned long long)ins->objectid, (unsigned long long)ins->offset); BUG(); } return ret; } static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 parent, u64 root_objectid, u64 flags, struct btrfs_disk_key *key, int level, struct btrfs_key *ins) { int ret; struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_extent_item *extent_item; struct btrfs_tree_block_info *block_info; struct btrfs_extent_inline_ref *iref; struct btrfs_path *path; struct extent_buffer *leaf; u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref); path = btrfs_alloc_path(); BUG_ON(!path); path->leave_spinning = 1; ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, ins, size); BUG_ON(ret); leaf = path->nodes[0]; extent_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); btrfs_set_extent_refs(leaf, extent_item, 1); btrfs_set_extent_generation(leaf, extent_item, trans->transid); btrfs_set_extent_flags(leaf, extent_item, flags | BTRFS_EXTENT_FLAG_TREE_BLOCK); block_info = (struct btrfs_tree_block_info *)(extent_item + 1); btrfs_set_tree_block_key(leaf, block_info, key); btrfs_set_tree_block_level(leaf, block_info, level); iref = (struct btrfs_extent_inline_ref *)(block_info + 1); if (parent > 0) { BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)); btrfs_set_extent_inline_ref_type(leaf, iref, BTRFS_SHARED_BLOCK_REF_KEY); 
btrfs_set_extent_inline_ref_offset(leaf, iref, parent); } else { btrfs_set_extent_inline_ref_type(leaf, iref, BTRFS_TREE_BLOCK_REF_KEY); btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid); } btrfs_mark_buffer_dirty(leaf); btrfs_free_path(path); ret = update_block_group(trans, root, ins->objectid, ins->offset, 1); if (ret) { printk(KERN_ERR "btrfs update block group failed for %llu " "%llu\n", (unsigned long long)ins->objectid, (unsigned long long)ins->offset); BUG(); } return ret; } int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 root_objectid, u64 owner, u64 offset, struct btrfs_key *ins) { int ret; BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID); ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset, 0, root_objectid, owner, offset, BTRFS_ADD_DELAYED_EXTENT, NULL); return ret; } /* * this is used by the tree logging recovery code. It records that * an extent has been allocated and makes sure to clear the free * space cache bits as well */ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 root_objectid, u64 owner, u64 offset, struct btrfs_key *ins) { int ret; struct btrfs_block_group_cache *block_group; struct btrfs_caching_control *caching_ctl; u64 start = ins->objectid; u64 num_bytes = ins->offset; block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid); cache_block_group(block_group); caching_ctl = get_caching_control(block_group); if (!caching_ctl) { BUG_ON(!block_group_cache_done(block_group)); ret = btrfs_remove_free_space(block_group, start, num_bytes); BUG_ON(ret); } else { mutex_lock(&caching_ctl->mutex); if (start >= caching_ctl->progress) { ret = add_excluded_extent(root, start, num_bytes); BUG_ON(ret); } else if (start + num_bytes <= caching_ctl->progress) { ret = btrfs_remove_free_space(block_group, start, num_bytes); BUG_ON(ret); } else { num_bytes = caching_ctl->progress - start; ret = 
btrfs_remove_free_space(block_group, start, num_bytes); BUG_ON(ret); start = caching_ctl->progress; num_bytes = ins->objectid + ins->offset - caching_ctl->progress; ret = add_excluded_extent(root, start, num_bytes); BUG_ON(ret); } mutex_unlock(&caching_ctl->mutex); put_caching_control(caching_ctl); } ret = update_reserved_bytes(block_group, ins->offset, 1, 1); BUG_ON(ret); btrfs_put_block_group(block_group); ret = alloc_reserved_file_extent(trans, root, 0, root_objectid, 0, owner, offset, ins, 1); return ret; } struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u32 blocksize, int level) { struct extent_buffer *buf; buf = btrfs_find_create_tree_block(root, bytenr, blocksize); if (!buf) return ERR_PTR(-ENOMEM); btrfs_set_header_generation(buf, trans->transid); btrfs_set_buffer_lockdep_class(buf, level); btrfs_tree_lock(buf); clean_tree_block(trans, root, buf); btrfs_set_lock_blocking(buf); btrfs_set_buffer_uptodate(buf); if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { /* * we allow two log transactions at a time, use different * EXENT bit to differentiate dirty pages. 
*/ if (root->log_transid % 2 == 0) set_extent_dirty(&root->dirty_log_pages, buf->start, buf->start + buf->len - 1, GFP_NOFS); else set_extent_new(&root->dirty_log_pages, buf->start, buf->start + buf->len - 1, GFP_NOFS); } else { set_extent_dirty(&trans->transaction->dirty_pages, buf->start, buf->start + buf->len - 1, GFP_NOFS); } trans->blocks_used++; /* this returns a buffer locked for blocking */ return buf; } static struct btrfs_block_rsv * use_block_rsv(struct btrfs_trans_handle *trans, struct btrfs_root *root, u32 blocksize) { struct btrfs_block_rsv *block_rsv; int ret; block_rsv = get_block_rsv(trans, root); if (block_rsv->size == 0) { ret = reserve_metadata_bytes(block_rsv, blocksize); if (ret) return ERR_PTR(ret); return block_rsv; } ret = block_rsv_use_bytes(block_rsv, blocksize); if (!ret) return block_rsv; WARN_ON(1); printk(KERN_INFO"block_rsv size %llu reserved %llu freed %llu %llu\n", block_rsv->size, block_rsv->reserved, block_rsv->freed[0], block_rsv->freed[1]); return ERR_PTR(-ENOSPC); } static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize) { block_rsv_add_bytes(block_rsv, blocksize, 0); block_rsv_release_bytes(block_rsv, NULL, 0); } /* * finds a free extent and does all the dirty work required for allocation * returns the key for the extent through ins, and a tree buffer for * the first block of the extent through buf. * * returns the tree buffer or NULL. 
*/ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, u32 blocksize, u64 parent, u64 root_objectid, struct btrfs_disk_key *key, int level, u64 hint, u64 empty_size) { struct btrfs_key ins; struct btrfs_block_rsv *block_rsv; struct extent_buffer *buf; u64 flags = 0; int ret; block_rsv = use_block_rsv(trans, root, blocksize); if (IS_ERR(block_rsv)) return ERR_CAST(block_rsv); ret = btrfs_reserve_extent(trans, root, blocksize, blocksize, empty_size, hint, (u64)-1, &ins, 0); if (ret) { unuse_block_rsv(block_rsv, blocksize); return ERR_PTR(ret); } buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize, level); BUG_ON(IS_ERR(buf)); if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { if (parent == 0) parent = ins.objectid; flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; } else BUG_ON(parent > 0); if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { struct btrfs_delayed_extent_op *extent_op; extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS); BUG_ON(!extent_op); if (key) memcpy(&extent_op->key, key, sizeof(extent_op->key)); else memset(&extent_op->key, 0, sizeof(extent_op->key)); extent_op->flags_to_set = flags; extent_op->update_key = 1; extent_op->update_flags = 1; extent_op->is_data = 0; ret = btrfs_add_delayed_tree_ref(trans, ins.objectid, ins.offset, parent, root_objectid, level, BTRFS_ADD_DELAYED_EXTENT, extent_op); BUG_ON(ret); } return buf; } struct walk_control { u64 refs[BTRFS_MAX_LEVEL]; u64 flags[BTRFS_MAX_LEVEL]; struct btrfs_key update_progress; int stage; int level; int shared_level; int update_ref; int keep_locks; int reada_slot; int reada_count; }; #define DROP_REFERENCE 1 #define UPDATE_BACKREF 2 static noinline void reada_walk_down(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct walk_control *wc, struct btrfs_path *path) { u64 bytenr; u64 generation; u64 refs; u64 flags; u64 last = 0; u32 nritems; u32 blocksize; struct btrfs_key key; struct extent_buffer *eb; int ret; int slot; int nread = 
0; if (path->slots[wc->level] < wc->reada_slot) { wc->reada_count = wc->reada_count * 2 / 3; wc->reada_count = max(wc->reada_count, 2); } else { wc->reada_count = wc->reada_count * 3 / 2; wc->reada_count = min_t(int, wc->reada_count, BTRFS_NODEPTRS_PER_BLOCK(root)); } eb = path->nodes[wc->level]; nritems = btrfs_header_nritems(eb); blocksize = btrfs_level_size(root, wc->level - 1); for (slot = path->slots[wc->level]; slot < nritems; slot++) { if (nread >= wc->reada_count) break; cond_resched(); bytenr = btrfs_node_blockptr(eb, slot); generation = btrfs_node_ptr_generation(eb, slot); if (slot == path->slots[wc->level]) goto reada; if (wc->stage == UPDATE_BACKREF && generation <= root->root_key.offset) continue; /* We don't lock the tree block, it's OK to be racy here */ ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize, &refs, &flags); BUG_ON(ret); BUG_ON(refs == 0); if (wc->stage == DROP_REFERENCE) { if (refs == 1) goto reada; if (wc->level == 1 && (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) continue; if (!wc->update_ref || generation <= root->root_key.offset) continue; btrfs_node_key_to_cpu(eb, &key, slot); ret = btrfs_comp_cpu_keys(&key, &wc->update_progress); if (ret < 0) continue; } else { if (wc->level == 1 && (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) continue; } reada: ret = readahead_tree_block(root, bytenr, blocksize, generation); if (ret) break; last = bytenr + blocksize; nread++; } wc->reada_slot = slot; } /* * hepler to process tree block while walking down the tree. * * when wc->stage == UPDATE_BACKREF, this function updates * back refs for pointers in the block. * * NOTE: return value 1 means we should stop walking down. 
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc, int lookup_info)
{
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	int ret;

	/* in UPDATE_BACKREF, blocks owned by other roots are not ours to fix */
	if (wc->stage == UPDATE_BACKREF &&
	    btrfs_header_owner(eb) != root->root_key.objectid)
		return 1;

	/*
	 * when reference count of tree block is 1, it won't increase
	 * again. once full backref flag is set, we never clear it.
	 */
	if (lookup_info &&
	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_lookup_extent_info(trans, root,
					       eb->start, eb->len,
					       &wc->refs[level],
					       &wc->flags[level]);
		BUG_ON(ret);
		BUG_ON(wc->refs[level] == 0);
	}

	if (wc->stage == DROP_REFERENCE) {
		/* block is shared by another tree; stop walking down */
		if (wc->refs[level] > 1)
			return 1;

		if (path->locks[level] && !wc->keep_locks) {
			btrfs_tree_unlock(eb);
			path->locks[level] = 0;
		}
		return 0;
	}

	/* wc->stage == UPDATE_BACKREF */
	if (!(wc->flags[level] & flag)) {
		BUG_ON(!path->locks[level]);
		/* add normal refs, drop the owner's ref, then mark full backref */
		ret = btrfs_inc_ref(trans, root, eb, 1);
		BUG_ON(ret);
		ret = btrfs_dec_ref(trans, root, eb, 0);
		BUG_ON(ret);
		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
						  eb->len, flag, 0);
		BUG_ON(ret);
		wc->flags[level] |= flag;
	}

	/*
	 * the block is shared by multiple trees, so it's not good to
	 * keep the tree lock
	 */
	if (path->locks[level] && level > 0) {
		btrfs_tree_unlock(eb);
		path->locks[level] = 0;
	}
	return 0;
}

/*
 * helper to process tree block pointer.
 *
 * when wc->stage == DROP_REFERENCE, this function checks
 * reference count of the block pointed to. if the block
 * is shared and we need update back refs for the subtree
 * rooted at the block, this function changes wc->stage to
 * UPDATE_BACKREF. if the block is shared and there is no
 * need to update back, this function drops the reference
 * to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
*/ static noinline int do_walk_down(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct walk_control *wc, int *lookup_info) { u64 bytenr; u64 generation; u64 parent; u32 blocksize; struct btrfs_key key; struct extent_buffer *next; int level = wc->level; int reada = 0; int ret = 0; generation = btrfs_node_ptr_generation(path->nodes[level], path->slots[level]); /* * if the lower level block was created before the snapshot * was created, we know there is no need to update back refs * for the subtree */ if (wc->stage == UPDATE_BACKREF && generation <= root->root_key.offset) { *lookup_info = 1; return 1; } bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]); blocksize = btrfs_level_size(root, level - 1); next = btrfs_find_tree_block(root, bytenr, blocksize); if (!next) { next = btrfs_find_create_tree_block(root, bytenr, blocksize); if (!next) return -ENOMEM; reada = 1; } btrfs_tree_lock(next); btrfs_set_lock_blocking(next); ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize, &wc->refs[level - 1], &wc->flags[level - 1]); BUG_ON(ret); BUG_ON(wc->refs[level - 1] == 0); *lookup_info = 0; if (wc->stage == DROP_REFERENCE) { if (wc->refs[level - 1] > 1) { if (level == 1 && (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) goto skip; if (!wc->update_ref || generation <= root->root_key.offset) goto skip; btrfs_node_key_to_cpu(path->nodes[level], &key, path->slots[level]); ret = btrfs_comp_cpu_keys(&key, &wc->update_progress); if (ret < 0) goto skip; wc->stage = UPDATE_BACKREF; wc->shared_level = level - 1; } } else { if (level == 1 && (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) goto skip; } if (!btrfs_buffer_uptodate(next, generation)) { btrfs_tree_unlock(next); free_extent_buffer(next); next = NULL; *lookup_info = 1; } if (!next) { if (reada && level == 1) reada_walk_down(trans, root, wc, path); next = read_tree_block(root, bytenr, blocksize, generation); btrfs_tree_lock(next); btrfs_set_lock_blocking(next); } 
level--; BUG_ON(level != btrfs_header_level(next)); path->nodes[level] = next; path->slots[level] = 0; path->locks[level] = 1; wc->level = level; if (wc->level == 1) wc->reada_slot = 0; return 0; skip: wc->refs[level - 1] = 0; wc->flags[level - 1] = 0; if (wc->stage == DROP_REFERENCE) { if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) { parent = path->nodes[level]->start; } else { BUG_ON(root->root_key.objectid != btrfs_header_owner(path->nodes[level])); parent = 0; } ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent, root->root_key.objectid, level - 1, 0); BUG_ON(ret); } btrfs_tree_unlock(next); free_extent_buffer(next); *lookup_info = 1; return 1; } /* * hepler to process tree block while walking up the tree. * * when wc->stage == DROP_REFERENCE, this function drops * reference count on the block. * * when wc->stage == UPDATE_BACKREF, this function changes * wc->stage back to DROP_REFERENCE if we changed wc->stage * to UPDATE_BACKREF previously while processing the block. * * NOTE: return value 1 means we should stop walking up. */ static noinline int walk_up_proc(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct walk_control *wc) { int ret; int level = wc->level; struct extent_buffer *eb = path->nodes[level]; u64 parent = 0; if (wc->stage == UPDATE_BACKREF) { BUG_ON(wc->shared_level < level); if (level < wc->shared_level) goto out; ret = find_next_key(path, level + 1, &wc->update_progress); if (ret > 0) wc->update_ref = 0; wc->stage = DROP_REFERENCE; wc->shared_level = -1; path->slots[level] = 0; /* * check reference count again if the block isn't locked. * we should start walking down the tree again if reference * count is one. 
 */
		if (!path->locks[level]) {
			BUG_ON(level == 0);
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = 1;

			ret = btrfs_lookup_extent_info(trans, root,
						       eb->start, eb->len,
						       &wc->refs[level],
						       &wc->flags[level]);
			BUG_ON(ret);
			BUG_ON(wc->refs[level] == 0);
			/* refcount dropped to 1: restart the walk down here */
			if (wc->refs[level] == 1) {
				btrfs_tree_unlock(eb);
				path->locks[level] = 0;
				return 1;
			}
		}
	}

	/* wc->stage == DROP_REFERENCE */
	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

	if (wc->refs[level] == 1) {
		if (level == 0) {
			/* drop refs on everything this leaf points to */
			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
				ret = btrfs_dec_ref(trans, root, eb, 1);
			else
				ret = btrfs_dec_ref(trans, root, eb, 0);
			BUG_ON(ret);
		}
		/* make block locked assertion in clean_tree_block happy */
		if (!path->locks[level] &&
		    btrfs_header_generation(eb) == trans->transid) {
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = 1;
		}
		clean_tree_block(trans, root, eb);
	}

	/* work out the parent bytenr expected by the backref for this block */
	if (eb == root->node) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = eb->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(eb));
	} else {
		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = path->nodes[level + 1]->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level + 1]));
	}

	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
out:
	wc->refs[level] = 0;
	wc->flags[level] = 0;
	return 0;
}

/*
 * Walk down the tree from wc->level, running walk_down_proc on each
 * block and descending through do_walk_down, until we hit a leaf, run
 * out of slots, or a shared block stops the walk.  Returns 0 when the
 * downward walk stops, or a negative error from do_walk_down.
 */
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
	int lookup_info = 1;
	int ret;

	while (level >= 0) {
		ret = walk_down_proc(trans, root, path, wc, lookup_info);
		if (ret > 0)
			break;

		if (level == 0)
			break;

		if (path->slots[level] >=
		    btrfs_header_nritems(path->nodes[level]))
			break;

		ret = do_walk_down(trans, root, path, wc, &lookup_info);
		if (ret > 0) {
			/* subtree was skipped/dropped; move to next slot */
			path->slots[level]++;
			continue;
		} else if (ret < 0)
			return ret;
		level = wc->level;
	}
	return 0;
}

static noinline int walk_up_tree(struct
btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct walk_control *wc, int max_level) { int level = wc->level; int ret; path->slots[level] = btrfs_header_nritems(path->nodes[level]); while (level < max_level && path->nodes[level]) { wc->level = level; if (path->slots[level] + 1 < btrfs_header_nritems(path->nodes[level])) { path->slots[level]++; return 0; } else { ret = walk_up_proc(trans, root, path, wc); if (ret > 0) return 0; if (path->locks[level]) { btrfs_tree_unlock(path->nodes[level]); path->locks[level] = 0; } free_extent_buffer(path->nodes[level]); path->nodes[level] = NULL; level++; } } return 1; } /* * drop a subvolume tree. * * this function traverses the tree freeing any blocks that only * referenced by the tree. * * when a shared tree block is found. this function decreases its * reference count by one. if update_ref is true, this function * also make sure backrefs for the shared block and all lower level * blocks are properly updated. */ int btrfs_drop_snapshot(struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, int update_ref) { struct btrfs_path *path; struct btrfs_trans_handle *trans; struct btrfs_root *tree_root = root->fs_info->tree_root; struct btrfs_root_item *root_item = &root->root_item; struct walk_control *wc; struct btrfs_key key; int err = 0; int ret; int level; path = btrfs_alloc_path(); BUG_ON(!path); wc = kzalloc(sizeof(*wc), GFP_NOFS); BUG_ON(!wc); trans = btrfs_start_transaction(tree_root, 0); if (block_rsv) trans->block_rsv = block_rsv; if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { level = btrfs_header_level(root->node); path->nodes[level] = btrfs_lock_root_node(root); btrfs_set_lock_blocking(path->nodes[level]); path->slots[level] = 0; path->locks[level] = 1; memset(&wc->update_progress, 0, sizeof(wc->update_progress)); } else { btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); memcpy(&wc->update_progress, &key, sizeof(wc->update_progress)); level = 
root_item->drop_level; BUG_ON(level == 0); path->lowest_level = level; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); path->lowest_level = 0; if (ret < 0) { err = ret; goto out; } WARN_ON(ret > 0); /* * unlock our path, this is safe because only this * function is allowed to delete this snapshot */ btrfs_unlock_up_safe(path, 0); level = btrfs_header_level(root->node); while (1) { btrfs_tree_lock(path->nodes[level]); btrfs_set_lock_blocking(path->nodes[level]); ret = btrfs_lookup_extent_info(trans, root, path->nodes[level]->start, path->nodes[level]->len, &wc->refs[level], &wc->flags[level]); BUG_ON(ret); BUG_ON(wc->refs[level] == 0); if (level == root_item->drop_level) break; btrfs_tree_unlock(path->nodes[level]); WARN_ON(wc->refs[level] != 1); level--; } } wc->level = level; wc->shared_level = -1; wc->stage = DROP_REFERENCE; wc->update_ref = update_ref; wc->keep_locks = 0; wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root); while (1) { ret = walk_down_tree(trans, root, path, wc); if (ret < 0) { err = ret; break; } ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL); if (ret < 0) { err = ret; break; } if (ret > 0) { BUG_ON(wc->stage != DROP_REFERENCE); break; } if (wc->stage == DROP_REFERENCE) { level = wc->level; btrfs_node_key(path->nodes[level], &root_item->drop_progress, path->slots[level]); root_item->drop_level = level; } BUG_ON(wc->level == 0); if (btrfs_should_end_transaction(trans, tree_root)) { ret = btrfs_update_root(trans, tree_root, &root->root_key, root_item); BUG_ON(ret); btrfs_end_transaction_throttle(trans, tree_root); trans = btrfs_start_transaction(tree_root, 0); if (block_rsv) trans->block_rsv = block_rsv; } } btrfs_release_path(root, path); BUG_ON(err); ret = btrfs_del_root(trans, tree_root, &root->root_key); BUG_ON(ret); if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { ret = btrfs_find_last_root(tree_root, root->root_key.objectid, NULL, NULL); BUG_ON(ret < 0); if (ret > 0) { ret = btrfs_del_orphan_item(trans, tree_root, 
root->root_key.objectid); BUG_ON(ret); } } if (root->in_radix) { btrfs_free_fs_root(tree_root->fs_info, root); } else { free_extent_buffer(root->node); free_extent_buffer(root->commit_root); kfree(root); } out: btrfs_end_transaction_throttle(trans, tree_root); kfree(wc); btrfs_free_path(path); return err; } /* * drop subtree rooted at tree block 'node'. * * NOTE: this function will unlock and release tree block 'node' */ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *node, struct extent_buffer *parent) { struct btrfs_path *path; struct walk_control *wc; int level; int parent_level; int ret = 0; int wret; BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); path = btrfs_alloc_path(); BUG_ON(!path); wc = kzalloc(sizeof(*wc), GFP_NOFS); BUG_ON(!wc); btrfs_assert_tree_locked(parent); parent_level = btrfs_header_level(parent); extent_buffer_get(parent); path->nodes[parent_level] = parent; path->slots[parent_level] = btrfs_header_nritems(parent); btrfs_assert_tree_locked(node); level = btrfs_header_level(node); path->nodes[level] = node; path->slots[level] = 0; path->locks[level] = 1; wc->refs[parent_level] = 1; wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF; wc->level = level; wc->shared_level = -1; wc->stage = DROP_REFERENCE; wc->update_ref = 0; wc->keep_locks = 1; wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root); while (1) { wret = walk_down_tree(trans, root, path, wc); if (wret < 0) { ret = wret; break; } wret = walk_up_tree(trans, root, path, wc, parent_level); if (wret < 0) ret = wret; if (wret != 0) break; } kfree(wc); btrfs_free_path(path); return ret; } #if 0 static unsigned long calc_ra(unsigned long start, unsigned long last, unsigned long nr) { return min(last, start + nr - 1); } static noinline int relocate_inode_pages(struct inode *inode, u64 start, u64 len) { u64 page_start; u64 page_end; unsigned long first_index; unsigned long last_index; unsigned long i; struct page *page; struct 
extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct file_ra_state *ra; struct btrfs_ordered_extent *ordered; unsigned int total_read = 0; unsigned int total_dirty = 0; int ret = 0; ra = kzalloc(sizeof(*ra), GFP_NOFS); mutex_lock(&inode->i_mutex); first_index = start >> PAGE_CACHE_SHIFT; last_index = (start + len - 1) >> PAGE_CACHE_SHIFT; /* make sure the dirty trick played by the caller work */ ret = invalidate_inode_pages2_range(inode->i_mapping, first_index, last_index); if (ret) goto out_unlock; file_ra_state_init(ra, inode->i_mapping); for (i = first_index ; i <= last_index; i++) { if (total_read % ra->ra_pages == 0) { btrfs_force_ra(inode->i_mapping, ra, NULL, i, calc_ra(i, last_index, ra->ra_pages)); } total_read++; again: if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode)) BUG_ON(1); page = grab_cache_page(inode->i_mapping, i); if (!page) { ret = -ENOMEM; goto out_unlock; } if (!PageUptodate(page)) { btrfs_readpage(NULL, page); lock_page(page); if (!PageUptodate(page)) { unlock_page(page); page_cache_release(page); ret = -EIO; goto out_unlock; } } wait_on_page_writeback(page); page_start = (u64)page->index << PAGE_CACHE_SHIFT; page_end = page_start + PAGE_CACHE_SIZE - 1; lock_extent(io_tree, page_start, page_end, GFP_NOFS); ordered = btrfs_lookup_ordered_extent(inode, page_start); if (ordered) { unlock_extent(io_tree, page_start, page_end, GFP_NOFS); unlock_page(page); page_cache_release(page); btrfs_start_ordered_extent(inode, ordered, 1); btrfs_put_ordered_extent(ordered); goto again; } set_page_extent_mapped(page); if (i == first_index) set_extent_bits(io_tree, page_start, page_end, EXTENT_BOUNDARY, GFP_NOFS); btrfs_set_extent_delalloc(inode, page_start, page_end); set_page_dirty(page); total_dirty++; unlock_extent(io_tree, page_start, page_end, GFP_NOFS); unlock_page(page); page_cache_release(page); } out_unlock: kfree(ra); mutex_unlock(&inode->i_mutex); balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty); return ret; } static 
noinline int relocate_data_extent(struct inode *reloc_inode, struct btrfs_key *extent_key, u64 offset) { struct btrfs_root *root = BTRFS_I(reloc_inode)->root; struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree; struct extent_map *em; u64 start = extent_key->objectid - offset; u64 end = start + extent_key->offset - 1; em = alloc_extent_map(GFP_NOFS); BUG_ON(!em || IS_ERR(em)); em->start = start; em->len = extent_key->offset; em->block_len = extent_key->offset; em->block_start = extent_key->objectid; em->bdev = root->fs_info->fs_devices->latest_bdev; set_bit(EXTENT_FLAG_PINNED, &em->flags); /* setup extent map to cheat btrfs_readpage */ lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS); while (1) { int ret; write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); write_unlock(&em_tree->lock); if (ret != -EEXIST) { free_extent_map(em); break; } btrfs_drop_extent_cache(reloc_inode, start, end, 0); } unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS); return relocate_inode_pages(reloc_inode, start, extent_key->offset); } struct btrfs_ref_path { u64 extent_start; u64 nodes[BTRFS_MAX_LEVEL]; u64 root_objectid; u64 root_generation; u64 owner_objectid; u32 num_refs; int lowest_level; int current_level; int shared_level; struct btrfs_key node_keys[BTRFS_MAX_LEVEL]; u64 new_nodes[BTRFS_MAX_LEVEL]; }; struct disk_extent { u64 ram_bytes; u64 disk_bytenr; u64 disk_num_bytes; u64 offset; u64 num_bytes; u8 compression; u8 encryption; u16 other_encoding; }; static int is_cowonly_root(u64 root_objectid) { if (root_objectid == BTRFS_ROOT_TREE_OBJECTID || root_objectid == BTRFS_EXTENT_TREE_OBJECTID || root_objectid == BTRFS_CHUNK_TREE_OBJECTID || root_objectid == BTRFS_DEV_TREE_OBJECTID || root_objectid == BTRFS_TREE_LOG_OBJECTID || root_objectid == BTRFS_CSUM_TREE_OBJECTID) return 1; return 0; } static noinline int __next_ref_path(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root, struct btrfs_ref_path 
*ref_path, int first_time) { struct extent_buffer *leaf; struct btrfs_path *path; struct btrfs_extent_ref *ref; struct btrfs_key key; struct btrfs_key found_key; u64 bytenr; u32 nritems; int level; int ret = 1; path = btrfs_alloc_path(); if (!path) return -ENOMEM; if (first_time) { ref_path->lowest_level = -1; ref_path->current_level = -1; ref_path->shared_level = -1; goto walk_up; } walk_down: level = ref_path->current_level - 1; while (level >= -1) { u64 parent; if (level < ref_path->lowest_level) break; if (level >= 0) bytenr = ref_path->nodes[level]; else bytenr = ref_path->extent_start; BUG_ON(bytenr == 0); parent = ref_path->nodes[level + 1]; ref_path->nodes[level + 1] = 0; ref_path->current_level = level; BUG_ON(parent == 0); key.objectid = bytenr; key.offset = parent + 1; key.type = BTRFS_EXTENT_REF_KEY; ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0); if (ret < 0) goto out; BUG_ON(ret == 0); leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); if (path->slots[0] >= nritems) { ret = btrfs_next_leaf(extent_root, path); if (ret < 0) goto out; if (ret > 0) goto next; leaf = path->nodes[0]; } btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.objectid == bytenr && found_key.type == BTRFS_EXTENT_REF_KEY) { if (level < ref_path->shared_level) ref_path->shared_level = level; goto found; } next: level--; btrfs_release_path(extent_root, path); cond_resched(); } /* reached lowest level */ ret = 1; goto out; walk_up: level = ref_path->current_level; while (level < BTRFS_MAX_LEVEL - 1) { u64 ref_objectid; if (level >= 0) bytenr = ref_path->nodes[level]; else bytenr = ref_path->extent_start; BUG_ON(bytenr == 0); key.objectid = bytenr; key.offset = 0; key.type = BTRFS_EXTENT_REF_KEY; ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0); if (ret < 0) goto out; leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); if (path->slots[0] >= nritems) { ret = btrfs_next_leaf(extent_root, path); if (ret < 0) goto out; if 
(ret > 0) { /* the extent was freed by someone */ if (ref_path->lowest_level == level) goto out; btrfs_release_path(extent_root, path); goto walk_down; } leaf = path->nodes[0]; } btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.objectid != bytenr || found_key.type != BTRFS_EXTENT_REF_KEY) { /* the extent was freed by someone */ if (ref_path->lowest_level == level) { ret = 1; goto out; } btrfs_release_path(extent_root, path); goto walk_down; } found: ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref); ref_objectid = btrfs_ref_objectid(leaf, ref); if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) { if (first_time) { level = (int)ref_objectid; BUG_ON(level >= BTRFS_MAX_LEVEL); ref_path->lowest_level = level; ref_path->current_level = level; ref_path->nodes[level] = bytenr; } else { WARN_ON(ref_objectid != level); } } else { WARN_ON(level != -1); } first_time = 0; if (ref_path->lowest_level == level) { ref_path->owner_objectid = ref_objectid; ref_path->num_refs = btrfs_ref_num_refs(leaf, ref); } /* * the block is tree root or the block isn't in reference * counted tree. */ if (found_key.objectid == found_key.offset || is_cowonly_root(btrfs_ref_root(leaf, ref))) { ref_path->root_objectid = btrfs_ref_root(leaf, ref); ref_path->root_generation = btrfs_ref_generation(leaf, ref); if (level < 0) { /* special reference from the tree log */ ref_path->nodes[0] = found_key.offset; ref_path->current_level = 0; } ret = 0; goto out; } level++; BUG_ON(ref_path->nodes[level] != 0); ref_path->nodes[level] = found_key.offset; ref_path->current_level = level; /* * the reference was created in the running transaction, * no need to continue walking up. */ if (btrfs_ref_generation(leaf, ref) == trans->transid) { ref_path->root_objectid = btrfs_ref_root(leaf, ref); ref_path->root_generation = btrfs_ref_generation(leaf, ref); ret = 0; goto out; } btrfs_release_path(extent_root, path); cond_resched(); } /* reached max tree level, but no tree root found. 
*/ BUG(); out: btrfs_free_path(path); return ret; } static int btrfs_first_ref_path(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root, struct btrfs_ref_path *ref_path, u64 extent_start) { memset(ref_path, 0, sizeof(*ref_path)); ref_path->extent_start = extent_start; return __next_ref_path(trans, extent_root, ref_path, 1); } static int btrfs_next_ref_path(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root, struct btrfs_ref_path *ref_path) { return __next_ref_path(trans, extent_root, ref_path, 0); } static noinline int get_new_locations(struct inode *reloc_inode, struct btrfs_key *extent_key, u64 offset, int no_fragment, struct disk_extent **extents, int *nr_extents) { struct btrfs_root *root = BTRFS_I(reloc_inode)->root; struct btrfs_path *path; struct btrfs_file_extent_item *fi; struct extent_buffer *leaf; struct disk_extent *exts = *extents; struct btrfs_key found_key; u64 cur_pos; u64 last_byte; u32 nritems; int nr = 0; int max = *nr_extents; int ret; WARN_ON(!no_fragment && *extents); if (!exts) { max = 1; exts = kmalloc(sizeof(*exts) * max, GFP_NOFS); if (!exts) return -ENOMEM; } path = btrfs_alloc_path(); BUG_ON(!path); cur_pos = extent_key->objectid - offset; last_byte = extent_key->objectid + extent_key->offset; ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino, cur_pos, 0); if (ret < 0) goto out; if (ret > 0) { ret = -ENOENT; goto out; } while (1) { leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); if (path->slots[0] >= nritems) { ret = btrfs_next_leaf(root, path); if (ret < 0) goto out; if (ret > 0) break; leaf = path->nodes[0]; } btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.offset != cur_pos || found_key.type != BTRFS_EXTENT_DATA_KEY || found_key.objectid != reloc_inode->i_ino) break; fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG || btrfs_file_extent_disk_bytenr(leaf, fi) == 0) break; 
if (nr == max) { struct disk_extent *old = exts; max *= 2; exts = kzalloc(sizeof(*exts) * max, GFP_NOFS); memcpy(exts, old, sizeof(*exts) * nr); if (old != *extents) kfree(old); } exts[nr].disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); exts[nr].disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); exts[nr].offset = btrfs_file_extent_offset(leaf, fi); exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi); exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); exts[nr].compression = btrfs_file_extent_compression(leaf, fi); exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi); exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf, fi); BUG_ON(exts[nr].offset > 0); BUG_ON(exts[nr].compression || exts[nr].encryption); BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes); cur_pos += exts[nr].num_bytes; nr++; if (cur_pos + offset >= last_byte) break; if (no_fragment) { ret = 1; goto out; } path->slots[0]++; } BUG_ON(cur_pos + offset > last_byte); if (cur_pos + offset < last_byte) { ret = -ENOENT; goto out; } ret = 0; out: btrfs_free_path(path); if (ret) { if (exts != *extents) kfree(exts); } else { *extents = exts; *nr_extents = nr; } return ret; } static noinline int replace_one_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *extent_key, struct btrfs_key *leaf_key, struct btrfs_ref_path *ref_path, struct disk_extent *new_extents, int nr_extents) { struct extent_buffer *leaf; struct btrfs_file_extent_item *fi; struct inode *inode = NULL; struct btrfs_key key; u64 lock_start = 0; u64 lock_end = 0; u64 num_bytes; u64 ext_offset; u64 search_end = (u64)-1; u32 nritems; int nr_scaned = 0; int extent_locked = 0; int extent_type; int ret; memcpy(&key, leaf_key, sizeof(key)); if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) { if (key.objectid < ref_path->owner_objectid || (key.objectid == ref_path->owner_objectid && key.type < BTRFS_EXTENT_DATA_KEY)) { key.objectid = 
ref_path->owner_objectid; key.type = BTRFS_EXTENT_DATA_KEY; key.offset = 0; } } while (1) { ret = btrfs_search_slot(trans, root, &key, path, 0, 1); if (ret < 0) goto out; leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); next: if (extent_locked && ret > 0) { /* * the file extent item was modified by someone * before the extent got locked. */ unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, lock_end, GFP_NOFS); extent_locked = 0; } if (path->slots[0] >= nritems) { if (++nr_scaned > 2) break; BUG_ON(extent_locked); ret = btrfs_next_leaf(root, path); if (ret < 0) goto out; if (ret > 0) break; leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); } btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) { if ((key.objectid > ref_path->owner_objectid) || (key.objectid == ref_path->owner_objectid && key.type > BTRFS_EXTENT_DATA_KEY) || key.offset >= search_end) break; } if (inode && key.objectid != inode->i_ino) { BUG_ON(extent_locked); btrfs_release_path(root, path); mutex_unlock(&inode->i_mutex); iput(inode); inode = NULL; continue; } if (key.type != BTRFS_EXTENT_DATA_KEY) { path->slots[0]++; ret = 1; goto next; } fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); extent_type = btrfs_file_extent_type(leaf, fi); if ((extent_type != BTRFS_FILE_EXTENT_REG && extent_type != BTRFS_FILE_EXTENT_PREALLOC) || (btrfs_file_extent_disk_bytenr(leaf, fi) != extent_key->objectid)) { path->slots[0]++; ret = 1; goto next; } num_bytes = btrfs_file_extent_num_bytes(leaf, fi); ext_offset = btrfs_file_extent_offset(leaf, fi); if (search_end == (u64)-1) { search_end = key.offset - ext_offset + btrfs_file_extent_ram_bytes(leaf, fi); } if (!extent_locked) { lock_start = key.offset; lock_end = lock_start + num_bytes - 1; } else { if (lock_start > key.offset || lock_end + 1 < key.offset + num_bytes) { unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, lock_end, GFP_NOFS); extent_locked = 0; } } if 
(!inode) { btrfs_release_path(root, path); inode = btrfs_iget_locked(root->fs_info->sb, key.objectid, root); if (inode->i_state & I_NEW) { BTRFS_I(inode)->root = root; BTRFS_I(inode)->location.objectid = key.objectid; BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; BTRFS_I(inode)->location.offset = 0; btrfs_read_locked_inode(inode); unlock_new_inode(inode); } /* * some code call btrfs_commit_transaction while * holding the i_mutex, so we can't use mutex_lock * here. */ if (is_bad_inode(inode) || !mutex_trylock(&inode->i_mutex)) { iput(inode); inode = NULL; key.offset = (u64)-1; goto skip; } } if (!extent_locked) { struct btrfs_ordered_extent *ordered; btrfs_release_path(root, path); lock_extent(&BTRFS_I(inode)->io_tree, lock_start, lock_end, GFP_NOFS); ordered = btrfs_lookup_first_ordered_extent(inode, lock_end); if (ordered && ordered->file_offset <= lock_end && ordered->file_offset + ordered->len > lock_start) { unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, lock_end, GFP_NOFS); btrfs_start_ordered_extent(inode, ordered, 1); btrfs_put_ordered_extent(ordered); key.offset += num_bytes; goto skip; } if (ordered) btrfs_put_ordered_extent(ordered); extent_locked = 1; continue; } if (nr_extents == 1) { /* update extent pointer in place */ btrfs_set_file_extent_disk_bytenr(leaf, fi, new_extents[0].disk_bytenr); btrfs_set_file_extent_disk_num_bytes(leaf, fi, new_extents[0].disk_num_bytes); btrfs_mark_buffer_dirty(leaf); btrfs_drop_extent_cache(inode, key.offset, key.offset + num_bytes - 1, 0); ret = btrfs_inc_extent_ref(trans, root, new_extents[0].disk_bytenr, new_extents[0].disk_num_bytes, leaf->start, root->root_key.objectid, trans->transid, key.objectid); BUG_ON(ret); ret = btrfs_free_extent(trans, root, extent_key->objectid, extent_key->offset, leaf->start, btrfs_header_owner(leaf), btrfs_header_generation(leaf), key.objectid, 0); BUG_ON(ret); btrfs_release_path(root, path); key.offset += num_bytes; } else { BUG_ON(1); #if 0 u64 alloc_hint; u64 extent_len; 
int i; /* * drop old extent pointer at first, then insert the * new pointers one bye one */ btrfs_release_path(root, path); ret = btrfs_drop_extents(trans, root, inode, key.offset, key.offset + num_bytes, key.offset, &alloc_hint); BUG_ON(ret); for (i = 0; i < nr_extents; i++) { if (ext_offset >= new_extents[i].num_bytes) { ext_offset -= new_extents[i].num_bytes; continue; } extent_len = min(new_extents[i].num_bytes - ext_offset, num_bytes); ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi)); BUG_ON(ret); leaf = path->nodes[0]; fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); btrfs_set_file_extent_generation(leaf, fi, trans->transid); btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG); btrfs_set_file_extent_disk_bytenr(leaf, fi, new_extents[i].disk_bytenr); btrfs_set_file_extent_disk_num_bytes(leaf, fi, new_extents[i].disk_num_bytes); btrfs_set_file_extent_ram_bytes(leaf, fi, new_extents[i].ram_bytes); btrfs_set_file_extent_compression(leaf, fi, new_extents[i].compression); btrfs_set_file_extent_encryption(leaf, fi, new_extents[i].encryption); btrfs_set_file_extent_other_encoding(leaf, fi, new_extents[i].other_encoding); btrfs_set_file_extent_num_bytes(leaf, fi, extent_len); ext_offset += new_extents[i].offset; btrfs_set_file_extent_offset(leaf, fi, ext_offset); btrfs_mark_buffer_dirty(leaf); btrfs_drop_extent_cache(inode, key.offset, key.offset + extent_len - 1, 0); ret = btrfs_inc_extent_ref(trans, root, new_extents[i].disk_bytenr, new_extents[i].disk_num_bytes, leaf->start, root->root_key.objectid, trans->transid, key.objectid); BUG_ON(ret); btrfs_release_path(root, path); inode_add_bytes(inode, extent_len); ext_offset = 0; num_bytes -= extent_len; key.offset += extent_len; if (num_bytes == 0) break; } BUG_ON(i >= nr_extents); #endif } if (extent_locked) { unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, lock_end, GFP_NOFS); extent_locked = 0; } skip: if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS && 
key.offset >= search_end) break; cond_resched(); } ret = 0; out: btrfs_release_path(root, path); if (inode) { mutex_unlock(&inode->i_mutex); if (extent_locked) { unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, lock_end, GFP_NOFS); } iput(inode); } return ret; } int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, u64 orig_start) { int level; int ret; BUG_ON(btrfs_header_generation(buf) != trans->transid); BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); level = btrfs_header_level(buf); if (level == 0) { struct btrfs_leaf_ref *ref; struct btrfs_leaf_ref *orig_ref; orig_ref = btrfs_lookup_leaf_ref(root, orig_start); if (!orig_ref) return -ENOENT; ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems); if (!ref) { btrfs_free_leaf_ref(root, orig_ref); return -ENOMEM; } ref->nritems = orig_ref->nritems; memcpy(ref->extents, orig_ref->extents, sizeof(ref->extents[0]) * ref->nritems); btrfs_free_leaf_ref(root, orig_ref); ref->root_gen = trans->transid; ref->bytenr = buf->start; ref->owner = btrfs_header_owner(buf); ref->generation = btrfs_header_generation(buf); ret = btrfs_add_leaf_ref(root, ref, 0); WARN_ON(ret); btrfs_free_leaf_ref(root, ref); } return 0; } static noinline int invalidate_extent_cache(struct btrfs_root *root, struct extent_buffer *leaf, struct btrfs_block_group_cache *group, struct btrfs_root *target_root) { struct btrfs_key key; struct inode *inode = NULL; struct btrfs_file_extent_item *fi; struct extent_state *cached_state = NULL; u64 num_bytes; u64 skip_objectid = 0; u32 nritems; u32 i; nritems = btrfs_header_nritems(leaf); for (i = 0; i < nritems; i++) { btrfs_item_key_to_cpu(leaf, &key, i); if (key.objectid == skip_objectid || key.type != BTRFS_EXTENT_DATA_KEY) continue; fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) continue; if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) continue; if 
(!inode || inode->i_ino != key.objectid) { iput(inode); inode = btrfs_ilookup(target_root->fs_info->sb, key.objectid, target_root, 1); } if (!inode) { skip_objectid = key.objectid; continue; } num_bytes = btrfs_file_extent_num_bytes(leaf, fi); lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset, key.offset + num_bytes - 1, 0, &cached_state, GFP_NOFS); btrfs_drop_extent_cache(inode, key.offset, key.offset + num_bytes - 1, 1); unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset, key.offset + num_bytes - 1, &cached_state, GFP_NOFS); cond_resched(); } iput(inode); return 0; } static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *leaf, struct btrfs_block_group_cache *group, struct inode *reloc_inode) { struct btrfs_key key; struct btrfs_key extent_key; struct btrfs_file_extent_item *fi; struct btrfs_leaf_ref *ref; struct disk_extent *new_extent; u64 bytenr; u64 num_bytes; u32 nritems; u32 i; int ext_index; int nr_extent; int ret; new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS); BUG_ON(!new_extent); ref = btrfs_lookup_leaf_ref(root, leaf->start); BUG_ON(!ref); ext_index = -1; nritems = btrfs_header_nritems(leaf); for (i = 0; i < nritems; i++) { btrfs_item_key_to_cpu(leaf, &key, i); if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) continue; fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) continue; bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); if (bytenr == 0) continue; ext_index++; if (bytenr >= group->key.objectid + group->key.offset || bytenr + num_bytes <= group->key.objectid) continue; extent_key.objectid = bytenr; extent_key.offset = num_bytes; extent_key.type = BTRFS_EXTENT_ITEM_KEY; nr_extent = 1; ret = get_new_locations(reloc_inode, &extent_key, group->key.objectid, 1, &new_extent, &nr_extent); if (ret > 0) continue; BUG_ON(ret < 0); 
BUG_ON(ref->extents[ext_index].bytenr != bytenr); BUG_ON(ref->extents[ext_index].num_bytes != num_bytes); ref->extents[ext_index].bytenr = new_extent->disk_bytenr; ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes; btrfs_set_file_extent_disk_bytenr(leaf, fi, new_extent->disk_bytenr); btrfs_set_file_extent_disk_num_bytes(leaf, fi, new_extent->disk_num_bytes); btrfs_mark_buffer_dirty(leaf); ret = btrfs_inc_extent_ref(trans, root, new_extent->disk_bytenr, new_extent->disk_num_bytes, leaf->start, root->root_key.objectid, trans->transid, key.objectid); BUG_ON(ret); ret = btrfs_free_extent(trans, root, bytenr, num_bytes, leaf->start, btrfs_header_owner(leaf), btrfs_header_generation(leaf), key.objectid, 0); BUG_ON(ret); cond_resched(); } kfree(new_extent); BUG_ON(ext_index + 1 != ref->nritems); btrfs_free_leaf_ref(root, ref); return 0; } int btrfs_free_reloc_root(struct btrfs_trans_handle *trans, struct btrfs_root *root) { struct btrfs_root *reloc_root; int ret; if (root->reloc_root) { reloc_root = root->reloc_root; root->reloc_root = NULL; list_add(&reloc_root->dead_list, &root->fs_info->dead_reloc_roots); btrfs_set_root_bytenr(&reloc_root->root_item, reloc_root->node->start); btrfs_set_root_level(&root->root_item, btrfs_header_level(reloc_root->node)); memset(&reloc_root->root_item.drop_progress, 0, sizeof(struct btrfs_disk_key)); reloc_root->root_item.drop_level = 0; ret = btrfs_update_root(trans, root->fs_info->tree_root, &reloc_root->root_key, &reloc_root->root_item); BUG_ON(ret); } return 0; } int btrfs_drop_dead_reloc_roots(struct btrfs_root *root) { struct btrfs_trans_handle *trans; struct btrfs_root *reloc_root; struct btrfs_root *prev_root = NULL; struct list_head dead_roots; int ret; unsigned long nr; INIT_LIST_HEAD(&dead_roots); list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots); while (!list_empty(&dead_roots)) { reloc_root = list_entry(dead_roots.prev, struct btrfs_root, dead_list); list_del_init(&reloc_root->dead_list); 
BUG_ON(reloc_root->commit_root != NULL); while (1) { trans = btrfs_join_transaction(root, 1); BUG_ON(!trans); mutex_lock(&root->fs_info->drop_mutex); ret = btrfs_drop_snapshot(trans, reloc_root); if (ret != -EAGAIN) break; mutex_unlock(&root->fs_info->drop_mutex); nr = trans->blocks_used; ret = btrfs_end_transaction(trans, root); BUG_ON(ret); btrfs_btree_balance_dirty(root, nr); } free_extent_buffer(reloc_root->node); ret = btrfs_del_root(trans, root->fs_info->tree_root, &reloc_root->root_key); BUG_ON(ret); mutex_unlock(&root->fs_info->drop_mutex); nr = trans->blocks_used; ret = btrfs_end_transaction(trans, root); BUG_ON(ret); btrfs_btree_balance_dirty(root, nr); kfree(prev_root); prev_root = reloc_root; } if (prev_root) { btrfs_remove_leaf_refs(prev_root, (u64)-1, 0); kfree(prev_root); } return 0; } int btrfs_add_dead_reloc_root(struct btrfs_root *root) { list_add(&root->dead_list, &root->fs_info->dead_reloc_roots); return 0; } int btrfs_cleanup_reloc_trees(struct btrfs_root *root) { struct btrfs_root *reloc_root; struct btrfs_trans_handle *trans; struct btrfs_key location; int found; int ret; mutex_lock(&root->fs_info->tree_reloc_mutex); ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL); BUG_ON(ret); found = !list_empty(&root->fs_info->dead_reloc_roots); mutex_unlock(&root->fs_info->tree_reloc_mutex); if (found) { trans = btrfs_start_transaction(root, 1); BUG_ON(!trans); ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); } location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID; location.offset = (u64)-1; location.type = BTRFS_ROOT_ITEM_KEY; reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location); BUG_ON(!reloc_root); btrfs_orphan_cleanup(reloc_root); return 0; } static noinline int init_reloc_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root) { struct btrfs_root *reloc_root; struct extent_buffer *eb; struct btrfs_root_item *root_item; struct btrfs_key root_key; int ret; BUG_ON(!root->ref_cows); if (root->reloc_root) 
return 0; root_item = kmalloc(sizeof(*root_item), GFP_NOFS); BUG_ON(!root_item); ret = btrfs_copy_root(trans, root, root->commit_root, &eb, BTRFS_TREE_RELOC_OBJECTID); BUG_ON(ret); root_key.objectid = BTRFS_TREE_RELOC_OBJECTID; root_key.offset = root->root_key.objectid; root_key.type = BTRFS_ROOT_ITEM_KEY; memcpy(root_item, &root->root_item, sizeof(root_item)); btrfs_set_root_refs(root_item, 0); btrfs_set_root_bytenr(root_item, eb->start); btrfs_set_root_level(root_item, btrfs_header_level(eb)); btrfs_set_root_generation(root_item, trans->transid); btrfs_tree_unlock(eb); free_extent_buffer(eb); ret = btrfs_insert_root(trans, root->fs_info->tree_root, &root_key, root_item); BUG_ON(ret); kfree(root_item); reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root, &root_key); BUG_ON(!reloc_root); reloc_root->last_trans = trans->transid; reloc_root->commit_root = NULL; reloc_root->ref_tree = &root->fs_info->reloc_ref_tree; root->reloc_root = reloc_root; return 0; } /* * Core function of space balance. * * The idea is using reloc trees to relocate tree blocks in reference * counted roots. There is one reloc tree for each subvol, and all * reloc trees share same root key objectid. Reloc trees are snapshots * of the latest committed roots of subvols (root->commit_root). * * To relocate a tree block referenced by a subvol, there are two steps. * COW the block through subvol's reloc tree, then update block pointer * in the subvol to point to the new block. Since all reloc trees share * same root key objectid, doing special handing for tree blocks owned * by them is easy. Once a tree block has been COWed in one reloc tree, * we can use the resulting new block directly when the same block is * required to COW again through other reloc trees. By this way, relocated * tree blocks are shared between reloc trees, so they are also shared * between subvols. 
*/ static noinline int relocate_one_path(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *first_key, struct btrfs_ref_path *ref_path, struct btrfs_block_group_cache *group, struct inode *reloc_inode) { struct btrfs_root *reloc_root; struct extent_buffer *eb = NULL; struct btrfs_key *keys; u64 *nodes; int level; int shared_level; int lowest_level = 0; int ret; if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID) lowest_level = ref_path->owner_objectid; if (!root->ref_cows) { path->lowest_level = lowest_level; ret = btrfs_search_slot(trans, root, first_key, path, 0, 1); BUG_ON(ret < 0); path->lowest_level = 0; btrfs_release_path(root, path); return 0; } mutex_lock(&root->fs_info->tree_reloc_mutex); ret = init_reloc_tree(trans, root); BUG_ON(ret); reloc_root = root->reloc_root; shared_level = ref_path->shared_level; ref_path->shared_level = BTRFS_MAX_LEVEL - 1; keys = ref_path->node_keys; nodes = ref_path->new_nodes; memset(&keys[shared_level + 1], 0, sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1)); memset(&nodes[shared_level + 1], 0, sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1)); if (nodes[lowest_level] == 0) { path->lowest_level = lowest_level; ret = btrfs_search_slot(trans, reloc_root, first_key, path, 0, 1); BUG_ON(ret); for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) { eb = path->nodes[level]; if (!eb || eb == reloc_root->node) break; nodes[level] = eb->start; if (level == 0) btrfs_item_key_to_cpu(eb, &keys[level], 0); else btrfs_node_key_to_cpu(eb, &keys[level], 0); } if (nodes[0] && ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { eb = path->nodes[0]; ret = replace_extents_in_leaf(trans, reloc_root, eb, group, reloc_inode); BUG_ON(ret); } btrfs_release_path(reloc_root, path); } else { ret = btrfs_merge_path(trans, reloc_root, keys, nodes, lowest_level); BUG_ON(ret); } /* * replace tree blocks in the fs tree with tree blocks in * the reloc tree. 
*/ ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level); BUG_ON(ret < 0); if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { ret = btrfs_search_slot(trans, reloc_root, first_key, path, 0, 0); BUG_ON(ret); extent_buffer_get(path->nodes[0]); eb = path->nodes[0]; btrfs_release_path(reloc_root, path); ret = invalidate_extent_cache(reloc_root, eb, group, root); BUG_ON(ret); free_extent_buffer(eb); } mutex_unlock(&root->fs_info->tree_reloc_mutex); path->lowest_level = 0; return 0; } static noinline int relocate_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *first_key, struct btrfs_ref_path *ref_path) { int ret; ret = relocate_one_path(trans, root, path, first_key, ref_path, NULL, NULL); BUG_ON(ret); return 0; } static noinline int del_extent_zero(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root, struct btrfs_path *path, struct btrfs_key *extent_key) { int ret; ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1); if (ret) goto out; ret = btrfs_del_item(trans, extent_root, path); out: btrfs_release_path(extent_root, path); return ret; } static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info, struct btrfs_ref_path *ref_path) { struct btrfs_key root_key; root_key.objectid = ref_path->root_objectid; root_key.type = BTRFS_ROOT_ITEM_KEY; if (is_cowonly_root(ref_path->root_objectid)) root_key.offset = 0; else root_key.offset = (u64)-1; return btrfs_read_fs_root_no_name(fs_info, &root_key); } static noinline int relocate_one_extent(struct btrfs_root *extent_root, struct btrfs_path *path, struct btrfs_key *extent_key, struct btrfs_block_group_cache *group, struct inode *reloc_inode, int pass) { struct btrfs_trans_handle *trans; struct btrfs_root *found_root; struct btrfs_ref_path *ref_path = NULL; struct disk_extent *new_extents = NULL; int nr_extents = 0; int loops; int ret; int level; struct btrfs_key first_key; u64 prev_block = 0; trans 
= btrfs_start_transaction(extent_root, 1); BUG_ON(!trans); if (extent_key->objectid == 0) { ret = del_extent_zero(trans, extent_root, path, extent_key); goto out; } ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS); if (!ref_path) { ret = -ENOMEM; goto out; } for (loops = 0; ; loops++) { if (loops == 0) { ret = btrfs_first_ref_path(trans, extent_root, ref_path, extent_key->objectid); } else { ret = btrfs_next_ref_path(trans, extent_root, ref_path); } if (ret < 0) goto out; if (ret > 0) break; if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID || ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID) continue; found_root = read_ref_root(extent_root->fs_info, ref_path); BUG_ON(!found_root); /* * for reference counted tree, only process reference paths * rooted at the latest committed root. */ if (found_root->ref_cows && ref_path->root_generation != found_root->root_key.offset) continue; if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { if (pass == 0) { /* * copy data extents to new locations */ u64 group_start = group->key.objectid; ret = relocate_data_extent(reloc_inode, extent_key, group_start); if (ret < 0) goto out; break; } level = 0; } else { level = ref_path->owner_objectid; } if (prev_block != ref_path->nodes[level]) { struct extent_buffer *eb; u64 block_start = ref_path->nodes[level]; u64 block_size = btrfs_level_size(found_root, level); eb = read_tree_block(found_root, block_start, block_size, 0); btrfs_tree_lock(eb); BUG_ON(level != btrfs_header_level(eb)); if (level == 0) btrfs_item_key_to_cpu(eb, &first_key, 0); else btrfs_node_key_to_cpu(eb, &first_key, 0); btrfs_tree_unlock(eb); free_extent_buffer(eb); prev_block = block_start; } mutex_lock(&extent_root->fs_info->trans_mutex); btrfs_record_root_in_trans(found_root); mutex_unlock(&extent_root->fs_info->trans_mutex); if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { /* * try to update data extent references while * keeping metadata shared between snapshots. 
*/ if (pass == 1) { ret = relocate_one_path(trans, found_root, path, &first_key, ref_path, group, reloc_inode); if (ret < 0) goto out; continue; } /* * use fallback method to process the remaining * references. */ if (!new_extents) { u64 group_start = group->key.objectid; new_extents = kmalloc(sizeof(*new_extents), GFP_NOFS); nr_extents = 1; ret = get_new_locations(reloc_inode, extent_key, group_start, 1, &new_extents, &nr_extents); if (ret) goto out; } ret = replace_one_extent(trans, found_root, path, extent_key, &first_key, ref_path, new_extents, nr_extents); } else { ret = relocate_tree_block(trans, found_root, path, &first_key, ref_path); } if (ret < 0) goto out; } ret = 0; out: btrfs_end_transaction(trans, extent_root); kfree(new_extents); kfree(ref_path); return ret; } #endif static u64 update_block_group_flags(struct btrfs_root *root, u64 flags) { u64 num_devices; u64 stripped = BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10; num_devices = root->fs_info->fs_devices->rw_devices; if (num_devices == 1) { stripped |= BTRFS_BLOCK_GROUP_DUP; stripped = flags & ~stripped; /* turn raid0 into single device chunks */ if (flags & BTRFS_BLOCK_GROUP_RAID0) return stripped; /* turn mirroring into duplication */ if (flags & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) return stripped | BTRFS_BLOCK_GROUP_DUP; return flags; } else { /* they already had raid on here, just return */ if (flags & stripped) return flags; stripped |= BTRFS_BLOCK_GROUP_DUP; stripped = flags & ~stripped; /* switch duplicated blocks with raid1 */ if (flags & BTRFS_BLOCK_GROUP_DUP) return stripped | BTRFS_BLOCK_GROUP_RAID1; /* turn single device chunks into raid0 */ return stripped | BTRFS_BLOCK_GROUP_RAID0; } return flags; } static int set_block_group_ro(struct btrfs_block_group_cache *cache) { struct btrfs_space_info *sinfo = cache->space_info; u64 num_bytes; int ret = -ENOSPC; if (cache->ro) return 0; spin_lock(&sinfo->lock); spin_lock(&cache->lock); num_bytes 
	= cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);

	/*
	 * the group's unused bytes can become read-only only if the
	 * space_info still has room for all outstanding reservations
	 */
	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
	    sinfo->bytes_may_use + sinfo->bytes_readonly +
	    cache->reserved_pinned + num_bytes < sinfo->total_bytes) {
		sinfo->bytes_readonly += num_bytes;
		sinfo->bytes_reserved += cache->reserved_pinned;
		cache->reserved_pinned = 0;
		cache->ro = 1;
		ret = 0;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return ret;
}

/*
 * Mark a block group read-only, allocating a replacement chunk first
 * when the allocation profile changes, and forcing one more chunk
 * allocation if the first attempt runs out of space.
 */
int btrfs_set_block_group_ro(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)

{
	struct btrfs_trans_handle *trans;
	u64 alloc_flags;
	int ret;

	BUG_ON(cache->ro);

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(IS_ERR(trans));

	alloc_flags = update_block_group_flags(root, cache->flags);
	if (alloc_flags != cache->flags)
		do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);

	ret = set_block_group_ro(cache);
	if (!ret)
		goto out;
	/* first attempt failed: force a chunk allocation and retry */
	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
	ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
	if (ret < 0)
		goto out;
	ret = set_block_group_ro(cache);
out:
	btrfs_end_transaction(trans, root);
	return ret;
}

/*
 * Undo set_block_group_ro: give the group's unused bytes back from the
 * space_info's read-only accounting and clear cache->ro.
 */
int btrfs_set_block_group_rw(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;

	BUG_ON(!cache->ro);

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);
	sinfo->bytes_readonly -= num_bytes;
	cache->ro = 0;
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return 0;
}

/*
 * checks to see if its even possible to relocate this block group.
 *
 * @return - -1 if it's not a good idea to relocate this block group, 0 if its
 * ok to go ahead and try.
*/ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr) { struct btrfs_block_group_cache *block_group; struct btrfs_space_info *space_info; struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; struct btrfs_device *device; int full = 0; int ret = 0; block_group = btrfs_lookup_block_group(root->fs_info, bytenr); /* odd, couldn't find the block group, leave it alone */ if (!block_group) return -1; /* no bytes used, we're good */ if (!btrfs_block_group_used(&block_group->item)) goto out; space_info = block_group->space_info; spin_lock(&space_info->lock); full = space_info->full; /* * if this is the last block group we have in this space, we can't * relocate it unless we're able to allocate a new chunk below. * * Otherwise, we need to make sure we have room in the space to handle * all of the extents from this block group. If we can, we're good */ if ((space_info->total_bytes != block_group->key.offset) && (space_info->bytes_used + space_info->bytes_reserved + space_info->bytes_pinned + space_info->bytes_readonly + btrfs_block_group_used(&block_group->item) < space_info->total_bytes)) { spin_unlock(&space_info->lock); goto out; } spin_unlock(&space_info->lock); /* * ok we don't have enough space, but maybe we have free space on our * devices to allocate new chunks for relocation, so loop through our * alloc devices and guess if we have enough space. However, if we * were marked as full, then we know there aren't enough chunks, and we * can just return. */ ret = -1; if (full) goto out; mutex_lock(&root->fs_info->chunk_mutex); list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { u64 min_free = btrfs_block_group_used(&block_group->item); u64 dev_offset, max_avail; /* * check to make sure we can actually find a chunk with enough * space to fit our block group in. 
*/ if (device->total_bytes > device->bytes_used + min_free) { ret = find_free_dev_extent(NULL, device, min_free, &dev_offset, &max_avail); if (!ret) break; ret = -1; } } mutex_unlock(&root->fs_info->chunk_mutex); out: btrfs_put_block_group(block_group); return ret; } static int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *key) { int ret = 0; struct btrfs_key found_key; struct extent_buffer *leaf; int slot; ret = btrfs_search_slot(NULL, root, key, path, 0, 0); if (ret < 0) goto out; while (1) { slot = path->slots[0]; leaf = path->nodes[0]; if (slot >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); if (ret == 0) continue; if (ret < 0) goto out; break; } btrfs_item_key_to_cpu(leaf, &found_key, slot); if (found_key.objectid >= key->objectid && found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { ret = 0; goto out; } path->slots[0]++; } out: return ret; } int btrfs_free_block_groups(struct btrfs_fs_info *info) { struct btrfs_block_group_cache *block_group; struct btrfs_space_info *space_info; struct btrfs_caching_control *caching_ctl; struct rb_node *n; down_write(&info->extent_commit_sem); while (!list_empty(&info->caching_block_groups)) { caching_ctl = list_entry(info->caching_block_groups.next, struct btrfs_caching_control, list); list_del(&caching_ctl->list); put_caching_control(caching_ctl); } up_write(&info->extent_commit_sem); spin_lock(&info->block_group_cache_lock); while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { block_group = rb_entry(n, struct btrfs_block_group_cache, cache_node); rb_erase(&block_group->cache_node, &info->block_group_cache_tree); spin_unlock(&info->block_group_cache_lock); down_write(&block_group->space_info->groups_sem); list_del(&block_group->list); up_write(&block_group->space_info->groups_sem); if (block_group->cached == BTRFS_CACHE_STARTED) wait_block_group_cache_done(block_group); btrfs_remove_free_space_cache(block_group); btrfs_put_block_group(block_group); 
spin_lock(&info->block_group_cache_lock); } spin_unlock(&info->block_group_cache_lock); /* now that all the block groups are freed, go through and * free all the space_info structs. This is only called during * the final stages of unmount, and so we know nobody is * using them. We call synchronize_rcu() once before we start, * just to be on the safe side. */ synchronize_rcu(); release_global_block_rsv(info); while(!list_empty(&info->space_info)) { space_info = list_entry(info->space_info.next, struct btrfs_space_info, list); if (space_info->bytes_pinned > 0 || space_info->bytes_reserved > 0) { WARN_ON(1); dump_space_info(space_info, 0, 0); } list_del(&space_info->list); kfree(space_info); } return 0; } static void __link_block_group(struct btrfs_space_info *space_info, struct btrfs_block_group_cache *cache) { int index = get_block_group_index(cache); down_write(&space_info->groups_sem); list_add_tail(&cache->list, &space_info->block_groups[index]); up_write(&space_info->groups_sem); } int btrfs_read_block_groups(struct btrfs_root *root) { struct btrfs_path *path; int ret; struct btrfs_block_group_cache *cache; struct btrfs_fs_info *info = root->fs_info; struct btrfs_space_info *space_info; struct btrfs_key key; struct btrfs_key found_key; struct extent_buffer *leaf; root = info->extent_root; key.objectid = 0; key.offset = 0; btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY); path = btrfs_alloc_path(); if (!path) return -ENOMEM; while (1) { ret = find_first_block_group(root, path, &key); if (ret > 0) break; if (ret != 0) goto error; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); cache = kzalloc(sizeof(*cache), GFP_NOFS); if (!cache) { ret = -ENOMEM; goto error; } atomic_set(&cache->count, 1); spin_lock_init(&cache->lock); spin_lock_init(&cache->tree_lock); cache->fs_info = info; INIT_LIST_HEAD(&cache->list); INIT_LIST_HEAD(&cache->cluster_list); /* * we only want to have 32k of ram per block group for keeping * track of free 
space, and if we pass 1/2 of that we want to * start converting things over to using bitmaps */ cache->extents_thresh = ((1024 * 32) / 2) / sizeof(struct btrfs_free_space); read_extent_buffer(leaf, &cache->item, btrfs_item_ptr_offset(leaf, path->slots[0]), sizeof(cache->item)); memcpy(&cache->key, &found_key, sizeof(found_key)); key.objectid = found_key.objectid + found_key.offset; btrfs_release_path(root, path); cache->flags = btrfs_block_group_flags(&cache->item); cache->sectorsize = root->sectorsize; /* * check for two cases, either we are full, and therefore * don't need to bother with the caching work since we won't * find any space, or we are empty, and we can just add all * the space in and be done with it. This saves us _alot_ of * time, particularly in the full case. */ if (found_key.offset == btrfs_block_group_used(&cache->item)) { exclude_super_stripes(root, cache); cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; free_excluded_extents(root, cache); } else if (btrfs_block_group_used(&cache->item) == 0) { exclude_super_stripes(root, cache); cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; add_new_free_space(cache, root->fs_info, found_key.objectid, found_key.objectid + found_key.offset); free_excluded_extents(root, cache); } ret = update_space_info(info, cache->flags, found_key.offset, btrfs_block_group_used(&cache->item), &space_info); BUG_ON(ret); cache->space_info = space_info; spin_lock(&cache->space_info->lock); cache->space_info->bytes_readonly += cache->bytes_super; spin_unlock(&cache->space_info->lock); __link_block_group(space_info, cache); ret = btrfs_add_block_group_cache(root->fs_info, cache); BUG_ON(ret); set_avail_alloc_bits(root->fs_info, cache->flags); if (btrfs_chunk_readonly(root, cache->key.objectid)) set_block_group_ro(cache); } list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) { if (!(get_alloc_profile(root, space_info->flags) & (BTRFS_BLOCK_GROUP_RAID10 | 
BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))) continue; /* * avoid allocating from un-mirrored block group if there are * mirrored block groups. */ list_for_each_entry(cache, &space_info->block_groups[3], list) set_block_group_ro(cache); list_for_each_entry(cache, &space_info->block_groups[4], list) set_block_group_ro(cache); } init_global_block_rsv(info); ret = 0; error: btrfs_free_path(path); return ret; } int btrfs_make_block_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytes_used, u64 type, u64 chunk_objectid, u64 chunk_offset, u64 size) { int ret; struct btrfs_root *extent_root; struct btrfs_block_group_cache *cache; extent_root = root->fs_info->extent_root; root->fs_info->last_trans_log_full_commit = trans->transid; cache = kzalloc(sizeof(*cache), GFP_NOFS); if (!cache) return -ENOMEM; cache->key.objectid = chunk_offset; cache->key.offset = size; cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; cache->sectorsize = root->sectorsize; /* * we only want to have 32k of ram per block group for keeping track * of free space, and if we pass 1/2 of that we want to start * converting things over to using bitmaps */ cache->extents_thresh = ((1024 * 32) / 2) / sizeof(struct btrfs_free_space); atomic_set(&cache->count, 1); spin_lock_init(&cache->lock); spin_lock_init(&cache->tree_lock); INIT_LIST_HEAD(&cache->list); INIT_LIST_HEAD(&cache->cluster_list); btrfs_set_block_group_used(&cache->item, bytes_used); btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid); cache->flags = type; btrfs_set_block_group_flags(&cache->item, type); cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; exclude_super_stripes(root, cache); add_new_free_space(cache, root->fs_info, chunk_offset, chunk_offset + size); free_excluded_extents(root, cache); ret = update_space_info(root->fs_info, cache->flags, size, bytes_used, &cache->space_info); BUG_ON(ret); spin_lock(&cache->space_info->lock); cache->space_info->bytes_readonly += 
cache->bytes_super; spin_unlock(&cache->space_info->lock); __link_block_group(cache->space_info, cache); ret = btrfs_add_block_group_cache(root->fs_info, cache); BUG_ON(ret); ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item, sizeof(cache->item)); BUG_ON(ret); set_avail_alloc_bits(extent_root->fs_info, type); return 0; } int btrfs_remove_block_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 group_start) { struct btrfs_path *path; struct btrfs_block_group_cache *block_group; struct btrfs_free_cluster *cluster; struct btrfs_key key; int ret; root = root->fs_info->extent_root; block_group = btrfs_lookup_block_group(root->fs_info, group_start); BUG_ON(!block_group); BUG_ON(!block_group->ro); memcpy(&key, &block_group->key, sizeof(key)); /* make sure this block group isn't part of an allocation cluster */ cluster = &root->fs_info->data_alloc_cluster; spin_lock(&cluster->refill_lock); btrfs_return_cluster_to_free_space(block_group, cluster); spin_unlock(&cluster->refill_lock); /* * make sure this block group isn't part of a metadata * allocation cluster */ cluster = &root->fs_info->meta_alloc_cluster; spin_lock(&cluster->refill_lock); btrfs_return_cluster_to_free_space(block_group, cluster); spin_unlock(&cluster->refill_lock); path = btrfs_alloc_path(); BUG_ON(!path); spin_lock(&root->fs_info->block_group_cache_lock); rb_erase(&block_group->cache_node, &root->fs_info->block_group_cache_tree); spin_unlock(&root->fs_info->block_group_cache_lock); down_write(&block_group->space_info->groups_sem); /* * we must use list_del_init so people can check to see if they * are still on the list after taking the semaphore */ list_del_init(&block_group->list); up_write(&block_group->space_info->groups_sem); if (block_group->cached == BTRFS_CACHE_STARTED) wait_block_group_cache_done(block_group); btrfs_remove_free_space_cache(block_group); spin_lock(&block_group->space_info->lock); block_group->space_info->total_bytes -= block_group->key.offset; 
block_group->space_info->bytes_readonly -= block_group->key.offset; spin_unlock(&block_group->space_info->lock); btrfs_clear_space_info_full(root->fs_info); btrfs_put_block_group(block_group); btrfs_put_block_group(block_group); ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret > 0) ret = -EIO; if (ret < 0) goto out; ret = btrfs_del_item(trans, root, path); out: btrfs_free_path(path); return ret; }
gpl-2.0
mausvt/seagate_central_cns3420_2-6-35
arch/ia64/kernel/irq_ia64.c
846
16041
/* * linux/arch/ia64/kernel/irq_ia64.c * * Copyright (C) 1998-2001 Hewlett-Packard Co * Stephane Eranian <eranian@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com> * * 6/10/99: Updated to bring in sync with x86 version to facilitate * support for SMP and different interrupt controllers. * * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector * PCI to vector allocation routine. * 04/14/2004 Ashok Raj <ashok.raj@intel.com> * Added CPU Hotplug handling for IPF. */ #include <linux/module.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/kernel_stat.h> #include <linux/ptrace.h> #include <linux/random.h> /* for rand_initialize_irq() */ #include <linux/signal.h> #include <linux/smp.h> #include <linux/threads.h> #include <linux/bitops.h> #include <linux/irq.h> #include <linux/ratelimit.h> #include <asm/delay.h> #include <asm/intrinsics.h> #include <asm/io.h> #include <asm/hw_irq.h> #include <asm/machvec.h> #include <asm/pgtable.h> #include <asm/system.h> #include <asm/tlbflush.h> #ifdef CONFIG_PERFMON # include <asm/perfmon.h> #endif #define IRQ_DEBUG 0 #define IRQ_VECTOR_UNASSIGNED (0) #define IRQ_UNUSED (0) #define IRQ_USED (1) #define IRQ_RSVD (2) /* These can be overridden in platform_irq_init */ int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR; int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR; /* default base addr of IPI table */ void __iomem *ipi_base_addr = ((void __iomem *) (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR)); static cpumask_t vector_allocation_domain(int cpu); /* * Legacy IRQ to IA-64 vector translation table. 
*/ __u8 isa_irq_to_vector_map[16] = { /* 8259 IRQ translation, first 16 entries */ 0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21 }; EXPORT_SYMBOL(isa_irq_to_vector_map); DEFINE_SPINLOCK(vector_lock); struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = { [0 ... NR_IRQS - 1] = { .vector = IRQ_VECTOR_UNASSIGNED, .domain = CPU_MASK_NONE } }; DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = { [0 ... IA64_NUM_VECTORS - 1] = -1 }; static cpumask_t vector_table[IA64_NUM_VECTORS] = { [0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE }; static int irq_status[NR_IRQS] = { [0 ... NR_IRQS -1] = IRQ_UNUSED }; int check_irq_used(int irq) { if (irq_status[irq] == IRQ_USED) return 1; return -1; } static inline int find_unassigned_irq(void) { int irq; for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++) if (irq_status[irq] == IRQ_UNUSED) return irq; return -ENOSPC; } static inline int find_unassigned_vector(cpumask_t domain) { cpumask_t mask; int pos, vector; cpus_and(mask, domain, cpu_online_map); if (cpus_empty(mask)) return -EINVAL; for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) { vector = IA64_FIRST_DEVICE_VECTOR + pos; cpus_and(mask, domain, vector_table[vector]); if (!cpus_empty(mask)) continue; return vector; } return -ENOSPC; } static int __bind_irq_vector(int irq, int vector, cpumask_t domain) { cpumask_t mask; int cpu; struct irq_cfg *cfg = &irq_cfg[irq]; BUG_ON((unsigned)irq >= NR_IRQS); BUG_ON((unsigned)vector >= IA64_NUM_VECTORS); cpus_and(mask, domain, cpu_online_map); if (cpus_empty(mask)) return -EINVAL; if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain)) return 0; if (cfg->vector != IRQ_VECTOR_UNASSIGNED) return -EBUSY; for_each_cpu_mask(cpu, mask) per_cpu(vector_irq, cpu)[vector] = irq; cfg->vector = vector; cfg->domain = domain; irq_status[irq] = IRQ_USED; cpus_or(vector_table[vector], vector_table[vector], domain); return 0; } int bind_irq_vector(int irq, int vector, cpumask_t domain) { unsigned 
long flags; int ret; spin_lock_irqsave(&vector_lock, flags); ret = __bind_irq_vector(irq, vector, domain); spin_unlock_irqrestore(&vector_lock, flags); return ret; } static void __clear_irq_vector(int irq) { int vector, cpu; cpumask_t mask; cpumask_t domain; struct irq_cfg *cfg = &irq_cfg[irq]; BUG_ON((unsigned)irq >= NR_IRQS); BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED); vector = cfg->vector; domain = cfg->domain; cpus_and(mask, cfg->domain, cpu_online_map); for_each_cpu_mask(cpu, mask) per_cpu(vector_irq, cpu)[vector] = -1; cfg->vector = IRQ_VECTOR_UNASSIGNED; cfg->domain = CPU_MASK_NONE; irq_status[irq] = IRQ_UNUSED; cpus_andnot(vector_table[vector], vector_table[vector], domain); } static void clear_irq_vector(int irq) { unsigned long flags; spin_lock_irqsave(&vector_lock, flags); __clear_irq_vector(irq); spin_unlock_irqrestore(&vector_lock, flags); } int ia64_native_assign_irq_vector (int irq) { unsigned long flags; int vector, cpu; cpumask_t domain = CPU_MASK_NONE; vector = -ENOSPC; spin_lock_irqsave(&vector_lock, flags); for_each_online_cpu(cpu) { domain = vector_allocation_domain(cpu); vector = find_unassigned_vector(domain); if (vector >= 0) break; } if (vector < 0) goto out; if (irq == AUTO_ASSIGN) irq = vector; BUG_ON(__bind_irq_vector(irq, vector, domain)); out: spin_unlock_irqrestore(&vector_lock, flags); return vector; } void ia64_native_free_irq_vector (int vector) { if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR) return; clear_irq_vector(vector); } int reserve_irq_vector (int vector) { if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR) return -EINVAL; return !!bind_irq_vector(vector, vector, CPU_MASK_ALL); } /* * Initialize vector_irq on a new cpu. This function must be called * with vector_lock held. 
*/ void __setup_vector_irq(int cpu) { int irq, vector; /* Clear vector_irq */ for (vector = 0; vector < IA64_NUM_VECTORS; ++vector) per_cpu(vector_irq, cpu)[vector] = -1; /* Mark the inuse vectors */ for (irq = 0; irq < NR_IRQS; ++irq) { if (!cpu_isset(cpu, irq_cfg[irq].domain)) continue; vector = irq_to_vector(irq); per_cpu(vector_irq, cpu)[vector] = irq; } } #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)) static enum vector_domain_type { VECTOR_DOMAIN_NONE, VECTOR_DOMAIN_PERCPU } vector_domain_type = VECTOR_DOMAIN_NONE; static cpumask_t vector_allocation_domain(int cpu) { if (vector_domain_type == VECTOR_DOMAIN_PERCPU) return cpumask_of_cpu(cpu); return CPU_MASK_ALL; } static int __irq_prepare_move(int irq, int cpu) { struct irq_cfg *cfg = &irq_cfg[irq]; int vector; cpumask_t domain; if (cfg->move_in_progress || cfg->move_cleanup_count) return -EBUSY; if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu)) return -EINVAL; if (cpu_isset(cpu, cfg->domain)) return 0; domain = vector_allocation_domain(cpu); vector = find_unassigned_vector(domain); if (vector < 0) return -ENOSPC; cfg->move_in_progress = 1; cfg->old_domain = cfg->domain; cfg->vector = IRQ_VECTOR_UNASSIGNED; cfg->domain = CPU_MASK_NONE; BUG_ON(__bind_irq_vector(irq, vector, domain)); return 0; } int irq_prepare_move(int irq, int cpu) { unsigned long flags; int ret; spin_lock_irqsave(&vector_lock, flags); ret = __irq_prepare_move(irq, cpu); spin_unlock_irqrestore(&vector_lock, flags); return ret; } void irq_complete_move(unsigned irq) { struct irq_cfg *cfg = &irq_cfg[irq]; cpumask_t cleanup_mask; int i; if (likely(!cfg->move_in_progress)) return; if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain))) return; cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); cfg->move_cleanup_count = cpus_weight(cleanup_mask); for_each_cpu_mask(i, cleanup_mask) platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0); cfg->move_in_progress = 0; } static 
irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) { int me = smp_processor_id(); ia64_vector vector; unsigned long flags; for (vector = IA64_FIRST_DEVICE_VECTOR; vector < IA64_LAST_DEVICE_VECTOR; vector++) { int irq; struct irq_desc *desc; struct irq_cfg *cfg; irq = __get_cpu_var(vector_irq)[vector]; if (irq < 0) continue; desc = irq_desc + irq; cfg = irq_cfg + irq; raw_spin_lock(&desc->lock); if (!cfg->move_cleanup_count) goto unlock; if (!cpu_isset(me, cfg->old_domain)) goto unlock; spin_lock_irqsave(&vector_lock, flags); __get_cpu_var(vector_irq)[vector] = -1; cpu_clear(me, vector_table[vector]); spin_unlock_irqrestore(&vector_lock, flags); cfg->move_cleanup_count--; unlock: raw_spin_unlock(&desc->lock); } return IRQ_HANDLED; } static struct irqaction irq_move_irqaction = { .handler = smp_irq_move_cleanup_interrupt, .flags = IRQF_DISABLED, .name = "irq_move" }; static int __init parse_vector_domain(char *arg) { if (!arg) return -EINVAL; if (!strcmp(arg, "percpu")) { vector_domain_type = VECTOR_DOMAIN_PERCPU; no_int_routing = 1; } return 0; } early_param("vector", parse_vector_domain); #else static cpumask_t vector_allocation_domain(int cpu) { return CPU_MASK_ALL; } #endif void destroy_and_reserve_irq(unsigned int irq) { unsigned long flags; dynamic_irq_cleanup(irq); spin_lock_irqsave(&vector_lock, flags); __clear_irq_vector(irq); irq_status[irq] = IRQ_RSVD; spin_unlock_irqrestore(&vector_lock, flags); } /* * Dynamic irq allocate and deallocation for MSI */ int create_irq(void) { unsigned long flags; int irq, vector, cpu; cpumask_t domain = CPU_MASK_NONE; irq = vector = -ENOSPC; spin_lock_irqsave(&vector_lock, flags); for_each_online_cpu(cpu) { domain = vector_allocation_domain(cpu); vector = find_unassigned_vector(domain); if (vector >= 0) break; } if (vector < 0) goto out; irq = find_unassigned_irq(); if (irq < 0) goto out; BUG_ON(__bind_irq_vector(irq, vector, domain)); out: spin_unlock_irqrestore(&vector_lock, flags); if (irq >= 0) 
dynamic_irq_init(irq); return irq; } void destroy_irq(unsigned int irq) { dynamic_irq_cleanup(irq); clear_irq_vector(irq); } #ifdef CONFIG_SMP # define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE) # define IS_LOCAL_TLB_FLUSH(vec) (vec == IA64_IPI_LOCAL_TLB_FLUSH) #else # define IS_RESCHEDULE(vec) (0) # define IS_LOCAL_TLB_FLUSH(vec) (0) #endif /* * That's where the IVT branches when we get an external * interrupt. This branches to the correct hardware IRQ handler via * function ptr. */ void ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); unsigned long saved_tpr; #if IRQ_DEBUG { unsigned long bsp, sp; /* * Note: if the interrupt happened while executing in * the context switch routine (ia64_switch_to), we may * get a spurious stack overflow here. This is * because the register and the memory stack are not * switched atomically. */ bsp = ia64_getreg(_IA64_REG_AR_BSP); sp = ia64_getreg(_IA64_REG_SP); if ((sp - bsp) < 1024) { static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); if (__ratelimit(&ratelimit)) { printk("ia64_handle_irq: DANGER: less than " "1KB of free stack space!!\n" "(bsp=0x%lx, sp=%lx)\n", bsp, sp); } } } #endif /* IRQ_DEBUG */ /* * Always set TPR to limit maximum interrupt nesting depth to * 16 (without this, it would be ~240, which could easily lead * to kernel stack overflows). 
*/ irq_enter(); saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); ia64_srlz_d(); while (vector != IA64_SPURIOUS_INT_VECTOR) { int irq = local_vector_to_irq(vector); struct irq_desc *desc = irq_to_desc(irq); if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) { smp_local_flush_tlb(); kstat_incr_irqs_this_cpu(irq, desc); } else if (unlikely(IS_RESCHEDULE(vector))) { kstat_incr_irqs_this_cpu(irq, desc); } else { ia64_setreg(_IA64_REG_CR_TPR, vector); ia64_srlz_d(); if (unlikely(irq < 0)) { printk(KERN_ERR "%s: Unexpected interrupt " "vector %d on CPU %d is not mapped " "to any IRQ!\n", __func__, vector, smp_processor_id()); } else generic_handle_irq(irq); /* * Disable interrupts and send EOI: */ local_irq_disable(); ia64_setreg(_IA64_REG_CR_TPR, saved_tpr); } ia64_eoi(); vector = ia64_get_ivr(); } /* * This must be done *after* the ia64_eoi(). For example, the keyboard softirq * handler needs to be able to wait for further keyboard interrupts, which can't * come through until ia64_eoi() has been done. */ irq_exit(); set_irq_regs(old_regs); } #ifdef CONFIG_HOTPLUG_CPU /* * This function emulates a interrupt processing when a cpu is about to be * brought down. */ void ia64_process_pending_intr(void) { ia64_vector vector; unsigned long saved_tpr; extern unsigned int vectors_in_migration[NR_IRQS]; vector = ia64_get_ivr(); irq_enter(); saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); ia64_srlz_d(); /* * Perform normal interrupt style processing */ while (vector != IA64_SPURIOUS_INT_VECTOR) { int irq = local_vector_to_irq(vector); struct irq_desc *desc = irq_to_desc(irq); if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) { smp_local_flush_tlb(); kstat_incr_irqs_this_cpu(irq, desc); } else if (unlikely(IS_RESCHEDULE(vector))) { kstat_incr_irqs_this_cpu(irq, desc); } else { struct pt_regs *old_regs = set_irq_regs(NULL); ia64_setreg(_IA64_REG_CR_TPR, vector); ia64_srlz_d(); /* * Now try calling normal ia64_handle_irq as it would have got called * from a real intr handler. 
Try passing null for pt_regs, hopefully * it will work. I hope it works!. * Probably could shared code. */ if (unlikely(irq < 0)) { printk(KERN_ERR "%s: Unexpected interrupt " "vector %d on CPU %d not being mapped " "to any IRQ!!\n", __func__, vector, smp_processor_id()); } else { vectors_in_migration[irq]=0; generic_handle_irq(irq); } set_irq_regs(old_regs); /* * Disable interrupts and send EOI */ local_irq_disable(); ia64_setreg(_IA64_REG_CR_TPR, saved_tpr); } ia64_eoi(); vector = ia64_get_ivr(); } irq_exit(); } #endif #ifdef CONFIG_SMP static irqreturn_t dummy_handler (int irq, void *dev_id) { BUG(); } static struct irqaction ipi_irqaction = { .handler = handle_IPI, .flags = IRQF_DISABLED, .name = "IPI" }; /* * KVM uses this interrupt to force a cpu out of guest mode */ static struct irqaction resched_irqaction = { .handler = dummy_handler, .flags = IRQF_DISABLED, .name = "resched" }; static struct irqaction tlb_irqaction = { .handler = dummy_handler, .flags = IRQF_DISABLED, .name = "tlb_flush" }; #endif void ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action) { struct irq_desc *desc; unsigned int irq; irq = vec; BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL)); desc = irq_desc + irq; desc->status |= IRQ_PER_CPU; desc->chip = &irq_type_ia64_lsapic; if (action) setup_irq(irq, action); } void __init ia64_native_register_ipi(void) { #ifdef CONFIG_SMP register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction); register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction); register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction); #endif } void __init init_IRQ (void) { ia64_register_ipi(); register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL); #ifdef CONFIG_SMP #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG) if (vector_domain_type != VECTOR_DOMAIN_NONE) register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction); #endif #endif #ifdef CONFIG_PERFMON pfm_init_percpu(); #endif platform_irq_init(); } void ia64_send_ipi (int cpu, int 
vector, int delivery_mode, int redirect) { void __iomem *ipi_addr; unsigned long ipi_data; unsigned long phys_cpu_id; phys_cpu_id = cpu_physical_id(cpu); /* * cpu number is in 8bit ID and 8bit EID */ ipi_data = (delivery_mode << 8) | (vector & 0xff); ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3)); writeq(ipi_data, ipi_addr); }
gpl-2.0
W4TCH0UT/zz_quark
arch/arm/mach-virt/platsmp.c
1102
1401
/* * Dummy Virtual Machine - does what it says on the tin. * * Copyright (C) 2012 ARM Ltd * Author: Will Deacon <will.deacon@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/init.h> #include <linux/smp.h> #include <linux/of.h> #include <asm/psci.h> #include <asm/smp_plat.h> extern void secondary_startup(void); static void __init virt_smp_init_cpus(void) { } static void __init virt_smp_prepare_cpus(unsigned int max_cpus) { } static int __cpuinit virt_boot_secondary(unsigned int cpu, struct task_struct *idle) { if (psci_ops.cpu_on) return psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_startup)); return -ENODEV; } struct smp_operations __initdata virt_smp_ops = { .smp_init_cpus = virt_smp_init_cpus, .smp_prepare_cpus = virt_smp_prepare_cpus, .smp_boot_secondary = virt_boot_secondary, };
gpl-2.0
sktjdgns1189/android_kernel_samsung_SHW-M190S
drivers/media/video/cx18/cx18-av-core.c
1102
41632
/* * cx18 ADEC audio functions * * Derived from cx25840-core.c * * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include <media/v4l2-chip-ident.h> #include "cx18-driver.h" #include "cx18-io.h" #include "cx18-cards.h" int cx18_av_write(struct cx18 *cx, u16 addr, u8 value) { u32 reg = 0xc40000 + (addr & ~3); u32 mask = 0xff; int shift = (addr & 3) * 8; u32 x = cx18_read_reg(cx, reg); x = (x & ~(mask << shift)) | ((u32)value << shift); cx18_write_reg(cx, x, reg); return 0; } int cx18_av_write_expect(struct cx18 *cx, u16 addr, u8 value, u8 eval, u8 mask) { u32 reg = 0xc40000 + (addr & ~3); int shift = (addr & 3) * 8; u32 x = cx18_read_reg(cx, reg); x = (x & ~((u32)0xff << shift)) | ((u32)value << shift); cx18_write_reg_expect(cx, x, reg, ((u32)eval << shift), ((u32)mask << shift)); return 0; } int cx18_av_write4(struct cx18 *cx, u16 addr, u32 value) { cx18_write_reg(cx, value, 0xc40000 + addr); return 0; } int cx18_av_write4_expect(struct cx18 *cx, u16 addr, u32 value, u32 eval, u32 mask) { cx18_write_reg_expect(cx, value, 0xc40000 + addr, eval, mask); return 0; } int cx18_av_write4_noretry(struct cx18 *cx, u16 addr, u32 value) { cx18_write_reg_noretry(cx, value, 0xc40000 + addr); return 0; } u8 
cx18_av_read(struct cx18 *cx, u16 addr) { u32 x = cx18_read_reg(cx, 0xc40000 + (addr & ~3)); int shift = (addr & 3) * 8; return (x >> shift) & 0xff; } u32 cx18_av_read4(struct cx18 *cx, u16 addr) { return cx18_read_reg(cx, 0xc40000 + addr); } int cx18_av_and_or(struct cx18 *cx, u16 addr, unsigned and_mask, u8 or_value) { return cx18_av_write(cx, addr, (cx18_av_read(cx, addr) & and_mask) | or_value); } int cx18_av_and_or4(struct cx18 *cx, u16 addr, u32 and_mask, u32 or_value) { return cx18_av_write4(cx, addr, (cx18_av_read4(cx, addr) & and_mask) | or_value); } static void cx18_av_init(struct cx18 *cx) { /* * The crystal freq used in calculations in this driver will be * 28.636360 MHz. * Aim to run the PLLs' VCOs near 400 MHz to minimze errors. */ /* * VDCLK Integer = 0x0f, Post Divider = 0x04 * AIMCLK Integer = 0x0e, Post Divider = 0x16 */ cx18_av_write4(cx, CXADEC_PLL_CTRL1, 0x160e040f); /* VDCLK Fraction = 0x2be2fe */ /* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz before post divide */ cx18_av_write4(cx, CXADEC_VID_PLL_FRAC, 0x002be2fe); /* AIMCLK Fraction = 0x05227ad */ /* xtal * 0xe.2913d68/0x16 = 48000 * 384: 406 MHz pre post-div*/ cx18_av_write4(cx, CXADEC_AUX_PLL_FRAC, 0x005227ad); /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x16 */ cx18_av_write(cx, CXADEC_I2S_MCLK, 0x56); } static void cx18_av_initialize(struct v4l2_subdev *sd) { struct cx18_av_state *state = to_cx18_av_state(sd); struct cx18 *cx = v4l2_get_subdevdata(sd); u32 v; cx18_av_loadfw(cx); /* Stop 8051 code execution */ cx18_av_write4_expect(cx, CXADEC_DL_CTL, 0x03000000, 0x03000000, 0x13000000); /* initallize the PLL by toggling sleep bit */ v = cx18_av_read4(cx, CXADEC_HOST_REG1); /* enable sleep mode - register appears to be read only... 
*/ cx18_av_write4_expect(cx, CXADEC_HOST_REG1, v | 1, v, 0xfffe); /* disable sleep mode */ cx18_av_write4_expect(cx, CXADEC_HOST_REG1, v & 0xfffe, v & 0xfffe, 0xffff); /* initialize DLLs */ v = cx18_av_read4(cx, CXADEC_DLL1_DIAG_CTRL) & 0xE1FFFEFF; /* disable FLD */ cx18_av_write4(cx, CXADEC_DLL1_DIAG_CTRL, v); /* enable FLD */ cx18_av_write4(cx, CXADEC_DLL1_DIAG_CTRL, v | 0x10000100); v = cx18_av_read4(cx, CXADEC_DLL2_DIAG_CTRL) & 0xE1FFFEFF; /* disable FLD */ cx18_av_write4(cx, CXADEC_DLL2_DIAG_CTRL, v); /* enable FLD */ cx18_av_write4(cx, CXADEC_DLL2_DIAG_CTRL, v | 0x06000100); /* set analog bias currents. Set Vreg to 1.20V. */ cx18_av_write4(cx, CXADEC_AFE_DIAG_CTRL1, 0x000A1802); v = cx18_av_read4(cx, CXADEC_AFE_DIAG_CTRL3) | 1; /* enable TUNE_FIL_RST */ cx18_av_write4_expect(cx, CXADEC_AFE_DIAG_CTRL3, v, v, 0x03009F0F); /* disable TUNE_FIL_RST */ cx18_av_write4_expect(cx, CXADEC_AFE_DIAG_CTRL3, v & 0xFFFFFFFE, v & 0xFFFFFFFE, 0x03009F0F); /* enable 656 output */ cx18_av_and_or4(cx, CXADEC_PIN_CTRL1, ~0, 0x040C00); /* video output drive strength */ cx18_av_and_or4(cx, CXADEC_PIN_CTRL2, ~0, 0x2); /* reset video */ cx18_av_write4(cx, CXADEC_SOFT_RST_CTRL, 0x8000); cx18_av_write4(cx, CXADEC_SOFT_RST_CTRL, 0); /* * Disable Video Auto-config of the Analog Front End and Video PLL. * * Since we only use BT.656 pixel mode, which works for both 525 and 625 * line systems, it's just easier for us to set registers * 0x102 (CXADEC_CHIP_CTRL), 0x104-0x106 (CXADEC_AFE_CTRL), * 0x108-0x109 (CXADEC_PLL_CTRL1), and 0x10c-0x10f (CXADEC_VID_PLL_FRAC) * ourselves, than to run around cleaning up after the auto-config. * * (Note: my CX23418 chip doesn't seem to let the ACFG_DIS bit * get set to 1, but OTOH, it doesn't seem to do AFE and VID PLL * autoconfig either.) * * As a default, also turn off Dual mode for ADC2 and set ADC2 to CH3. 
*/ cx18_av_and_or4(cx, CXADEC_CHIP_CTRL, 0xFFFBFFFF, 0x00120000); /* Setup the Video and and Aux/Audio PLLs */ cx18_av_init(cx); /* set video to auto-detect */ /* Clear bits 11-12 to enable slow locking mode. Set autodetect mode */ /* set the comb notch = 1 */ cx18_av_and_or4(cx, CXADEC_MODE_CTRL, 0xFFF7E7F0, 0x02040800); /* Enable wtw_en in CRUSH_CTRL (Set bit 22) */ /* Enable maj_sel in CRUSH_CTRL (Set bit 20) */ cx18_av_and_or4(cx, CXADEC_CRUSH_CTRL, ~0, 0x00500000); /* Set VGA_TRACK_RANGE to 0x20 */ cx18_av_and_or4(cx, CXADEC_DFE_CTRL2, 0xFFFF00FF, 0x00002000); /* * Initial VBI setup * VIP-1.1, 10 bit mode, enable Raw, disable sliced, * don't clamp raw samples when codes are in use, 1 byte user D-words, * IDID0 has line #, RP code V bit transition on VBLANK, data during * blanking intervals */ cx18_av_write4(cx, CXADEC_OUT_CTRL1, 0x4013252e); /* Set the video input. The setting in MODE_CTRL gets lost when we do the above setup */ /* EncSetSignalStd(dwDevNum, pEnc->dwSigStd); */ /* EncSetVideoInput(dwDevNum, pEnc->VidIndSelection); */ /* * Analog Front End (AFE) * Default to luma on ch1/ADC1, chroma on ch2/ADC2, SIF on ch3/ADC2 * bypass_ch[1-3] use filter * droop_comp_ch[1-3] disable * clamp_en_ch[1-3] disable * aud_in_sel ADC2 * luma_in_sel ADC1 * chroma_in_sel ADC2 * clamp_sel_ch[2-3] midcode * clamp_sel_ch1 video decoder * vga_sel_ch3 audio decoder * vga_sel_ch[1-2] video decoder * half_bw_ch[1-3] disable * +12db_ch[1-3] disable */ cx18_av_and_or4(cx, CXADEC_AFE_CTRL, 0xFF000000, 0x00005D00); /* if(dwEnable && dw3DCombAvailable) { */ /* CxDevWrReg(CXADEC_SRC_COMB_CFG, 0x7728021F); */ /* } else { */ /* CxDevWrReg(CXADEC_SRC_COMB_CFG, 0x6628021F); */ /* } */ cx18_av_write4(cx, CXADEC_SRC_COMB_CFG, 0x6628021F); state->default_volume = 228 - cx18_av_read(cx, 0x8d4); state->default_volume = ((state->default_volume / 2) + 23) << 9; } static int cx18_av_reset(struct v4l2_subdev *sd, u32 val) { cx18_av_initialize(sd); return 0; } static int cx18_av_load_fw(struct 
v4l2_subdev *sd) { struct cx18_av_state *state = to_cx18_av_state(sd); if (!state->is_initialized) { /* initialize on first use */ state->is_initialized = 1; cx18_av_initialize(sd); } return 0; } void cx18_av_std_setup(struct cx18 *cx) { struct cx18_av_state *state = &cx->av_state; struct v4l2_subdev *sd = &state->sd; v4l2_std_id std = state->std; /* * Video ADC crystal clock to pixel clock SRC decimation ratio * 28.636360 MHz/13.5 Mpps * 256 = 0x21f.07b */ const int src_decimation = 0x21f; int hblank, hactive, burst, vblank, vactive, sc; int vblank656; int luma_lpf, uv_lpf, comb; u32 pll_int, pll_frac, pll_post; /* datasheet startup, step 8d */ if (std & ~V4L2_STD_NTSC) cx18_av_write(cx, 0x49f, 0x11); else cx18_av_write(cx, 0x49f, 0x14); /* * Note: At the end of a field, there are 3 sets of half line duration * (double horizontal rate) pulses: * * 5 (625) or 6 (525) half-lines to blank for the vertical retrace * 5 (625) or 6 (525) vertical sync pulses of half line duration * 5 (625) or 6 (525) half-lines of equalization pulses */ if (std & V4L2_STD_625_50) { /* * The following relationships of half line counts should hold: * 625 = vblank656 + vactive * 10 = vblank656 - vblank = vsync pulses + equalization pulses * * vblank656: half lines after line 625/mid-313 of blanked video * vblank: half lines, after line 5/317, of blanked video * vactive: half lines of active video + * 5 half lines after the end of active video * * As far as I can tell: * vblank656 starts counting from the falling edge of the first * vsync pulse (start of line 1 or mid-313) * vblank starts counting from the after the 5 vsync pulses and * 5 or 4 equalization pulses (start of line 6 or 318) * * For 625 line systems the driver will extract VBI information * from lines 6-23 and lines 318-335 (but the slicer can only * handle 17 lines, not the 18 in the vblank region). 
* In addition, we need vblank656 and vblank to be one whole * line longer, to cover line 24 and 336, so the SAV/EAV RP * codes get generated such that the encoder can actually * extract line 23 & 335 (WSS). We'll lose 1 line in each field * at the top of the screen. * * It appears the 5 half lines that happen after active * video must be included in vactive (579 instead of 574), * otherwise the colors get badly displayed in various regions * of the screen. I guess the chroma comb filter gets confused * without them (at least when a PVR-350 is the PAL source). */ vblank656 = 48; /* lines 1 - 24 & 313 - 336 */ vblank = 38; /* lines 6 - 24 & 318 - 336 */ vactive = 579; /* lines 24 - 313 & 337 - 626 */ /* * For a 13.5 Mpps clock and 15,625 Hz line rate, a line is * is 864 pixels = 720 active + 144 blanking. ITU-R BT.601 * specifies 12 luma clock periods or ~ 0.9 * 13.5 Mpps after * the end of active video to start a horizontal line, so that * leaves 132 pixels of hblank to ignore. */ hblank = 132; hactive = 720; /* * Burst gate delay (for 625 line systems) * Hsync leading edge to color burst rise = 5.6 us * Color burst width = 2.25 us * Gate width = 4 pixel clocks * (5.6 us + 2.25/2 us) * 13.5 Mpps + 4/2 clocks = 92.79 clocks */ burst = 93; luma_lpf = 2; if (std & V4L2_STD_PAL) { uv_lpf = 1; comb = 0x20; /* sc = 4433618.75 * src_decimation/28636360 * 2^13 */ sc = 688700; } else if (std == V4L2_STD_PAL_Nc) { uv_lpf = 1; comb = 0x20; /* sc = 3582056.25 * src_decimation/28636360 * 2^13 */ sc = 556422; } else { /* SECAM */ uv_lpf = 0; comb = 0; /* (fr + fb)/2 = (4406260 + 4250000)/2 = 4328130 */ /* sc = 4328130 * src_decimation/28636360 * 2^13 */ sc = 672314; } } else { /* * The following relationships of half line counts should hold: * 525 = prevsync + vblank656 + vactive * 12 = vblank656 - vblank = vsync pulses + equalization pulses * * prevsync: 6 half-lines before the vsync pulses * vblank656: half lines, after line 3/mid-266, of blanked video * vblank: half lines, 
after line 9/272, of blanked video * vactive: half lines of active video * * As far as I can tell: * vblank656 starts counting from the falling edge of the first * vsync pulse (start of line 4 or mid-266) * vblank starts counting from the after the 6 vsync pulses and * 6 or 5 equalization pulses (start of line 10 or 272) * * For 525 line systems the driver will extract VBI information * from lines 10-21 and lines 273-284. */ vblank656 = 38; /* lines 4 - 22 & 266 - 284 */ vblank = 26; /* lines 10 - 22 & 272 - 284 */ vactive = 481; /* lines 23 - 263 & 285 - 525 */ /* * For a 13.5 Mpps clock and 15,734.26 Hz line rate, a line is * is 858 pixels = 720 active + 138 blanking. The Hsync leading * edge should happen 1.2 us * 13.5 Mpps ~= 16 pixels after the * end of active video, leaving 122 pixels of hblank to ignore * before active video starts. */ hactive = 720; hblank = 122; luma_lpf = 1; uv_lpf = 1; /* * Burst gate delay (for 525 line systems) * Hsync leading edge to color burst rise = 5.3 us * Color burst width = 2.5 us * Gate width = 4 pixel clocks * (5.3 us + 2.5/2 us) * 13.5 Mpps + 4/2 clocks = 90.425 clocks */ if (std == V4L2_STD_PAL_60) { burst = 90; luma_lpf = 2; comb = 0x20; /* sc = 4433618.75 * src_decimation/28636360 * 2^13 */ sc = 688700; } else if (std == V4L2_STD_PAL_M) { /* The 97 needs to be verified against PAL-M timings */ burst = 97; comb = 0x20; /* sc = 3575611.49 * src_decimation/28636360 * 2^13 */ sc = 555421; } else { burst = 90; comb = 0x66; /* sc = 3579545.45.. 
* src_decimation/28636360 * 2^13 */ sc = 556032; } } /* DEBUG: Displays configured PLL frequency */ pll_int = cx18_av_read(cx, 0x108); pll_frac = cx18_av_read4(cx, 0x10c) & 0x1ffffff; pll_post = cx18_av_read(cx, 0x109); CX18_DEBUG_INFO_DEV(sd, "PLL regs = int: %u, frac: %u, post: %u\n", pll_int, pll_frac, pll_post); if (pll_post) { int fsc, pll; u64 tmp; pll = (28636360L * ((((u64)pll_int) << 25) + pll_frac)) >> 25; pll /= pll_post; CX18_DEBUG_INFO_DEV(sd, "Video PLL = %d.%06d MHz\n", pll / 1000000, pll % 1000000); CX18_DEBUG_INFO_DEV(sd, "Pixel rate = %d.%06d Mpixel/sec\n", pll / 8000000, (pll / 8) % 1000000); CX18_DEBUG_INFO_DEV(sd, "ADC XTAL/pixel clock decimation ratio " "= %d.%03d\n", src_decimation / 256, ((src_decimation % 256) * 1000) / 256); tmp = 28636360 * (u64) sc; do_div(tmp, src_decimation); fsc = tmp >> 13; CX18_DEBUG_INFO_DEV(sd, "Chroma sub-carrier initial freq = %d.%06d " "MHz\n", fsc / 1000000, fsc % 1000000); CX18_DEBUG_INFO_DEV(sd, "hblank %i, hactive %i, vblank %i, " "vactive %i, vblank656 %i, src_dec %i, " "burst 0x%02x, luma_lpf %i, uv_lpf %i, " "comb 0x%02x, sc 0x%06x\n", hblank, hactive, vblank, vactive, vblank656, src_decimation, burst, luma_lpf, uv_lpf, comb, sc); } /* Sets horizontal blanking delay and active lines */ cx18_av_write(cx, 0x470, hblank); cx18_av_write(cx, 0x471, 0xff & (((hblank >> 8) & 0x3) | (hactive << 4))); cx18_av_write(cx, 0x472, hactive >> 4); /* Sets burst gate delay */ cx18_av_write(cx, 0x473, burst); /* Sets vertical blanking delay and active duration */ cx18_av_write(cx, 0x474, vblank); cx18_av_write(cx, 0x475, 0xff & (((vblank >> 8) & 0x3) | (vactive << 4))); cx18_av_write(cx, 0x476, vactive >> 4); cx18_av_write(cx, 0x477, vblank656); /* Sets src decimation rate */ cx18_av_write(cx, 0x478, 0xff & src_decimation); cx18_av_write(cx, 0x479, 0xff & (src_decimation >> 8)); /* Sets Luma and UV Low pass filters */ cx18_av_write(cx, 0x47a, luma_lpf << 6 | ((uv_lpf << 4) & 0x30)); /* Enables comb filters */ 
cx18_av_write(cx, 0x47b, comb); /* Sets SC Step*/ cx18_av_write(cx, 0x47c, sc); cx18_av_write(cx, 0x47d, 0xff & sc >> 8); cx18_av_write(cx, 0x47e, 0xff & sc >> 16); if (std & V4L2_STD_625_50) { state->slicer_line_delay = 1; state->slicer_line_offset = (6 + state->slicer_line_delay - 2); } else { state->slicer_line_delay = 0; state->slicer_line_offset = (10 + state->slicer_line_delay - 2); } cx18_av_write(cx, 0x47f, state->slicer_line_delay); } static void input_change(struct cx18 *cx) { struct cx18_av_state *state = &cx->av_state; v4l2_std_id std = state->std; u8 v; /* Follow step 8c and 8d of section 3.16 in the cx18_av datasheet */ cx18_av_write(cx, 0x49f, (std & V4L2_STD_NTSC) ? 0x14 : 0x11); cx18_av_and_or(cx, 0x401, ~0x60, 0); cx18_av_and_or(cx, 0x401, ~0x60, 0x60); if (std & V4L2_STD_525_60) { if (std == V4L2_STD_NTSC_M_JP) { /* Japan uses EIAJ audio standard */ cx18_av_write_expect(cx, 0x808, 0xf7, 0xf7, 0xff); cx18_av_write_expect(cx, 0x80b, 0x02, 0x02, 0x3f); } else if (std == V4L2_STD_NTSC_M_KR) { /* South Korea uses A2 audio standard */ cx18_av_write_expect(cx, 0x808, 0xf8, 0xf8, 0xff); cx18_av_write_expect(cx, 0x80b, 0x03, 0x03, 0x3f); } else { /* Others use the BTSC audio standard */ cx18_av_write_expect(cx, 0x808, 0xf6, 0xf6, 0xff); cx18_av_write_expect(cx, 0x80b, 0x01, 0x01, 0x3f); } } else if (std & V4L2_STD_PAL) { /* Follow tuner change procedure for PAL */ cx18_av_write_expect(cx, 0x808, 0xff, 0xff, 0xff); cx18_av_write_expect(cx, 0x80b, 0x03, 0x03, 0x3f); } else if (std & V4L2_STD_SECAM) { /* Select autodetect for SECAM */ cx18_av_write_expect(cx, 0x808, 0xff, 0xff, 0xff); cx18_av_write_expect(cx, 0x80b, 0x03, 0x03, 0x3f); } v = cx18_av_read(cx, 0x803); if (v & 0x10) { /* restart audio decoder microcontroller */ v &= ~0x10; cx18_av_write_expect(cx, 0x803, v, v, 0x1f); v |= 0x10; cx18_av_write_expect(cx, 0x803, v, v, 0x1f); } } static int cx18_av_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq) { struct cx18 *cx = 
v4l2_get_subdevdata(sd); input_change(cx); return 0; } static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input, enum cx18_av_audio_input aud_input) { struct cx18_av_state *state = &cx->av_state; struct v4l2_subdev *sd = &state->sd; enum analog_signal_type { NONE, CVBS, Y, C, SIF, Pb, Pr } ch[3] = {NONE, NONE, NONE}; u8 afe_mux_cfg; u8 adc2_cfg; u8 input_mode; u32 afe_cfg; int i; CX18_DEBUG_INFO_DEV(sd, "decoder set video input %d, audio input %d\n", vid_input, aud_input); if (vid_input >= CX18_AV_COMPOSITE1 && vid_input <= CX18_AV_COMPOSITE8) { afe_mux_cfg = 0xf0 + (vid_input - CX18_AV_COMPOSITE1); ch[0] = CVBS; input_mode = 0x0; } else if (vid_input >= CX18_AV_COMPONENT_LUMA1) { int luma = vid_input & 0xf000; int r_chroma = vid_input & 0xf0000; int b_chroma = vid_input & 0xf00000; if ((vid_input & ~0xfff000) || luma < CX18_AV_COMPONENT_LUMA1 || luma > CX18_AV_COMPONENT_LUMA8 || r_chroma < CX18_AV_COMPONENT_R_CHROMA4 || r_chroma > CX18_AV_COMPONENT_R_CHROMA6 || b_chroma < CX18_AV_COMPONENT_B_CHROMA7 || b_chroma > CX18_AV_COMPONENT_B_CHROMA8) { CX18_ERR_DEV(sd, "0x%06x is not a valid video input!\n", vid_input); return -EINVAL; } afe_mux_cfg = (luma - CX18_AV_COMPONENT_LUMA1) >> 12; ch[0] = Y; afe_mux_cfg |= (r_chroma - CX18_AV_COMPONENT_R_CHROMA4) >> 12; ch[1] = Pr; afe_mux_cfg |= (b_chroma - CX18_AV_COMPONENT_B_CHROMA7) >> 14; ch[2] = Pb; input_mode = 0x6; } else { int luma = vid_input & 0xf0; int chroma = vid_input & 0xf00; if ((vid_input & ~0xff0) || luma < CX18_AV_SVIDEO_LUMA1 || luma > CX18_AV_SVIDEO_LUMA8 || chroma < CX18_AV_SVIDEO_CHROMA4 || chroma > CX18_AV_SVIDEO_CHROMA8) { CX18_ERR_DEV(sd, "0x%06x is not a valid video input!\n", vid_input); return -EINVAL; } afe_mux_cfg = 0xf0 + ((luma - CX18_AV_SVIDEO_LUMA1) >> 4); ch[0] = Y; if (chroma >= CX18_AV_SVIDEO_CHROMA7) { afe_mux_cfg &= 0x3f; afe_mux_cfg |= (chroma - CX18_AV_SVIDEO_CHROMA7) >> 2; ch[2] = C; } else { afe_mux_cfg &= 0xcf; afe_mux_cfg |= (chroma - CX18_AV_SVIDEO_CHROMA4) >> 4; 
ch[1] = C; } input_mode = 0x2; } switch (aud_input) { case CX18_AV_AUDIO_SERIAL1: case CX18_AV_AUDIO_SERIAL2: /* do nothing, use serial audio input */ break; case CX18_AV_AUDIO4: afe_mux_cfg &= ~0x30; ch[1] = SIF; break; case CX18_AV_AUDIO5: afe_mux_cfg = (afe_mux_cfg & ~0x30) | 0x10; ch[1] = SIF; break; case CX18_AV_AUDIO6: afe_mux_cfg = (afe_mux_cfg & ~0x30) | 0x20; ch[1] = SIF; break; case CX18_AV_AUDIO7: afe_mux_cfg &= ~0xc0; ch[2] = SIF; break; case CX18_AV_AUDIO8: afe_mux_cfg = (afe_mux_cfg & ~0xc0) | 0x40; ch[2] = SIF; break; default: CX18_ERR_DEV(sd, "0x%04x is not a valid audio input!\n", aud_input); return -EINVAL; } /* Set up analog front end multiplexers */ cx18_av_write_expect(cx, 0x103, afe_mux_cfg, afe_mux_cfg, 0xf7); /* Set INPUT_MODE to Composite, S-Video, or Component */ cx18_av_and_or(cx, 0x401, ~0x6, input_mode); /* Set CH_SEL_ADC2 to 1 if input comes from CH3 */ adc2_cfg = cx18_av_read(cx, 0x102); if (ch[2] == NONE) adc2_cfg &= ~0x2; /* No sig on CH3, set ADC2 to CH2 for input */ else adc2_cfg |= 0x2; /* Signal on CH3, set ADC2 to CH3 for input */ /* Set DUAL_MODE_ADC2 to 1 if input comes from both CH2 and CH3 */ if (ch[1] != NONE && ch[2] != NONE) adc2_cfg |= 0x4; /* Set dual mode */ else adc2_cfg &= ~0x4; /* Clear dual mode */ cx18_av_write_expect(cx, 0x102, adc2_cfg, adc2_cfg, 0x17); /* Configure the analog front end */ afe_cfg = cx18_av_read4(cx, CXADEC_AFE_CTRL); afe_cfg &= 0xff000000; afe_cfg |= 0x00005000; /* CHROMA_IN, AUD_IN: ADC2; LUMA_IN: ADC1 */ if (ch[1] != NONE && ch[2] != NONE) afe_cfg |= 0x00000030; /* half_bw_ch[2-3] since in dual mode */ for (i = 0; i < 3; i++) { switch (ch[i]) { default: case NONE: /* CLAMP_SEL = Fixed to midcode clamp level */ afe_cfg |= (0x00000200 << i); break; case CVBS: case Y: if (i > 0) afe_cfg |= 0x00002000; /* LUMA_IN_SEL: ADC2 */ break; case C: case Pb: case Pr: /* CLAMP_SEL = Fixed to midcode clamp level */ afe_cfg |= (0x00000200 << i); if (i == 0 && ch[i] == C) afe_cfg &= ~0x00001000; /* 
CHROMA_IN_SEL ADC1 */ break; case SIF: /* * VGA_GAIN_SEL = Audio Decoder * CLAMP_SEL = Fixed to midcode clamp level */ afe_cfg |= (0x00000240 << i); if (i == 0) afe_cfg &= ~0x00004000; /* AUD_IN_SEL ADC1 */ break; } } cx18_av_write4(cx, CXADEC_AFE_CTRL, afe_cfg); state->vid_input = vid_input; state->aud_input = aud_input; cx18_av_audio_set_path(cx); input_change(cx); return 0; } static int cx18_av_s_video_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct cx18_av_state *state = to_cx18_av_state(sd); struct cx18 *cx = v4l2_get_subdevdata(sd); return set_input(cx, input, state->aud_input); } static int cx18_av_s_audio_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct cx18_av_state *state = to_cx18_av_state(sd); struct cx18 *cx = v4l2_get_subdevdata(sd); return set_input(cx, state->vid_input, input); } static int cx18_av_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct cx18_av_state *state = to_cx18_av_state(sd); struct cx18 *cx = v4l2_get_subdevdata(sd); u8 vpres; u8 mode; int val = 0; if (state->radio) return 0; vpres = cx18_av_read(cx, 0x40e) & 0x20; vt->signal = vpres ? 
0xffff : 0x0; vt->capability |= V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2 | V4L2_TUNER_CAP_SAP; mode = cx18_av_read(cx, 0x804); /* get rxsubchans and audmode */ if ((mode & 0xf) == 1) val |= V4L2_TUNER_SUB_STEREO; else val |= V4L2_TUNER_SUB_MONO; if (mode == 2 || mode == 4) val = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; if (mode & 0x10) val |= V4L2_TUNER_SUB_SAP; vt->rxsubchans = val; vt->audmode = state->audmode; return 0; } static int cx18_av_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct cx18_av_state *state = to_cx18_av_state(sd); struct cx18 *cx = v4l2_get_subdevdata(sd); u8 v; if (state->radio) return 0; v = cx18_av_read(cx, 0x809); v &= ~0xf; switch (vt->audmode) { case V4L2_TUNER_MODE_MONO: /* mono -> mono stereo -> mono bilingual -> lang1 */ break; case V4L2_TUNER_MODE_STEREO: case V4L2_TUNER_MODE_LANG1: /* mono -> mono stereo -> stereo bilingual -> lang1 */ v |= 0x4; break; case V4L2_TUNER_MODE_LANG1_LANG2: /* mono -> mono stereo -> stereo bilingual -> lang1/lang2 */ v |= 0x7; break; case V4L2_TUNER_MODE_LANG2: /* mono -> mono stereo -> stereo bilingual -> lang2 */ v |= 0x1; break; default: return -EINVAL; } cx18_av_write_expect(cx, 0x809, v, v, 0xff); state->audmode = vt->audmode; return 0; } static int cx18_av_s_std(struct v4l2_subdev *sd, v4l2_std_id norm) { struct cx18_av_state *state = to_cx18_av_state(sd); struct cx18 *cx = v4l2_get_subdevdata(sd); u8 fmt = 0; /* zero is autodetect */ u8 pal_m = 0; if (state->radio == 0 && state->std == norm) return 0; state->radio = 0; state->std = norm; /* First tests should be against specific std */ if (state->std == V4L2_STD_NTSC_M_JP) { fmt = 0x2; } else if (state->std == V4L2_STD_NTSC_443) { fmt = 0x3; } else if (state->std == V4L2_STD_PAL_M) { pal_m = 1; fmt = 0x5; } else if (state->std == V4L2_STD_PAL_N) { fmt = 0x6; } else if (state->std == V4L2_STD_PAL_Nc) { fmt = 0x7; } else if (state->std == V4L2_STD_PAL_60) { fmt = 0x8; } else { /* Then, test against 
generic ones */ if (state->std & V4L2_STD_NTSC) fmt = 0x1; else if (state->std & V4L2_STD_PAL) fmt = 0x4; else if (state->std & V4L2_STD_SECAM) fmt = 0xc; } CX18_DEBUG_INFO_DEV(sd, "changing video std to fmt %i\n", fmt); /* Follow step 9 of section 3.16 in the cx18_av datasheet. Without this PAL may display a vertical ghosting effect. This happens for example with the Yuan MPC622. */ if (fmt >= 4 && fmt < 8) { /* Set format to NTSC-M */ cx18_av_and_or(cx, 0x400, ~0xf, 1); /* Turn off LCOMB */ cx18_av_and_or(cx, 0x47b, ~6, 0); } cx18_av_and_or(cx, 0x400, ~0x2f, fmt | 0x20); cx18_av_and_or(cx, 0x403, ~0x3, pal_m); cx18_av_std_setup(cx); input_change(cx); return 0; } static int cx18_av_s_radio(struct v4l2_subdev *sd) { struct cx18_av_state *state = to_cx18_av_state(sd); state->radio = 1; return 0; } static int cx18_av_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct cx18 *cx = v4l2_get_subdevdata(sd); switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: if (ctrl->value < 0 || ctrl->value > 255) { CX18_ERR_DEV(sd, "invalid brightness setting %d\n", ctrl->value); return -ERANGE; } cx18_av_write(cx, 0x414, ctrl->value - 128); break; case V4L2_CID_CONTRAST: if (ctrl->value < 0 || ctrl->value > 127) { CX18_ERR_DEV(sd, "invalid contrast setting %d\n", ctrl->value); return -ERANGE; } cx18_av_write(cx, 0x415, ctrl->value << 1); break; case V4L2_CID_SATURATION: if (ctrl->value < 0 || ctrl->value > 127) { CX18_ERR_DEV(sd, "invalid saturation setting %d\n", ctrl->value); return -ERANGE; } cx18_av_write(cx, 0x420, ctrl->value << 1); cx18_av_write(cx, 0x421, ctrl->value << 1); break; case V4L2_CID_HUE: if (ctrl->value < -128 || ctrl->value > 127) { CX18_ERR_DEV(sd, "invalid hue setting %d\n", ctrl->value); return -ERANGE; } cx18_av_write(cx, 0x422, ctrl->value); break; case V4L2_CID_AUDIO_VOLUME: case V4L2_CID_AUDIO_BASS: case V4L2_CID_AUDIO_TREBLE: case V4L2_CID_AUDIO_BALANCE: case V4L2_CID_AUDIO_MUTE: return cx18_av_audio_s_ctrl(cx, ctrl); default: return -EINVAL; } 
return 0; } static int cx18_av_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct cx18 *cx = v4l2_get_subdevdata(sd); switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: ctrl->value = (s8)cx18_av_read(cx, 0x414) + 128; break; case V4L2_CID_CONTRAST: ctrl->value = cx18_av_read(cx, 0x415) >> 1; break; case V4L2_CID_SATURATION: ctrl->value = cx18_av_read(cx, 0x420) >> 1; break; case V4L2_CID_HUE: ctrl->value = (s8)cx18_av_read(cx, 0x422); break; case V4L2_CID_AUDIO_VOLUME: case V4L2_CID_AUDIO_BASS: case V4L2_CID_AUDIO_TREBLE: case V4L2_CID_AUDIO_BALANCE: case V4L2_CID_AUDIO_MUTE: return cx18_av_audio_g_ctrl(cx, ctrl); default: return -EINVAL; } return 0; } static int cx18_av_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc) { struct cx18_av_state *state = to_cx18_av_state(sd); switch (qc->id) { case V4L2_CID_BRIGHTNESS: return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128); case V4L2_CID_CONTRAST: case V4L2_CID_SATURATION: return v4l2_ctrl_query_fill(qc, 0, 127, 1, 64); case V4L2_CID_HUE: return v4l2_ctrl_query_fill(qc, -128, 127, 1, 0); default: break; } switch (qc->id) { case V4L2_CID_AUDIO_VOLUME: return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, state->default_volume); case V4L2_CID_AUDIO_MUTE: return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0); case V4L2_CID_AUDIO_BALANCE: case V4L2_CID_AUDIO_BASS: case V4L2_CID_AUDIO_TREBLE: return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 32768); default: return -EINVAL; } return -EINVAL; } static int cx18_av_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt) { struct cx18_av_state *state = to_cx18_av_state(sd); struct cx18 *cx = v4l2_get_subdevdata(sd); int HSC, VSC, Vsrc, Hsrc, filter, Vlines; int is_50Hz = !(state->std & V4L2_STD_525_60); if (fmt->code != V4L2_MBUS_FMT_FIXED) return -EINVAL; fmt->field = V4L2_FIELD_INTERLACED; fmt->colorspace = V4L2_COLORSPACE_SMPTE170M; Vsrc = (cx18_av_read(cx, 0x476) & 0x3f) << 4; Vsrc |= (cx18_av_read(cx, 0x475) & 0xf0) >> 4; Hsrc = (cx18_av_read(cx, 
0x472) & 0x3f) << 4; Hsrc |= (cx18_av_read(cx, 0x471) & 0xf0) >> 4; /* * This adjustment reflects the excess of vactive, set in * cx18_av_std_setup(), above standard values: * * 480 + 1 for 60 Hz systems * 576 + 3 for 50 Hz systems */ Vlines = fmt->height + (is_50Hz ? 3 : 1); /* * Invalid height and width scaling requests are: * 1. width less than 1/16 of the source width * 2. width greater than the source width * 3. height less than 1/8 of the source height * 4. height greater than the source height */ if ((fmt->width * 16 < Hsrc) || (Hsrc < fmt->width) || (Vlines * 8 < Vsrc) || (Vsrc < Vlines)) { CX18_ERR_DEV(sd, "%dx%d is not a valid size!\n", fmt->width, fmt->height); return -ERANGE; } HSC = (Hsrc * (1 << 20)) / fmt->width - (1 << 20); VSC = (1 << 16) - (Vsrc * (1 << 9) / Vlines - (1 << 9)); VSC &= 0x1fff; if (fmt->width >= 385) filter = 0; else if (fmt->width > 192) filter = 1; else if (fmt->width > 96) filter = 2; else filter = 3; CX18_DEBUG_INFO_DEV(sd, "decoder set size %dx%d -> scale %ux%u\n", fmt->width, fmt->height, HSC, VSC); /* HSCALE=HSC */ cx18_av_write(cx, 0x418, HSC & 0xff); cx18_av_write(cx, 0x419, (HSC >> 8) & 0xff); cx18_av_write(cx, 0x41a, HSC >> 16); /* VSCALE=VSC */ cx18_av_write(cx, 0x41c, VSC & 0xff); cx18_av_write(cx, 0x41d, VSC >> 8); /* VS_INTRLACE=1 VFILT=filter */ cx18_av_write(cx, 0x41e, 0x8 | filter); return 0; } static int cx18_av_s_stream(struct v4l2_subdev *sd, int enable) { struct cx18 *cx = v4l2_get_subdevdata(sd); CX18_DEBUG_INFO_DEV(sd, "%s output\n", enable ? 
"enable" : "disable"); if (enable) { cx18_av_write(cx, 0x115, 0x8c); cx18_av_write(cx, 0x116, 0x07); } else { cx18_av_write(cx, 0x115, 0x00); cx18_av_write(cx, 0x116, 0x00); } return 0; } static void log_video_status(struct cx18 *cx) { static const char *const fmt_strs[] = { "0x0", "NTSC-M", "NTSC-J", "NTSC-4.43", "PAL-BDGHI", "PAL-M", "PAL-N", "PAL-Nc", "PAL-60", "0x9", "0xA", "0xB", "SECAM", "0xD", "0xE", "0xF" }; struct cx18_av_state *state = &cx->av_state; struct v4l2_subdev *sd = &state->sd; u8 vidfmt_sel = cx18_av_read(cx, 0x400) & 0xf; u8 gen_stat1 = cx18_av_read(cx, 0x40d); u8 gen_stat2 = cx18_av_read(cx, 0x40e); int vid_input = state->vid_input; CX18_INFO_DEV(sd, "Video signal: %spresent\n", (gen_stat2 & 0x20) ? "" : "not "); CX18_INFO_DEV(sd, "Detected format: %s\n", fmt_strs[gen_stat1 & 0xf]); CX18_INFO_DEV(sd, "Specified standard: %s\n", vidfmt_sel ? fmt_strs[vidfmt_sel] : "automatic detection"); if (vid_input >= CX18_AV_COMPOSITE1 && vid_input <= CX18_AV_COMPOSITE8) { CX18_INFO_DEV(sd, "Specified video input: Composite %d\n", vid_input - CX18_AV_COMPOSITE1 + 1); } else { CX18_INFO_DEV(sd, "Specified video input: " "S-Video (Luma In%d, Chroma In%d)\n", (vid_input & 0xf0) >> 4, (vid_input & 0xf00) >> 8); } CX18_INFO_DEV(sd, "Specified audioclock freq: %d Hz\n", state->audclk_freq); } static void log_audio_status(struct cx18 *cx) { struct cx18_av_state *state = &cx->av_state; struct v4l2_subdev *sd = &state->sd; u8 download_ctl = cx18_av_read(cx, 0x803); u8 mod_det_stat0 = cx18_av_read(cx, 0x804); u8 mod_det_stat1 = cx18_av_read(cx, 0x805); u8 audio_config = cx18_av_read(cx, 0x808); u8 pref_mode = cx18_av_read(cx, 0x809); u8 afc0 = cx18_av_read(cx, 0x80b); u8 mute_ctl = cx18_av_read(cx, 0x8d3); int aud_input = state->aud_input; char *p; switch (mod_det_stat0) { case 0x00: p = "mono"; break; case 0x01: p = "stereo"; break; case 0x02: p = "dual"; break; case 0x04: p = "tri"; break; case 0x10: p = "mono with SAP"; break; case 0x11: p = "stereo with SAP"; 
break; case 0x12: p = "dual with SAP"; break; case 0x14: p = "tri with SAP"; break; case 0xfe: p = "forced mode"; break; default: p = "not defined"; break; } CX18_INFO_DEV(sd, "Detected audio mode: %s\n", p); switch (mod_det_stat1) { case 0x00: p = "not defined"; break; case 0x01: p = "EIAJ"; break; case 0x02: p = "A2-M"; break; case 0x03: p = "A2-BG"; break; case 0x04: p = "A2-DK1"; break; case 0x05: p = "A2-DK2"; break; case 0x06: p = "A2-DK3"; break; case 0x07: p = "A1 (6.0 MHz FM Mono)"; break; case 0x08: p = "AM-L"; break; case 0x09: p = "NICAM-BG"; break; case 0x0a: p = "NICAM-DK"; break; case 0x0b: p = "NICAM-I"; break; case 0x0c: p = "NICAM-L"; break; case 0x0d: p = "BTSC/EIAJ/A2-M Mono (4.5 MHz FMMono)"; break; case 0x0e: p = "IF FM Radio"; break; case 0x0f: p = "BTSC"; break; case 0x10: p = "detected chrominance"; break; case 0xfd: p = "unknown audio standard"; break; case 0xfe: p = "forced audio standard"; break; case 0xff: p = "no detected audio standard"; break; default: p = "not defined"; break; } CX18_INFO_DEV(sd, "Detected audio standard: %s\n", p); CX18_INFO_DEV(sd, "Audio muted: %s\n", (mute_ctl & 0x2) ? "yes" : "no"); CX18_INFO_DEV(sd, "Audio microcontroller: %s\n", (download_ctl & 0x10) ? 
"running" : "stopped"); switch (audio_config >> 4) { case 0x00: p = "undefined"; break; case 0x01: p = "BTSC"; break; case 0x02: p = "EIAJ"; break; case 0x03: p = "A2-M"; break; case 0x04: p = "A2-BG"; break; case 0x05: p = "A2-DK1"; break; case 0x06: p = "A2-DK2"; break; case 0x07: p = "A2-DK3"; break; case 0x08: p = "A1 (6.0 MHz FM Mono)"; break; case 0x09: p = "AM-L"; break; case 0x0a: p = "NICAM-BG"; break; case 0x0b: p = "NICAM-DK"; break; case 0x0c: p = "NICAM-I"; break; case 0x0d: p = "NICAM-L"; break; case 0x0e: p = "FM radio"; break; case 0x0f: p = "automatic detection"; break; default: p = "undefined"; break; } CX18_INFO_DEV(sd, "Configured audio standard: %s\n", p); if ((audio_config >> 4) < 0xF) { switch (audio_config & 0xF) { case 0x00: p = "MONO1 (LANGUAGE A/Mono L+R channel for BTSC, EIAJ, A2)"; break; case 0x01: p = "MONO2 (LANGUAGE B)"; break; case 0x02: p = "MONO3 (STEREO forced MONO)"; break; case 0x03: p = "MONO4 (NICAM ANALOG-Language C/Analog Fallback)"; break; case 0x04: p = "STEREO"; break; case 0x05: p = "DUAL1 (AC)"; break; case 0x06: p = "DUAL2 (BC)"; break; case 0x07: p = "DUAL3 (AB)"; break; default: p = "undefined"; } CX18_INFO_DEV(sd, "Configured audio mode: %s\n", p); } else { switch (audio_config & 0xF) { case 0x00: p = "BG"; break; case 0x01: p = "DK1"; break; case 0x02: p = "DK2"; break; case 0x03: p = "DK3"; break; case 0x04: p = "I"; break; case 0x05: p = "L"; break; case 0x06: p = "BTSC"; break; case 0x07: p = "EIAJ"; break; case 0x08: p = "A2-M"; break; case 0x09: p = "FM Radio (4.5 MHz)"; break; case 0x0a: p = "FM Radio (5.5 MHz)"; break; case 0x0b: p = "S-Video"; break; case 0x0f: p = "automatic standard and mode detection"; break; default: p = "undefined"; break; } CX18_INFO_DEV(sd, "Configured audio system: %s\n", p); } if (aud_input) CX18_INFO_DEV(sd, "Specified audio input: Tuner (In%d)\n", aud_input); else CX18_INFO_DEV(sd, "Specified audio input: External\n"); switch (pref_mode & 0xf) { case 0: p = "mono/language A"; 
break; case 1: p = "language B"; break; case 2: p = "language C"; break; case 3: p = "analog fallback"; break; case 4: p = "stereo"; break; case 5: p = "language AC"; break; case 6: p = "language BC"; break; case 7: p = "language AB"; break; default: p = "undefined"; break; } CX18_INFO_DEV(sd, "Preferred audio mode: %s\n", p); if ((audio_config & 0xf) == 0xf) { switch ((afc0 >> 3) & 0x1) { case 0: p = "system DK"; break; case 1: p = "system L"; break; } CX18_INFO_DEV(sd, "Selected 65 MHz format: %s\n", p); switch (afc0 & 0x7) { case 0: p = "Chroma"; break; case 1: p = "BTSC"; break; case 2: p = "EIAJ"; break; case 3: p = "A2-M"; break; case 4: p = "autodetect"; break; default: p = "undefined"; break; } CX18_INFO_DEV(sd, "Selected 45 MHz format: %s\n", p); } } static int cx18_av_log_status(struct v4l2_subdev *sd) { struct cx18 *cx = v4l2_get_subdevdata(sd); log_video_status(cx); log_audio_status(cx); return 0; } static inline int cx18_av_dbg_match(const struct v4l2_dbg_match *match) { return match->type == V4L2_CHIP_MATCH_HOST && match->addr == 1; } static int cx18_av_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct cx18_av_state *state = to_cx18_av_state(sd); if (cx18_av_dbg_match(&chip->match)) { chip->ident = state->id; chip->revision = state->rev; } return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int cx18_av_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct cx18 *cx = v4l2_get_subdevdata(sd); if (!cx18_av_dbg_match(&reg->match)) return -EINVAL; if ((reg->reg & 0x3) != 0) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; reg->size = 4; reg->val = cx18_av_read4(cx, reg->reg & 0x00000ffc); return 0; } static int cx18_av_s_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct cx18 *cx = v4l2_get_subdevdata(sd); if (!cx18_av_dbg_match(&reg->match)) return -EINVAL; if ((reg->reg & 0x3) != 0) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; cx18_av_write4(cx, reg->reg & 
0x00000ffc, reg->val); return 0; } #endif static const struct v4l2_subdev_core_ops cx18_av_general_ops = { .g_chip_ident = cx18_av_g_chip_ident, .log_status = cx18_av_log_status, .load_fw = cx18_av_load_fw, .reset = cx18_av_reset, .queryctrl = cx18_av_queryctrl, .g_ctrl = cx18_av_g_ctrl, .s_ctrl = cx18_av_s_ctrl, .s_std = cx18_av_s_std, #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = cx18_av_g_register, .s_register = cx18_av_s_register, #endif }; static const struct v4l2_subdev_tuner_ops cx18_av_tuner_ops = { .s_radio = cx18_av_s_radio, .s_frequency = cx18_av_s_frequency, .g_tuner = cx18_av_g_tuner, .s_tuner = cx18_av_s_tuner, }; static const struct v4l2_subdev_audio_ops cx18_av_audio_ops = { .s_clock_freq = cx18_av_s_clock_freq, .s_routing = cx18_av_s_audio_routing, }; static const struct v4l2_subdev_video_ops cx18_av_video_ops = { .s_routing = cx18_av_s_video_routing, .s_stream = cx18_av_s_stream, .s_mbus_fmt = cx18_av_s_mbus_fmt, }; static const struct v4l2_subdev_vbi_ops cx18_av_vbi_ops = { .decode_vbi_line = cx18_av_decode_vbi_line, .g_sliced_fmt = cx18_av_g_sliced_fmt, .s_sliced_fmt = cx18_av_s_sliced_fmt, .s_raw_fmt = cx18_av_s_raw_fmt, }; static const struct v4l2_subdev_ops cx18_av_ops = { .core = &cx18_av_general_ops, .tuner = &cx18_av_tuner_ops, .audio = &cx18_av_audio_ops, .video = &cx18_av_video_ops, .vbi = &cx18_av_vbi_ops, }; int cx18_av_probe(struct cx18 *cx) { struct cx18_av_state *state = &cx->av_state; struct v4l2_subdev *sd; int err; state->rev = cx18_av_read4(cx, CXADEC_CHIP_CTRL) & 0xffff; state->id = ((state->rev >> 4) == CXADEC_CHIP_TYPE_MAKO) ? 
V4L2_IDENT_CX23418_843 : V4L2_IDENT_UNKNOWN; state->vid_input = CX18_AV_COMPOSITE7; state->aud_input = CX18_AV_AUDIO8; state->audclk_freq = 48000; state->audmode = V4L2_TUNER_MODE_LANG1; state->slicer_line_delay = 0; state->slicer_line_offset = (10 + state->slicer_line_delay - 2); sd = &state->sd; v4l2_subdev_init(sd, &cx18_av_ops); v4l2_set_subdevdata(sd, cx); snprintf(sd->name, sizeof(sd->name), "%s %03x", cx->v4l2_dev.name, (state->rev >> 4)); sd->grp_id = CX18_HW_418_AV; err = v4l2_device_register_subdev(&cx->v4l2_dev, sd); if (!err) cx18_av_init(cx); return err; }
gpl-2.0
CML/GP0-2.6.35-Kernel
drivers/media/video/cx18/cx18-av-core.c
1102
41632
/*
 * cx18 ADEC audio functions
 *
 * Derived from cx25840-core.c
 *
 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <media/v4l2-chip-ident.h>
#include "cx18-driver.h"
#include "cx18-io.h"
#include "cx18-cards.h"

/*
 * The A/V decoder core's registers are mapped into the CX23418's register
 * space at offset 0xc40000.  The bus only does aligned 32-bit accesses, so
 * the byte-wide helpers below emulate 8-bit register access with a
 * read-modify-write cycle on the enclosing aligned 32-bit word.
 */

/* Write one byte to A/V decoder register 'addr' (read-modify-write). */
int cx18_av_write(struct cx18 *cx, u16 addr, u8 value)
{
	u32 reg = 0xc40000 + (addr & ~3);
	u32 mask = 0xff;
	int shift = (addr & 3) * 8;
	u32 x = cx18_read_reg(cx, reg);

	x = (x & ~(mask << shift)) | ((u32)value << shift);
	cx18_write_reg(cx, x, reg);
	return 0;
}

/*
 * Write one byte and verify that the bits selected by 'mask' read back as
 * 'eval' (retry semantics come from cx18_write_reg_expect()).
 */
int cx18_av_write_expect(struct cx18 *cx, u16 addr, u8 value, u8 eval, u8 mask)
{
	u32 reg = 0xc40000 + (addr & ~3);
	int shift = (addr & 3) * 8;
	u32 x = cx18_read_reg(cx, reg);

	x = (x & ~((u32)0xff << shift)) | ((u32)value << shift);
	cx18_write_reg_expect(cx, x, reg,
			      ((u32)eval << shift), ((u32)mask << shift));
	return 0;
}

/* Write a full 32-bit A/V decoder register ('addr' assumed aligned). */
int cx18_av_write4(struct cx18 *cx, u16 addr, u32 value)
{
	cx18_write_reg(cx, value, 0xc40000 + addr);
	return 0;
}

/* 32-bit write with read-back verification against 'eval'/'mask'. */
int cx18_av_write4_expect(struct cx18 *cx, u16 addr, u32 value, u32 eval,
			  u32 mask)
{
	cx18_write_reg_expect(cx, value, 0xc40000 + addr, eval, mask);
	return 0;
}

/* 32-bit write without the MMIO retry logic. */
int cx18_av_write4_noretry(struct cx18 *cx, u16 addr, u32 value)
{
	cx18_write_reg_noretry(cx, value, 0xc40000 + addr);
	return 0;
}

/* Read one byte from A/V decoder register 'addr'. */
u8 cx18_av_read(struct cx18 *cx, u16 addr)
{
	u32 x = cx18_read_reg(cx, 0xc40000 + (addr & ~3));
	int shift = (addr & 3) * 8;

	return (x >> shift) & 0xff;
}

/* Read a full 32-bit A/V decoder register. */
u32 cx18_av_read4(struct cx18 *cx, u16 addr)
{
	return cx18_read_reg(cx, 0xc40000 + addr);
}

/* Byte-wide read-modify-write: new = (old & and_mask) | or_value. */
int cx18_av_and_or(struct cx18 *cx, u16 addr, unsigned and_mask, u8 or_value)
{
	return cx18_av_write(cx, addr,
			     (cx18_av_read(cx, addr) & and_mask) | or_value);
}

/* 32-bit read-modify-write: new = (old & and_mask) | or_value. */
int cx18_av_and_or4(struct cx18 *cx, u16 addr, u32 and_mask, u32 or_value)
{
	return cx18_av_write4(cx, addr,
			      (cx18_av_read4(cx, addr) & and_mask) | or_value);
}

/* Program the video and aux/audio PLLs to their default rates. */
static void cx18_av_init(struct cx18 *cx)
{
	/*
	 * The crystal freq used in calculations in this driver will be
	 * 28.636360 MHz.
	 * Aim to run the PLLs' VCOs near 400 MHz to minimize errors.
	 */

	/*
	 * VDCLK  Integer = 0x0f, Post Divider = 0x04
	 * AIMCLK Integer = 0x0e, Post Divider = 0x16
	 */
	cx18_av_write4(cx, CXADEC_PLL_CTRL1, 0x160e040f);

	/* VDCLK Fraction = 0x2be2fe */
	/* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz before post divide */
	cx18_av_write4(cx, CXADEC_VID_PLL_FRAC, 0x002be2fe);

	/* AIMCLK Fraction = 0x05227ad */
	/* xtal * 0xe.2913d68/0x16 = 48000 * 384: 406 MHz pre post-div*/
	cx18_av_write4(cx, CXADEC_AUX_PLL_FRAC, 0x005227ad);

	/* SA_MCLK_SEL=1, SA_MCLK_DIV=0x16 */
	cx18_av_write(cx, CXADEC_I2S_MCLK, 0x56);
}

/*
 * One-time hardware bring-up of the A/V decoder core: load firmware, pulse
 * the PLL sleep bit and DLL FLD bits, set AFE bias/clamp defaults, enable
 * the BT.656 output, and leave the chip ready for cx18_av_std_setup().
 */
static void cx18_av_initialize(struct v4l2_subdev *sd)
{
	struct cx18_av_state *state = to_cx18_av_state(sd);
	struct cx18 *cx = v4l2_get_subdevdata(sd);
	u32 v;

	cx18_av_loadfw(cx);
	/* Stop 8051 code execution */
	cx18_av_write4_expect(cx, CXADEC_DL_CTL, 0x03000000,
			      0x03000000, 0x13000000);

	/* initialize the PLL by toggling sleep bit */
	v = cx18_av_read4(cx, CXADEC_HOST_REG1);
	/* enable sleep mode - register appears to be read only... */
	cx18_av_write4_expect(cx, CXADEC_HOST_REG1, v | 1, v, 0xfffe);

	/* disable sleep mode */
	cx18_av_write4_expect(cx, CXADEC_HOST_REG1, v & 0xfffe,
			      v & 0xfffe, 0xffff);

	/* initialize DLLs */
	v = cx18_av_read4(cx, CXADEC_DLL1_DIAG_CTRL) & 0xE1FFFEFF;
	/* disable FLD */
	cx18_av_write4(cx, CXADEC_DLL1_DIAG_CTRL, v);
	/* enable FLD */
	cx18_av_write4(cx, CXADEC_DLL1_DIAG_CTRL, v | 0x10000100);

	v = cx18_av_read4(cx, CXADEC_DLL2_DIAG_CTRL) & 0xE1FFFEFF;
	/* disable FLD */
	cx18_av_write4(cx, CXADEC_DLL2_DIAG_CTRL, v);
	/* enable FLD */
	cx18_av_write4(cx, CXADEC_DLL2_DIAG_CTRL, v | 0x06000100);

	/* set analog bias currents. Set Vreg to 1.20V. */
	cx18_av_write4(cx, CXADEC_AFE_DIAG_CTRL1, 0x000A1802);

	v = cx18_av_read4(cx, CXADEC_AFE_DIAG_CTRL3) | 1;
	/* enable TUNE_FIL_RST */
	cx18_av_write4_expect(cx, CXADEC_AFE_DIAG_CTRL3, v, v, 0x03009F0F);
	/* disable TUNE_FIL_RST */
	cx18_av_write4_expect(cx, CXADEC_AFE_DIAG_CTRL3,
			      v & 0xFFFFFFFE, v & 0xFFFFFFFE, 0x03009F0F);

	/* enable 656 output */
	cx18_av_and_or4(cx, CXADEC_PIN_CTRL1, ~0, 0x040C00);

	/* video output drive strength */
	cx18_av_and_or4(cx, CXADEC_PIN_CTRL2, ~0, 0x2);

	/* reset video */
	cx18_av_write4(cx, CXADEC_SOFT_RST_CTRL, 0x8000);
	cx18_av_write4(cx, CXADEC_SOFT_RST_CTRL, 0);

	/*
	 * Disable Video Auto-config of the Analog Front End and Video PLL.
	 *
	 * Since we only use BT.656 pixel mode, which works for both 525 and 625
	 * line systems, it's just easier for us to set registers
	 * 0x102 (CXADEC_CHIP_CTRL), 0x104-0x106 (CXADEC_AFE_CTRL),
	 * 0x108-0x109 (CXADEC_PLL_CTRL1), and 0x10c-0x10f (CXADEC_VID_PLL_FRAC)
	 * ourselves, than to run around cleaning up after the auto-config.
	 *
	 * (Note: my CX23418 chip doesn't seem to let the ACFG_DIS bit
	 * get set to 1, but OTOH, it doesn't seem to do AFE and VID PLL
	 * autoconfig either.)
	 *
	 * As a default, also turn off Dual mode for ADC2 and set ADC2 to CH3.
	 */
	cx18_av_and_or4(cx, CXADEC_CHIP_CTRL, 0xFFFBFFFF, 0x00120000);

	/* Setup the Video and and Aux/Audio PLLs */
	cx18_av_init(cx);

	/* set video to auto-detect */
	/* Clear bits 11-12 to enable slow locking mode.  Set autodetect mode */
	/* set the comb notch = 1 */
	cx18_av_and_or4(cx, CXADEC_MODE_CTRL, 0xFFF7E7F0, 0x02040800);

	/* Enable wtw_en in CRUSH_CTRL (Set bit 22) */
	/* Enable maj_sel in CRUSH_CTRL (Set bit 20) */
	cx18_av_and_or4(cx, CXADEC_CRUSH_CTRL, ~0, 0x00500000);

	/* Set VGA_TRACK_RANGE to 0x20 */
	cx18_av_and_or4(cx, CXADEC_DFE_CTRL2, 0xFFFF00FF, 0x00002000);

	/*
	 * Initial VBI setup
	 * VIP-1.1, 10 bit mode, enable Raw, disable sliced,
	 * don't clamp raw samples when codes are in use, 1 byte user D-words,
	 * IDID0 has line #, RP code V bit transition on VBLANK, data during
	 * blanking intervals
	 */
	cx18_av_write4(cx, CXADEC_OUT_CTRL1, 0x4013252e);

	/* Set the video input.
	   The setting in MODE_CTRL gets lost when we do the above setup */
	/* EncSetSignalStd(dwDevNum, pEnc->dwSigStd); */
	/* EncSetVideoInput(dwDevNum, pEnc->VidIndSelection); */

	/*
	 * Analog Front End (AFE)
	 * Default to luma on ch1/ADC1, chroma on ch2/ADC2, SIF on ch3/ADC2
	 *  bypass_ch[1-3]     use filter
	 *  droop_comp_ch[1-3] disable
	 *  clamp_en_ch[1-3]   disable
	 *  aud_in_sel         ADC2
	 *  luma_in_sel        ADC1
	 *  chroma_in_sel      ADC2
	 *  clamp_sel_ch[2-3]  midcode
	 *  clamp_sel_ch1      video decoder
	 *  vga_sel_ch3        audio decoder
	 *  vga_sel_ch[1-2]    video decoder
	 *  half_bw_ch[1-3]    disable
	 *  +12db_ch[1-3]      disable
	 */
	cx18_av_and_or4(cx, CXADEC_AFE_CTRL, 0xFF000000, 0x00005D00);

/*	if(dwEnable && dw3DCombAvailable) { */
/*		CxDevWrReg(CXADEC_SRC_COMB_CFG, 0x7728021F); */
/*	} else { */
/*		CxDevWrReg(CXADEC_SRC_COMB_CFG, 0x6628021F); */
/*	} */
	cx18_av_write4(cx, CXADEC_SRC_COMB_CFG, 0x6628021F);

	/* Derive the default volume control setting from the h/w default. */
	state->default_volume = 228 - cx18_av_read(cx, 0x8d4);
	state->default_volume = ((state->default_volume / 2) + 23) << 9;
}

/* v4l2_subdev core op: reset the decoder by re-running initialization. */
static int cx18_av_reset(struct v4l2_subdev *sd, u32 val)
{
	cx18_av_initialize(sd);
	return 0;
}

/* v4l2_subdev core op: lazy firmware load / initialization on first use. */
static int cx18_av_load_fw(struct v4l2_subdev *sd)
{
	struct cx18_av_state *state = to_cx18_av_state(sd);

	if (!state->is_initialized) {
		/* initialize on first use */
		state->is_initialized = 1;
		cx18_av_initialize(sd);
	}
	return 0;
}

/*
 * Program the video timing registers (0x470-0x47f) for the current video
 * standard in state->std: blanking intervals, active region, burst gate,
 * SRC decimation, low pass filters, comb filter and subcarrier step.
 */
void cx18_av_std_setup(struct cx18 *cx)
{
	struct cx18_av_state *state = &cx->av_state;
	struct v4l2_subdev *sd = &state->sd;
	v4l2_std_id std = state->std;

	/*
	 * Video ADC crystal clock to pixel clock SRC decimation ratio
	 * 28.636360 MHz/13.5 Mpps * 256 = 0x21f.07b
	 */
	const int src_decimation = 0x21f;

	int hblank, hactive, burst, vblank, vactive, sc;
	int vblank656;
	int luma_lpf, uv_lpf, comb;
	u32 pll_int, pll_frac, pll_post;

	/* datasheet startup, step 8d */
	if (std & ~V4L2_STD_NTSC)
		cx18_av_write(cx, 0x49f, 0x11);
	else
		cx18_av_write(cx, 0x49f, 0x14);

	/*
	 * Note: At the end of a field, there are 3 sets of half line duration
	 * (double horizontal rate) pulses:
	 *
	 * 5 (625) or 6 (525) half-lines to blank for the vertical retrace
	 * 5 (625) or 6 (525) vertical sync pulses of half line duration
	 * 5 (625) or 6 (525) half-lines of equalization pulses
	 */
	if (std & V4L2_STD_625_50) {
		/*
		 * The following relationships of half line counts should hold:
		 * 625 = vblank656 + vactive
		 * 10 = vblank656 - vblank = vsync pulses + equalization pulses
		 *
		 * vblank656: half lines after line 625/mid-313 of blanked video
		 * vblank:    half lines, after line 5/317, of blanked video
		 * vactive:   half lines of active video +
		 *		5 half lines after the end of active video
		 *
		 * As far as I can tell:
		 * vblank656 starts counting from the falling edge of the first
		 *	vsync pulse (start of line 1 or mid-313)
		 * vblank starts counting from the after the 5 vsync pulses and
		 *	5 or 4 equalization pulses (start of line 6 or 318)
		 *
		 * For 625 line systems the driver will extract VBI information
		 * from lines 6-23 and lines 318-335 (but the slicer can only
		 * handle 17 lines, not the 18 in the vblank region).
		 * In addition, we need vblank656 and vblank to be one whole
		 * line longer, to cover line 24 and 336, so the SAV/EAV RP
		 * codes get generated such that the encoder can actually
		 * extract line 23 & 335 (WSS).  We'll lose 1 line in each field
		 * at the top of the screen.
		 *
		 * It appears the 5 half lines that happen after active
		 * video must be included in vactive (579 instead of 574),
		 * otherwise the colors get badly displayed in various regions
		 * of the screen.  I guess the chroma comb filter gets confused
		 * without them (at least when a PVR-350 is the PAL source).
		 */
		vblank656 = 48; /* lines  1 -  24  &  313 - 336 */
		vblank = 38;    /* lines  6 -  24  &  318 - 336 */
		vactive = 579;  /* lines 24 - 313  &  337 - 626 */

		/*
		 * For a 13.5 Mpps clock and 15,625 Hz line rate, a line is
		 * is 864 pixels = 720 active + 144 blanking.  ITU-R BT.601
		 * specifies 12 luma clock periods or ~ 0.9 * 13.5 Mpps after
		 * the end of active video to start a horizontal line, so that
		 * leaves 132 pixels of hblank to ignore.
		 */
		hblank = 132;
		hactive = 720;

		/*
		 * Burst gate delay (for 625 line systems)
		 * Hsync leading edge to color burst rise = 5.6 us
		 * Color burst width = 2.25 us
		 * Gate width = 4 pixel clocks
		 * (5.6 us + 2.25/2 us) * 13.5 Mpps + 4/2 clocks = 92.79 clocks
		 */
		burst = 93;
		luma_lpf = 2;
		if (std & V4L2_STD_PAL) {
			uv_lpf = 1;
			comb = 0x20;
			/* sc = 4433618.75 * src_decimation/28636360 * 2^13 */
			sc = 688700;
		} else if (std == V4L2_STD_PAL_Nc) {
			uv_lpf = 1;
			comb = 0x20;
			/* sc = 3582056.25 * src_decimation/28636360 * 2^13 */
			sc = 556422;
		} else { /* SECAM */
			uv_lpf = 0;
			comb = 0;
			/* (fr + fb)/2 = (4406260 + 4250000)/2 = 4328130 */
			/* sc = 4328130 * src_decimation/28636360 * 2^13 */
			sc = 672314;
		}
	} else {
		/*
		 * The following relationships of half line counts should hold:
		 * 525 = prevsync + vblank656 + vactive
		 * 12 = vblank656 - vblank = vsync pulses + equalization pulses
		 *
		 * prevsync:  6 half-lines before the vsync pulses
		 * vblank656: half lines, after line 3/mid-266, of blanked video
		 * vblank:    half lines, after line 9/272, of blanked video
		 * vactive:   half lines of active video
		 *
		 * As far as I can tell:
		 * vblank656 starts counting from the falling edge of the first
		 *	vsync pulse (start of line 4 or mid-266)
		 * vblank starts counting from the after the 6 vsync pulses and
		 *	6 or 5 equalization pulses (start of line 10 or 272)
		 *
		 * For 525 line systems the driver will extract VBI information
		 * from lines 10-21 and lines 273-284.
		 */
		vblank656 = 38; /* lines  4 -  22  &  266 - 284 */
		vblank = 26;	/* lines 10 -  22  &  272 - 284 */
		vactive = 481;  /* lines 23 - 263  &  285 - 525 */

		/*
		 * For a 13.5 Mpps clock and 15,734.26 Hz line rate, a line is
		 * is 858 pixels = 720 active + 138 blanking.  The Hsync leading
		 * edge should happen 1.2 us * 13.5 Mpps ~= 16 pixels after the
		 * end of active video, leaving 122 pixels of hblank to ignore
		 * before active video starts.
		 */
		hactive = 720;
		hblank = 122;
		luma_lpf = 1;
		uv_lpf = 1;

		/*
		 * Burst gate delay (for 525 line systems)
		 * Hsync leading edge to color burst rise = 5.3 us
		 * Color burst width = 2.5 us
		 * Gate width = 4 pixel clocks
		 * (5.3 us + 2.5/2 us) * 13.5 Mpps + 4/2 clocks = 90.425 clocks
		 */
		if (std == V4L2_STD_PAL_60) {
			burst = 90;
			luma_lpf = 2;
			comb = 0x20;
			/* sc = 4433618.75 * src_decimation/28636360 * 2^13 */
			sc = 688700;
		} else if (std == V4L2_STD_PAL_M) {
			/* The 97 needs to be verified against PAL-M timings */
			burst = 97;
			comb = 0x20;
			/* sc = 3575611.49 * src_decimation/28636360 * 2^13 */
			sc = 555421;
		} else {
			burst = 90;
			comb = 0x66;
			/* sc = 3579545.45.. * src_decimation/28636360 * 2^13 */
			sc = 556032;
		}
	}

	/* DEBUG: Displays configured PLL frequency */
	pll_int = cx18_av_read(cx, 0x108);
	pll_frac = cx18_av_read4(cx, 0x10c) & 0x1ffffff;
	pll_post = cx18_av_read(cx, 0x109);
	CX18_DEBUG_INFO_DEV(sd, "PLL regs = int: %u, frac: %u, post: %u\n",
			    pll_int, pll_frac, pll_post);

	if (pll_post) {
		int fsc, pll;
		u64 tmp;

		pll = (28636360L * ((((u64)pll_int) << 25) + pll_frac)) >> 25;
		pll /= pll_post;
		CX18_DEBUG_INFO_DEV(sd, "Video PLL = %d.%06d MHz\n",
				    pll / 1000000, pll % 1000000);
		CX18_DEBUG_INFO_DEV(sd, "Pixel rate = %d.%06d Mpixel/sec\n",
				    pll / 8000000, (pll / 8) % 1000000);

		CX18_DEBUG_INFO_DEV(sd, "ADC XTAL/pixel clock decimation ratio "
				    "= %d.%03d\n", src_decimation / 256,
				    ((src_decimation % 256) * 1000) / 256);

		tmp = 28636360 * (u64) sc;
		do_div(tmp, src_decimation);
		fsc = tmp >> 13;
		CX18_DEBUG_INFO_DEV(sd,
				    "Chroma sub-carrier initial freq = %d.%06d "
				    "MHz\n", fsc / 1000000, fsc % 1000000);

		CX18_DEBUG_INFO_DEV(sd, "hblank %i, hactive %i, "
				    "vblank %i, vactive %i, vblank656 %i, "
				    "src_dec %i, burst 0x%02x, luma_lpf %i, "
				    "uv_lpf %i, comb 0x%02x, sc 0x%06x\n",
				    hblank, hactive, vblank, vactive, vblank656,
				    src_decimation, burst, luma_lpf, uv_lpf,
				    comb, sc);
	}

	/* Sets horizontal blanking delay and active lines */
	cx18_av_write(cx, 0x470, hblank);
	cx18_av_write(cx, 0x471, 0xff & (((hblank >> 8) & 0x3) |
					 (hactive << 4)));
	cx18_av_write(cx, 0x472, hactive >> 4);

	/* Sets burst gate delay */
	cx18_av_write(cx, 0x473, burst);

	/* Sets vertical blanking delay and active duration */
	cx18_av_write(cx, 0x474, vblank);
	cx18_av_write(cx, 0x475, 0xff & (((vblank >> 8) & 0x3) |
					 (vactive << 4)));
	cx18_av_write(cx, 0x476, vactive >> 4);
	cx18_av_write(cx, 0x477, vblank656);

	/* Sets src decimation rate */
	cx18_av_write(cx, 0x478, 0xff & src_decimation);
	cx18_av_write(cx, 0x479, 0xff & (src_decimation >> 8));

	/* Sets Luma and UV Low pass filters */
	cx18_av_write(cx, 0x47a, luma_lpf << 6 | ((uv_lpf << 4) & 0x30));

	/* Enables comb filters */
	cx18_av_write(cx, 0x47b, comb);

	/* Sets SC Step*/
	cx18_av_write(cx, 0x47c, sc);
	cx18_av_write(cx, 0x47d, 0xff & sc >> 8);
	cx18_av_write(cx, 0x47e, 0xff & sc >> 16);

	/* VBI slicer line number offsets differ for 50 vs 60 Hz standards. */
	if (std & V4L2_STD_625_50) {
		state->slicer_line_delay = 1;
		state->slicer_line_offset = (6 + state->slicer_line_delay - 2);
	} else {
		state->slicer_line_delay = 0;
		state->slicer_line_offset = (10 + state->slicer_line_delay - 2);
	}
	cx18_av_write(cx, 0x47f, state->slicer_line_delay);
}

/*
 * Re-run the datasheet's input change sequence: reprogram the audio
 * standard for the current video standard and restart the audio
 * microcontroller if it was running.
 */
static void input_change(struct cx18 *cx)
{
	struct cx18_av_state *state = &cx->av_state;
	v4l2_std_id std = state->std;
	u8 v;

	/* Follow step 8c and 8d of section 3.16 in the cx18_av datasheet */
	cx18_av_write(cx, 0x49f, (std & V4L2_STD_NTSC) ? 0x14 : 0x11);
	cx18_av_and_or(cx, 0x401, ~0x60, 0);
	cx18_av_and_or(cx, 0x401, ~0x60, 0x60);

	if (std & V4L2_STD_525_60) {
		if (std == V4L2_STD_NTSC_M_JP) {
			/* Japan uses EIAJ audio standard */
			cx18_av_write_expect(cx, 0x808, 0xf7, 0xf7, 0xff);
			cx18_av_write_expect(cx, 0x80b, 0x02, 0x02, 0x3f);
		} else if (std == V4L2_STD_NTSC_M_KR) {
			/* South Korea uses A2 audio standard */
			cx18_av_write_expect(cx, 0x808, 0xf8, 0xf8, 0xff);
			cx18_av_write_expect(cx, 0x80b, 0x03, 0x03, 0x3f);
		} else {
			/* Others use the BTSC audio standard */
			cx18_av_write_expect(cx, 0x808, 0xf6, 0xf6, 0xff);
			cx18_av_write_expect(cx, 0x80b, 0x01, 0x01, 0x3f);
		}
	} else if (std & V4L2_STD_PAL) {
		/* Follow tuner change procedure for PAL */
		cx18_av_write_expect(cx, 0x808, 0xff, 0xff, 0xff);
		cx18_av_write_expect(cx, 0x80b, 0x03, 0x03, 0x3f);
	} else if (std & V4L2_STD_SECAM) {
		/* Select autodetect for SECAM */
		cx18_av_write_expect(cx, 0x808, 0xff, 0xff, 0xff);
		cx18_av_write_expect(cx, 0x80b, 0x03, 0x03, 0x3f);
	}

	v = cx18_av_read(cx, 0x803);
	if (v & 0x10) {
		/* restart audio decoder microcontroller */
		v &= ~0x10;
		cx18_av_write_expect(cx, 0x803, v, v, 0x1f);
		v |= 0x10;
		cx18_av_write_expect(cx, 0x803, v, v, 0x1f);
	}
}

/* v4l2_subdev tuner op: a frequency change needs the input change sequence. */
static int cx18_av_s_frequency(struct v4l2_subdev *sd,
			       struct v4l2_frequency *freq)
{
	struct cx18 *cx = v4l2_get_subdevdata(sd);
	input_change(cx);
	return 0;
}

/*
 * Validate and apply a video/audio input combination: program the AFE input
 * muxes, the decoder input mode, ADC2 channel/dual-mode selection, and the
 * per-channel AFE configuration, then re-run the input change sequence.
 */
static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
		     enum cx18_av_audio_input aud_input)
{
	struct cx18_av_state *state = &cx->av_state;
	struct v4l2_subdev *sd = &state->sd;

	/* Signal type assigned to each of the 3 AFE input channels */
	enum analog_signal_type {
		NONE, CVBS, Y, C, SIF, Pb, Pr
	} ch[3] = {NONE, NONE, NONE};

	u8 afe_mux_cfg;
	u8 adc2_cfg;
	u8 input_mode;
	u32 afe_cfg;
	int i;

	CX18_DEBUG_INFO_DEV(sd, "decoder set video input %d, audio input %d\n",
			    vid_input, aud_input);

	if (vid_input >= CX18_AV_COMPOSITE1 &&
	    vid_input <= CX18_AV_COMPOSITE8) {
		afe_mux_cfg = 0xf0 + (vid_input - CX18_AV_COMPOSITE1);
		ch[0] = CVBS;
		input_mode = 0x0;
	} else if (vid_input >= CX18_AV_COMPONENT_LUMA1) {
		/* Component video: luma/R-chroma/B-chroma encoded in nibbles */
		int luma = vid_input & 0xf000;
		int r_chroma = vid_input & 0xf0000;
		int b_chroma = vid_input & 0xf00000;

		if ((vid_input & ~0xfff000) ||
		    luma < CX18_AV_COMPONENT_LUMA1 ||
		    luma > CX18_AV_COMPONENT_LUMA8 ||
		    r_chroma < CX18_AV_COMPONENT_R_CHROMA4 ||
		    r_chroma > CX18_AV_COMPONENT_R_CHROMA6 ||
		    b_chroma < CX18_AV_COMPONENT_B_CHROMA7 ||
		    b_chroma > CX18_AV_COMPONENT_B_CHROMA8) {
			CX18_ERR_DEV(sd, "0x%06x is not a valid video input!\n",
				     vid_input);
			return -EINVAL;
		}
		afe_mux_cfg = (luma - CX18_AV_COMPONENT_LUMA1) >> 12;
		ch[0] = Y;
		afe_mux_cfg |= (r_chroma - CX18_AV_COMPONENT_R_CHROMA4) >> 12;
		ch[1] = Pr;
		afe_mux_cfg |= (b_chroma - CX18_AV_COMPONENT_B_CHROMA7) >> 14;
		ch[2] = Pb;
		input_mode = 0x6;
	} else {
		/* S-Video: luma/chroma encoded in nibbles */
		int luma = vid_input & 0xf0;
		int chroma = vid_input & 0xf00;

		if ((vid_input & ~0xff0) ||
		    luma < CX18_AV_SVIDEO_LUMA1 ||
		    luma > CX18_AV_SVIDEO_LUMA8 ||
		    chroma < CX18_AV_SVIDEO_CHROMA4 ||
		    chroma > CX18_AV_SVIDEO_CHROMA8) {
			CX18_ERR_DEV(sd, "0x%06x is not a valid video input!\n",
				     vid_input);
			return -EINVAL;
		}
		afe_mux_cfg = 0xf0 + ((luma - CX18_AV_SVIDEO_LUMA1) >> 4);
		ch[0] = Y;
		if (chroma >= CX18_AV_SVIDEO_CHROMA7) {
			afe_mux_cfg &= 0x3f;
			afe_mux_cfg |= (chroma - CX18_AV_SVIDEO_CHROMA7) >> 2;
			ch[2] = C;
		} else {
			afe_mux_cfg &= 0xcf;
			afe_mux_cfg |= (chroma - CX18_AV_SVIDEO_CHROMA4) >> 4;
			ch[1] = C;
		}
		input_mode = 0x2;
	}

	switch (aud_input) {
	case CX18_AV_AUDIO_SERIAL1:
	case CX18_AV_AUDIO_SERIAL2:
		/* do nothing, use serial audio input */
		break;
	case CX18_AV_AUDIO4:
		afe_mux_cfg &= ~0x30;
		ch[1] = SIF;
		break;
	case CX18_AV_AUDIO5:
		afe_mux_cfg = (afe_mux_cfg & ~0x30) | 0x10;
		ch[1] = SIF;
		break;
	case CX18_AV_AUDIO6:
		afe_mux_cfg = (afe_mux_cfg & ~0x30) | 0x20;
		ch[1] = SIF;
		break;
	case CX18_AV_AUDIO7:
		afe_mux_cfg &= ~0xc0;
		ch[2] = SIF;
		break;
	case CX18_AV_AUDIO8:
		afe_mux_cfg = (afe_mux_cfg & ~0xc0) | 0x40;
		ch[2] = SIF;
		break;
	default:
		CX18_ERR_DEV(sd, "0x%04x is not a valid audio input!\n",
			     aud_input);
		return -EINVAL;
	}

	/* Set up analog front end multiplexers */
	cx18_av_write_expect(cx, 0x103, afe_mux_cfg, afe_mux_cfg, 0xf7);
	/* Set INPUT_MODE to Composite, S-Video, or Component */
	cx18_av_and_or(cx, 0x401, ~0x6, input_mode);

	/* Set CH_SEL_ADC2 to 1 if input comes from CH3 */
	adc2_cfg = cx18_av_read(cx, 0x102);
	if (ch[2] == NONE)
		adc2_cfg &= ~0x2; /* No sig on CH3, set ADC2 to CH2 for input */
	else
		adc2_cfg |= 0x2;  /* Signal on CH3, set ADC2 to CH3 for input */

	/* Set DUAL_MODE_ADC2 to 1 if input comes from both CH2 and CH3 */
	if (ch[1] != NONE && ch[2] != NONE)
		adc2_cfg |= 0x4; /* Set dual mode */
	else
		adc2_cfg &= ~0x4; /* Clear dual mode */
	cx18_av_write_expect(cx, 0x102, adc2_cfg, adc2_cfg, 0x17);

	/* Configure the analog front end */
	afe_cfg = cx18_av_read4(cx, CXADEC_AFE_CTRL);
	afe_cfg &= 0xff000000;
	afe_cfg |= 0x00005000; /* CHROMA_IN, AUD_IN: ADC2; LUMA_IN: ADC1 */
	if (ch[1] != NONE && ch[2] != NONE)
		afe_cfg |= 0x00000030; /* half_bw_ch[2-3] since in dual mode */

	for (i = 0; i < 3; i++) {
		switch (ch[i]) {
		default:
		case NONE:
			/* CLAMP_SEL = Fixed to midcode clamp level */
			afe_cfg |= (0x00000200 << i);
			break;
		case CVBS:
		case Y:
			if (i > 0)
				afe_cfg |= 0x00002000; /* LUMA_IN_SEL: ADC2 */
			break;
		case C:
		case Pb:
		case Pr:
			/* CLAMP_SEL = Fixed to midcode clamp level */
			afe_cfg |= (0x00000200 << i);
			if (i == 0 && ch[i] == C)
				afe_cfg &= ~0x00001000; /* CHROMA_IN_SEL ADC1 */
			break;
		case SIF:
			/*
			 * VGA_GAIN_SEL = Audio Decoder
			 * CLAMP_SEL = Fixed to midcode clamp level
			 */
			afe_cfg |= (0x00000240 << i);
			if (i == 0)
				afe_cfg &= ~0x00004000; /* AUD_IN_SEL ADC1 */
			break;
		}
	}

	cx18_av_write4(cx, CXADEC_AFE_CTRL, afe_cfg);

	state->vid_input = vid_input;
	state->aud_input = aud_input;
	cx18_av_audio_set_path(cx);
	input_change(cx);
	return 0;
}

/* v4l2_subdev video op: change the video input, keep the audio input. */
static int cx18_av_s_video_routing(struct v4l2_subdev *sd,
				   u32 input, u32 output, u32 config)
{
	struct cx18_av_state *state = to_cx18_av_state(sd);
	struct cx18 *cx = v4l2_get_subdevdata(sd);
	return set_input(cx, input, state->aud_input);
}

/* v4l2_subdev audio op: change the audio input, keep the video input. */
static int cx18_av_s_audio_routing(struct v4l2_subdev *sd,
				   u32 input, u32 output, u32 config)
{
	struct cx18_av_state *state = to_cx18_av_state(sd);
	struct cx18 *cx = v4l2_get_subdevdata(sd);
	return set_input(cx, state->vid_input, input);
}

/*
 * v4l2_subdev tuner op: report signal presence and the detected audio
 * sub-channels from the decoder's status registers.
 */
static int cx18_av_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
	struct cx18_av_state *state = to_cx18_av_state(sd);
	struct cx18 *cx = v4l2_get_subdevdata(sd);
	u8 vpres;
	u8 mode;
	int val = 0;

	if (state->radio)
		return 0;

	vpres = cx18_av_read(cx, 0x40e) & 0x20;
	vt->signal = vpres ? 0xffff : 0x0;

	vt->capability |=
		    V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 |
		    V4L2_TUNER_CAP_LANG2 | V4L2_TUNER_CAP_SAP;

	mode = cx18_av_read(cx, 0x804);

	/* get rxsubchans and audmode */
	if ((mode & 0xf) == 1)
		val |= V4L2_TUNER_SUB_STEREO;
	else
		val |= V4L2_TUNER_SUB_MONO;

	if (mode == 2 || mode == 4)
		val = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;

	if (mode & 0x10)
		val |= V4L2_TUNER_SUB_SAP;

	vt->rxsubchans = val;
	vt->audmode = state->audmode;
	return 0;
}

/* v4l2_subdev tuner op: select the preferred audio mode (register 0x809). */
static int cx18_av_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
	struct cx18_av_state *state = to_cx18_av_state(sd);
	struct cx18 *cx = v4l2_get_subdevdata(sd);
	u8 v;

	if (state->radio)
		return 0;

	v = cx18_av_read(cx, 0x809);
	v &= ~0xf;

	switch (vt->audmode) {
	case V4L2_TUNER_MODE_MONO:
		/*
		 * mono      -> mono
		 * stereo    -> mono
		 * bilingual -> lang1
		 */
		break;
	case V4L2_TUNER_MODE_STEREO:
	case V4L2_TUNER_MODE_LANG1:
		/*
		 * mono      -> mono
		 * stereo    -> stereo
		 * bilingual -> lang1
		 */
		v |= 0x4;
		break;
	case V4L2_TUNER_MODE_LANG1_LANG2:
		/*
		 * mono      -> mono
		 * stereo    -> stereo
		 * bilingual -> lang1/lang2
		 */
		v |= 0x7;
		break;
	case V4L2_TUNER_MODE_LANG2:
		/*
		 * mono      -> mono
		 * stereo    -> stereo
		 * bilingual -> lang2
		 */
		v |= 0x1;
		break;
	default:
		return -EINVAL;
	}
	cx18_av_write_expect(cx, 0x809, v, v, 0xff);
	state->audmode = vt->audmode;
	return 0;
}

/*
 * v4l2_subdev core op: select the video standard.  Maps the V4L2 std to the
 * decoder's format code, then reprograms the timing registers.
 */
static int cx18_av_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
{
	struct cx18_av_state *state = to_cx18_av_state(sd);
	struct cx18 *cx = v4l2_get_subdevdata(sd);

	u8 fmt = 0; 	/* zero is autodetect */
	u8 pal_m = 0;

	if (state->radio == 0 && state->std == norm)
		return 0;

	state->radio = 0;
	state->std = norm;

	/* First tests should be against specific std */
	if (state->std == V4L2_STD_NTSC_M_JP) {
		fmt = 0x2;
	} else if (state->std == V4L2_STD_NTSC_443) {
		fmt = 0x3;
	} else if (state->std == V4L2_STD_PAL_M) {
		pal_m = 1;
		fmt = 0x5;
	} else if (state->std == V4L2_STD_PAL_N) {
		fmt = 0x6;
	} else if (state->std == V4L2_STD_PAL_Nc) {
		fmt = 0x7;
	} else if (state->std == V4L2_STD_PAL_60) {
		fmt = 0x8;
	} else {
		/* Then, test against generic ones */
		if (state->std & V4L2_STD_NTSC)
			fmt = 0x1;
		else if (state->std & V4L2_STD_PAL)
			fmt = 0x4;
		else if (state->std & V4L2_STD_SECAM)
			fmt = 0xc;
	}

	CX18_DEBUG_INFO_DEV(sd, "changing video std to fmt %i\n", fmt);

	/*
	 * Follow step 9 of section 3.16 in the cx18_av datasheet.
	 * Without this PAL may display a vertical ghosting effect.
	 * This happens for example with the Yuan MPC622.
	 */
	if (fmt >= 4 && fmt < 8) {
		/* Set format to NTSC-M */
		cx18_av_and_or(cx, 0x400, ~0xf, 1);
		/* Turn off LCOMB */
		cx18_av_and_or(cx, 0x47b, ~6, 0);
	}
	cx18_av_and_or(cx, 0x400, ~0x2f, fmt | 0x20);
	cx18_av_and_or(cx, 0x403, ~0x3, pal_m);
	cx18_av_std_setup(cx);
	input_change(cx);
	return 0;
}

/* v4l2_subdev tuner op: switch into radio mode (no h/w change here). */
static int cx18_av_s_radio(struct v4l2_subdev *sd)
{
	struct cx18_av_state *state = to_cx18_av_state(sd);
	state->radio = 1;
	return 0;
}

/* v4l2_subdev core op: set picture/audio controls, with range checking. */
static int cx18_av_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
	struct cx18 *cx = v4l2_get_subdevdata(sd);

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		if (ctrl->value < 0 || ctrl->value > 255) {
			CX18_ERR_DEV(sd, "invalid brightness setting %d\n",
				     ctrl->value);
			return -ERANGE;
		}

		/* register stores brightness as a signed offset from 128 */
		cx18_av_write(cx, 0x414, ctrl->value - 128);
		break;

	case V4L2_CID_CONTRAST:
		if (ctrl->value < 0 || ctrl->value > 127) {
			CX18_ERR_DEV(sd, "invalid contrast setting %d\n",
				     ctrl->value);
			return -ERANGE;
		}

		cx18_av_write(cx, 0x415, ctrl->value << 1);
		break;

	case V4L2_CID_SATURATION:
		if (ctrl->value < 0 || ctrl->value > 127) {
			CX18_ERR_DEV(sd, "invalid saturation setting %d\n",
				     ctrl->value);
			return -ERANGE;
		}

		/* U and V saturation are set together */
		cx18_av_write(cx, 0x420, ctrl->value << 1);
		cx18_av_write(cx, 0x421, ctrl->value << 1);
		break;

	case V4L2_CID_HUE:
		if (ctrl->value < -128 || ctrl->value > 127) {
			CX18_ERR_DEV(sd, "invalid hue setting %d\n",
				     ctrl->value);
			return -ERANGE;
		}

		cx18_av_write(cx, 0x422, ctrl->value);
		break;

	case V4L2_CID_AUDIO_VOLUME:
	case V4L2_CID_AUDIO_BASS:
	case V4L2_CID_AUDIO_TREBLE:
	case V4L2_CID_AUDIO_BALANCE:
	case V4L2_CID_AUDIO_MUTE:
		return cx18_av_audio_s_ctrl(cx, ctrl);

	default:
		return -EINVAL;
	}

	return 0;
}

/* v4l2_subdev core op: read back picture/audio controls from the h/w. */
static int cx18_av_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
	struct cx18 *cx = v4l2_get_subdevdata(sd);

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		ctrl->value = (s8)cx18_av_read(cx, 0x414) + 128;
		break;
	case V4L2_CID_CONTRAST:
		ctrl->value = cx18_av_read(cx, 0x415) >> 1;
		break;
	case V4L2_CID_SATURATION:
		ctrl->value = cx18_av_read(cx, 0x420) >> 1;
		break;
	case V4L2_CID_HUE:
		ctrl->value = (s8)cx18_av_read(cx, 0x422);
		break;
	case V4L2_CID_AUDIO_VOLUME:
	case V4L2_CID_AUDIO_BASS:
	case V4L2_CID_AUDIO_TREBLE:
	case V4L2_CID_AUDIO_BALANCE:
	case V4L2_CID_AUDIO_MUTE:
		return cx18_av_audio_g_ctrl(cx, ctrl);
	default:
		return -EINVAL;
	}

	return 0;
}

/* v4l2_subdev core op: describe the range/step/default of each control. */
static int cx18_av_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
{
	struct cx18_av_state *state = to_cx18_av_state(sd);

	switch (qc->id) {
	case V4L2_CID_BRIGHTNESS:
		return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
	case V4L2_CID_CONTRAST:
	case V4L2_CID_SATURATION:
		return v4l2_ctrl_query_fill(qc, 0, 127, 1, 64);
	case V4L2_CID_HUE:
		return v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
	default:
		break;
	}

	switch (qc->id) {
	case V4L2_CID_AUDIO_VOLUME:
		/* default comes from the h/w, captured in initialize() */
		return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100,
						state->default_volume);
	case V4L2_CID_AUDIO_MUTE:
		return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
	case V4L2_CID_AUDIO_BALANCE:
	case V4L2_CID_AUDIO_BASS:
	case V4L2_CID_AUDIO_TREBLE:
		return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 32768);
	default:
		return -EINVAL;
	}
	return -EINVAL;
}

/*
 * v4l2_subdev video op: set the output frame size by programming the
 * horizontal/vertical scalers relative to the source dimensions read back
 * from the timing registers set in cx18_av_std_setup().
 */
static int cx18_av_s_mbus_fmt(struct v4l2_subdev *sd,
			      struct v4l2_mbus_framefmt *fmt)
{
	struct cx18_av_state *state = to_cx18_av_state(sd);
	struct cx18 *cx = v4l2_get_subdevdata(sd);
	int HSC, VSC, Vsrc, Hsrc, filter, Vlines;
	int is_50Hz = !(state->std & V4L2_STD_525_60);

	if (fmt->code != V4L2_MBUS_FMT_FIXED)
		return -EINVAL;

	fmt->field = V4L2_FIELD_INTERLACED;
	fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;

	Vsrc = (cx18_av_read(cx, 0x476) & 0x3f) << 4;
	Vsrc |= (cx18_av_read(cx, 0x475) & 0xf0) >> 4;

	Hsrc = (cx18_av_read(cx, 0x472) & 0x3f) << 4;
	Hsrc |= (cx18_av_read(cx, 0x471) & 0xf0) >> 4;

	/*
	 * This adjustment reflects the excess of vactive, set in
	 * cx18_av_std_setup(), above standard values:
	 *
	 * 480 + 1 for 60 Hz systems
	 * 576 + 3 for 50 Hz systems
	 */
	Vlines = fmt->height + (is_50Hz ? 3 : 1);

	/*
	 * Invalid height and width scaling requests are:
	 * 1. width less than 1/16 of the source width
	 * 2. width greater than the source width
	 * 3. height less than 1/8 of the source height
	 * 4. height greater than the source height
	 */
	if ((fmt->width * 16 < Hsrc) || (Hsrc < fmt->width) ||
	    (Vlines * 8 < Vsrc) || (Vsrc < Vlines)) {
		CX18_ERR_DEV(sd, "%dx%d is not a valid size!\n",
			     fmt->width, fmt->height);
		return -ERANGE;
	}

	HSC = (Hsrc * (1 << 20)) / fmt->width - (1 << 20);
	VSC = (1 << 16) - (Vsrc * (1 << 9) / Vlines - (1 << 9));
	VSC &= 0x1fff;

	/* Horizontal filter strength depends on the downscale factor */
	if (fmt->width >= 385)
		filter = 0;
	else if (fmt->width > 192)
		filter = 1;
	else if (fmt->width > 96)
		filter = 2;
	else
		filter = 3;

	CX18_DEBUG_INFO_DEV(sd,
			    "decoder set size %dx%d -> scale  %ux%u\n",
			    fmt->width, fmt->height, HSC, VSC);

	/* HSCALE=HSC */
	cx18_av_write(cx, 0x418, HSC & 0xff);
	cx18_av_write(cx, 0x419, (HSC >> 8) & 0xff);
	cx18_av_write(cx, 0x41a, HSC >> 16);
	/* VSCALE=VSC */
	cx18_av_write(cx, 0x41c, VSC & 0xff);
	cx18_av_write(cx, 0x41d, VSC >> 8);
	/* VS_INTRLACE=1 VFILT=filter */
	cx18_av_write(cx, 0x41e, 0x8 | filter);
	return 0;
}

/* v4l2_subdev video op: gate the BT.656 output on or off. */
static int cx18_av_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct cx18 *cx = v4l2_get_subdevdata(sd);

	CX18_DEBUG_INFO_DEV(sd, "%s output\n", enable ? "enable" : "disable");
	if (enable) {
		cx18_av_write(cx, 0x115, 0x8c);
		cx18_av_write(cx, 0x116, 0x07);
	} else {
		cx18_av_write(cx, 0x115, 0x00);
		cx18_av_write(cx, 0x116, 0x00);
	}
	return 0;
}

/* Log the detected/configured video standard and input selection. */
static void log_video_status(struct cx18 *cx)
{
	static const char *const fmt_strs[] = {
		"0x0",
		"NTSC-M", "NTSC-J", "NTSC-4.43",
		"PAL-BDGHI", "PAL-M", "PAL-N", "PAL-Nc", "PAL-60",
		"0x9", "0xA", "0xB",
		"SECAM",
		"0xD", "0xE", "0xF"
	};

	struct cx18_av_state *state = &cx->av_state;
	struct v4l2_subdev *sd = &state->sd;
	u8 vidfmt_sel = cx18_av_read(cx, 0x400) & 0xf;
	u8 gen_stat1 = cx18_av_read(cx, 0x40d);
	u8 gen_stat2 = cx18_av_read(cx, 0x40e);
	int vid_input = state->vid_input;

	CX18_INFO_DEV(sd, "Video signal:              %spresent\n",
		      (gen_stat2 & 0x20) ? "" : "not ");
	CX18_INFO_DEV(sd, "Detected format:           %s\n",
		      fmt_strs[gen_stat1 & 0xf]);

	CX18_INFO_DEV(sd, "Specified standard:        %s\n",
		      vidfmt_sel ? fmt_strs[vidfmt_sel] : "automatic detection");

	if (vid_input >= CX18_AV_COMPOSITE1 &&
	    vid_input <= CX18_AV_COMPOSITE8) {
		CX18_INFO_DEV(sd, "Specified video input:     Composite %d\n",
			      vid_input - CX18_AV_COMPOSITE1 + 1);
	} else {
		CX18_INFO_DEV(sd, "Specified video input:     "
			      "S-Video (Luma In%d, Chroma In%d)\n",
			      (vid_input & 0xf0) >> 4,
			      (vid_input & 0xf00) >> 8);
	}

	CX18_INFO_DEV(sd, "Specified audioclock freq: %d Hz\n",
		      state->audclk_freq);
}

/* Decode and log the audio microcontroller's status registers. */
static void log_audio_status(struct cx18 *cx)
{
	struct cx18_av_state *state = &cx->av_state;
	struct v4l2_subdev *sd = &state->sd;
	u8 download_ctl = cx18_av_read(cx, 0x803);
	u8 mod_det_stat0 = cx18_av_read(cx, 0x804);
	u8 mod_det_stat1 = cx18_av_read(cx, 0x805);
	u8 audio_config = cx18_av_read(cx, 0x808);
	u8 pref_mode = cx18_av_read(cx, 0x809);
	u8 afc0 = cx18_av_read(cx, 0x80b);
	u8 mute_ctl = cx18_av_read(cx, 0x8d3);
	int aud_input = state->aud_input;
	char *p;

	switch (mod_det_stat0) {
	case 0x00: p = "mono"; break;
	case 0x01: p = "stereo"; break;
	case 0x02: p = "dual"; break;
	case 0x04: p = "tri"; break;
	case 0x10: p = "mono with SAP"; break;
	case 0x11: p = "stereo with SAP"; break;
	case 0x12: p = "dual with SAP"; break;
	case 0x14: p = "tri with SAP"; break;
	case 0xfe: p = "forced mode"; break;
	default: p = "not defined"; break;
	}
	CX18_INFO_DEV(sd, "Detected audio mode:       %s\n", p);

	switch (mod_det_stat1) {
	case 0x00: p = "not defined"; break;
	case 0x01: p = "EIAJ"; break;
	case 0x02: p = "A2-M"; break;
	case 0x03: p = "A2-BG"; break;
	case 0x04: p = "A2-DK1"; break;
	case 0x05: p = "A2-DK2"; break;
	case 0x06: p = "A2-DK3"; break;
	case 0x07: p = "A1 (6.0 MHz FM Mono)"; break;
	case 0x08: p = "AM-L"; break;
	case 0x09: p = "NICAM-BG"; break;
	case 0x0a: p = "NICAM-DK"; break;
	case 0x0b: p = "NICAM-I"; break;
	case 0x0c: p = "NICAM-L"; break;
	case 0x0d: p = "BTSC/EIAJ/A2-M Mono (4.5 MHz FMMono)"; break;
	case 0x0e: p = "IF FM Radio"; break;
	case 0x0f: p = "BTSC"; break;
	case 0x10: p = "detected chrominance"; break;
	case 0xfd: p = "unknown audio standard"; break;
	case 0xfe: p = "forced audio standard"; break;
	case 0xff: p = "no detected audio standard"; break;
	default: p = "not defined"; break;
	}
	CX18_INFO_DEV(sd, "Detected audio standard:   %s\n", p);
	CX18_INFO_DEV(sd, "Audio muted:               %s\n",
		      (mute_ctl & 0x2) ? "yes" : "no");
	CX18_INFO_DEV(sd, "Audio microcontroller:     %s\n",
		      (download_ctl & 0x10) ? "running" : "stopped");

	switch (audio_config >> 4) {
	case 0x00: p = "undefined"; break;
	case 0x01: p = "BTSC"; break;
	case 0x02: p = "EIAJ"; break;
	case 0x03: p = "A2-M"; break;
	case 0x04: p = "A2-BG"; break;
	case 0x05: p = "A2-DK1"; break;
	case 0x06: p = "A2-DK2"; break;
	case 0x07: p = "A2-DK3"; break;
	case 0x08: p = "A1 (6.0 MHz FM Mono)"; break;
	case 0x09: p = "AM-L"; break;
	case 0x0a: p = "NICAM-BG"; break;
	case 0x0b: p = "NICAM-DK"; break;
	case 0x0c: p = "NICAM-I"; break;
	case 0x0d: p = "NICAM-L"; break;
	case 0x0e: p = "FM radio"; break;
	case 0x0f: p = "automatic detection"; break;
	default: p = "undefined"; break;
	}
	CX18_INFO_DEV(sd, "Configured audio standard: %s\n", p);

	if ((audio_config >> 4) < 0xF) {
		switch (audio_config & 0xF) {
		case 0x00: p = "MONO1 (LANGUAGE A/Mono L+R channel for BTSC, EIAJ, A2)"; break;
		case 0x01: p = "MONO2 (LANGUAGE B)"; break;
		case 0x02: p = "MONO3 (STEREO forced MONO)"; break;
		case 0x03: p = "MONO4 (NICAM ANALOG-Language C/Analog Fallback)"; break;
		case 0x04: p = "STEREO"; break;
		case 0x05: p = "DUAL1 (AC)"; break;
		case 0x06: p = "DUAL2 (BC)"; break;
		case 0x07: p = "DUAL3 (AB)"; break;
		default: p = "undefined";
		}
		CX18_INFO_DEV(sd, "Configured audio mode:     %s\n", p);
	} else {
		switch (audio_config & 0xF) {
		case 0x00: p = "BG"; break;
		case 0x01: p = "DK1"; break;
		case 0x02: p = "DK2"; break;
		case 0x03: p = "DK3"; break;
		case 0x04: p = "I"; break;
		case 0x05: p = "L"; break;
		case 0x06: p = "BTSC"; break;
		case 0x07: p = "EIAJ"; break;
		case 0x08: p = "A2-M"; break;
		case 0x09: p = "FM Radio (4.5 MHz)"; break;
		case 0x0a: p = "FM Radio (5.5 MHz)"; break;
		case 0x0b: p = "S-Video"; break;
		case 0x0f: p = "automatic standard and mode detection"; break;
		default: p = "undefined"; break;
		}
		CX18_INFO_DEV(sd, "Configured audio system:   %s\n", p);
	}

	if (aud_input)
		CX18_INFO_DEV(sd, "Specified audio input:     Tuner (In%d)\n",
			      aud_input);
	else
		CX18_INFO_DEV(sd, "Specified audio input:     External\n");

	switch (pref_mode & 0xf) {
	case 0: p = "mono/language A"; break;
	case 1: p = "language B"; break;
	case 2: p = "language C"; break;
	case 3: p = "analog fallback"; break;
	case 4: p = "stereo"; break;
	case 5: p = "language AC"; break;
	case 6: p = "language BC"; break;
	case 7: p = "language AB"; break;
	default: p = "undefined"; break;
	}
	CX18_INFO_DEV(sd, "Preferred audio mode:      %s\n", p);

	if ((audio_config & 0xf) == 0xf) {
		switch ((afc0 >> 3) & 0x1) {
		case 0: p = "system DK"; break;
		case 1: p = "system L"; break;
		}
		CX18_INFO_DEV(sd, "Selected 65 MHz format:    %s\n", p);

		switch (afc0 & 0x7) {
		case 0: p = "Chroma"; break;
		case 1: p = "BTSC"; break;
		case 2: p = "EIAJ"; break;
		case 3: p = "A2-M"; break;
		case 4: p = "autodetect"; break;
		default: p = "undefined"; break;
		}
		CX18_INFO_DEV(sd, "Selected 45 MHz format:    %s\n", p);
	}
}

/* v4l2_subdev core op: dump decoder status to the kernel log. */
static int cx18_av_log_status(struct v4l2_subdev *sd)
{
	struct cx18 *cx = v4l2_get_subdevdata(sd);
	log_video_status(cx);
	log_audio_status(cx);
	return 0;
}

/* Match debug ioctls aimed at this subdevice (host chip, address 1). */
static inline int cx18_av_dbg_match(const struct v4l2_dbg_match *match)
{
	return match->type == V4L2_CHIP_MATCH_HOST && match->addr == 1;
}

/* v4l2_subdev core op: report chip identity and revision. */
static int cx18_av_g_chip_ident(struct v4l2_subdev *sd,
				struct v4l2_dbg_chip_ident *chip)
{
	struct cx18_av_state *state = to_cx18_av_state(sd);

	if (cx18_av_dbg_match(&chip->match)) {
		chip->ident = state->id;
		chip->revision = state->rev;
	}
	return 0;
}

#ifdef CONFIG_VIDEO_ADV_DEBUG
/* Debug register read; requires CAP_SYS_ADMIN and a word-aligned address. */
static int cx18_av_g_register(struct v4l2_subdev *sd,
			      struct v4l2_dbg_register *reg)
{
	struct cx18 *cx = v4l2_get_subdevdata(sd);

	if (!cx18_av_dbg_match(&reg->match))
		return -EINVAL;
	if ((reg->reg & 0x3) != 0)
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	reg->size = 4;
	reg->val = cx18_av_read4(cx, reg->reg & 0x00000ffc);
	return 0;
}

/* Debug register write; requires CAP_SYS_ADMIN and a word-aligned address. */
static int cx18_av_s_register(struct v4l2_subdev *sd,
			      struct v4l2_dbg_register *reg)
{
	struct cx18 *cx = v4l2_get_subdevdata(sd);

	if (!cx18_av_dbg_match(&reg->match))
		return -EINVAL;
	if ((reg->reg & 0x3) != 0)
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	cx18_av_write4(cx, reg->reg & 0x00000ffc, reg->val);
	return 0;
}
#endif

static const struct v4l2_subdev_core_ops cx18_av_general_ops = {
	.g_chip_ident = cx18_av_g_chip_ident,
	.log_status = cx18_av_log_status,
	.load_fw = cx18_av_load_fw,
	.reset = cx18_av_reset,
	.queryctrl = cx18_av_queryctrl,
	.g_ctrl = cx18_av_g_ctrl,
	.s_ctrl = cx18_av_s_ctrl,
	.s_std = cx18_av_s_std,
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.g_register = cx18_av_g_register,
	.s_register = cx18_av_s_register,
#endif
};

static const struct v4l2_subdev_tuner_ops cx18_av_tuner_ops = {
	.s_radio = cx18_av_s_radio,
	.s_frequency = cx18_av_s_frequency,
	.g_tuner = cx18_av_g_tuner,
	.s_tuner = cx18_av_s_tuner,
};

static const struct v4l2_subdev_audio_ops cx18_av_audio_ops = {
	.s_clock_freq = cx18_av_s_clock_freq,
	.s_routing = cx18_av_s_audio_routing,
};

static const struct v4l2_subdev_video_ops cx18_av_video_ops = {
	.s_routing = cx18_av_s_video_routing,
	.s_stream = cx18_av_s_stream,
	.s_mbus_fmt = cx18_av_s_mbus_fmt,
};

static const struct v4l2_subdev_vbi_ops cx18_av_vbi_ops = {
	.decode_vbi_line = cx18_av_decode_vbi_line,
	.g_sliced_fmt = cx18_av_g_sliced_fmt,
	.s_sliced_fmt = cx18_av_s_sliced_fmt,
	.s_raw_fmt = cx18_av_s_raw_fmt,
};

static const struct v4l2_subdev_ops cx18_av_ops = {
	.core = &cx18_av_general_ops,
	.tuner = &cx18_av_tuner_ops,
	.audio = &cx18_av_audio_ops,
	.video = &cx18_av_video_ops,
	.vbi = &cx18_av_vbi_ops,
};

/*
 * Probe: identify the A/V decoder core, set software state defaults, and
 * register it as a v4l2 subdevice of the cx18 driver.  Full h/w init is
 * deferred to cx18_av_load_fw() on first use.
 */
int cx18_av_probe(struct cx18 *cx)
{
	struct cx18_av_state *state = &cx->av_state;
	struct v4l2_subdev *sd;
	int err;

	state->rev = cx18_av_read4(cx, CXADEC_CHIP_CTRL) & 0xffff;
	state->id = ((state->rev >> 4) == CXADEC_CHIP_TYPE_MAKO)
		    ? V4L2_IDENT_CX23418_843 : V4L2_IDENT_UNKNOWN;

	state->vid_input = CX18_AV_COMPOSITE7;
	state->aud_input = CX18_AV_AUDIO8;
	state->audclk_freq = 48000;
	state->audmode = V4L2_TUNER_MODE_LANG1;
	state->slicer_line_delay = 0;
	state->slicer_line_offset = (10 + state->slicer_line_delay - 2);

	sd = &state->sd;
	v4l2_subdev_init(sd, &cx18_av_ops);
	v4l2_set_subdevdata(sd, cx);
	snprintf(sd->name, sizeof(sd->name),
		 "%s %03x", cx->v4l2_dev.name, (state->rev >> 4));
	sd->grp_id = CX18_HW_418_AV;
	err = v4l2_device_register_subdev(&cx->v4l2_dev, sd);
	if (!err)
		cx18_av_init(cx);
	return err;
}
gpl-2.0
XCage15/linux-1
arch/arm/kernel/fiq.c
1358
3657
/* * linux/arch/arm/kernel/fiq.c * * Copyright (C) 1998 Russell King * Copyright (C) 1998, 1999 Phil Blundell * * FIQ support written by Philip Blundell <philb@gnu.org>, 1998. * * FIQ support re-written by Russell King to be more generic * * We now properly support a method by which the FIQ handlers can * be stacked onto the vector. We still do not support sharing * the FIQ vector itself. * * Operation is as follows: * 1. Owner A claims FIQ: * - default_fiq relinquishes control. * 2. Owner A: * - inserts code. * - sets any registers, * - enables FIQ. * 3. Owner B claims FIQ: * - if owner A has a relinquish function. * - disable FIQs. * - saves any registers. * - returns zero. * 4. Owner B: * - inserts code. * - sets any registers, * - enables FIQ. * 5. Owner B releases FIQ: * - Owner A is asked to reacquire FIQ: * - inserts code. * - restores saved registers. * - enables FIQ. * 6. Goto 3 */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/seq_file.h> #include <asm/cacheflush.h> #include <asm/cp15.h> #include <asm/fiq.h> #include <asm/irq.h> #include <asm/traps.h> #define FIQ_OFFSET ({ \ extern void *vector_fiq_offset; \ (unsigned)&vector_fiq_offset; \ }) static unsigned long dfl_fiq_insn; static struct pt_regs dfl_fiq_regs; /* Default reacquire function * - we always relinquish FIQ control * - we always reacquire FIQ control */ static int fiq_def_op(void *ref, int relinquish) { if (!relinquish) { /* Restore default handler and registers */ local_fiq_disable(); set_fiq_regs(&dfl_fiq_regs); set_fiq_handler(&dfl_fiq_insn, sizeof(dfl_fiq_insn)); local_fiq_enable(); /* FIXME: notify irq controller to standard enable FIQs */ } return 0; } static struct fiq_handler default_owner = { .name = "default", .fiq_op = fiq_def_op, }; static struct fiq_handler *current_fiq = &default_owner; int show_fiq_list(struct seq_file *p, int prec) { if (current_fiq != &default_owner) seq_printf(p, "%*s: %s\n", prec, 
"FIQ", current_fiq->name); return 0; } void set_fiq_handler(void *start, unsigned int length) { void *base = vectors_page; unsigned offset = FIQ_OFFSET; memcpy(base + offset, start, length); if (!cache_is_vipt_nonaliasing()) flush_icache_range((unsigned long)base + offset, offset + length); flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length); } int claim_fiq(struct fiq_handler *f) { int ret = 0; if (current_fiq) { ret = -EBUSY; if (current_fiq->fiq_op != NULL) ret = current_fiq->fiq_op(current_fiq->dev_id, 1); } if (!ret) { f->next = current_fiq; current_fiq = f; } return ret; } void release_fiq(struct fiq_handler *f) { if (current_fiq != f) { pr_err("%s FIQ trying to release %s FIQ\n", f->name, current_fiq->name); dump_stack(); return; } do current_fiq = current_fiq->next; while (current_fiq->fiq_op(current_fiq->dev_id, 0)); } static int fiq_start; void enable_fiq(int fiq) { enable_irq(fiq + fiq_start); } void disable_fiq(int fiq) { disable_irq(fiq + fiq_start); } EXPORT_SYMBOL(set_fiq_handler); EXPORT_SYMBOL(__set_fiq_regs); /* defined in fiqasm.S */ EXPORT_SYMBOL(__get_fiq_regs); /* defined in fiqasm.S */ EXPORT_SYMBOL(claim_fiq); EXPORT_SYMBOL(release_fiq); EXPORT_SYMBOL(enable_fiq); EXPORT_SYMBOL(disable_fiq); void __init init_FIQ(int start) { unsigned offset = FIQ_OFFSET; dfl_fiq_insn = *(unsigned long *)(0xffff0000 + offset); get_fiq_regs(&dfl_fiq_regs); fiq_start = start; }
gpl-2.0
Davletvm/linux
arch/x86/platform/intel-mid/device_libs/platform_ipc.c
2126
1753
/* * platform_ipc.c: IPC platform library file * * (C) Copyright 2013 Intel Corporation * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/sfi.h> #include <linux/gpio.h> #include <asm/intel-mid.h> #include "platform_ipc.h" void __init ipc_device_handler(struct sfi_device_table_entry *pentry, struct devs_id *dev) { struct platform_device *pdev; void *pdata = NULL; static struct resource res __initdata = { .name = "IRQ", .flags = IORESOURCE_IRQ, }; pr_debug("IPC bus, name = %16.16s, irq = 0x%2x\n", pentry->name, pentry->irq); /* * We need to call platform init of IPC devices to fill misc_pdata * structure. It will be used in msic_init for initialization. */ if (dev != NULL) pdata = dev->get_platform_data(pentry); /* * On Medfield the platform device creation is handled by the MSIC * MFD driver so we don't need to do it here. */ if (intel_mid_has_msic()) return; pdev = platform_device_alloc(pentry->name, 0); if (pdev == NULL) { pr_err("out of memory for SFI platform device '%s'.\n", pentry->name); return; } res.start = pentry->irq; platform_device_add_resources(pdev, &res, 1); pdev->dev.platform_data = pdata; intel_scu_device_register(pdev); } static const struct devs_id pmic_audio_dev_id __initconst = { .name = "pmic_audio", .type = SFI_DEV_TYPE_IPC, .delay = 1, .device_handler = &ipc_device_handler, }; sfi_device(pmic_audio_dev_id);
gpl-2.0