repo_name
string
path
string
copies
string
size
string
content
string
license
string
coolbho3k/Xoom-OC
drivers/w1/slaves/w1_ds2431.c
9139
7299
/* * w1_ds2431.c - w1 family 2d (DS2431) driver * * Copyright (c) 2008 Bernhard Weirich <bernhard.weirich@riedel.net> * * Heavily inspired by w1_DS2433 driver from Ben Gardner <bgardner@wabtec.com> * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/types.h> #include <linux/delay.h> #include "../w1.h" #include "../w1_int.h" #include "../w1_family.h" #define W1_F2D_EEPROM_SIZE 128 #define W1_F2D_PAGE_COUNT 4 #define W1_F2D_PAGE_BITS 5 #define W1_F2D_PAGE_SIZE (1<<W1_F2D_PAGE_BITS) #define W1_F2D_PAGE_MASK 0x1F #define W1_F2D_SCRATCH_BITS 3 #define W1_F2D_SCRATCH_SIZE (1<<W1_F2D_SCRATCH_BITS) #define W1_F2D_SCRATCH_MASK (W1_F2D_SCRATCH_SIZE-1) #define W1_F2D_READ_EEPROM 0xF0 #define W1_F2D_WRITE_SCRATCH 0x0F #define W1_F2D_READ_SCRATCH 0xAA #define W1_F2D_COPY_SCRATCH 0x55 #define W1_F2D_TPROG_MS 11 #define W1_F2D_READ_RETRIES 10 #define W1_F2D_READ_MAXLEN 8 /* * Check the file size bounds and adjusts count as needed. * This would not be needed if the file size didn't reset to 0 after a write. */ static inline size_t w1_f2d_fix_count(loff_t off, size_t count, size_t size) { if (off > size) return 0; if ((off + count) > size) return size - off; return count; } /* * Read a block from W1 ROM two times and compares the results. * If they are equal they are returned, otherwise the read * is repeated W1_F2D_READ_RETRIES times. * * count must not exceed W1_F2D_READ_MAXLEN. 
*/ static int w1_f2d_readblock(struct w1_slave *sl, int off, int count, char *buf) { u8 wrbuf[3]; u8 cmp[W1_F2D_READ_MAXLEN]; int tries = W1_F2D_READ_RETRIES; do { wrbuf[0] = W1_F2D_READ_EEPROM; wrbuf[1] = off & 0xff; wrbuf[2] = off >> 8; if (w1_reset_select_slave(sl)) return -1; w1_write_block(sl->master, wrbuf, 3); w1_read_block(sl->master, buf, count); if (w1_reset_select_slave(sl)) return -1; w1_write_block(sl->master, wrbuf, 3); w1_read_block(sl->master, cmp, count); if (!memcmp(cmp, buf, count)) return 0; } while (--tries); dev_err(&sl->dev, "proof reading failed %d times\n", W1_F2D_READ_RETRIES); return -1; } static ssize_t w1_f2d_read_bin(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct w1_slave *sl = kobj_to_w1_slave(kobj); int todo = count; count = w1_f2d_fix_count(off, count, W1_F2D_EEPROM_SIZE); if (count == 0) return 0; mutex_lock(&sl->master->mutex); /* read directly from the EEPROM in chunks of W1_F2D_READ_MAXLEN */ while (todo > 0) { int block_read; if (todo >= W1_F2D_READ_MAXLEN) block_read = W1_F2D_READ_MAXLEN; else block_read = todo; if (w1_f2d_readblock(sl, off, block_read, buf) < 0) count = -EIO; todo -= W1_F2D_READ_MAXLEN; buf += W1_F2D_READ_MAXLEN; off += W1_F2D_READ_MAXLEN; } mutex_unlock(&sl->master->mutex); return count; } /* * Writes to the scratchpad and reads it back for verification. * Then copies the scratchpad to EEPROM. * The data must be aligned at W1_F2D_SCRATCH_SIZE bytes and * must be W1_F2D_SCRATCH_SIZE bytes long. * The master must be locked. 
* * @param sl The slave structure * @param addr Address for the write * @param len length must be <= (W1_F2D_PAGE_SIZE - (addr & W1_F2D_PAGE_MASK)) * @param data The data to write * @return 0=Success -1=failure */ static int w1_f2d_write(struct w1_slave *sl, int addr, int len, const u8 *data) { int tries = W1_F2D_READ_RETRIES; u8 wrbuf[4]; u8 rdbuf[W1_F2D_SCRATCH_SIZE + 3]; u8 es = (addr + len - 1) % W1_F2D_SCRATCH_SIZE; retry: /* Write the data to the scratchpad */ if (w1_reset_select_slave(sl)) return -1; wrbuf[0] = W1_F2D_WRITE_SCRATCH; wrbuf[1] = addr & 0xff; wrbuf[2] = addr >> 8; w1_write_block(sl->master, wrbuf, 3); w1_write_block(sl->master, data, len); /* Read the scratchpad and verify */ if (w1_reset_select_slave(sl)) return -1; w1_write_8(sl->master, W1_F2D_READ_SCRATCH); w1_read_block(sl->master, rdbuf, len + 3); /* Compare what was read against the data written */ if ((rdbuf[0] != wrbuf[1]) || (rdbuf[1] != wrbuf[2]) || (rdbuf[2] != es) || (memcmp(data, &rdbuf[3], len) != 0)) { if (--tries) goto retry; dev_err(&sl->dev, "could not write to eeprom, scratchpad compare failed %d times\n", W1_F2D_READ_RETRIES); return -1; } /* Copy the scratchpad to EEPROM */ if (w1_reset_select_slave(sl)) return -1; wrbuf[0] = W1_F2D_COPY_SCRATCH; wrbuf[3] = es; w1_write_block(sl->master, wrbuf, 4); /* Sleep for tprog ms to wait for the write to complete */ msleep(W1_F2D_TPROG_MS); /* Reset the bus to wake up the EEPROM */ w1_reset_bus(sl->master); return 0; } static ssize_t w1_f2d_write_bin(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct w1_slave *sl = kobj_to_w1_slave(kobj); int addr, len; int copy; count = w1_f2d_fix_count(off, count, W1_F2D_EEPROM_SIZE); if (count == 0) return 0; mutex_lock(&sl->master->mutex); /* Can only write data in blocks of the size of the scratchpad */ addr = off; len = count; while (len > 0) { /* if len too short or addr not aligned */ if (len < W1_F2D_SCRATCH_SIZE || addr & 
W1_F2D_SCRATCH_MASK) { char tmp[W1_F2D_SCRATCH_SIZE]; /* read the block and update the parts to be written */ if (w1_f2d_readblock(sl, addr & ~W1_F2D_SCRATCH_MASK, W1_F2D_SCRATCH_SIZE, tmp)) { count = -EIO; goto out_up; } /* copy at most to the boundary of the PAGE or len */ copy = W1_F2D_SCRATCH_SIZE - (addr & W1_F2D_SCRATCH_MASK); if (copy > len) copy = len; memcpy(&tmp[addr & W1_F2D_SCRATCH_MASK], buf, copy); if (w1_f2d_write(sl, addr & ~W1_F2D_SCRATCH_MASK, W1_F2D_SCRATCH_SIZE, tmp) < 0) { count = -EIO; goto out_up; } } else { copy = W1_F2D_SCRATCH_SIZE; if (w1_f2d_write(sl, addr, copy, buf) < 0) { count = -EIO; goto out_up; } } buf += copy; addr += copy; len -= copy; } out_up: mutex_unlock(&sl->master->mutex); return count; } static struct bin_attribute w1_f2d_bin_attr = { .attr = { .name = "eeprom", .mode = S_IRUGO | S_IWUSR, }, .size = W1_F2D_EEPROM_SIZE, .read = w1_f2d_read_bin, .write = w1_f2d_write_bin, }; static int w1_f2d_add_slave(struct w1_slave *sl) { return sysfs_create_bin_file(&sl->dev.kobj, &w1_f2d_bin_attr); } static void w1_f2d_remove_slave(struct w1_slave *sl) { sysfs_remove_bin_file(&sl->dev.kobj, &w1_f2d_bin_attr); } static struct w1_family_ops w1_f2d_fops = { .add_slave = w1_f2d_add_slave, .remove_slave = w1_f2d_remove_slave, }; static struct w1_family w1_family_2d = { .fid = W1_EEPROM_DS2431, .fops = &w1_f2d_fops, }; static int __init w1_f2d_init(void) { return w1_register_family(&w1_family_2d); } static void __exit w1_f2d_fini(void) { w1_unregister_family(&w1_family_2d); } module_init(w1_f2d_init); module_exit(w1_f2d_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Bernhard Weirich <bernhard.weirich@riedel.net>"); MODULE_DESCRIPTION("w1 family 2d driver for DS2431, 1kb EEPROM");
gpl-2.0
lab305itep/linux
drivers/uwb/uwbd.c
9651
10482
/*
 * Ultra Wide Band
 * Neighborhood Management Daemon
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * This daemon takes care of maintaing information that describes the
 * UWB neighborhood that the radios in this machine can see. It also
 * keeps a tab of which devices are visible, makes sure each HC sits
 * on a different channel to avoid interfering, etc.
 *
 * Different drivers (radio controller, device, any API in general)
 * communicate with this daemon through an event queue. Daemon wakes
 * up, takes a list of events and handles them one by one; handling
 * function is extracted from a table based on the event's type and
 * subtype. Events are freed only if the handling function says so.
 *
 *   . Lock protecting the event list has to be an spinlock and locked
 *     with IRQSAVE because it might be called from an interrupt
 *     context (ie: when events arrive and the notification drops
 *     down from the ISR).
 *
 *   . UWB radio controller drivers queue events to the daemon using
 *     uwbd_event_queue(). They just get the event, chew it to make it
 *     look like UWBD likes it and pass it in a buffer allocated with
 *     uwb_event_alloc().
 *
 * EVENTS
 *
 * Events have a type, a subtype, a length, some other stuff and the
 * data blob, which depends on the event. The header is 'struct
 * uwb_event'; for payloads, see 'struct uwbd_evt_*'.
 *
 * EVENT HANDLER TABLES
 *
 * To find a handling function for an event, the type is used to index
 * a subtype-table in the type-table. The subtype-table is indexed
 * with the subtype to get the function that handles the event. Start
 * with the main type-table 'uwbd_evt_type_handler'.
 *
 * DEVICES
 *
 * Devices are created when a bunch of beacons have been received and
 * it is stablished that the device has stable radio presence. CREATED
 * only, not configured. Devices are ONLY configured when an
 * Application-Specific IE Probe is receieved, in which the device
 * declares which Protocol ID it groks. Then the device is CONFIGURED
 * (and the driver->probe() stuff of the device model is invoked).
 *
 * Devices are considered disconnected when a certain number of
 * beacons are not received in an amount of time.
 *
 * Handler functions are called normally uwbd_evt_handle_*().
 */
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/freezer.h>
#include <linux/err.h>

#include "uwb-internal.h"

/*
 * UWBD Event handler function signature
 *
 * Return !0 if the event needs not to be freed (ie the handler
 * takes/took care of it). 0 means the daemon code will free the
 * event.
 *
 * @evt->rc is already referenced and guaranteed to exist. See
 * uwb_evt_handle().
 */
typedef int (*uwbd_evt_handler_f)(struct uwb_event *);

/**
 * Properties of a UWBD event
 *
 * @handler: the function that will handle this event
 * @name: text name of event
 */
struct uwbd_event {
	uwbd_evt_handler_f handler;
	const char *name;
};

/* Table of handlers for and properties of the UWBD Radio Control Events */
static struct uwbd_event uwbd_urc_events[] = {
	[UWB_RC_EVT_IE_RCV] = {
		.handler = uwbd_evt_handle_rc_ie_rcv,
		.name = "IE_RECEIVED"
	},
	[UWB_RC_EVT_BEACON] = {
		.handler = uwbd_evt_handle_rc_beacon,
		.name = "BEACON_RECEIVED"
	},
	[UWB_RC_EVT_BEACON_SIZE] = {
		.handler = uwbd_evt_handle_rc_beacon_size,
		.name = "BEACON_SIZE_CHANGE"
	},
	[UWB_RC_EVT_BPOIE_CHANGE] = {
		.handler = uwbd_evt_handle_rc_bpoie_change,
		.name = "BPOIE_CHANGE"
	},
	[UWB_RC_EVT_BP_SLOT_CHANGE] = {
		.handler = uwbd_evt_handle_rc_bp_slot_change,
		.name = "BP_SLOT_CHANGE"
	},
	[UWB_RC_EVT_DRP_AVAIL] = {
		.handler = uwbd_evt_handle_rc_drp_avail,
		.name = "DRP_AVAILABILITY_CHANGE"
	},
	[UWB_RC_EVT_DRP] = {
		.handler = uwbd_evt_handle_rc_drp,
		.name = "DRP"
	},
	[UWB_RC_EVT_DEV_ADDR_CONFLICT] = {
		.handler = uwbd_evt_handle_rc_dev_addr_conflict,
		.name = "DEV_ADDR_CONFLICT",
	},
};

struct uwbd_evt_type_handler {
	const char *name;
	struct uwbd_event *uwbd_events;
	size_t size;
};

/* Table of handlers for each UWBD Event type. */
static struct uwbd_evt_type_handler uwbd_urc_evt_type_handlers[] = {
	[UWB_RC_CET_GENERAL] = {
		.name        = "URC",
		.uwbd_events = uwbd_urc_events,
		.size        = ARRAY_SIZE(uwbd_urc_events),
	},
};

static const struct uwbd_event uwbd_message_handlers[] = {
	[UWB_EVT_MSG_RESET] = {
		.handler = uwbd_msg_handle_reset,
		.name = "reset",
	},
};

/*
 * Handle an URC event passed to the UWB Daemon
 *
 * @evt: the event to handle
 * @returns: 0 if the event can be kfreed, !0 on the contrary
 *           (somebody else took ownership) [coincidentally, returning
 *           a <0 errno code will free it :)].
 *
 * Looks up the two indirection tables (one for the type, one for the
 * subtype) to decide which function handles it and then calls the
 * handler.
 *
 * The event structure passed to the event handler has the radio
 * controller in @evt->rc referenced. The reference will be dropped
 * once the handler returns, so if it needs it for longer (async),
 * it'll need to take another one.
 */
static int uwbd_event_handle_urc(struct uwb_event *evt)
{
	int result = -EINVAL;
	struct uwbd_evt_type_handler *type_table;
	uwbd_evt_handler_f handler;
	u8 type, context;
	u16 event;

	type = evt->notif.rceb->bEventType;
	event = le16_to_cpu(evt->notif.rceb->wEvent);
	context = evt->notif.rceb->bEventContext;

	if (type >= ARRAY_SIZE(uwbd_urc_evt_type_handlers))
		goto out;
	type_table = &uwbd_urc_evt_type_handlers[type];
	if (type_table->uwbd_events == NULL)
		goto out;
	if (event >= type_table->size)
		goto out;
	handler = type_table->uwbd_events[event].handler;
	if (handler == NULL)
		goto out;

	result = (*handler)(evt);
out:
	if (result < 0)
		dev_err(&evt->rc->uwb_dev.dev,
			"UWBD: event 0x%02x/%04x/%02x, handling failed: %d\n",
			type, event, context, result);
	return result;
}

/* Dispatch an internal (non-radio) message to its table handler. */
static void uwbd_event_handle_message(struct uwb_event *evt)
{
	struct uwb_rc *rc;
	int result;

	rc = evt->rc;

	if (evt->message < 0 || evt->message >= ARRAY_SIZE(uwbd_message_handlers)) {
		dev_err(&rc->uwb_dev.dev, "UWBD: invalid message type %d\n",
			evt->message);
		return;
	}

	result = uwbd_message_handlers[evt->message].handler(evt);
	if (result < 0)
		dev_err(&rc->uwb_dev.dev, "UWBD: '%s' message failed: %d\n",
			uwbd_message_handlers[evt->message].name, result);
}

/*
 * Dispatch one dequeued event and drop the radio controller reference
 * taken when the event was queued.  Events arriving while the radio
 * controller is not ready are silently discarded.
 */
static void uwbd_event_handle(struct uwb_event *evt)
{
	struct uwb_rc *rc;
	int should_keep;

	rc = evt->rc;

	if (rc->ready) {
		switch (evt->type) {
		case UWB_EVT_TYPE_NOTIF:
			should_keep = uwbd_event_handle_urc(evt);
			if (should_keep <= 0)
				kfree(evt->notif.rceb);
			break;
		case UWB_EVT_TYPE_MSG:
			uwbd_event_handle_message(evt);
			break;
		default:
			dev_err(&rc->uwb_dev.dev, "UWBD: invalid event type %d\n",
				evt->type);
			break;
		}
	}

	__uwb_rc_put(rc);	/* for the __uwb_rc_get() in uwb_rc_notif_cb() */
}

/**
 * UWB Daemon
 *
 * Listens to all UWB notifications and takes care to track the state
 * of the UWB neighbourhood for the kernel. When we do a run, we
 * spinlock, move the list to a private copy and release the
 * lock. Hold it as little as possible. Not a conflict: it is
 * guaranteed we own the events in the private list.
 *
 * FIXME: should change so we don't have a 1HZ timer all the time, but
 *        only if there are devices.
 */
static int uwbd(void *param)
{
	struct uwb_rc *rc = param;
	unsigned long flags;
	struct uwb_event *evt;
	int should_stop = 0;

	while (1) {
		wait_event_interruptible_timeout(
			rc->uwbd.wq,
			!list_empty(&rc->uwbd.event_list)
			  || (should_stop = kthread_should_stop()),
			HZ);
		if (should_stop)
			break;
		try_to_freeze();

		spin_lock_irqsave(&rc->uwbd.event_list_lock, flags);
		if (!list_empty(&rc->uwbd.event_list)) {
			evt = list_first_entry(&rc->uwbd.event_list,
					       struct uwb_event, list_node);
			list_del(&evt->list_node);
		} else
			evt = NULL;
		spin_unlock_irqrestore(&rc->uwbd.event_list_lock, flags);

		if (evt) {
			uwbd_event_handle(evt);
			kfree(evt);
		}

		uwb_beca_purge(rc);	/* Purge devices that left */
	}
	return 0;
}

/** Start the UWB daemon */
void uwbd_start(struct uwb_rc *rc)
{
	struct task_struct *task;

	/*
	 * FIX: kthread_run() returns an ERR_PTR on failure, never NULL,
	 * so the old "== NULL" check could not fire and a failed start
	 * left an ERR_PTR in rc->uwbd.task that ->pid then dereferenced.
	 */
	task = kthread_run(uwbd, rc, "uwbd");
	if (IS_ERR(task)) {
		rc->uwbd.task = NULL;
		printk(KERN_ERR "UWB: Cannot start management daemon; "
		       "UWB won't work\n");
	} else {
		rc->uwbd.task = task;
		rc->uwbd.pid = rc->uwbd.task->pid;
	}
}

/* Stop the UWB daemon and free any unprocessed events */
void uwbd_stop(struct uwb_rc *rc)
{
	/* task is NULL if uwbd_start() failed; nothing to stop then */
	if (rc->uwbd.task)
		kthread_stop(rc->uwbd.task);
	uwbd_flush(rc);
}

/*
 * Queue an event for the management daemon
 *
 * When some lower layer receives an event, it uses this function to
 * push it forward to the UWB daemon.
 *
 * Once you pass the event, you don't own it any more, but the daemon
 * does. It will uwb_event_free() it when done, so make sure you
 * uwb_event_alloc()ed it or bad things will happen.
 *
 * If the daemon is not running, we just free the event.
 */
void uwbd_event_queue(struct uwb_event *evt)
{
	struct uwb_rc *rc = evt->rc;
	unsigned long flags;

	spin_lock_irqsave(&rc->uwbd.event_list_lock, flags);
	if (rc->uwbd.pid != 0) {
		list_add(&evt->list_node, &rc->uwbd.event_list);
		wake_up_all(&rc->uwbd.wq);
	} else {
		__uwb_rc_put(evt->rc);
		if (evt->type == UWB_EVT_TYPE_NOTIF)
			kfree(evt->notif.rceb);
		kfree(evt);
	}
	spin_unlock_irqrestore(&rc->uwbd.event_list_lock, flags);
	return;
}

/* Drop every queued event belonging to @rc, releasing its references. */
void uwbd_flush(struct uwb_rc *rc)
{
	struct uwb_event *evt, *nxt;

	spin_lock_irq(&rc->uwbd.event_list_lock);
	list_for_each_entry_safe(evt, nxt, &rc->uwbd.event_list, list_node) {
		if (evt->rc == rc) {
			__uwb_rc_put(rc);
			list_del(&evt->list_node);
			if (evt->type == UWB_EVT_TYPE_NOTIF)
				kfree(evt->notif.rceb);
			kfree(evt);
		}
	}
	spin_unlock_irq(&rc->uwbd.event_list_lock);
}
gpl-2.0
SlimRoms/kernel_sony_msm8974pro
crypto/md5.c
9907
4030
/*
 * Cryptographic API.
 *
 * MD5 Message Digest Algorithm (RFC1321).
 *
 * Derived from cryptoapi implementation, originally based on the
 * public domain implementation written by Colin Plumb in 1993.
 *
 * Copyright (c) Cryptoapi developers.
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/cryptohash.h>
#include <asm/byteorder.h>

/* XXX: this stuff can be optimized */

/* Convert an array of little-endian words to host order, in place. */
static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
{
	unsigned int i;

	for (i = 0; i < words; i++)
		__le32_to_cpus(&buf[i]);
}

/* Convert an array of host-order words to little-endian, in place. */
static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
{
	unsigned int i;

	for (i = 0; i < words; i++)
		__cpu_to_le32s(&buf[i]);
}

/* Byte-swap the buffered block as needed, then run the MD5 round function. */
static inline void md5_transform_helper(struct md5_state *ctx)
{
	le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32));
	md5_transform(ctx->hash, ctx->block);
}

/* Load the RFC 1321 initial chaining values and reset the byte counter. */
static int md5_init(struct shash_desc *desc)
{
	struct md5_state *mctx = shash_desc_ctx(desc);

	mctx->hash[0] = 0x67452301;
	mctx->hash[1] = 0xefcdab89;
	mctx->hash[2] = 0x98badcfe;
	mctx->hash[3] = 0x10325476;
	mctx->byte_count = 0;

	return 0;
}

/*
 * Absorb @len bytes of @data: top up the partial block first, process
 * any full blocks directly, and stash the remainder for later.
 */
static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
	struct md5_state *mctx = shash_desc_ctx(desc);
	const u32 space = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
	u8 *fill = (u8 *)mctx->block + (sizeof(mctx->block) - space);

	mctx->byte_count += len;

	/* Not enough input to complete the buffered block: just stash it. */
	if (space > len) {
		memcpy(fill, data, len);
		return 0;
	}

	memcpy(fill, data, space);
	md5_transform_helper(mctx);
	data += space;
	len -= space;

	while (len >= sizeof(mctx->block)) {
		memcpy(mctx->block, data, sizeof(mctx->block));
		md5_transform_helper(mctx);
		data += sizeof(mctx->block);
		len -= sizeof(mctx->block);
	}

	memcpy(mctx->block, data, len);

	return 0;
}

/*
 * Pad the message (0x80 then zeroes), append the bit length, run the
 * final transform and emit the digest; the state is wiped afterwards.
 */
static int md5_final(struct shash_desc *desc, u8 *out)
{
	struct md5_state *mctx = shash_desc_ctx(desc);
	const unsigned int used = mctx->byte_count & 0x3f;
	char *tail = (char *)mctx->block + used;
	int zeroes = 56 - (used + 1);

	*tail++ = 0x80;

	/* No room for the length words: flush this block first. */
	if (zeroes < 0) {
		memset(tail, 0x00, zeroes + sizeof(u64));
		md5_transform_helper(mctx);
		tail = (char *)mctx->block;
		zeroes = 56;
	}

	memset(tail, 0, zeroes);
	mctx->block[14] = mctx->byte_count << 3;
	mctx->block[15] = mctx->byte_count >> 29;
	le32_to_cpu_array(mctx->block,
			  (sizeof(mctx->block) - sizeof(u64)) / sizeof(u32));
	md5_transform(mctx->hash, mctx->block);
	cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32));
	memcpy(out, mctx->hash, sizeof(mctx->hash));
	memset(mctx, 0, sizeof(*mctx));

	return 0;
}

/* Copy the raw hash state out for later resumption. */
static int md5_export(struct shash_desc *desc, void *out)
{
	struct md5_state *ctx = shash_desc_ctx(desc);

	memcpy(out, ctx, sizeof(*ctx));
	return 0;
}

/* Restore a previously exported hash state. */
static int md5_import(struct shash_desc *desc, const void *in)
{
	struct md5_state *ctx = shash_desc_ctx(desc);

	memcpy(ctx, in, sizeof(*ctx));
	return 0;
}

static struct shash_alg alg = {
	.digestsize	=	MD5_DIGEST_SIZE,
	.init		=	md5_init,
	.update		=	md5_update,
	.final		=	md5_final,
	.export		=	md5_export,
	.import		=	md5_import,
	.descsize	=	sizeof(struct md5_state),
	.statesize	=	sizeof(struct md5_state),
	.base		=	{
		.cra_name	=	"md5",
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	MD5_HMAC_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};

static int __init md5_mod_init(void)
{
	return crypto_register_shash(&alg);
}

static void __exit md5_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(md5_mod_init);
module_exit(md5_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD5 Message Digest Algorithm");
gpl-2.0
TeamExodus/kernel_samsung_manta
drivers/message/i2o/config-osm.c
13491
2170
/* * Configuration OSM * * Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * Fixes/additions: * Markus Lidel <Markus.Lidel@shadowconnect.com> * initial version. */ #include <linux/module.h> #include <linux/i2o.h> #include <linux/dcache.h> #include <linux/namei.h> #include <linux/fs.h> #include <asm/uaccess.h> #define OSM_NAME "config-osm" #define OSM_VERSION "1.323" #define OSM_DESCRIPTION "I2O Configuration OSM" /* access mode user rw */ #define S_IWRSR (S_IRUSR | S_IWUSR) static struct i2o_driver i2o_config_driver; /* Config OSM driver struct */ static struct i2o_driver i2o_config_driver = { .name = OSM_NAME, }; #ifdef CONFIG_I2O_CONFIG_OLD_IOCTL #include "i2o_config.c" #endif /** * i2o_config_init - Configuration OSM initialization function * * Registers Configuration OSM in the I2O core and if old ioctl's are * compiled in initialize them. * * Returns 0 on success or negative error code on failure. */ static int __init i2o_config_init(void) { printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); if (i2o_driver_register(&i2o_config_driver)) { osm_err("handler register failed.\n"); return -EBUSY; } #ifdef CONFIG_I2O_CONFIG_OLD_IOCTL if (i2o_config_old_init()) { osm_err("old config handler initialization failed\n"); i2o_driver_unregister(&i2o_config_driver); return -EBUSY; } #endif return 0; } /** * i2o_config_exit - Configuration OSM exit function * * If old ioctl's are compiled in exit remove them and unregisters * Configuration OSM from I2O core. 
*/ static void i2o_config_exit(void) { #ifdef CONFIG_I2O_CONFIG_OLD_IOCTL i2o_config_old_exit(); #endif i2o_driver_unregister(&i2o_config_driver); } MODULE_AUTHOR("Markus Lidel <Markus.Lidel@shadowconnect.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(OSM_DESCRIPTION); MODULE_VERSION(OSM_VERSION); module_init(i2o_config_init); module_exit(i2o_config_exit);
gpl-2.0
bozont/2.6.35
drivers/message/i2o/config-osm.c
13491
2170
/* * Configuration OSM * * Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * Fixes/additions: * Markus Lidel <Markus.Lidel@shadowconnect.com> * initial version. */ #include <linux/module.h> #include <linux/i2o.h> #include <linux/dcache.h> #include <linux/namei.h> #include <linux/fs.h> #include <asm/uaccess.h> #define OSM_NAME "config-osm" #define OSM_VERSION "1.323" #define OSM_DESCRIPTION "I2O Configuration OSM" /* access mode user rw */ #define S_IWRSR (S_IRUSR | S_IWUSR) static struct i2o_driver i2o_config_driver; /* Config OSM driver struct */ static struct i2o_driver i2o_config_driver = { .name = OSM_NAME, }; #ifdef CONFIG_I2O_CONFIG_OLD_IOCTL #include "i2o_config.c" #endif /** * i2o_config_init - Configuration OSM initialization function * * Registers Configuration OSM in the I2O core and if old ioctl's are * compiled in initialize them. * * Returns 0 on success or negative error code on failure. */ static int __init i2o_config_init(void) { printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); if (i2o_driver_register(&i2o_config_driver)) { osm_err("handler register failed.\n"); return -EBUSY; } #ifdef CONFIG_I2O_CONFIG_OLD_IOCTL if (i2o_config_old_init()) { osm_err("old config handler initialization failed\n"); i2o_driver_unregister(&i2o_config_driver); return -EBUSY; } #endif return 0; } /** * i2o_config_exit - Configuration OSM exit function * * If old ioctl's are compiled in exit remove them and unregisters * Configuration OSM from I2O core. 
*/ static void i2o_config_exit(void) { #ifdef CONFIG_I2O_CONFIG_OLD_IOCTL i2o_config_old_exit(); #endif i2o_driver_unregister(&i2o_config_driver); } MODULE_AUTHOR("Markus Lidel <Markus.Lidel@shadowconnect.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(OSM_DESCRIPTION); MODULE_VERSION(OSM_VERSION); module_init(i2o_config_init); module_exit(i2o_config_exit);
gpl-2.0
CyanogenMod/android_kernel_samsung_exynos5410
fs/dlm/midcomms.c
14771
3813
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

/*
 * midcomms.c
 *
 * This is the appallingly named "mid-level" comms layer.
 *
 * Its purpose is to take packets from the "real" comms layer,
 * split them up into packets and pass them to the interested
 * part of the locking mechanism.
 *
 * It also takes messages from the locking layer, formats them
 * into packets and sends them to the comms layer.
 */

#include "dlm_internal.h"
#include "lowcomms.h"
#include "config.h"
#include "lock.h"
#include "midcomms.h"

/*
 * Copy @len bytes starting at @offset out of a circular buffer of size
 * @limit into @dst, handling a wrap past the end of the buffer.
 */
static void copy_from_cb(void *dst, const void *base, unsigned offset,
			 unsigned len, unsigned limit)
{
	unsigned chunk = len;

	if ((chunk + offset) > limit)
		chunk = limit - offset;
	memcpy(dst, base + offset, chunk);
	if (len > chunk)
		memcpy(dst + chunk, base, len - chunk);
}

/*
 * Called from the low-level comms layer to process a buffer of
 * commands.
 *
 * Only complete messages are processed here, any "spare" bytes from
 * the end of a buffer are saved and tacked onto the front of the next
 * message that comes in. I doubt this will happen very often but we
 * need to be able to cope with it and I don't want the task to be waiting
 * for packets to come in when there is useful work to be done.
 */
int dlm_process_incoming_buffer(int nodeid, const void *base,
				unsigned offset, unsigned len, unsigned limit)
{
	union {
		unsigned char __buf[DLM_INBUF_LEN];
		/* this is to force proper alignment on some arches */
		union dlm_packet p;
	} __tmp;
	union dlm_packet *p = &__tmp.p;
	int consumed = 0;
	int err = 0;
	uint16_t msglen;
	uint32_t lockspace;

	while (len > sizeof(struct dlm_header)) {

		/* Copy just the header to check the total length.  The
		   message may wrap around the end of the buffer back to the
		   start, so we need to use a temp buffer and
		   copy_from_cb. */

		copy_from_cb(p, base, offset, sizeof(struct dlm_header),
			     limit);

		msglen = le16_to_cpu(p->header.h_length);
		lockspace = p->header.h_lockspace;

		/* Reject headers whose claimed length is impossibly small
		   for their command type. */
		err = -EINVAL;
		if (msglen < sizeof(struct dlm_header))
			break;
		if (p->header.h_cmd == DLM_MSG) {
			if (msglen < sizeof(struct dlm_message))
				break;
		} else {
			if (msglen < sizeof(struct dlm_rcom))
				break;
		}
		err = -E2BIG;
		if (msglen > dlm_config.ci_buffer_size) {
			log_print("message size %d from %d too big, buf len %d",
				  msglen, nodeid, len);
			break;
		}
		err = 0;

		/* If only part of the full message is contained in this
		   buffer, then do nothing and wait for lowcomms to call
		   us again later with more data.  We return 0 meaning
		   we've consumed none of the input buffer. */

		if (msglen > len)
			break;

		/* Allocate a larger temp buffer if the full message won't
		   fit in the buffer on the stack (which should work for most
		   ordinary messages). */

		if (msglen > sizeof(__tmp) && p == &__tmp.p) {
			p = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS);
			if (p == NULL)
				return consumed;
		}

		copy_from_cb(p, base, offset, msglen, limit);

		BUG_ON(lockspace != p->header.h_lockspace);

		consumed += msglen;
		offset += msglen;
		offset &= (limit - 1);
		len -= msglen;

		dlm_receive_buffer(p, nodeid);
	}

	if (p != &__tmp.p)
		kfree(p);

	return err ? err : consumed;
}
gpl-2.0
sria91/linux
mm/list_lru.c
180
12878
/* * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved. * Authors: David Chinner and Glauber Costa * * Generic LRU infrastructure */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/list_lru.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/memcontrol.h> #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) static LIST_HEAD(list_lrus); static DEFINE_MUTEX(list_lrus_mutex); static void list_lru_register(struct list_lru *lru) { mutex_lock(&list_lrus_mutex); list_add(&lru->list, &list_lrus); mutex_unlock(&list_lrus_mutex); } static void list_lru_unregister(struct list_lru *lru) { mutex_lock(&list_lrus_mutex); list_del(&lru->list); mutex_unlock(&list_lrus_mutex); } #else static void list_lru_register(struct list_lru *lru) { } static void list_lru_unregister(struct list_lru *lru) { } #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) static inline bool list_lru_memcg_aware(struct list_lru *lru) { /* * This needs node 0 to be always present, even * in the systems supporting sparse numa ids. */ return !!lru->node[0].memcg_lrus; } static inline struct list_lru_one * list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx) { /* * The lock protects the array of per cgroup lists from relocation * (see memcg_update_list_lru_node). 
*/ lockdep_assert_held(&nlru->lock); if (nlru->memcg_lrus && idx >= 0) return nlru->memcg_lrus->lru[idx]; return &nlru->lru; } static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr) { struct page *page; if (!memcg_kmem_enabled()) return NULL; page = virt_to_head_page(ptr); return page->mem_cgroup; } static inline struct list_lru_one * list_lru_from_kmem(struct list_lru_node *nlru, void *ptr) { struct mem_cgroup *memcg; if (!nlru->memcg_lrus) return &nlru->lru; memcg = mem_cgroup_from_kmem(ptr); if (!memcg) return &nlru->lru; return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg)); } #else static inline bool list_lru_memcg_aware(struct list_lru *lru) { return false; } static inline struct list_lru_one * list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx) { return &nlru->lru; } static inline struct list_lru_one * list_lru_from_kmem(struct list_lru_node *nlru, void *ptr) { return &nlru->lru; } #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ bool list_lru_add(struct list_lru *lru, struct list_head *item) { int nid = page_to_nid(virt_to_page(item)); struct list_lru_node *nlru = &lru->node[nid]; struct list_lru_one *l; spin_lock(&nlru->lock); if (list_empty(item)) { l = list_lru_from_kmem(nlru, item); list_add_tail(item, &l->list); l->nr_items++; spin_unlock(&nlru->lock); return true; } spin_unlock(&nlru->lock); return false; } EXPORT_SYMBOL_GPL(list_lru_add); bool list_lru_del(struct list_lru *lru, struct list_head *item) { int nid = page_to_nid(virt_to_page(item)); struct list_lru_node *nlru = &lru->node[nid]; struct list_lru_one *l; spin_lock(&nlru->lock); if (!list_empty(item)) { l = list_lru_from_kmem(nlru, item); list_del_init(item); l->nr_items--; spin_unlock(&nlru->lock); return true; } spin_unlock(&nlru->lock); return false; } EXPORT_SYMBOL_GPL(list_lru_del); void list_lru_isolate(struct list_lru_one *list, struct list_head *item) { list_del_init(item); list->nr_items--; } EXPORT_SYMBOL_GPL(list_lru_isolate); void 
list_lru_isolate_move(struct list_lru_one *list, struct list_head *item, struct list_head *head) { list_move(item, head); list->nr_items--; } EXPORT_SYMBOL_GPL(list_lru_isolate_move); static unsigned long __list_lru_count_one(struct list_lru *lru, int nid, int memcg_idx) { struct list_lru_node *nlru = &lru->node[nid]; struct list_lru_one *l; unsigned long count; spin_lock(&nlru->lock); l = list_lru_from_memcg_idx(nlru, memcg_idx); count = l->nr_items; spin_unlock(&nlru->lock); return count; } unsigned long list_lru_count_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg) { return __list_lru_count_one(lru, nid, memcg_cache_id(memcg)); } EXPORT_SYMBOL_GPL(list_lru_count_one); unsigned long list_lru_count_node(struct list_lru *lru, int nid) { long count = 0; int memcg_idx; count += __list_lru_count_one(lru, nid, -1); if (list_lru_memcg_aware(lru)) { for_each_memcg_cache_index(memcg_idx) count += __list_lru_count_one(lru, nid, memcg_idx); } return count; } EXPORT_SYMBOL_GPL(list_lru_count_node); static unsigned long __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk) { struct list_lru_node *nlru = &lru->node[nid]; struct list_lru_one *l; struct list_head *item, *n; unsigned long isolated = 0; spin_lock(&nlru->lock); l = list_lru_from_memcg_idx(nlru, memcg_idx); restart: list_for_each_safe(item, n, &l->list) { enum lru_status ret; /* * decrement nr_to_walk first so that we don't livelock if we * get stuck on large numbesr of LRU_RETRY items */ if (!*nr_to_walk) break; --*nr_to_walk; ret = isolate(item, l, &nlru->lock, cb_arg); switch (ret) { case LRU_REMOVED_RETRY: assert_spin_locked(&nlru->lock); case LRU_REMOVED: isolated++; /* * If the lru lock has been dropped, our list * traversal is now invalid and so we have to * restart from scratch. 
*/ if (ret == LRU_REMOVED_RETRY) goto restart; break; case LRU_ROTATE: list_move_tail(item, &l->list); break; case LRU_SKIP: break; case LRU_RETRY: /* * The lru lock has been dropped, our list traversal is * now invalid and so we have to restart from scratch. */ assert_spin_locked(&nlru->lock); goto restart; default: BUG(); } } spin_unlock(&nlru->lock); return isolated; } unsigned long list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk) { return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), isolate, cb_arg, nr_to_walk); } EXPORT_SYMBOL_GPL(list_lru_walk_one); unsigned long list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk) { long isolated = 0; int memcg_idx; isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg, nr_to_walk); if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) { for_each_memcg_cache_index(memcg_idx) { isolated += __list_lru_walk_one(lru, nid, memcg_idx, isolate, cb_arg, nr_to_walk); if (*nr_to_walk <= 0) break; } } return isolated; } EXPORT_SYMBOL_GPL(list_lru_walk_node); static void init_one_lru(struct list_lru_one *l) { INIT_LIST_HEAD(&l->list); l->nr_items = 0; } #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus, int begin, int end) { int i; for (i = begin; i < end; i++) kfree(memcg_lrus->lru[i]); } static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus, int begin, int end) { int i; for (i = begin; i < end; i++) { struct list_lru_one *l; l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL); if (!l) goto fail; init_one_lru(l); memcg_lrus->lru[i] = l; } return 0; fail: __memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1); return -ENOMEM; } static int memcg_init_list_lru_node(struct list_lru_node *nlru) { int size = memcg_nr_cache_ids; nlru->memcg_lrus = kmalloc(size * sizeof(void *), 
GFP_KERNEL); if (!nlru->memcg_lrus) return -ENOMEM; if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) { kfree(nlru->memcg_lrus); return -ENOMEM; } return 0; } static void memcg_destroy_list_lru_node(struct list_lru_node *nlru) { __memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids); kfree(nlru->memcg_lrus); } static int memcg_update_list_lru_node(struct list_lru_node *nlru, int old_size, int new_size) { struct list_lru_memcg *old, *new; BUG_ON(old_size > new_size); old = nlru->memcg_lrus; new = kmalloc(new_size * sizeof(void *), GFP_KERNEL); if (!new) return -ENOMEM; if (__memcg_init_list_lru_node(new, old_size, new_size)) { kfree(new); return -ENOMEM; } memcpy(new, old, old_size * sizeof(void *)); /* * The lock guarantees that we won't race with a reader * (see list_lru_from_memcg_idx). * * Since list_lru_{add,del} may be called under an IRQ-safe lock, * we have to use IRQ-safe primitives here to avoid deadlock. */ spin_lock_irq(&nlru->lock); nlru->memcg_lrus = new; spin_unlock_irq(&nlru->lock); kfree(old); return 0; } static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru, int old_size, int new_size) { /* do not bother shrinking the array back to the old size, because we * cannot handle allocation failures here */ __memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size); } static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) { int i; if (!memcg_aware) return 0; for_each_node(i) { if (memcg_init_list_lru_node(&lru->node[i])) goto fail; } return 0; fail: for (i = i - 1; i >= 0; i--) { if (!lru->node[i].memcg_lrus) continue; memcg_destroy_list_lru_node(&lru->node[i]); } return -ENOMEM; } static void memcg_destroy_list_lru(struct list_lru *lru) { int i; if (!list_lru_memcg_aware(lru)) return; for_each_node(i) memcg_destroy_list_lru_node(&lru->node[i]); } static int memcg_update_list_lru(struct list_lru *lru, int old_size, int new_size) { int i; if (!list_lru_memcg_aware(lru)) return 0; 
for_each_node(i) { if (memcg_update_list_lru_node(&lru->node[i], old_size, new_size)) goto fail; } return 0; fail: for (i = i - 1; i >= 0; i--) { if (!lru->node[i].memcg_lrus) continue; memcg_cancel_update_list_lru_node(&lru->node[i], old_size, new_size); } return -ENOMEM; } static void memcg_cancel_update_list_lru(struct list_lru *lru, int old_size, int new_size) { int i; if (!list_lru_memcg_aware(lru)) return; for_each_node(i) memcg_cancel_update_list_lru_node(&lru->node[i], old_size, new_size); } int memcg_update_all_list_lrus(int new_size) { int ret = 0; struct list_lru *lru; int old_size = memcg_nr_cache_ids; mutex_lock(&list_lrus_mutex); list_for_each_entry(lru, &list_lrus, list) { ret = memcg_update_list_lru(lru, old_size, new_size); if (ret) goto fail; } out: mutex_unlock(&list_lrus_mutex); return ret; fail: list_for_each_entry_continue_reverse(lru, &list_lrus, list) memcg_cancel_update_list_lru(lru, old_size, new_size); goto out; } static void memcg_drain_list_lru_node(struct list_lru_node *nlru, int src_idx, int dst_idx) { struct list_lru_one *src, *dst; /* * Since list_lru_{add,del} may be called under an IRQ-safe lock, * we have to use IRQ-safe primitives here to avoid deadlock. 
*/ spin_lock_irq(&nlru->lock); src = list_lru_from_memcg_idx(nlru, src_idx); dst = list_lru_from_memcg_idx(nlru, dst_idx); list_splice_init(&src->list, &dst->list); dst->nr_items += src->nr_items; src->nr_items = 0; spin_unlock_irq(&nlru->lock); } static void memcg_drain_list_lru(struct list_lru *lru, int src_idx, int dst_idx) { int i; if (!list_lru_memcg_aware(lru)) return; for_each_node(i) memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx); } void memcg_drain_all_list_lrus(int src_idx, int dst_idx) { struct list_lru *lru; mutex_lock(&list_lrus_mutex); list_for_each_entry(lru, &list_lrus, list) memcg_drain_list_lru(lru, src_idx, dst_idx); mutex_unlock(&list_lrus_mutex); } #else static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) { return 0; } static void memcg_destroy_list_lru(struct list_lru *lru) { } #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ int __list_lru_init(struct list_lru *lru, bool memcg_aware, struct lock_class_key *key) { int i; size_t size = sizeof(*lru->node) * nr_node_ids; int err = -ENOMEM; memcg_get_cache_ids(); lru->node = kzalloc(size, GFP_KERNEL); if (!lru->node) goto out; for_each_node(i) { spin_lock_init(&lru->node[i].lock); if (key) lockdep_set_class(&lru->node[i].lock, key); init_one_lru(&lru->node[i].lru); } err = memcg_init_list_lru(lru, memcg_aware); if (err) { kfree(lru->node); goto out; } list_lru_register(lru); out: memcg_put_cache_ids(); return err; } EXPORT_SYMBOL_GPL(__list_lru_init); void list_lru_destroy(struct list_lru *lru) { /* Already destroyed or not yet initialized? */ if (!lru->node) return; memcg_get_cache_ids(); list_lru_unregister(lru); memcg_destroy_list_lru(lru); kfree(lru->node); lru->node = NULL; memcg_put_cache_ids(); } EXPORT_SYMBOL_GPL(list_lru_destroy);
gpl-2.0
zjh171/gcc
gcc/testsuite/gfortran.dg/namelist_13.f90
180
1030
!{ dg-do run } ! Tests simple derived types. ! Provided by Paul Thomas - pault@gcc.gnu.org program namelist_13 type :: yourtype integer, dimension(2) :: yi = (/8,9/) real, dimension(2) :: yx = (/80.,90./) character(len=2) :: ych = "xx" end type yourtype type :: mytype integer, dimension(2) :: myi = (/800,900/) real, dimension(2) :: myx = (/8000.,9000./) character(len=2) :: mych = "zz" type(yourtype) :: my_yourtype end type mytype type(mytype) :: z integer :: ier integer :: zeros(10) namelist /mynml/ zeros, z zeros = 0 zeros(5) = 1 open(10,status="scratch", delim="apostrophe") write (10, nml=mynml, iostat=ier) if (ier.ne.0) call abort rewind (10) read (10, NML=mynml, IOSTAT=ier) if (ier.ne.0) call abort close (10) end program namelist_13
gpl-2.0
metredigm/linux
drivers/net/ethernet/broadcom/bcm63xx_enet.c
692
74269
/* * Driver for BCM963xx builtin Ethernet mac * * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/clk.h> #include <linux/etherdevice.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/crc32.h> #include <linux/err.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/if_vlan.h> #include <bcm63xx_dev_enet.h> #include "bcm63xx_enet.h" static char bcm_enet_driver_name[] = "bcm63xx_enet"; static char bcm_enet_driver_version[] = "1.0"; static int copybreak __read_mostly = 128; module_param(copybreak, int, 0); MODULE_PARM_DESC(copybreak, "Receive copy threshold"); /* io registers memory shared between all devices */ static void __iomem *bcm_enet_shared_base[3]; /* * io helpers to access mac registers */ static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off) { return bcm_readl(priv->base + off); } static inline void enet_writel(struct bcm_enet_priv *priv, u32 val, u32 off) { bcm_writel(val, priv->base + off); } /* * io helpers to access switch registers */ static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off) { return bcm_readl(priv->base + off); } static inline void enetsw_writel(struct bcm_enet_priv 
*priv, u32 val, u32 off) { bcm_writel(val, priv->base + off); } static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off) { return bcm_readw(priv->base + off); } static inline void enetsw_writew(struct bcm_enet_priv *priv, u16 val, u32 off) { bcm_writew(val, priv->base + off); } static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off) { return bcm_readb(priv->base + off); } static inline void enetsw_writeb(struct bcm_enet_priv *priv, u8 val, u32 off) { bcm_writeb(val, priv->base + off); } /* io helpers to access shared registers */ static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off) { return bcm_readl(bcm_enet_shared_base[0] + off); } static inline void enet_dma_writel(struct bcm_enet_priv *priv, u32 val, u32 off) { bcm_writel(val, bcm_enet_shared_base[0] + off); } static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan) { return bcm_readl(bcm_enet_shared_base[1] + bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width); } static inline void enet_dmac_writel(struct bcm_enet_priv *priv, u32 val, u32 off, int chan) { bcm_writel(val, bcm_enet_shared_base[1] + bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width); } static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan) { return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width); } static inline void enet_dmas_writel(struct bcm_enet_priv *priv, u32 val, u32 off, int chan) { bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width); } /* * write given data into mii register and wait for transfer to end * with timeout (average measured transfer time is 25us) */ static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data) { int limit; /* make sure mii interrupt status is cleared */ enet_writel(priv, ENET_IR_MII, ENET_IR_REG); enet_writel(priv, data, ENET_MIIDATA_REG); wmb(); /* busy wait on mii interrupt bit, with timeout */ limit = 1000; do { if (enet_readl(priv, ENET_IR_REG) & 
ENET_IR_MII) break; udelay(1); } while (limit-- > 0); return (limit < 0) ? 1 : 0; } /* * MII internal read callback */ static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id, int regnum) { u32 tmp, val; tmp = regnum << ENET_MIIDATA_REG_SHIFT; tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; tmp |= ENET_MIIDATA_OP_READ_MASK; if (do_mdio_op(priv, tmp)) return -1; val = enet_readl(priv, ENET_MIIDATA_REG); val &= 0xffff; return val; } /* * MII internal write callback */ static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id, int regnum, u16 value) { u32 tmp; tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT; tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; tmp |= regnum << ENET_MIIDATA_REG_SHIFT; tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; tmp |= ENET_MIIDATA_OP_WRITE_MASK; (void)do_mdio_op(priv, tmp); return 0; } /* * MII read callback from phylib */ static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id, int regnum) { return bcm_enet_mdio_read(bus->priv, mii_id, regnum); } /* * MII write callback from phylib */ static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id, int regnum, u16 value) { return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value); } /* * MII read callback from mii core */ static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id, int regnum) { return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum); } /* * MII write callback from mii core */ static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id, int regnum, int value) { bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value); } /* * refill rx queue */ static int bcm_enet_refill_rx(struct net_device *dev) { struct bcm_enet_priv *priv; priv = netdev_priv(dev); while (priv->rx_desc_count < priv->rx_ring_size) { struct bcm_enet_desc *desc; struct sk_buff *skb; dma_addr_t p; int desc_idx; u32 len_stat; desc_idx = priv->rx_dirty_desc; desc = &priv->rx_desc_cpu[desc_idx]; if 
(!priv->rx_skb[desc_idx]) { skb = netdev_alloc_skb(dev, priv->rx_skb_size); if (!skb) break; priv->rx_skb[desc_idx] = skb; p = dma_map_single(&priv->pdev->dev, skb->data, priv->rx_skb_size, DMA_FROM_DEVICE); desc->address = p; } len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT; len_stat |= DMADESC_OWNER_MASK; if (priv->rx_dirty_desc == priv->rx_ring_size - 1) { len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift); priv->rx_dirty_desc = 0; } else { priv->rx_dirty_desc++; } wmb(); desc->len_stat = len_stat; priv->rx_desc_count++; /* tell dma engine we allocated one buffer */ if (priv->dma_has_sram) enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); else enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan); } /* If rx ring is still empty, set a timer to try allocating * again at a later time. */ if (priv->rx_desc_count == 0 && netif_running(dev)) { dev_warn(&priv->pdev->dev, "unable to refill rx ring\n"); priv->rx_timeout.expires = jiffies + HZ; add_timer(&priv->rx_timeout); } return 0; } /* * timer callback to defer refill rx queue in case we're OOM */ static void bcm_enet_refill_rx_timer(unsigned long data) { struct net_device *dev; struct bcm_enet_priv *priv; dev = (struct net_device *)data; priv = netdev_priv(dev); spin_lock(&priv->rx_lock); bcm_enet_refill_rx((struct net_device *)data); spin_unlock(&priv->rx_lock); } /* * extract packet from rx queue */ static int bcm_enet_receive_queue(struct net_device *dev, int budget) { struct bcm_enet_priv *priv; struct device *kdev; int processed; priv = netdev_priv(dev); kdev = &priv->pdev->dev; processed = 0; /* don't scan ring further than number of refilled * descriptor */ if (budget > priv->rx_desc_count) budget = priv->rx_desc_count; do { struct bcm_enet_desc *desc; struct sk_buff *skb; int desc_idx; u32 len_stat; unsigned int len; desc_idx = priv->rx_curr_desc; desc = &priv->rx_desc_cpu[desc_idx]; /* make sure we actually read the descriptor status at * each loop */ rmb(); len_stat = 
desc->len_stat; /* break if dma ownership belongs to hw */ if (len_stat & DMADESC_OWNER_MASK) break; processed++; priv->rx_curr_desc++; if (priv->rx_curr_desc == priv->rx_ring_size) priv->rx_curr_desc = 0; priv->rx_desc_count--; /* if the packet does not have start of packet _and_ * end of packet flag set, then just recycle it */ if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) != (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) { dev->stats.rx_dropped++; continue; } /* recycle packet if it's marked as bad */ if (!priv->enet_is_sw && unlikely(len_stat & DMADESC_ERR_MASK)) { dev->stats.rx_errors++; if (len_stat & DMADESC_OVSIZE_MASK) dev->stats.rx_length_errors++; if (len_stat & DMADESC_CRC_MASK) dev->stats.rx_crc_errors++; if (len_stat & DMADESC_UNDER_MASK) dev->stats.rx_frame_errors++; if (len_stat & DMADESC_OV_MASK) dev->stats.rx_fifo_errors++; continue; } /* valid packet */ skb = priv->rx_skb[desc_idx]; len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT; /* don't include FCS */ len -= 4; if (len < copybreak) { struct sk_buff *nskb; nskb = napi_alloc_skb(&priv->napi, len); if (!nskb) { /* forget packet, just rearm desc */ dev->stats.rx_dropped++; continue; } dma_sync_single_for_cpu(kdev, desc->address, len, DMA_FROM_DEVICE); memcpy(nskb->data, skb->data, len); dma_sync_single_for_device(kdev, desc->address, len, DMA_FROM_DEVICE); skb = nskb; } else { dma_unmap_single(&priv->pdev->dev, desc->address, priv->rx_skb_size, DMA_FROM_DEVICE); priv->rx_skb[desc_idx] = NULL; } skb_put(skb, len); skb->protocol = eth_type_trans(skb, dev); dev->stats.rx_packets++; dev->stats.rx_bytes += len; netif_receive_skb(skb); } while (--budget > 0); if (processed || !priv->rx_desc_count) { bcm_enet_refill_rx(dev); /* kick rx dma */ enet_dmac_writel(priv, priv->dma_chan_en_mask, ENETDMAC_CHANCFG, priv->rx_chan); } return processed; } /* * try to or force reclaim of transmitted buffers */ static int bcm_enet_tx_reclaim(struct net_device *dev, int force) { struct 
bcm_enet_priv *priv; int released; priv = netdev_priv(dev); released = 0; while (priv->tx_desc_count < priv->tx_ring_size) { struct bcm_enet_desc *desc; struct sk_buff *skb; /* We run in a bh and fight against start_xmit, which * is called with bh disabled */ spin_lock(&priv->tx_lock); desc = &priv->tx_desc_cpu[priv->tx_dirty_desc]; if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) { spin_unlock(&priv->tx_lock); break; } /* ensure other field of the descriptor were not read * before we checked ownership */ rmb(); skb = priv->tx_skb[priv->tx_dirty_desc]; priv->tx_skb[priv->tx_dirty_desc] = NULL; dma_unmap_single(&priv->pdev->dev, desc->address, skb->len, DMA_TO_DEVICE); priv->tx_dirty_desc++; if (priv->tx_dirty_desc == priv->tx_ring_size) priv->tx_dirty_desc = 0; priv->tx_desc_count++; spin_unlock(&priv->tx_lock); if (desc->len_stat & DMADESC_UNDER_MASK) dev->stats.tx_errors++; dev_kfree_skb(skb); released++; } if (netif_queue_stopped(dev) && released) netif_wake_queue(dev); return released; } /* * poll func, called by network core */ static int bcm_enet_poll(struct napi_struct *napi, int budget) { struct bcm_enet_priv *priv; struct net_device *dev; int rx_work_done; priv = container_of(napi, struct bcm_enet_priv, napi); dev = priv->net_dev; /* ack interrupts */ enet_dmac_writel(priv, priv->dma_chan_int_mask, ENETDMAC_IR, priv->rx_chan); enet_dmac_writel(priv, priv->dma_chan_int_mask, ENETDMAC_IR, priv->tx_chan); /* reclaim sent skb */ bcm_enet_tx_reclaim(dev, 0); spin_lock(&priv->rx_lock); rx_work_done = bcm_enet_receive_queue(dev, budget); spin_unlock(&priv->rx_lock); if (rx_work_done >= budget) { /* rx queue is not yet empty/clean */ return rx_work_done; } /* no more packet in rx/tx queue, remove device from poll * queue */ napi_complete(napi); /* restore rx/tx interrupt */ enet_dmac_writel(priv, priv->dma_chan_int_mask, ENETDMAC_IRMASK, priv->rx_chan); enet_dmac_writel(priv, priv->dma_chan_int_mask, ENETDMAC_IRMASK, priv->tx_chan); return rx_work_done; } /* * 
mac interrupt handler */ static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id) { struct net_device *dev; struct bcm_enet_priv *priv; u32 stat; dev = dev_id; priv = netdev_priv(dev); stat = enet_readl(priv, ENET_IR_REG); if (!(stat & ENET_IR_MIB)) return IRQ_NONE; /* clear & mask interrupt */ enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); enet_writel(priv, 0, ENET_IRMASK_REG); /* read mib registers in workqueue */ schedule_work(&priv->mib_update_task); return IRQ_HANDLED; } /* * rx/tx dma interrupt handler */ static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id) { struct net_device *dev; struct bcm_enet_priv *priv; dev = dev_id; priv = netdev_priv(dev); /* mask rx/tx interrupts */ enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); napi_schedule(&priv->napi); return IRQ_HANDLED; } /* * tx request callback */ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bcm_enet_priv *priv; struct bcm_enet_desc *desc; u32 len_stat; int ret; priv = netdev_priv(dev); /* lock against tx reclaim */ spin_lock(&priv->tx_lock); /* make sure the tx hw queue is not full, should not happen * since we stop queue before it's the case */ if (unlikely(!priv->tx_desc_count)) { netif_stop_queue(dev); dev_err(&priv->pdev->dev, "xmit called with no tx desc " "available?\n"); ret = NETDEV_TX_BUSY; goto out_unlock; } /* pad small packets sent on a switch device */ if (priv->enet_is_sw && skb->len < 64) { int needed = 64 - skb->len; char *data; if (unlikely(skb_tailroom(skb) < needed)) { struct sk_buff *nskb; nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC); if (!nskb) { ret = NETDEV_TX_BUSY; goto out_unlock; } dev_kfree_skb(skb); skb = nskb; } data = skb_put(skb, needed); memset(data, 0, needed); } /* point to the next available desc */ desc = &priv->tx_desc_cpu[priv->tx_curr_desc]; priv->tx_skb[priv->tx_curr_desc] = skb; /* fill descriptor */ desc->address = 
dma_map_single(&priv->pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK; len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) | DMADESC_APPEND_CRC | DMADESC_OWNER_MASK; priv->tx_curr_desc++; if (priv->tx_curr_desc == priv->tx_ring_size) { priv->tx_curr_desc = 0; len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift); } priv->tx_desc_count--; /* dma might be already polling, make sure we update desc * fields in correct order */ wmb(); desc->len_stat = len_stat; wmb(); /* kick tx dma */ enet_dmac_writel(priv, priv->dma_chan_en_mask, ENETDMAC_CHANCFG, priv->tx_chan); /* stop queue if no more desc available */ if (!priv->tx_desc_count) netif_stop_queue(dev); dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; ret = NETDEV_TX_OK; out_unlock: spin_unlock(&priv->tx_lock); return ret; } /* * Change the interface's mac address. */ static int bcm_enet_set_mac_address(struct net_device *dev, void *p) { struct bcm_enet_priv *priv; struct sockaddr *addr = p; u32 val; priv = netdev_priv(dev); memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); /* use perfect match register 0 to store my mac address */ val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) | (dev->dev_addr[4] << 8) | dev->dev_addr[5]; enet_writel(priv, val, ENET_PML_REG(0)); val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]); val |= ENET_PMH_DATAVALID_MASK; enet_writel(priv, val, ENET_PMH_REG(0)); return 0; } /* * Change rx mode (promiscuous/allmulti) and update multicast list */ static void bcm_enet_set_multicast_list(struct net_device *dev) { struct bcm_enet_priv *priv; struct netdev_hw_addr *ha; u32 val; int i; priv = netdev_priv(dev); val = enet_readl(priv, ENET_RXCFG_REG); if (dev->flags & IFF_PROMISC) val |= ENET_RXCFG_PROMISC_MASK; else val &= ~ENET_RXCFG_PROMISC_MASK; /* only 3 perfect match registers left, first one is used for * own mac address */ if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3) val |= 
ENET_RXCFG_ALLMCAST_MASK; else val &= ~ENET_RXCFG_ALLMCAST_MASK; /* no need to set perfect match registers if we catch all * multicast */ if (val & ENET_RXCFG_ALLMCAST_MASK) { enet_writel(priv, val, ENET_RXCFG_REG); return; } i = 0; netdev_for_each_mc_addr(ha, dev) { u8 *dmi_addr; u32 tmp; if (i == 3) break; /* update perfect match registers */ dmi_addr = ha->addr; tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) | (dmi_addr[4] << 8) | dmi_addr[5]; enet_writel(priv, tmp, ENET_PML_REG(i + 1)); tmp = (dmi_addr[0] << 8 | dmi_addr[1]); tmp |= ENET_PMH_DATAVALID_MASK; enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1)); } for (; i < 3; i++) { enet_writel(priv, 0, ENET_PML_REG(i + 1)); enet_writel(priv, 0, ENET_PMH_REG(i + 1)); } enet_writel(priv, val, ENET_RXCFG_REG); } /* * set mac duplex parameters */ static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex) { u32 val; val = enet_readl(priv, ENET_TXCTL_REG); if (fullduplex) val |= ENET_TXCTL_FD_MASK; else val &= ~ENET_TXCTL_FD_MASK; enet_writel(priv, val, ENET_TXCTL_REG); } /* * set mac flow control parameters */ static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en) { u32 val; /* rx flow control (pause frame handling) */ val = enet_readl(priv, ENET_RXCFG_REG); if (rx_en) val |= ENET_RXCFG_ENFLOW_MASK; else val &= ~ENET_RXCFG_ENFLOW_MASK; enet_writel(priv, val, ENET_RXCFG_REG); if (!priv->dma_has_sram) return; /* tx flow control (pause frame generation) */ val = enet_dma_readl(priv, ENETDMA_CFG_REG); if (tx_en) val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); else val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); enet_dma_writel(priv, val, ENETDMA_CFG_REG); } /* * link changed callback (from phylib) */ static void bcm_enet_adjust_phy_link(struct net_device *dev) { struct bcm_enet_priv *priv; struct phy_device *phydev; int status_changed; priv = netdev_priv(dev); phydev = priv->phydev; status_changed = 0; if (priv->old_link != phydev->link) { status_changed = 1; priv->old_link = 
phydev->link; } /* reflect duplex change in mac configuration */ if (phydev->link && phydev->duplex != priv->old_duplex) { bcm_enet_set_duplex(priv, (phydev->duplex == DUPLEX_FULL) ? 1 : 0); status_changed = 1; priv->old_duplex = phydev->duplex; } /* enable flow control if remote advertise it (trust phylib to * check that duplex is full */ if (phydev->link && phydev->pause != priv->old_pause) { int rx_pause_en, tx_pause_en; if (phydev->pause) { /* pause was advertised by lpa and us */ rx_pause_en = 1; tx_pause_en = 1; } else if (!priv->pause_auto) { /* pause setting overrided by user */ rx_pause_en = priv->pause_rx; tx_pause_en = priv->pause_tx; } else { rx_pause_en = 0; tx_pause_en = 0; } bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en); status_changed = 1; priv->old_pause = phydev->pause; } if (status_changed) { pr_info("%s: link %s", dev->name, phydev->link ? "UP" : "DOWN"); if (phydev->link) pr_cont(" - %d/%s - flow control %s", phydev->speed, DUPLEX_FULL == phydev->duplex ? "full" : "half", phydev->pause == 1 ? "rx&tx" : "off"); pr_cont("\n"); } } /* * link changed callback (if phylib is not used) */ static void bcm_enet_adjust_link(struct net_device *dev) { struct bcm_enet_priv *priv; priv = netdev_priv(dev); bcm_enet_set_duplex(priv, priv->force_duplex_full); bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx); netif_carrier_on(dev); pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n", dev->name, priv->force_speed_100 ? 100 : 10, priv->force_duplex_full ? "full" : "half", priv->pause_rx ? "rx" : "off", priv->pause_tx ? 
"tx" : "off"); } /* * open callback, allocate dma rings & buffers and start rx operation */ static int bcm_enet_open(struct net_device *dev) { struct bcm_enet_priv *priv; struct sockaddr addr; struct device *kdev; struct phy_device *phydev; int i, ret; unsigned int size; char phy_id[MII_BUS_ID_SIZE + 3]; void *p; u32 val; priv = netdev_priv(dev); kdev = &priv->pdev->dev; if (priv->has_phy) { /* connect to PHY */ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, priv->mii_bus->id, priv->phy_id); phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { dev_err(kdev, "could not attach to PHY\n"); return PTR_ERR(phydev); } /* mask with MAC supported features */ phydev->supported &= (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_Pause | SUPPORTED_MII); phydev->advertising = phydev->supported; if (priv->pause_auto && priv->pause_rx && priv->pause_tx) phydev->advertising |= SUPPORTED_Pause; else phydev->advertising &= ~SUPPORTED_Pause; dev_info(kdev, "attached PHY at address %d [%s]\n", phydev->addr, phydev->drv->name); priv->old_link = 0; priv->old_duplex = -1; priv->old_pause = -1; priv->phydev = phydev; } /* mask all interrupts and request them */ enet_writel(priv, 0, ENET_IRMASK_REG); enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev); if (ret) goto out_phy_disconnect; ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0, dev->name, dev); if (ret) goto out_freeirq; ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, 0, dev->name, dev); if (ret) goto out_freeirq_rx; /* initialize perfect match registers */ for (i = 0; i < 4; i++) { enet_writel(priv, 0, ENET_PML_REG(i)); enet_writel(priv, 0, ENET_PMH_REG(i)); } /* write device mac address */ memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN); 
bcm_enet_set_mac_address(dev, &addr); /* allocate rx dma ring */ size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); if (!p) { ret = -ENOMEM; goto out_freeirq_tx; } priv->rx_desc_alloc_size = size; priv->rx_desc_cpu = p; /* allocate tx dma ring */ size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); if (!p) { ret = -ENOMEM; goto out_free_rx_ring; } priv->tx_desc_alloc_size = size; priv->tx_desc_cpu = p; priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *), GFP_KERNEL); if (!priv->tx_skb) { ret = -ENOMEM; goto out_free_tx_ring; } priv->tx_desc_count = priv->tx_ring_size; priv->tx_dirty_desc = 0; priv->tx_curr_desc = 0; spin_lock_init(&priv->tx_lock); /* init & fill rx ring with skbs */ priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *), GFP_KERNEL); if (!priv->rx_skb) { ret = -ENOMEM; goto out_free_tx_skb; } priv->rx_desc_count = 0; priv->rx_dirty_desc = 0; priv->rx_curr_desc = 0; /* initialize flow control buffer allocation */ if (priv->dma_has_sram) enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, ENETDMA_BUFALLOC_REG(priv->rx_chan)); else enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, ENETDMAC_BUFALLOC, priv->rx_chan); if (bcm_enet_refill_rx(dev)) { dev_err(kdev, "cannot allocate rx skb queue\n"); ret = -ENOMEM; goto out; } /* write rx & tx ring addresses */ if (priv->dma_has_sram) { enet_dmas_writel(priv, priv->rx_desc_dma, ENETDMAS_RSTART_REG, priv->rx_chan); enet_dmas_writel(priv, priv->tx_desc_dma, ENETDMAS_RSTART_REG, priv->tx_chan); } else { enet_dmac_writel(priv, priv->rx_desc_dma, ENETDMAC_RSTART, priv->rx_chan); enet_dmac_writel(priv, priv->tx_desc_dma, ENETDMAC_RSTART, priv->tx_chan); } /* clear remaining state ram for rx & tx channel */ if (priv->dma_has_sram) { enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan); enet_dmas_writel(priv, 0, 
ENETDMAS_SRAM2_REG, priv->tx_chan); enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan); enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan); enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan); enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan); } else { enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan); enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan); } /* set max rx/tx length */ enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG); enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG); /* set dma maximum burst len */ enet_dmac_writel(priv, priv->dma_maxburst, ENETDMAC_MAXBURST, priv->rx_chan); enet_dmac_writel(priv, priv->dma_maxburst, ENETDMAC_MAXBURST, priv->tx_chan); /* set correct transmit fifo watermark */ enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG); /* set flow control low/high threshold to 1/3 / 2/3 */ if (priv->dma_has_sram) { val = priv->rx_ring_size / 3; enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); val = (priv->rx_ring_size * 2) / 3; enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); } else { enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan); enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan); enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan); } /* all set, enable mac and interrupts, start dma engine and * kick rx dma channel */ wmb(); val = enet_readl(priv, ENET_CTL_REG); val |= ENET_CTL_ENABLE_MASK; enet_writel(priv, val, ENET_CTL_REG); enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); enet_dmac_writel(priv, priv->dma_chan_en_mask, ENETDMAC_CHANCFG, priv->rx_chan); /* watch "mib counters about to overflow" interrupt */ enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); /* watch "packet transferred" interrupt in rx and tx */ enet_dmac_writel(priv, priv->dma_chan_int_mask, ENETDMAC_IR, priv->rx_chan); enet_dmac_writel(priv, priv->dma_chan_int_mask, ENETDMAC_IR, 
priv->tx_chan); /* make sure we enable napi before rx interrupt */ napi_enable(&priv->napi); enet_dmac_writel(priv, priv->dma_chan_int_mask, ENETDMAC_IRMASK, priv->rx_chan); enet_dmac_writel(priv, priv->dma_chan_int_mask, ENETDMAC_IRMASK, priv->tx_chan); if (priv->has_phy) phy_start(priv->phydev); else bcm_enet_adjust_link(dev); netif_start_queue(dev); return 0; out: for (i = 0; i < priv->rx_ring_size; i++) { struct bcm_enet_desc *desc; if (!priv->rx_skb[i]) continue; desc = &priv->rx_desc_cpu[i]; dma_unmap_single(kdev, desc->address, priv->rx_skb_size, DMA_FROM_DEVICE); kfree_skb(priv->rx_skb[i]); } kfree(priv->rx_skb); out_free_tx_skb: kfree(priv->tx_skb); out_free_tx_ring: dma_free_coherent(kdev, priv->tx_desc_alloc_size, priv->tx_desc_cpu, priv->tx_desc_dma); out_free_rx_ring: dma_free_coherent(kdev, priv->rx_desc_alloc_size, priv->rx_desc_cpu, priv->rx_desc_dma); out_freeirq_tx: free_irq(priv->irq_tx, dev); out_freeirq_rx: free_irq(priv->irq_rx, dev); out_freeirq: free_irq(dev->irq, dev); out_phy_disconnect: phy_disconnect(priv->phydev); return ret; } /* * disable mac */ static void bcm_enet_disable_mac(struct bcm_enet_priv *priv) { int limit; u32 val; val = enet_readl(priv, ENET_CTL_REG); val |= ENET_CTL_DISABLE_MASK; enet_writel(priv, val, ENET_CTL_REG); limit = 1000; do { u32 val; val = enet_readl(priv, ENET_CTL_REG); if (!(val & ENET_CTL_DISABLE_MASK)) break; udelay(1); } while (limit--); } /* * disable dma in given channel */ static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan) { int limit; enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan); limit = 1000; do { u32 val; val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan); if (!(val & ENETDMAC_CHANCFG_EN_MASK)) break; udelay(1); } while (limit--); } /* * stop callback */ static int bcm_enet_stop(struct net_device *dev) { struct bcm_enet_priv *priv; struct device *kdev; int i; priv = netdev_priv(dev); kdev = &priv->pdev->dev; netif_stop_queue(dev); napi_disable(&priv->napi); if 
 (priv->has_phy)
		phy_stop(priv->phydev);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	/* make sure no mib update is scheduled */
	cancel_work_sync(&priv->mib_update_task);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring; unmap each DMA buffer before freeing
	 * its skb */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy) {
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	return 0;
}

/*
 * ethtool callbacks
 */

/* descriptor for one exported statistic: its ethtool name, the size
 * and offset of the backing field (in bcm_enet_priv or
 * net_device_stats), and the hardware MIB register index (-1 when the
 * value is software-maintained only) */
struct bcm_enet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
	int mib_reg;
};

#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),	\
		     offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),	\
		     offsetof(struct net_device_stats, m)

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets", DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped", DEV_STAT(tx_dropped), -1 },
	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },
	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
};

#define BCM_ENET_STATS_LEN	ARRAY_SIZE(bcm_enet_gstrings_stats)

/* hw MIB registers not exported via ethtool; still read (and thereby
 * cleared) so the overflow interrupt can be acknowledged */
static const u32 unused_mib_regs[] = {
	ETH_MIB_TX_ALL_OCTETS,
	ETH_MIB_TX_ALL_PKTS,
	ETH_MIB_RX_ALL_OCTETS,
	ETH_MIB_RX_ALL_PKTS,
};

/* ethtool -i: static driver identification strings */
static void bcm_enet_get_drvinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, bcm_enet_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
	drvinfo->n_stats = BCM_ENET_STATS_LEN;
}

/* number of strings in the requested string set (stats only) */
static int bcm_enet_get_sset_count(struct net_device *netdev,
				   int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENET_STATS_LEN;
	default:
		return -EINVAL;
	}
}

/* copy the statistic names into the caller's string buffer */
static void bcm_enet_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

/* accumulate the self-clearing hw MIB counters into the software
 * copies held in bcm_enet_priv; caller holds mib_update_lock */
static void update_mib_counters(struct bcm_enet_priv *priv)
{
	int i;

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 val;
		char *p;

		s =
 &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			continue;

		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
		p = (char *)priv + s->stat_offset;

		/* hw returns a 32-bit delta (counters self-clear on
		 * read); add into the 32- or 64-bit software copy */
		if (s->sizeof_stat == sizeof(u64))
			*(u64 *)p += val;
		else
			*(u32 *)p += val;
	}

	/* also empty unused mib counters to make sure mib counter
	 * overflow interrupt is cleared */
	for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
		(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}

/* deferred work: drain the MIB counters outside interrupt context,
 * then re-arm the "MIB about to overflow" interrupt */
static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
	struct bcm_enet_priv *priv;

	priv = container_of(t, struct bcm_enet_priv, mib_update_task);
	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);
	mutex_unlock(&priv->mib_update_lock);

	/* reenable mib interrupt */
	if (netif_running(priv->net_dev))
		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

/* ethtool -S: refresh hw counters then copy every stat (device stats
 * or accumulated MIB values) into the output array */
static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		/* mib_reg == -1 means the value lives in netdev->stats */
		if (s->mib_reg == -1)
			p = (char *)&netdev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
	mutex_unlock(&priv->mib_update_lock);
}

/* restart autonegotiation; only meaningful when a PHY is attached */
static int bcm_enet_nway_reset(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return genphy_restart_aneg(priv->phydev);
	}

	return -EOPNOTSUPP;
}

/* report link settings: delegate to phylib when a PHY exists,
 * otherwise synthesize from the forced speed/duplex configuration */
static int bcm_enet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	cmd->maxrxpkt = 0;
	cmd->maxtxpkt = 0;

	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_ethtool_gset(priv->phydev, cmd);
	} else {
		cmd->autoneg = 0;
		ethtool_cmd_speed_set(cmd, ((priv->force_speed_100) ?
					    SPEED_100 : SPEED_10));
		cmd->duplex = (priv->force_duplex_full) ?
			DUPLEX_FULL : DUPLEX_HALF;
		cmd->supported = ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full;
		cmd->advertising = 0;
		cmd->port = PORT_MII;
		cmd->transceiver = XCVR_EXTERNAL;
	}
	return 0;
}

/* change link settings; without a PHY only forced 10/100 over MII is
 * accepted and the MAC is re-adjusted immediately if running */
static int bcm_enet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_ethtool_sset(priv->phydev, cmd);
	} else {
		if (cmd->autoneg ||
		    (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
		    cmd->port != PORT_MII)
			return -EINVAL;

		priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
		priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;

		if (netif_running(dev))
			bcm_enet_adjust_link(dev);
		return 0;
	}
}

static void bcm_enet_get_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}

/* resize the descriptor rings by bouncing the interface down and up
 * when it is running */
static int bcm_enet_set_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enet_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enet_open(dev);
		if (err)
			dev_close(dev);
		else
			bcm_enet_set_multicast_list(dev);
	}
	return 0;
}

static void bcm_enet_get_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	ecmd->autoneg = priv->pause_auto;
	ecmd->rx_pause = priv->pause_rx;
	ecmd->tx_pause = priv->pause_tx;
}

static int bcm_enet_set_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	if (priv->has_phy) {
		if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
			/* asymetric pause mode not supported,
			 * actually possible but integrated PHY has RO
			 * asym_pause bit */
			return -EINVAL;
		}
	} else {
		/* no pause autoneg on direct mii connection */
		if (ecmd->autoneg)
			return -EINVAL;
	}

	/* new settings take effect on the next link adjust / open */
	priv->pause_auto = ecmd->autoneg;
	priv->pause_rx = ecmd->rx_pause;
	priv->pause_tx = ecmd->tx_pause;

	return 0;
}

static const struct ethtool_ops bcm_enet_ethtool_ops = {
	.get_strings		= bcm_enet_get_strings,
	.get_sset_count		= bcm_enet_get_sset_count,
	.get_ethtool_stats	= bcm_enet_get_ethtool_stats,
	.nway_reset		= bcm_enet_nway_reset,
	.get_settings		= bcm_enet_get_settings,
	.set_settings		= bcm_enet_set_settings,
	.get_drvinfo		= bcm_enet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= bcm_enet_get_ringparam,
	.set_ringparam		= bcm_enet_set_ringparam,
	.get_pauseparam		= bcm_enet_get_pauseparam,
	.set_pauseparam		= bcm_enet_set_pauseparam,
};

/* SIOCxMII ioctls: go through phylib when a PHY is attached,
 * otherwise use the generic mii helpers on our raw mdio accessors */
static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_mii_ioctl(priv->phydev, rq, cmd);
	} else {
		struct mii_if_info mii;

		mii.dev = dev;
		mii.mdio_read = bcm_enet_mdio_read_mii;
		mii.mdio_write = bcm_enet_mdio_write_mii;
		mii.phy_id = 0;
		mii.phy_id_mask = 0x3f;
		mii.reg_num_mask = 0x1f;
		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
	}
}

/*
 * calculate actual hardware mtu
 *
 * Derives hw_mtu (frame size programmed into the MAC) and
 * rx_skb_size (rx buffer size, burst aligned) from the requested
 * layer-3 MTU.  Returns -EINVAL when the resulting frame size is out
 * of the hardware's range.
 */
static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
{
	int actual_mtu;

	actual_mtu = mtu;

	/* add ethernet header + vlan tag size */
	actual_mtu += VLAN_ETH_HLEN;

	if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
		return -EINVAL;

	/*
	 * setup maximum size before we get overflow mark in
	 * descriptor, note that this will not prevent reception of
	 * big frames, they will be split into multiple buffers
	 * anyway
	 */
	priv->hw_mtu = actual_mtu;

	/*
	 * align rx buffer size to dma burst len, account FCS since
	 * it's appended
	 */
	priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
				  priv->dma_maxburst * 4);
	return 0;
}

/*
 * adjust mtu, can't be called while device is running
 */
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;

	if (netif_running(dev))
		return -EBUSY;

	ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
	if (ret)
		return ret;

	dev->mtu = new_mtu;
	return 0;
}

/*
 * preinit hardware to allow mii operation while device is down
 *
 * Soft-resets the MAC, selects the internal/external MII interface,
 * enables the MDC clock and puts the MIB counters in
 * clear-on-read mode.
 */
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
	u32 val;
	int limit;

	/* make sure mac is disabled */
	bcm_enet_disable_mac(priv);

	/* soft reset mac */
	val = ENET_CTL_SRESET_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	wmb();

	/* wait (bounded) for the reset bit to self-clear */
	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_SRESET_MASK))
			break;
		udelay(1);
	} while (limit--);

	/* select correct mii interface */
	val = enet_readl(priv, ENET_CTL_REG);
	if (priv->use_external_mii)
		val |= ENET_CTL_EPHYSEL_MASK;
	else
		val &= ~ENET_CTL_EPHYSEL_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	/* turn on mdc clock */
	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

	/* set mib counters to self-clear when read */
	val = enet_readl(priv, ENET_MIBCTL_REG);
	val |= ENET_MIBCTL_RDCLEAR_MASK;
	enet_writel(priv, val, ENET_MIBCTL_REG);
}

static const struct net_device_ops bcm_enet_ops = {
	.ndo_open		= bcm_enet_open,
	.ndo_stop		= bcm_enet_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_set_mac_address	= bcm_enet_set_mac_address,
	.ndo_set_rx_mode	= bcm_enet_set_multicast_list,
	.ndo_do_ioctl		= bcm_enet_ioctl,
	.ndo_change_mtu		= bcm_enet_change_mtu,
};

/*
 * allocate netdevice, request register memory and register device.
*/ static int bcm_enet_probe(struct platform_device *pdev) { struct bcm_enet_priv *priv; struct net_device *dev; struct bcm63xx_enet_platform_data *pd; struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx; struct mii_bus *bus; const char *clk_name; int i, ret; /* stop if shared driver failed, assume driver->probe will be * called in the same order we register devices (correct ?) */ if (!bcm_enet_shared_base[0]) return -ENODEV; res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1); res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2); if (!res_irq || !res_irq_rx || !res_irq_tx) return -ENODEV; ret = 0; dev = alloc_etherdev(sizeof(*priv)); if (!dev) return -ENOMEM; priv = netdev_priv(dev); priv->enet_is_sw = false; priv->dma_maxburst = BCMENET_DMA_MAXBURST; ret = compute_hw_mtu(priv, dev->mtu); if (ret) goto out; res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); priv->base = devm_ioremap_resource(&pdev->dev, res_mem); if (IS_ERR(priv->base)) { ret = PTR_ERR(priv->base); goto out; } dev->irq = priv->irq = res_irq->start; priv->irq_rx = res_irq_rx->start; priv->irq_tx = res_irq_tx->start; priv->mac_id = pdev->id; /* get rx & tx dma channel id for this mac */ if (priv->mac_id == 0) { priv->rx_chan = 0; priv->tx_chan = 1; clk_name = "enet0"; } else { priv->rx_chan = 2; priv->tx_chan = 3; clk_name = "enet1"; } priv->mac_clk = clk_get(&pdev->dev, clk_name); if (IS_ERR(priv->mac_clk)) { ret = PTR_ERR(priv->mac_clk); goto out; } clk_prepare_enable(priv->mac_clk); /* initialize default and fetch platform data */ priv->rx_ring_size = BCMENET_DEF_RX_DESC; priv->tx_ring_size = BCMENET_DEF_TX_DESC; pd = dev_get_platdata(&pdev->dev); if (pd) { memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); priv->has_phy = pd->has_phy; priv->phy_id = pd->phy_id; priv->has_phy_interrupt = pd->has_phy_interrupt; priv->phy_interrupt = pd->phy_interrupt; priv->use_external_mii = !pd->use_internal_phy; 
priv->pause_auto = pd->pause_auto; priv->pause_rx = pd->pause_rx; priv->pause_tx = pd->pause_tx; priv->force_duplex_full = pd->force_duplex_full; priv->force_speed_100 = pd->force_speed_100; priv->dma_chan_en_mask = pd->dma_chan_en_mask; priv->dma_chan_int_mask = pd->dma_chan_int_mask; priv->dma_chan_width = pd->dma_chan_width; priv->dma_has_sram = pd->dma_has_sram; priv->dma_desc_shift = pd->dma_desc_shift; } if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) { /* using internal PHY, enable clock */ priv->phy_clk = clk_get(&pdev->dev, "ephy"); if (IS_ERR(priv->phy_clk)) { ret = PTR_ERR(priv->phy_clk); priv->phy_clk = NULL; goto out_put_clk_mac; } clk_prepare_enable(priv->phy_clk); } /* do minimal hardware init to be able to probe mii bus */ bcm_enet_hw_preinit(priv); /* MII bus registration */ if (priv->has_phy) { priv->mii_bus = mdiobus_alloc(); if (!priv->mii_bus) { ret = -ENOMEM; goto out_uninit_hw; } bus = priv->mii_bus; bus->name = "bcm63xx_enet MII bus"; bus->parent = &pdev->dev; bus->priv = priv; bus->read = bcm_enet_mdio_read_phylib; bus->write = bcm_enet_mdio_write_phylib; sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id); /* only probe bus where we think the PHY is, because * the mdio read operation return 0 instead of 0xffff * if a slave is not present on hw */ bus->phy_mask = ~(1 << priv->phy_id); bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); if (!bus->irq) { ret = -ENOMEM; goto out_free_mdio; } if (priv->has_phy_interrupt) bus->irq[priv->phy_id] = priv->phy_interrupt; else bus->irq[priv->phy_id] = PHY_POLL; ret = mdiobus_register(bus); if (ret) { dev_err(&pdev->dev, "unable to register mdio bus\n"); goto out_free_mdio; } } else { /* run platform code to initialize PHY device */ if (pd->mii_config && pd->mii_config(dev, 1, bcm_enet_mdio_read_mii, bcm_enet_mdio_write_mii)) { dev_err(&pdev->dev, "unable to configure mdio bus\n"); goto out_uninit_hw; } } spin_lock_init(&priv->rx_lock); /* init rx timeout 
(used for oom) */ init_timer(&priv->rx_timeout); priv->rx_timeout.function = bcm_enet_refill_rx_timer; priv->rx_timeout.data = (unsigned long)dev; /* init the mib update lock&work */ mutex_init(&priv->mib_update_lock); INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer); /* zero mib counters */ for (i = 0; i < ENET_MIB_REG_COUNT; i++) enet_writel(priv, 0, ENET_MIB_REG(i)); /* register netdevice */ dev->netdev_ops = &bcm_enet_ops; netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); dev->ethtool_ops = &bcm_enet_ethtool_ops; SET_NETDEV_DEV(dev, &pdev->dev); ret = register_netdev(dev); if (ret) goto out_unregister_mdio; netif_carrier_off(dev); platform_set_drvdata(pdev, dev); priv->pdev = pdev; priv->net_dev = dev; return 0; out_unregister_mdio: if (priv->mii_bus) mdiobus_unregister(priv->mii_bus); out_free_mdio: if (priv->mii_bus) mdiobus_free(priv->mii_bus); out_uninit_hw: /* turn off mdc clock */ enet_writel(priv, 0, ENET_MIISC_REG); if (priv->phy_clk) { clk_disable_unprepare(priv->phy_clk); clk_put(priv->phy_clk); } out_put_clk_mac: clk_disable_unprepare(priv->mac_clk); clk_put(priv->mac_clk); out: free_netdev(dev); return ret; } /* * exit func, stops hardware and unregisters netdevice */ static int bcm_enet_remove(struct platform_device *pdev) { struct bcm_enet_priv *priv; struct net_device *dev; /* stop netdevice */ dev = platform_get_drvdata(pdev); priv = netdev_priv(dev); unregister_netdev(dev); /* turn off mdc clock */ enet_writel(priv, 0, ENET_MIISC_REG); if (priv->has_phy) { mdiobus_unregister(priv->mii_bus); mdiobus_free(priv->mii_bus); } else { struct bcm63xx_enet_platform_data *pd; pd = dev_get_platdata(&pdev->dev); if (pd && pd->mii_config) pd->mii_config(dev, 0, bcm_enet_mdio_read_mii, bcm_enet_mdio_write_mii); } /* disable hw block clocks */ if (priv->phy_clk) { clk_disable_unprepare(priv->phy_clk); clk_put(priv->phy_clk); } clk_disable_unprepare(priv->mac_clk); clk_put(priv->mac_clk); free_netdev(dev); return 0; } struct 
 platform_driver bcm63xx_enet_driver = {
	.probe	= bcm_enet_probe,
	.remove	= bcm_enet_remove,
	.driver	= {
		.name	= "bcm63xx_enet",
		.owner	= THIS_MODULE,
	},
};

/*
 * switch mii access callbacks
 */

/* read one PHY register through the switch's indirect MDIO controller;
 * serialized by enetsw_mdio_lock, with a fixed 50us completion delay
 * (no hardware "done" bit is polled here) */
static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
				int ext, int phy_id, int location)
{
	u32 reg;
	int ret;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_RD_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	/* ext selects the external (RGMII) MDIO bus */
	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
	udelay(50);
	ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
	return ret;
}

/* write one PHY register through the switch's indirect MDIO
 * controller; same locking and timing as the read path */
static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
				  int ext, int phy_id, int location,
				  uint16_t data)
{
	u32 reg;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_WR_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	reg |= data;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
	udelay(50);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
}

/* RGMII ports (and their PHYs) sit behind the external MDIO bus */
static inline int bcm_enet_port_is_rgmii(int portid)
{
	return portid >= ENETSW_RGMII_PORT0;
}

/*
 * enet sw PHY polling
 *
 * Periodic (1s) timer: polls link status of every used, non-bypassed
 * switch port and reprograms the port override register when a link
 * transition is seen.
 */
static void swphy_poll_timer(unsigned long data)
{
	struct bcm_enet_priv *priv = (struct bcm_enet_priv *)data;
	unsigned int i;

	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		int val, j, up, advertise, lpa, lpa2, speed, duplex, media;
		int external_phy = bcm_enet_port_is_rgmii(i);
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (port->bypass_link)
			continue;

		/* dummy read to clear */
		for (j = 0; j < 2; j++)
			val = bcmenet_sw_mdio_read(priv, external_phy,
						   port->phy_id, MII_BMSR);

		/* 0xffff means no PHY responded at this address */
		if (val == 0xffff)
			continue;

		up = (val & BMSR_LSTATUS) ? 1 : 0;
		/* only act on link state changes */
		if (!(up ^ priv->sw_port_link[i]))
			continue;

		priv->sw_port_link[i] = up;

		/* link changed */
		if (!up) {
			dev_info(&priv->pdev->dev, "link DOWN on %s\n",
				 port->name);
			enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
				      ENETSW_PORTOV_REG(i));
			enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
				      ENETSW_PTCTRL_TXDIS_MASK,
				      ENETSW_PTCTRL_REG(i));
			continue;
		}

		advertise = bcmenet_sw_mdio_read(priv, external_phy,
						 port->phy_id, MII_ADVERTISE);

		lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
					   MII_LPA);

		lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
					    MII_STAT1000);

		/* figure out media and duplex from advertise and LPA values */
		media = mii_nway_result(lpa & advertise);
		duplex = (media & ADVERTISE_FULL) ? 1 : 0;
		if (lpa2 & LPA_1000FULL)
			duplex = 1;

		if (lpa2 & (LPA_1000FULL | LPA_1000HALF))
			speed = 1000;
		else {
			if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
				speed = 100;
			else
				speed = 10;
		}

		dev_info(&priv->pdev->dev,
			 "link UP on %s, %dMbps, %s-duplex\n",
			 port->name, speed, duplex ?
 "full" : "half");

		/* reflect the negotiated speed/duplex in the port
		 * override register and re-enable rx/tx */
		override = ENETSW_PORTOV_ENABLE_MASK |
			ENETSW_PORTOV_LINKUP_MASK;

		if (speed == 1000)
			override |= ENETSW_IMPOV_1000_MASK;
		else if (speed == 100)
			override |= ENETSW_IMPOV_100_MASK;
		if (duplex)
			override |= ENETSW_IMPOV_FDX_MASK;

		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
	}

	/* re-arm ourselves one second from now */
	priv->swphy_poll.expires = jiffies + HZ;
	add_timer(&priv->swphy_poll);
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enetsw_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i, ret;
	unsigned int size;
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	/* mask all interrupts and request them */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  0, dev->name, dev);
	if (ret)
		goto out_freeirq;

	/* irq_tx == -1 means the tx dma channel has no dedicated irq */
	if (priv->irq_tx != -1) {
		ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
				  0, dev->name, dev);
		if (ret)
			goto out_freeirq_rx;
	}

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	memset(p, 0, size);
	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	memset(p, 0, size);
	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* disable all ports */
	for (i = 0; i < priv->num_ports; i++) {
		enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
			      ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
			      ENETSW_PTCTRL_TXDIS_MASK,
			      ENETSW_PTCTRL_REG(i));

		priv->sw_port_link[i] = 0;
	}

	/* reset mib */
	val = enetsw_readb(priv, ENETSW_GMCR_REG);
	val |= ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);
	val &= ~ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);

	/* force CPU port state */
	val = enetsw_readb(priv, ENETSW_IMPOV_REG);
	val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
	enetsw_writeb(priv, val, ENETSW_IMPOV_REG);

	/* enable switch forward engine */
	val = enetsw_readb(priv, ENETSW_SWMODE_REG);
	val |= ENETSW_SWMODE_FWD_EN_MASK;
	enetsw_writeb(priv, val, ENETSW_SWMODE_REG);

	/* enable jumbo on all ports */
	enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
	enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dmas_writel(priv, priv->rx_desc_dma,
			 ENETDMAS_RSTART_REG, priv->rx_chan);
	enet_dmas_writel(priv, priv->tx_desc_dma,
			 ENETDMAS_RSTART_REG, priv->tx_chan);

	/* clear remaining state ram for rx & tx channel */
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);

	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->tx_chan);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
			 ENETDMAC_CHANCFG, priv->rx_chan);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->tx_chan);

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK, priv->tx_chan);

	netif_carrier_on(dev);
	netif_start_queue(dev);

	/* apply override config for bypass_link ports here.
	 */
	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (!port->bypass_link)
			continue;

		/* bypass ports are never polled; force link up with
		 * the configured speed/duplex */
		override = ENETSW_PORTOV_ENABLE_MASK |
			ENETSW_PORTOV_LINKUP_MASK;

		switch (port->force_speed) {
		case 1000:
			override |= ENETSW_IMPOV_1000_MASK;
			break;
		case 100:
			override |= ENETSW_IMPOV_100_MASK;
			break;
		case 10:
			break;
		default:
			pr_warn("invalid forced speed on port %s: assume 10\n",
				port->name);
			break;
		}

		if (port->force_duplex_full)
			override |= ENETSW_IMPOV_FDX_MASK;

		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
	}

	/* start phy polling timer */
	init_timer(&priv->swphy_poll);
	priv->swphy_poll.function = swphy_poll_timer;
	priv->swphy_poll.data = (unsigned long)priv;
	priv->swphy_poll.expires = jiffies;
	add_timer(&priv->swphy_poll);
	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	return ret;
}

/* stop callback */
static int bcm_enetsw_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	del_timer_sync(&priv->swphy_poll);
	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);

	return 0;
}

/* try to sort out phy external status by walking the used_port field
 * in the bcm_enet_priv structure. in case the phy address is not
 * assigned to any physical port on the switch, assume it is external
 * (and yell at the user).
 */
static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
{
	int i;

	for (i = 0; i < priv->num_ports; ++i) {
		if (!priv->used_ports[i].used)
			continue;
		if (priv->used_ports[i].phy_id == phy_id)
			return bcm_enet_port_is_rgmii(i);
	}

	printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
		    phy_id);
	return 1;
}

/* can't use bcmenet_sw_mdio_read directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
				    int location)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	return bcmenet_sw_mdio_read(priv,
				    bcm_enetsw_phy_is_external(priv, phy_id),
				    phy_id, location);
}

/* can't use bcmenet_sw_mdio_write directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
				      int location, int val)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
			      phy_id, location, val);
}

/* SIOCxMII ioctls for the switch: no phylib here, use the generic
 * mii helpers on the switch mdio wrappers */
static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_if_info mii;

	mii.dev = dev;
	mii.mdio_read = bcm_enetsw_mii_mdio_read;
	mii.mdio_write = bcm_enetsw_mii_mdio_write;
	mii.phy_id = 0;
	mii.phy_id_mask = 0x3f;
	mii.reg_num_mask = 0x1f;
	return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
}

static const struct net_device_ops bcm_enetsw_ops = {
	.ndo_open		= bcm_enetsw_open,
	.ndo_stop		= bcm_enetsw_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_change_mtu		= bcm_enet_change_mtu,
	.ndo_do_ioctl		= bcm_enetsw_ioctl,
};

/* NOTE(review): several entries map a tx_* name onto an
 * ETHSW_MIB_RX_* register (e.g. "tx_unicast" -> ETHSW_MIB_RX_BRDCAST)
 * — the register naming/mapping should be verified against the switch
 * datasheet */
static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets",	DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped",	DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped",	DEV_STAT(tx_dropped), -1 },
	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
	{ "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
	{ "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
	{ "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
	  ETHSW_MIB_RX_1024_1522 },
	{ "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
	  ETHSW_MIB_RX_1523_2047 },
	{ "tx_2048_4095_oct",
GEN_STAT(mib.tx_2048_4095), ETHSW_MIB_RX_2048_4095 }, { "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191), ETHSW_MIB_RX_4096_8191 }, { "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728), ETHSW_MIB_RX_8192_9728 }, { "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR }, { "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC }, { "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP }, { "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND }, { "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE }, { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT }, { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST }, { "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT }, { "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT }, { "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE }, { "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS }, }; #define BCM_ENETSW_STATS_LEN \ (sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats)) static void bcm_enetsw_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { memcpy(data + i * ETH_GSTRING_LEN, bcm_enetsw_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); } break; } } static int bcm_enetsw_get_sset_count(struct net_device *netdev, int string_set) { switch (string_set) { case ETH_SS_STATS: return BCM_ENETSW_STATS_LEN; default: return -EINVAL; } } static void bcm_enetsw_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { strncpy(drvinfo->driver, bcm_enet_driver_name, 32); strncpy(drvinfo->version, bcm_enet_driver_version, 32); strncpy(drvinfo->fw_version, "N/A", 32); strncpy(drvinfo->bus_info, "bcm63xx", 32); drvinfo->n_stats = BCM_ENETSW_STATS_LEN; } static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct bcm_enet_priv *priv; int i; priv = netdev_priv(netdev); for (i = 0; i 
< BCM_ENETSW_STATS_LEN; i++) { const struct bcm_enet_stats *s; u32 lo, hi; char *p; int reg; s = &bcm_enetsw_gstrings_stats[i]; reg = s->mib_reg; if (reg == -1) continue; lo = enetsw_readl(priv, ENETSW_MIB_REG(reg)); p = (char *)priv + s->stat_offset; if (s->sizeof_stat == sizeof(u64)) { hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1)); *(u64 *)p = ((u64)hi << 32 | lo); } else { *(u32 *)p = lo; } } for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { const struct bcm_enet_stats *s; char *p; s = &bcm_enetsw_gstrings_stats[i]; if (s->mib_reg == -1) p = (char *)&netdev->stats + s->stat_offset; else p = (char *)priv + s->stat_offset; data[i] = (s->sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } } static void bcm_enetsw_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { struct bcm_enet_priv *priv; priv = netdev_priv(dev); /* rx/tx ring is actually only limited by memory */ ering->rx_max_pending = 8192; ering->tx_max_pending = 8192; ering->rx_mini_max_pending = 0; ering->rx_jumbo_max_pending = 0; ering->rx_pending = priv->rx_ring_size; ering->tx_pending = priv->tx_ring_size; } static int bcm_enetsw_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { struct bcm_enet_priv *priv; int was_running; priv = netdev_priv(dev); was_running = 0; if (netif_running(dev)) { bcm_enetsw_stop(dev); was_running = 1; } priv->rx_ring_size = ering->rx_pending; priv->tx_ring_size = ering->tx_pending; if (was_running) { int err; err = bcm_enetsw_open(dev); if (err) dev_close(dev); } return 0; } static struct ethtool_ops bcm_enetsw_ethtool_ops = { .get_strings = bcm_enetsw_get_strings, .get_sset_count = bcm_enetsw_get_sset_count, .get_ethtool_stats = bcm_enetsw_get_ethtool_stats, .get_drvinfo = bcm_enetsw_get_drvinfo, .get_ringparam = bcm_enetsw_get_ringparam, .set_ringparam = bcm_enetsw_set_ringparam, }; /* allocate netdevice, request register memory and register device. 
*/ static int bcm_enetsw_probe(struct platform_device *pdev) { struct bcm_enet_priv *priv; struct net_device *dev; struct bcm63xx_enetsw_platform_data *pd; struct resource *res_mem; int ret, irq_rx, irq_tx; /* stop if shared driver failed, assume driver->probe will be * called in the same order we register devices (correct ?) */ if (!bcm_enet_shared_base[0]) return -ENODEV; res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq_rx = platform_get_irq(pdev, 0); irq_tx = platform_get_irq(pdev, 1); if (!res_mem || irq_rx < 0) return -ENODEV; ret = 0; dev = alloc_etherdev(sizeof(*priv)); if (!dev) return -ENOMEM; priv = netdev_priv(dev); memset(priv, 0, sizeof(*priv)); /* initialize default and fetch platform data */ priv->enet_is_sw = true; priv->irq_rx = irq_rx; priv->irq_tx = irq_tx; priv->rx_ring_size = BCMENET_DEF_RX_DESC; priv->tx_ring_size = BCMENET_DEF_TX_DESC; priv->dma_maxburst = BCMENETSW_DMA_MAXBURST; pd = dev_get_platdata(&pdev->dev); if (pd) { memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); memcpy(priv->used_ports, pd->used_ports, sizeof(pd->used_ports)); priv->num_ports = pd->num_ports; priv->dma_has_sram = pd->dma_has_sram; priv->dma_chan_en_mask = pd->dma_chan_en_mask; priv->dma_chan_int_mask = pd->dma_chan_int_mask; priv->dma_chan_width = pd->dma_chan_width; } ret = compute_hw_mtu(priv, dev->mtu); if (ret) goto out; if (!request_mem_region(res_mem->start, resource_size(res_mem), "bcm63xx_enetsw")) { ret = -EBUSY; goto out; } priv->base = ioremap(res_mem->start, resource_size(res_mem)); if (priv->base == NULL) { ret = -ENOMEM; goto out_release_mem; } priv->mac_clk = clk_get(&pdev->dev, "enetsw"); if (IS_ERR(priv->mac_clk)) { ret = PTR_ERR(priv->mac_clk); goto out_unmap; } clk_enable(priv->mac_clk); priv->rx_chan = 0; priv->tx_chan = 1; spin_lock_init(&priv->rx_lock); /* init rx timeout (used for oom) */ init_timer(&priv->rx_timeout); priv->rx_timeout.function = bcm_enet_refill_rx_timer; priv->rx_timeout.data = (unsigned long)dev; /* register 
netdevice */ dev->netdev_ops = &bcm_enetsw_ops; netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); dev->ethtool_ops = &bcm_enetsw_ethtool_ops; SET_NETDEV_DEV(dev, &pdev->dev); spin_lock_init(&priv->enetsw_mdio_lock); ret = register_netdev(dev); if (ret) goto out_put_clk; netif_carrier_off(dev); platform_set_drvdata(pdev, dev); priv->pdev = pdev; priv->net_dev = dev; return 0; out_put_clk: clk_put(priv->mac_clk); out_unmap: iounmap(priv->base); out_release_mem: release_mem_region(res_mem->start, resource_size(res_mem)); out: free_netdev(dev); return ret; } /* exit func, stops hardware and unregisters netdevice */ static int bcm_enetsw_remove(struct platform_device *pdev) { struct bcm_enet_priv *priv; struct net_device *dev; struct resource *res; /* stop netdevice */ dev = platform_get_drvdata(pdev); priv = netdev_priv(dev); unregister_netdev(dev); /* release device resources */ iounmap(priv->base); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); free_netdev(dev); return 0; } struct platform_driver bcm63xx_enetsw_driver = { .probe = bcm_enetsw_probe, .remove = bcm_enetsw_remove, .driver = { .name = "bcm63xx_enetsw", .owner = THIS_MODULE, }, }; /* reserve & remap memory space shared between all macs */ static int bcm_enet_shared_probe(struct platform_device *pdev) { struct resource *res; void __iomem *p[3]; unsigned int i; memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base)); for (i = 0; i < 3; i++) { res = platform_get_resource(pdev, IORESOURCE_MEM, i); p[i] = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(p[i])) return PTR_ERR(p[i]); } memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base)); return 0; } static int bcm_enet_shared_remove(struct platform_device *pdev) { return 0; } /* this "shared" driver is needed because both macs share a single * address space */ struct platform_driver bcm63xx_enet_shared_driver = { .probe = bcm_enet_shared_probe, .remove = bcm_enet_shared_remove, 
.driver = { .name = "bcm63xx_enet_shared", .owner = THIS_MODULE, }, }; /* entry point */ static int __init bcm_enet_init(void) { int ret; ret = platform_driver_register(&bcm63xx_enet_shared_driver); if (ret) return ret; ret = platform_driver_register(&bcm63xx_enet_driver); if (ret) platform_driver_unregister(&bcm63xx_enet_shared_driver); ret = platform_driver_register(&bcm63xx_enetsw_driver); if (ret) { platform_driver_unregister(&bcm63xx_enet_driver); platform_driver_unregister(&bcm63xx_enet_shared_driver); } return ret; } static void __exit bcm_enet_exit(void) { platform_driver_unregister(&bcm63xx_enet_driver); platform_driver_unregister(&bcm63xx_enetsw_driver); platform_driver_unregister(&bcm63xx_enet_shared_driver); } module_init(bcm_enet_init); module_exit(bcm_enet_exit); MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver"); MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>"); MODULE_LICENSE("GPL");
gpl-2.0
chhapil/Kernel-Lenovo-A6000-KK
drivers/base/power/runtime.c
1716
39602
/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
#include "power.h"

/* Forward declarations: the suspend and resume paths invoke each other
 * (deferred resume, parent idling), so both must be visible early. */
static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	unsigned long delta;

	delta = now - dev->power.accounting_timestamp;

	/* Advance the timestamp even while accounting is disabled so that
	 * the disabled interval is not credited to either state later. */
	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	/* Charge the elapsed interval to the state being left. */
	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

/* Change the runtime PM status after folding the time spent in the
 * previous state into the accounting counters. */
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	/* A nonzero timer_expires is used as the "timer armed" marker. */
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/*
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	/* ACCESS_ONCE: these fields may change under us (see above). */
	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	/* 0 is reserved for "no expiration"; never return it for a valid
	 * future expiration time. */
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);

/* device_for_each_child() callback: nonzero iff @dev has memalloc_noio set. */
static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/*
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device, or network
 * device driver for solving the deadlock problem during runtime
 * resume/suspend:
 *
 *	If memory allocation with GFP_KERNEL is called inside runtime
 *	resume/suspend callback of any one of its ancestors(or the
 *	block device itself), the deadlock may be triggered inside the
 *	memory allocation since it might not complete until the block
 *	device becomes active and the involed page I/O finishes. The
 *	situation is pointed out first by Alan Stern. Network device
 *	are involved in iSCSI kind of situation.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device(block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	/* Walk from @dev up toward the root, updating each ancestor. */
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable ancestors any more if the device
		 * has already been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag of the parent device only if all of its
		 * children have it clear, because an ancestor's flag may
		 * have been set by any one of the descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	/* Returns 0 if suspend is allowed, 1 if already suspended, or a
	 * negative error code if suspend is not allowed right now. */
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) < 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	/* Drop dev->power.lock around the callback.  For irq-safe devices
	 * only the spinlock is released; interrupts stay disabled. */
	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = cb(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks) {
		/* Assume ->runtime_idle() callback would have suspended. */
		retval = rpm_suspend(dev, rpmflags);
		goto out;
	}

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	dev->power.idle_notification = true;

	/* Callback lookup order: PM domain, then type, class and bus;
	 * the driver's own callback is used only as a last resort. */
	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_idle;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_idle;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_idle;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_idle;
	else
		callback = NULL;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->runtime_idle;

	if (callback)
		__rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);
	return retval;
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * Deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside runtime_suspend and
		 * runtime_resume callbacks of one block device's
		 * ancestor or the block device itself. Network
		 * device might be thought as part of iSCSI block
		 * device, so network device and its ancestor should
		 * be marked as memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	/* Record the result in runtime_error; a callback returning -EACCES
	 * is translated to -EIO so it cannot be confused with the
	 * "runtime PM disabled" error used elsewhere in this file. */
	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend. If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly. When
 * ->runtime_suspend succeeded, if a deferred resume was requested while
 * the callback was running then carry it out, otherwise send an idle
 * notification for its parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			/* Cannot sleep with interrupts off: busy-wait for
			 * the concurrent suspend to finish. */
			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	/* Callback lookup order: PM domain, type, class, bus, then driver. */
	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_suspend;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_suspend;
	else
		callback = NULL;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->runtime_suspend;

	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;

 fail:
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			/* Cannot sleep with interrupts off: busy-wait for
			 * the concurrent operation to finish. */
			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's runtime PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	/* Callback lookup order: PM domain, type, class, bus, then driver. */
	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_resume;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_resume;
	else
		callback = NULL;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->runtime_resume;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		/* Drop the reference taken via pm_runtime_get_noresume(). */
		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	/* Consume the request before dispatching so that a new request can
	 * be queued while this one is being handled. */
	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
* @data: Device pointer passed by pm_schedule_suspend(). * * Check if the time is right and queue a suspend request. */ static void pm_suspend_timer_fn(unsigned long data) { struct device *dev = (struct device *)data; unsigned long flags; unsigned long expires; spin_lock_irqsave(&dev->power.lock, flags); expires = dev->power.timer_expires; /* If 'expire' is after 'jiffies' we've been called too early. */ if (expires > 0 && !time_after(expires, jiffies)) { dev->power.timer_expires = 0; rpm_suspend(dev, dev->power.timer_autosuspends ? (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); } spin_unlock_irqrestore(&dev->power.lock, flags); } /** * pm_schedule_suspend - Set up a timer to submit a suspend request in future. * @dev: Device to suspend. * @delay: Time to wait before submitting a suspend request, in milliseconds. */ int pm_schedule_suspend(struct device *dev, unsigned int delay) { unsigned long flags; int retval; spin_lock_irqsave(&dev->power.lock, flags); if (!delay) { retval = rpm_suspend(dev, RPM_ASYNC); goto out; } retval = rpm_check_suspend_allowed(dev); if (retval) goto out; /* Other scheduled or pending requests need to be canceled. */ pm_runtime_cancel_pending(dev); dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); dev->power.timer_expires += !dev->power.timer_expires; dev->power.timer_autosuspends = 0; mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); out: spin_unlock_irqrestore(&dev->power.lock, flags); return retval; } EXPORT_SYMBOL_GPL(pm_schedule_suspend); /** * __pm_runtime_idle - Entry point for runtime idle operations. * @dev: Device to send idle notification for. * @rpmflags: Flag bits. * * If the RPM_GET_PUT flag is set, decrement the device's usage count and * return immediately if it is larger than zero. Then carry out an idle * notification, either synchronous or asynchronous. * * This routine may be called in atomic context if the RPM_ASYNC flag is set, * or if pm_runtime_irq_safe() has been called. 
*/ int __pm_runtime_idle(struct device *dev, int rpmflags) { unsigned long flags; int retval; might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); if (rpmflags & RPM_GET_PUT) { if (!atomic_dec_and_test(&dev->power.usage_count)) return 0; } spin_lock_irqsave(&dev->power.lock, flags); retval = rpm_idle(dev, rpmflags); spin_unlock_irqrestore(&dev->power.lock, flags); return retval; } EXPORT_SYMBOL_GPL(__pm_runtime_idle); /** * __pm_runtime_suspend - Entry point for runtime put/suspend operations. * @dev: Device to suspend. * @rpmflags: Flag bits. * * If the RPM_GET_PUT flag is set, decrement the device's usage count and * return immediately if it is larger than zero. Then carry out a suspend, * either synchronous or asynchronous. * * This routine may be called in atomic context if the RPM_ASYNC flag is set, * or if pm_runtime_irq_safe() has been called. */ int __pm_runtime_suspend(struct device *dev, int rpmflags) { unsigned long flags; int retval; might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); if (rpmflags & RPM_GET_PUT) { if (!atomic_dec_and_test(&dev->power.usage_count)) return 0; } spin_lock_irqsave(&dev->power.lock, flags); retval = rpm_suspend(dev, rpmflags); spin_unlock_irqrestore(&dev->power.lock, flags); return retval; } EXPORT_SYMBOL_GPL(__pm_runtime_suspend); /** * __pm_runtime_resume - Entry point for runtime resume operations. * @dev: Device to resume. * @rpmflags: Flag bits. * * If the RPM_GET_PUT flag is set, increment the device's usage count. Then * carry out a resume, either synchronous or asynchronous. * * This routine may be called in atomic context if the RPM_ASYNC flag is set, * or if pm_runtime_irq_safe() has been called. 
*/ int __pm_runtime_resume(struct device *dev, int rpmflags) { unsigned long flags; int retval; might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); if (rpmflags & RPM_GET_PUT) atomic_inc(&dev->power.usage_count); spin_lock_irqsave(&dev->power.lock, flags); retval = rpm_resume(dev, rpmflags); spin_unlock_irqrestore(&dev->power.lock, flags); return retval; } EXPORT_SYMBOL_GPL(__pm_runtime_resume); /** * __pm_runtime_set_status - Set runtime PM status of a device. * @dev: Device to handle. * @status: New runtime PM status of the device. * * If runtime PM of the device is disabled or its power.runtime_error field is * different from zero, the status may be changed either to RPM_ACTIVE, or to * RPM_SUSPENDED, as long as that reflects the actual state of the device. * However, if the device has a parent and the parent is not active, and the * parent's power.ignore_children flag is unset, the device's status cannot be * set to RPM_ACTIVE, so -EBUSY is returned in that case. * * If successful, __pm_runtime_set_status() clears the power.runtime_error field * and the device parent's counter of unsuspended children is modified to * reflect the new status. If the new status is RPM_SUSPENDED, an idle * notification request for the parent is submitted. */ int __pm_runtime_set_status(struct device *dev, unsigned int status) { struct device *parent = dev->parent; unsigned long flags; bool notify_parent = false; int error = 0; if (status != RPM_ACTIVE && status != RPM_SUSPENDED) return -EINVAL; spin_lock_irqsave(&dev->power.lock, flags); if (!dev->power.runtime_error && !dev->power.disable_depth) { error = -EAGAIN; goto out; } if (dev->power.runtime_status == status) goto out_set; if (status == RPM_SUSPENDED) { /* It always is possible to set the status to 'suspended'. 
*/ if (parent) { atomic_add_unless(&parent->power.child_count, -1, 0); notify_parent = !parent->power.ignore_children; } goto out_set; } if (parent) { spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING); /* * It is invalid to put an active child under a parent that is * not active, has runtime PM enabled and the * 'power.ignore_children' flag unset. */ if (!parent->power.disable_depth && !parent->power.ignore_children && parent->power.runtime_status != RPM_ACTIVE) error = -EBUSY; else if (dev->power.runtime_status == RPM_SUSPENDED) atomic_inc(&parent->power.child_count); spin_unlock(&parent->power.lock); if (error) goto out; } out_set: __update_runtime_status(dev, status); dev->power.runtime_error = 0; out: spin_unlock_irqrestore(&dev->power.lock, flags); if (notify_parent) pm_request_idle(parent); return error; } EXPORT_SYMBOL_GPL(__pm_runtime_set_status); /** * __pm_runtime_barrier - Cancel pending requests and wait for completions. * @dev: Device to handle. * * Flush all pending requests for the device from pm_wq and wait for all * runtime PM operations involving the device in progress to complete. * * Should be called under dev->power.lock with interrupts disabled. */ static void __pm_runtime_barrier(struct device *dev) { pm_runtime_deactivate_timer(dev); if (dev->power.request_pending) { dev->power.request = RPM_REQ_NONE; spin_unlock_irq(&dev->power.lock); cancel_work_sync(&dev->power.work); spin_lock_irq(&dev->power.lock); dev->power.request_pending = false; } if (dev->power.runtime_status == RPM_SUSPENDING || dev->power.runtime_status == RPM_RESUMING || dev->power.idle_notification) { DEFINE_WAIT(wait); /* Suspend, wake-up or idle notification in progress. 
*/ for (;;) { prepare_to_wait(&dev->power.wait_queue, &wait, TASK_UNINTERRUPTIBLE); if (dev->power.runtime_status != RPM_SUSPENDING && dev->power.runtime_status != RPM_RESUMING && !dev->power.idle_notification) break; spin_unlock_irq(&dev->power.lock); schedule(); spin_lock_irq(&dev->power.lock); } finish_wait(&dev->power.wait_queue, &wait); } } /** * pm_runtime_barrier - Flush pending requests and wait for completions. * @dev: Device to handle. * * Prevent the device from being suspended by incrementing its usage counter and * if there's a pending resume request for the device, wake the device up. * Next, make sure that all pending requests for the device have been flushed * from pm_wq and wait for all runtime PM operations involving the device in * progress to complete. * * Return value: * 1, if there was a resume request pending and the device had to be woken up, * 0, otherwise */ int pm_runtime_barrier(struct device *dev) { int retval = 0; pm_runtime_get_noresume(dev); spin_lock_irq(&dev->power.lock); if (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME) { rpm_resume(dev, 0); retval = 1; } __pm_runtime_barrier(dev); spin_unlock_irq(&dev->power.lock); pm_runtime_put_noidle(dev); return retval; } EXPORT_SYMBOL_GPL(pm_runtime_barrier); /** * __pm_runtime_disable - Disable runtime PM of a device. * @dev: Device to handle. * @check_resume: If set, check if there's a resume request for the device. * * Increment power.disable_depth for the device and if was zero previously, * cancel all pending runtime PM requests for the device and wait for all * operations in progress to complete. The device can be either active or * suspended after its runtime PM has been disabled. * * If @check_resume is set and there's a resume request pending when * __pm_runtime_disable() is called and power.disable_depth is zero, the * function will wake up the device before disabling its runtime PM. 
*/ void __pm_runtime_disable(struct device *dev, bool check_resume) { spin_lock_irq(&dev->power.lock); if (dev->power.disable_depth > 0) { dev->power.disable_depth++; goto out; } /* * Wake up the device if there's a resume request pending, because that * means there probably is some I/O to process and disabling runtime PM * shouldn't prevent the device from processing the I/O. */ if (check_resume && dev->power.request_pending && dev->power.request == RPM_REQ_RESUME) { /* * Prevent suspends and idle notifications from being carried * out after we have woken up the device. */ pm_runtime_get_noresume(dev); rpm_resume(dev, 0); pm_runtime_put_noidle(dev); } if (!dev->power.disable_depth++) __pm_runtime_barrier(dev); out: spin_unlock_irq(&dev->power.lock); } EXPORT_SYMBOL_GPL(__pm_runtime_disable); /** * pm_runtime_enable - Enable runtime PM of a device. * @dev: Device to handle. */ void pm_runtime_enable(struct device *dev) { unsigned long flags; spin_lock_irqsave(&dev->power.lock, flags); if (dev->power.disable_depth > 0) dev->power.disable_depth--; else dev_warn(dev, "Unbalanced %s!\n", __func__); spin_unlock_irqrestore(&dev->power.lock, flags); } EXPORT_SYMBOL_GPL(pm_runtime_enable); /** * pm_runtime_forbid - Block runtime PM of a device. * @dev: Device to handle. * * Increase the device's usage count and clear its power.runtime_auto flag, * so that it cannot be suspended at run time until pm_runtime_allow() is called * for it. */ void pm_runtime_forbid(struct device *dev) { spin_lock_irq(&dev->power.lock); if (!dev->power.runtime_auto) goto out; dev->power.runtime_auto = false; atomic_inc(&dev->power.usage_count); rpm_resume(dev, 0); out: spin_unlock_irq(&dev->power.lock); } EXPORT_SYMBOL_GPL(pm_runtime_forbid); /** * pm_runtime_allow - Unblock runtime PM of a device. * @dev: Device to handle. * * Decrease the device's usage count and set its power.runtime_auto flag. 
*/ void pm_runtime_allow(struct device *dev) { spin_lock_irq(&dev->power.lock); if (dev->power.runtime_auto) goto out; dev->power.runtime_auto = true; if (atomic_dec_and_test(&dev->power.usage_count)) rpm_idle(dev, RPM_AUTO); out: spin_unlock_irq(&dev->power.lock); } EXPORT_SYMBOL_GPL(pm_runtime_allow); /** * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device. * @dev: Device to handle. * * Set the power.no_callbacks flag, which tells the PM core that this * device is power-managed through its parent and has no runtime PM * callbacks of its own. The runtime sysfs attributes will be removed. */ void pm_runtime_no_callbacks(struct device *dev) { spin_lock_irq(&dev->power.lock); dev->power.no_callbacks = 1; spin_unlock_irq(&dev->power.lock); if (device_is_registered(dev)) rpm_sysfs_remove(dev); } EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks); /** * pm_runtime_irq_safe - Leave interrupts disabled during callbacks. * @dev: Device to handle * * Set the power.irq_safe flag, which tells the PM core that the * ->runtime_suspend() and ->runtime_resume() callbacks for this device should * always be invoked with the spinlock held and interrupts disabled. It also * causes the parent's usage counter to be permanently incremented, preventing * the parent from runtime suspending -- otherwise an irq-safe child might have * to wait for a non-irq-safe parent. */ void pm_runtime_irq_safe(struct device *dev) { if (dev->parent) pm_runtime_get_sync(dev->parent); spin_lock_irq(&dev->power.lock); dev->power.irq_safe = 1; spin_unlock_irq(&dev->power.lock); } EXPORT_SYMBOL_GPL(pm_runtime_irq_safe); /** * update_autosuspend - Handle a change to a device's autosuspend settings. * @dev: Device to handle. * @old_delay: The former autosuspend_delay value. * @old_use: The former use_autosuspend value. * * Prevent runtime suspend if the new delay is negative and use_autosuspend is * set; otherwise allow it. Send an idle notification if suspends are allowed. 
* * This function must be called under dev->power.lock with interrupts disabled. */ static void update_autosuspend(struct device *dev, int old_delay, int old_use) { int delay = dev->power.autosuspend_delay; /* Should runtime suspend be prevented now? */ if (dev->power.use_autosuspend && delay < 0) { /* If it used to be allowed then prevent it. */ if (!old_use || old_delay >= 0) { atomic_inc(&dev->power.usage_count); rpm_resume(dev, 0); } } /* Runtime suspend should be allowed now. */ else { /* If it used to be prevented then allow it. */ if (old_use && old_delay < 0) atomic_dec(&dev->power.usage_count); /* Maybe we can autosuspend now. */ rpm_idle(dev, RPM_AUTO); } } /** * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value. * @dev: Device to handle. * @delay: Value of the new delay in milliseconds. * * Set the device's power.autosuspend_delay value. If it changes to negative * and the power.use_autosuspend flag is set, prevent runtime suspends. If it * changes the other way, allow runtime suspends. */ void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) { int old_delay, old_use; spin_lock_irq(&dev->power.lock); old_delay = dev->power.autosuspend_delay; old_use = dev->power.use_autosuspend; dev->power.autosuspend_delay = delay; update_autosuspend(dev, old_delay, old_use); spin_unlock_irq(&dev->power.lock); } EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay); /** * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag. * @dev: Device to handle. * @use: New value for use_autosuspend. * * Set the device's power.use_autosuspend flag, and allow or prevent runtime * suspends as needed. 
*/ void __pm_runtime_use_autosuspend(struct device *dev, bool use) { int old_delay, old_use; spin_lock_irq(&dev->power.lock); old_delay = dev->power.autosuspend_delay; old_use = dev->power.use_autosuspend; dev->power.use_autosuspend = use; update_autosuspend(dev, old_delay, old_use); spin_unlock_irq(&dev->power.lock); } EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend); /** * pm_runtime_init - Initialize runtime PM fields in given device object. * @dev: Device object to initialize. */ void pm_runtime_init(struct device *dev) { dev->power.runtime_status = RPM_SUSPENDED; dev->power.idle_notification = false; dev->power.disable_depth = 1; atomic_set(&dev->power.usage_count, 0); dev->power.runtime_error = 0; atomic_set(&dev->power.child_count, 0); pm_suspend_ignore_children(dev, false); dev->power.runtime_auto = true; dev->power.request_pending = false; dev->power.request = RPM_REQ_NONE; dev->power.deferred_resume = false; dev->power.accounting_timestamp = jiffies; INIT_WORK(&dev->power.work, pm_runtime_work); dev->power.timer_expires = 0; setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn, (unsigned long)dev); init_waitqueue_head(&dev->power.wait_queue); } /** * pm_runtime_remove - Prepare for removing a device from device hierarchy. * @dev: Device object being removed from device hierarchy. */ void pm_runtime_remove(struct device *dev) { __pm_runtime_disable(dev, false); /* Change the status back to 'suspended' to match the initial status. */ if (dev->power.runtime_status == RPM_ACTIVE) pm_runtime_set_suspended(dev); if (dev->power.irq_safe && dev->parent) pm_runtime_put(dev->parent); }
gpl-2.0
shuiqingliu/android_kernel_lenovo_stuttgart
drivers/media/video/saa7134/saa7134-tvaudio.c
3252
29143
/* * * device driver for philips saa7134 based TV cards * tv audio decoder (fm stereo, nicam, ...) * * (c) 2001-03 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs] * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/delay.h> #include <linux/freezer.h> #include <asm/div64.h> #include "saa7134-reg.h" #include "saa7134.h" /* ------------------------------------------------------------------ */ static unsigned int audio_debug; module_param(audio_debug, int, 0644); MODULE_PARM_DESC(audio_debug,"enable debug messages [tv audio]"); static unsigned int audio_ddep; module_param(audio_ddep, int, 0644); MODULE_PARM_DESC(audio_ddep,"audio ddep overwrite"); static int audio_clock_override = UNSET; module_param(audio_clock_override, int, 0644); static int audio_clock_tweak; module_param(audio_clock_tweak, int, 0644); MODULE_PARM_DESC(audio_clock_tweak, "Audio clock tick fine tuning for cards with audio crystal that's slightly off (range [-1024 .. 1024])"); #define dprintk(fmt, arg...) if (audio_debug) \ printk(KERN_DEBUG "%s/audio: " fmt, dev->name , ## arg) #define d2printk(fmt, arg...) 
if (audio_debug > 1) \ printk(KERN_DEBUG "%s/audio: " fmt, dev->name, ## arg) #define print_regb(reg) printk("%s: reg 0x%03x [%-16s]: 0x%02x\n", \ dev->name,(SAA7134_##reg),(#reg),saa_readb((SAA7134_##reg))) /* msecs */ #define SCAN_INITIAL_DELAY 1000 #define SCAN_SAMPLE_DELAY 200 #define SCAN_SUBCARRIER_DELAY 2000 /* ------------------------------------------------------------------ */ /* saa7134 code */ static struct mainscan { char *name; v4l2_std_id std; int carr; } mainscan[] = { { .name = "MN", .std = V4L2_STD_MN, .carr = 4500, },{ .name = "BGH", .std = V4L2_STD_B | V4L2_STD_GH, .carr = 5500, },{ .name = "I", .std = V4L2_STD_PAL_I, .carr = 6000, },{ .name = "DKL", .std = V4L2_STD_DK | V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC, .carr = 6500, } }; static struct saa7134_tvaudio tvaudio[] = { { .name = "PAL-B/G FM-stereo", .std = V4L2_STD_PAL_BG, .mode = TVAUDIO_FM_BG_STEREO, .carr1 = 5500, .carr2 = 5742, },{ .name = "PAL-D/K1 FM-stereo", .std = V4L2_STD_PAL_DK, .carr1 = 6500, .carr2 = 6258, .mode = TVAUDIO_FM_BG_STEREO, },{ .name = "PAL-D/K2 FM-stereo", .std = V4L2_STD_PAL_DK, .carr1 = 6500, .carr2 = 6742, .mode = TVAUDIO_FM_BG_STEREO, },{ .name = "PAL-D/K3 FM-stereo", .std = V4L2_STD_PAL_DK, .carr1 = 6500, .carr2 = 5742, .mode = TVAUDIO_FM_BG_STEREO, },{ .name = "PAL-B/G NICAM", .std = V4L2_STD_PAL_BG, .carr1 = 5500, .carr2 = 5850, .mode = TVAUDIO_NICAM_FM, },{ .name = "PAL-I NICAM", .std = V4L2_STD_PAL_I, .carr1 = 6000, .carr2 = 6552, .mode = TVAUDIO_NICAM_FM, },{ .name = "PAL-D/K NICAM", .std = V4L2_STD_PAL_DK, .carr1 = 6500, .carr2 = 5850, .mode = TVAUDIO_NICAM_FM, },{ .name = "SECAM-L NICAM", .std = V4L2_STD_SECAM_L, .carr1 = 6500, .carr2 = 5850, .mode = TVAUDIO_NICAM_AM, },{ .name = "SECAM-D/K NICAM", .std = V4L2_STD_SECAM_DK, .carr1 = 6500, .carr2 = 5850, .mode = TVAUDIO_NICAM_FM, },{ .name = "NTSC-A2 FM-stereo", .std = V4L2_STD_NTSC, .carr1 = 4500, .carr2 = 4724, .mode = TVAUDIO_FM_K_STEREO, },{ .name = "NTSC-M", .std = V4L2_STD_NTSC, .carr1 = 4500, .carr2 = 
-1, .mode = TVAUDIO_FM_MONO, } }; #define TVAUDIO ARRAY_SIZE(tvaudio) /* ------------------------------------------------------------------ */ static u32 tvaudio_carr2reg(u32 carrier) { u64 a = carrier; a <<= 24; do_div(a,12288); return a; } static void tvaudio_setcarrier(struct saa7134_dev *dev, int primary, int secondary) { if (-1 == secondary) secondary = primary; saa_writel(SAA7134_CARRIER1_FREQ0 >> 2, tvaudio_carr2reg(primary)); saa_writel(SAA7134_CARRIER2_FREQ0 >> 2, tvaudio_carr2reg(secondary)); } #define SAA7134_MUTE_MASK 0xbb #define SAA7134_MUTE_ANALOG 0x04 #define SAA7134_MUTE_I2S 0x40 static void mute_input_7134(struct saa7134_dev *dev) { unsigned int mute; struct saa7134_input *in; int ausel=0, ics=0, ocs=0; int mask; /* look what is to do ... */ in = dev->input; mute = (dev->ctl_mute || (dev->automute && (&card(dev).radio) != in)); if (card(dev).mute.name) { /* * 7130 - we'll mute using some unconnected audio input * 7134 - we'll probably should switch external mux with gpio */ if (mute) in = &card(dev).mute; } if (dev->hw_mute == mute && dev->hw_input == in && !dev->insuspend) { dprintk("mute/input: nothing to do [mute=%d,input=%s]\n", mute,in->name); return; } dprintk("ctl_mute=%d automute=%d input=%s => mute=%d input=%s\n", dev->ctl_mute,dev->automute,dev->input->name,mute,in->name); dev->hw_mute = mute; dev->hw_input = in; if (PCI_DEVICE_ID_PHILIPS_SAA7134 == dev->pci->device) /* 7134 mute */ saa_writeb(SAA7134_AUDIO_MUTE_CTRL, mute ? 
SAA7134_MUTE_MASK | SAA7134_MUTE_ANALOG | SAA7134_MUTE_I2S : SAA7134_MUTE_MASK); /* switch internal audio mux */ switch (in->amux) { case TV: ausel=0xc0; ics=0x00; ocs=0x02; break; case LINE1: ausel=0x80; ics=0x00; ocs=0x00; break; case LINE2: ausel=0x80; ics=0x08; ocs=0x01; break; case LINE2_LEFT: ausel=0x80; ics=0x08; ocs=0x05; break; } saa_andorb(SAA7134_AUDIO_FORMAT_CTRL, 0xc0, ausel); saa_andorb(SAA7134_ANALOG_IO_SELECT, 0x08, ics); saa_andorb(SAA7134_ANALOG_IO_SELECT, 0x07, ocs); // for oss, we need to change the clock configuration if (in->amux == TV) saa_andorb(SAA7134_SIF_SAMPLE_FREQ, 0x03, 0x00); else saa_andorb(SAA7134_SIF_SAMPLE_FREQ, 0x03, 0x01); /* switch gpio-connected external audio mux */ if (0 == card(dev).gpiomask) return; mask = card(dev).gpiomask; saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, mask, mask); saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, mask, in->gpio); saa7134_track_gpio(dev,in->name); } static void tvaudio_setmode(struct saa7134_dev *dev, struct saa7134_tvaudio *audio, char *note) { int acpf, tweak = 0; if (dev->tvnorm->id == V4L2_STD_NTSC) { acpf = 0x19066; } else { acpf = 0x1e000; } if (audio_clock_tweak > -1024 && audio_clock_tweak < 1024) tweak = audio_clock_tweak; if (note) dprintk("tvaudio_setmode: %s %s [%d.%03d/%d.%03d MHz] acpf=%d%+d\n", note,audio->name, audio->carr1 / 1000, audio->carr1 % 1000, audio->carr2 / 1000, audio->carr2 % 1000, acpf, tweak); acpf += tweak; saa_writeb(SAA7134_AUDIO_CLOCKS_PER_FIELD0, (acpf & 0x0000ff) >> 0); saa_writeb(SAA7134_AUDIO_CLOCKS_PER_FIELD1, (acpf & 0x00ff00) >> 8); saa_writeb(SAA7134_AUDIO_CLOCKS_PER_FIELD2, (acpf & 0x030000) >> 16); tvaudio_setcarrier(dev,audio->carr1,audio->carr2); switch (audio->mode) { case TVAUDIO_FM_MONO: case TVAUDIO_FM_BG_STEREO: saa_writeb(SAA7134_DEMODULATOR, 0x00); saa_writeb(SAA7134_DCXO_IDENT_CTRL, 0x00); saa_writeb(SAA7134_FM_DEEMPHASIS, 0x22); saa_writeb(SAA7134_FM_DEMATRIX, 0x80); saa_writeb(SAA7134_STEREO_DAC_OUTPUT_SELECT, 0xa0); break; case TVAUDIO_FM_K_STEREO: 
saa_writeb(SAA7134_DEMODULATOR, 0x00); saa_writeb(SAA7134_DCXO_IDENT_CTRL, 0x01); saa_writeb(SAA7134_FM_DEEMPHASIS, 0x22); saa_writeb(SAA7134_FM_DEMATRIX, 0x80); saa_writeb(SAA7134_STEREO_DAC_OUTPUT_SELECT, 0xa0); break; case TVAUDIO_NICAM_FM: saa_writeb(SAA7134_DEMODULATOR, 0x10); saa_writeb(SAA7134_DCXO_IDENT_CTRL, 0x00); saa_writeb(SAA7134_FM_DEEMPHASIS, 0x44); saa_writeb(SAA7134_STEREO_DAC_OUTPUT_SELECT, 0xa1); saa_writeb(SAA7134_NICAM_CONFIG, 0x00); break; case TVAUDIO_NICAM_AM: saa_writeb(SAA7134_DEMODULATOR, 0x12); saa_writeb(SAA7134_DCXO_IDENT_CTRL, 0x00); saa_writeb(SAA7134_FM_DEEMPHASIS, 0x44); saa_writeb(SAA7134_STEREO_DAC_OUTPUT_SELECT, 0xa1); saa_writeb(SAA7134_NICAM_CONFIG, 0x00); break; case TVAUDIO_FM_SAT_STEREO: /* not implemented (yet) */ break; } } static int tvaudio_sleep(struct saa7134_dev *dev, int timeout) { if (dev->thread.scan1 == dev->thread.scan2 && !kthread_should_stop()) { if (timeout < 0) { set_current_state(TASK_INTERRUPTIBLE); schedule(); } else { schedule_timeout_interruptible (msecs_to_jiffies(timeout)); } } return dev->thread.scan1 != dev->thread.scan2; } static int tvaudio_checkcarrier(struct saa7134_dev *dev, struct mainscan *scan) { __s32 left,right,value; if (audio_debug > 1) { int i; dprintk("debug %d:",scan->carr); for (i = -150; i <= 150; i += 30) { tvaudio_setcarrier(dev,scan->carr+i,scan->carr+i); saa_readl(SAA7134_LEVEL_READOUT1 >> 2); if (tvaudio_sleep(dev,SCAN_SAMPLE_DELAY)) return -1; value = saa_readl(SAA7134_LEVEL_READOUT1 >> 2); if (0 == i) printk(" # %6d # ",value >> 16); else printk(" %6d",value >> 16); } printk("\n"); } if (dev->tvnorm->id & scan->std) { tvaudio_setcarrier(dev,scan->carr-90,scan->carr-90); saa_readl(SAA7134_LEVEL_READOUT1 >> 2); if (tvaudio_sleep(dev,SCAN_SAMPLE_DELAY)) return -1; left = saa_readl(SAA7134_LEVEL_READOUT1 >> 2); tvaudio_setcarrier(dev,scan->carr+90,scan->carr+90); saa_readl(SAA7134_LEVEL_READOUT1 >> 2); if (tvaudio_sleep(dev,SCAN_SAMPLE_DELAY)) return -1; right = 
saa_readl(SAA7134_LEVEL_READOUT1 >> 2); left >>= 16; right >>= 16; value = left > right ? left - right : right - left; dprintk("scanning %d.%03d MHz [%4s] => dc is %5d [%d/%d]\n", scan->carr / 1000, scan->carr % 1000, scan->name, value, left, right); } else { value = 0; dprintk("skipping %d.%03d MHz [%4s]\n", scan->carr / 1000, scan->carr % 1000, scan->name); } return value; } static int tvaudio_getstereo(struct saa7134_dev *dev, struct saa7134_tvaudio *audio) { __u32 idp, nicam, nicam_status; int retval = -1; switch (audio->mode) { case TVAUDIO_FM_MONO: return V4L2_TUNER_SUB_MONO; case TVAUDIO_FM_K_STEREO: case TVAUDIO_FM_BG_STEREO: idp = (saa_readb(SAA7134_IDENT_SIF) & 0xe0) >> 5; dprintk("getstereo: fm/stereo: idp=0x%x\n",idp); if (0x03 == (idp & 0x03)) retval = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; else if (0x05 == (idp & 0x05)) retval = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO; else if (0x01 == (idp & 0x01)) retval = V4L2_TUNER_SUB_MONO; break; case TVAUDIO_FM_SAT_STEREO: /* not implemented (yet) */ break; case TVAUDIO_NICAM_FM: case TVAUDIO_NICAM_AM: nicam = saa_readb(SAA7134_AUDIO_STATUS); dprintk("getstereo: nicam=0x%x\n",nicam); if (nicam & 0x1) { nicam_status = saa_readb(SAA7134_NICAM_STATUS); dprintk("getstereo: nicam_status=0x%x\n", nicam_status); switch (nicam_status & 0x03) { case 0x01: retval = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; break; case 0x02: retval = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO; break; default: retval = V4L2_TUNER_SUB_MONO; } } else { /* No nicam detected */ } break; } if (retval != -1) dprintk("found audio subchannels:%s%s%s%s\n", (retval & V4L2_TUNER_SUB_MONO) ? " mono" : "", (retval & V4L2_TUNER_SUB_STEREO) ? " stereo" : "", (retval & V4L2_TUNER_SUB_LANG1) ? " lang1" : "", (retval & V4L2_TUNER_SUB_LANG2) ? 
" lang2" : ""); return retval; } static int tvaudio_setstereo(struct saa7134_dev *dev, struct saa7134_tvaudio *audio, u32 mode) { static char *name[] = { [ V4L2_TUNER_MODE_MONO ] = "mono", [ V4L2_TUNER_MODE_STEREO ] = "stereo", [ V4L2_TUNER_MODE_LANG1 ] = "lang1", [ V4L2_TUNER_MODE_LANG2 ] = "lang2", [ V4L2_TUNER_MODE_LANG1_LANG2 ] = "lang1+lang2", }; static u32 fm[] = { [ V4L2_TUNER_MODE_MONO ] = 0x00, /* ch1 */ [ V4L2_TUNER_MODE_STEREO ] = 0x80, /* auto */ [ V4L2_TUNER_MODE_LANG1 ] = 0x00, /* ch1 */ [ V4L2_TUNER_MODE_LANG2 ] = 0x01, /* ch2 */ [ V4L2_TUNER_MODE_LANG1_LANG2 ] = 0x80, /* auto */ }; u32 reg; switch (audio->mode) { case TVAUDIO_FM_MONO: /* nothing to do ... */ break; case TVAUDIO_FM_K_STEREO: case TVAUDIO_FM_BG_STEREO: case TVAUDIO_NICAM_AM: case TVAUDIO_NICAM_FM: dprintk("setstereo [fm] => %s\n", name[ mode % ARRAY_SIZE(name) ]); reg = fm[ mode % ARRAY_SIZE(fm) ]; saa_writeb(SAA7134_FM_DEMATRIX, reg); break; case TVAUDIO_FM_SAT_STEREO: /* Not implemented */ break; } return 0; } static int tvaudio_thread(void *data) { struct saa7134_dev *dev = data; int carr_vals[ARRAY_SIZE(mainscan)]; unsigned int i, audio, nscan; int max1,max2,carrier,rx,mode,lastmode,default_carrier; set_freezable(); for (;;) { tvaudio_sleep(dev,-1); if (kthread_should_stop()) goto done; restart: try_to_freeze(); dev->thread.scan1 = dev->thread.scan2; dprintk("tvaudio thread scan start [%d]\n",dev->thread.scan1); dev->tvaudio = NULL; saa_writeb(SAA7134_MONITOR_SELECT, 0xa0); saa_writeb(SAA7134_FM_DEMATRIX, 0x80); if (dev->ctl_automute) dev->automute = 1; mute_input_7134(dev); /* give the tuner some time */ if (tvaudio_sleep(dev,SCAN_INITIAL_DELAY)) goto restart; max1 = 0; max2 = 0; nscan = 0; carrier = 0; default_carrier = 0; for (i = 0; i < ARRAY_SIZE(mainscan); i++) { if (!(dev->tvnorm->id & mainscan[i].std)) continue; if (!default_carrier) default_carrier = mainscan[i].carr; nscan++; } if (1 == nscan) { /* only one candidate -- skip scan ;) */ dprintk("only one main carrier 
candidate - skipping scan\n"); max1 = 12345; carrier = default_carrier; } else { /* scan for the main carrier */ saa_writeb(SAA7134_MONITOR_SELECT,0x00); tvaudio_setmode(dev,&tvaudio[0],NULL); for (i = 0; i < ARRAY_SIZE(mainscan); i++) { carr_vals[i] = tvaudio_checkcarrier(dev, mainscan+i); if (dev->thread.scan1 != dev->thread.scan2) goto restart; } for (max1 = 0, max2 = 0, i = 0; i < ARRAY_SIZE(mainscan); i++) { if (max1 < carr_vals[i]) { max2 = max1; max1 = carr_vals[i]; carrier = mainscan[i].carr; } else if (max2 < carr_vals[i]) { max2 = carr_vals[i]; } } } if (0 != carrier && max1 > 2000 && max1 > max2*3) { /* found good carrier */ dprintk("found %s main sound carrier @ %d.%03d MHz [%d/%d]\n", dev->tvnorm->name, carrier/1000, carrier%1000, max1, max2); dev->last_carrier = carrier; } else if (0 != dev->last_carrier) { /* no carrier -- try last detected one as fallback */ carrier = dev->last_carrier; dprintk("audio carrier scan failed, " "using %d.%03d MHz [last detected]\n", carrier/1000, carrier%1000); } else { /* no carrier + no fallback -- use default */ carrier = default_carrier; dprintk("audio carrier scan failed, " "using %d.%03d MHz [default]\n", carrier/1000, carrier%1000); } tvaudio_setcarrier(dev,carrier,carrier); dev->automute = 0; saa_andorb(SAA7134_STEREO_DAC_OUTPUT_SELECT, 0x30, 0x00); saa7134_tvaudio_setmute(dev); /* find the exact tv audio norm */ for (audio = UNSET, i = 0; i < TVAUDIO; i++) { if (dev->tvnorm->id != UNSET && !(dev->tvnorm->id & tvaudio[i].std)) continue; if (tvaudio[i].carr1 != carrier) continue; /* Note: at least the primary carrier is right here */ if (UNSET == audio) audio = i; tvaudio_setmode(dev,&tvaudio[i],"trying"); if (tvaudio_sleep(dev,SCAN_SUBCARRIER_DELAY)) goto restart; if (-1 != tvaudio_getstereo(dev,&tvaudio[i])) { audio = i; break; } } saa_andorb(SAA7134_STEREO_DAC_OUTPUT_SELECT, 0x30, 0x30); if (UNSET == audio) continue; tvaudio_setmode(dev,&tvaudio[audio],"using"); 
tvaudio_setstereo(dev,&tvaudio[audio],V4L2_TUNER_MODE_MONO); dev->tvaudio = &tvaudio[audio]; lastmode = 42; for (;;) { try_to_freeze(); if (tvaudio_sleep(dev,5000)) goto restart; if (kthread_should_stop()) break; if (UNSET == dev->thread.mode) { rx = tvaudio_getstereo(dev,&tvaudio[i]); mode = saa7134_tvaudio_rx2mode(rx); } else { mode = dev->thread.mode; } if (lastmode != mode) { tvaudio_setstereo(dev,&tvaudio[audio],mode); lastmode = mode; } } } done: dev->thread.stopped = 1; return 0; } /* ------------------------------------------------------------------ */ /* saa7133 / saa7135 code */ static char *stdres[0x20] = { [0x00] = "no standard detected", [0x01] = "B/G (in progress)", [0x02] = "D/K (in progress)", [0x03] = "M (in progress)", [0x04] = "B/G A2", [0x05] = "B/G NICAM", [0x06] = "D/K A2 (1)", [0x07] = "D/K A2 (2)", [0x08] = "D/K A2 (3)", [0x09] = "D/K NICAM", [0x0a] = "L NICAM", [0x0b] = "I NICAM", [0x0c] = "M Korea", [0x0d] = "M BTSC ", [0x0e] = "M EIAJ", [0x0f] = "FM radio / IF 10.7 / 50 deemp", [0x10] = "FM radio / IF 10.7 / 75 deemp", [0x11] = "FM radio / IF sel / 50 deemp", [0x12] = "FM radio / IF sel / 75 deemp", [0x13 ... 0x1e ] = "unknown", [0x1f] = "??? 
[in progress]", }; #define DSP_RETRY 32 #define DSP_DELAY 16 #define SAA7135_DSP_RWCLEAR_RERR 1 static inline int saa_dsp_reset_error_bit(struct saa7134_dev *dev) { int state = saa_readb(SAA7135_DSP_RWSTATE); if (unlikely(state & SAA7135_DSP_RWSTATE_ERR)) { d2printk("%s: resetting error bit\n", dev->name); saa_writeb(SAA7135_DSP_RWCLEAR, SAA7135_DSP_RWCLEAR_RERR); } return 0; } static inline int saa_dsp_wait_bit(struct saa7134_dev *dev, int bit) { int state, count = DSP_RETRY; state = saa_readb(SAA7135_DSP_RWSTATE); if (unlikely(state & SAA7135_DSP_RWSTATE_ERR)) { printk(KERN_WARNING "%s: dsp access error\n", dev->name); saa_dsp_reset_error_bit(dev); return -EIO; } while (0 == (state & bit)) { if (unlikely(0 == count)) { printk("%s: dsp access wait timeout [bit=%s]\n", dev->name, (bit & SAA7135_DSP_RWSTATE_WRR) ? "WRR" : (bit & SAA7135_DSP_RWSTATE_RDB) ? "RDB" : (bit & SAA7135_DSP_RWSTATE_IDA) ? "IDA" : "???"); return -EIO; } saa_wait(DSP_DELAY); state = saa_readb(SAA7135_DSP_RWSTATE); count--; } return 0; } int saa_dsp_writel(struct saa7134_dev *dev, int reg, u32 value) { int err; d2printk("dsp write reg 0x%x = 0x%06x\n",reg<<2,value); err = saa_dsp_wait_bit(dev,SAA7135_DSP_RWSTATE_WRR); if (err < 0) return err; saa_writel(reg,value); err = saa_dsp_wait_bit(dev,SAA7135_DSP_RWSTATE_WRR); if (err < 0) return err; return 0; } static int getstereo_7133(struct saa7134_dev *dev) { int retval = V4L2_TUNER_SUB_MONO; u32 value; value = saa_readl(0x528 >> 2); if (value & 0x20) retval = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO; if (value & 0x40) retval = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; return retval; } static int mute_input_7133(struct saa7134_dev *dev) { u32 reg = 0; u32 xbarin, xbarout; int mask; struct saa7134_input *in; xbarin = 0x03; switch (dev->input->amux) { case TV: reg = 0x02; xbarin = 0; break; case LINE1: reg = 0x00; break; case LINE2: case LINE2_LEFT: reg = 0x09; break; } saa_dsp_writel(dev, 0x464 >> 2, xbarin); if (dev->ctl_mute) { reg = 0x07; 
xbarout = 0xbbbbbb; } else xbarout = 0xbbbb10; saa_dsp_writel(dev, 0x46c >> 2, xbarout); saa_writel(0x594 >> 2, reg); /* switch gpio-connected external audio mux */ if (0 != card(dev).gpiomask) { mask = card(dev).gpiomask; if (card(dev).mute.name && dev->ctl_mute) in = &card(dev).mute; else in = dev->input; saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, mask, mask); saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, mask, in->gpio); saa7134_track_gpio(dev,in->name); } return 0; } static int tvaudio_thread_ddep(void *data) { struct saa7134_dev *dev = data; u32 value, norms; set_freezable(); for (;;) { tvaudio_sleep(dev,-1); if (kthread_should_stop()) goto done; restart: try_to_freeze(); dev->thread.scan1 = dev->thread.scan2; dprintk("tvaudio thread scan start [%d]\n",dev->thread.scan1); if (audio_ddep >= 0x04 && audio_ddep <= 0x0e) { /* insmod option override */ norms = (audio_ddep << 2) | 0x01; dprintk("ddep override: %s\n",stdres[audio_ddep]); } else if (&card(dev).radio == dev->input) { dprintk("FM Radio\n"); if (dev->tuner_type == TUNER_PHILIPS_TDA8290) { norms = (0x11 << 2) | 0x01; saa_dsp_writel(dev, 0x42c >> 2, 0x729555); } else { norms = (0x0f << 2) | 0x01; } } else { /* (let chip) scan for sound carrier */ norms = 0; if (dev->tvnorm->id & (V4L2_STD_B | V4L2_STD_GH)) norms |= 0x04; if (dev->tvnorm->id & V4L2_STD_PAL_I) norms |= 0x20; if (dev->tvnorm->id & V4L2_STD_DK) norms |= 0x08; if (dev->tvnorm->id & V4L2_STD_MN) norms |= 0x40; if (dev->tvnorm->id & (V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC)) norms |= 0x10; if (0 == norms) norms = 0x7c; /* all */ dprintk("scanning:%s%s%s%s%s\n", (norms & 0x04) ? " B/G" : "", (norms & 0x08) ? " D/K" : "", (norms & 0x10) ? " L/L'" : "", (norms & 0x20) ? " I" : "", (norms & 0x40) ? 
" M" : ""); } /* kick automatic standard detection */ saa_dsp_writel(dev, 0x454 >> 2, 0); saa_dsp_writel(dev, 0x454 >> 2, norms | 0x80); /* setup crossbars */ saa_dsp_writel(dev, 0x464 >> 2, 0x000000); saa_dsp_writel(dev, 0x470 >> 2, 0x101010); if (tvaudio_sleep(dev,3000)) goto restart; value = saa_readl(0x528 >> 2) & 0xffffff; dprintk("tvaudio thread status: 0x%x [%s%s%s]\n", value, stdres[value & 0x1f], (value & 0x000020) ? ",stereo" : "", (value & 0x000040) ? ",dual" : ""); dprintk("detailed status: " "%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s\n", (value & 0x000080) ? " A2/EIAJ pilot tone " : "", (value & 0x000100) ? " A2/EIAJ dual " : "", (value & 0x000200) ? " A2/EIAJ stereo " : "", (value & 0x000400) ? " A2/EIAJ noise mute " : "", (value & 0x000800) ? " BTSC/FM radio pilot " : "", (value & 0x001000) ? " SAP carrier " : "", (value & 0x002000) ? " BTSC stereo noise mute " : "", (value & 0x004000) ? " SAP noise mute " : "", (value & 0x008000) ? " VDSP " : "", (value & 0x010000) ? " NICST " : "", (value & 0x020000) ? " NICDU " : "", (value & 0x040000) ? " NICAM muted " : "", (value & 0x080000) ? " NICAM reserve sound " : "", (value & 0x100000) ? " init done " : ""); } done: dev->thread.stopped = 1; return 0; } /* ------------------------------------------------------------------ */ /* common stuff + external entry points */ void saa7134_enable_i2s(struct saa7134_dev *dev) { int i2s_format; if (!card_is_empress(dev)) return; if (dev->pci->device == PCI_DEVICE_ID_PHILIPS_SAA7130) return; /* configure GPIO for out */ saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x0E000000, 0x00000000); switch (dev->pci->device) { case PCI_DEVICE_ID_PHILIPS_SAA7133: case PCI_DEVICE_ID_PHILIPS_SAA7135: /* Set I2S format (SONY)  */ saa_writeb(SAA7133_I2S_AUDIO_CONTROL, 0x00); /* Start I2S */ saa_writeb(SAA7134_I2S_AUDIO_OUTPUT, 0x11); break; case PCI_DEVICE_ID_PHILIPS_SAA7134: i2s_format = (dev->input->amux == TV) ? 
0x00 : 0x01; /* enable I2S audio output for the mpeg encoder */ saa_writeb(SAA7134_I2S_OUTPUT_SELECT, 0x80); saa_writeb(SAA7134_I2S_OUTPUT_FORMAT, i2s_format); saa_writeb(SAA7134_I2S_OUTPUT_LEVEL, 0x0F); saa_writeb(SAA7134_I2S_AUDIO_OUTPUT, 0x01); default: break; } } int saa7134_tvaudio_rx2mode(u32 rx) { u32 mode; mode = V4L2_TUNER_MODE_MONO; if (rx & V4L2_TUNER_SUB_STEREO) mode = V4L2_TUNER_MODE_STEREO; else if (rx & V4L2_TUNER_SUB_LANG1) mode = V4L2_TUNER_MODE_LANG1; else if (rx & V4L2_TUNER_SUB_LANG2) mode = V4L2_TUNER_MODE_LANG2; return mode; } void saa7134_tvaudio_setmute(struct saa7134_dev *dev) { switch (dev->pci->device) { case PCI_DEVICE_ID_PHILIPS_SAA7130: case PCI_DEVICE_ID_PHILIPS_SAA7134: mute_input_7134(dev); break; case PCI_DEVICE_ID_PHILIPS_SAA7133: case PCI_DEVICE_ID_PHILIPS_SAA7135: mute_input_7133(dev); break; } } void saa7134_tvaudio_setinput(struct saa7134_dev *dev, struct saa7134_input *in) { dev->input = in; switch (dev->pci->device) { case PCI_DEVICE_ID_PHILIPS_SAA7130: case PCI_DEVICE_ID_PHILIPS_SAA7134: mute_input_7134(dev); break; case PCI_DEVICE_ID_PHILIPS_SAA7133: case PCI_DEVICE_ID_PHILIPS_SAA7135: mute_input_7133(dev); break; } saa7134_enable_i2s(dev); } void saa7134_tvaudio_setvolume(struct saa7134_dev *dev, int level) { switch (dev->pci->device) { case PCI_DEVICE_ID_PHILIPS_SAA7134: saa_writeb(SAA7134_CHANNEL1_LEVEL, level & 0x1f); saa_writeb(SAA7134_CHANNEL2_LEVEL, level & 0x1f); saa_writeb(SAA7134_NICAM_LEVEL_ADJUST, level & 0x1f); break; } } int saa7134_tvaudio_getstereo(struct saa7134_dev *dev) { int retval = V4L2_TUNER_SUB_MONO; switch (dev->pci->device) { case PCI_DEVICE_ID_PHILIPS_SAA7134: if (dev->tvaudio) retval = tvaudio_getstereo(dev,dev->tvaudio); break; case PCI_DEVICE_ID_PHILIPS_SAA7133: case PCI_DEVICE_ID_PHILIPS_SAA7135: retval = getstereo_7133(dev); break; } return retval; } void saa7134_tvaudio_init(struct saa7134_dev *dev) { int clock = saa7134_boards[dev->board].audio_clock; if (UNSET != audio_clock_override) 
clock = audio_clock_override; switch (dev->pci->device) { case PCI_DEVICE_ID_PHILIPS_SAA7134: /* init all audio registers */ saa_writeb(SAA7134_AUDIO_PLL_CTRL, 0x00); if (need_resched()) schedule(); else udelay(10); saa_writeb(SAA7134_AUDIO_CLOCK0, clock & 0xff); saa_writeb(SAA7134_AUDIO_CLOCK1, (clock >> 8) & 0xff); saa_writeb(SAA7134_AUDIO_CLOCK2, (clock >> 16) & 0xff); /* frame locked audio is mandatory for NICAM */ saa_writeb(SAA7134_AUDIO_PLL_CTRL, 0x01); saa_writeb(SAA7134_NICAM_ERROR_LOW, 0x14); saa_writeb(SAA7134_NICAM_ERROR_HIGH, 0x50); break; case PCI_DEVICE_ID_PHILIPS_SAA7133: case PCI_DEVICE_ID_PHILIPS_SAA7135: saa_writel(0x598 >> 2, clock); saa_dsp_writel(dev, 0x474 >> 2, 0x00); saa_dsp_writel(dev, 0x450 >> 2, 0x00); } } int saa7134_tvaudio_init2(struct saa7134_dev *dev) { int (*my_thread)(void *data) = NULL; switch (dev->pci->device) { case PCI_DEVICE_ID_PHILIPS_SAA7134: my_thread = tvaudio_thread; break; case PCI_DEVICE_ID_PHILIPS_SAA7133: case PCI_DEVICE_ID_PHILIPS_SAA7135: my_thread = tvaudio_thread_ddep; break; } dev->thread.thread = NULL; if (my_thread) { saa7134_tvaudio_init(dev); /* start tvaudio thread */ dev->thread.thread = kthread_run(my_thread, dev, "%s", dev->name); if (IS_ERR(dev->thread.thread)) { printk(KERN_WARNING "%s: kernel_thread() failed\n", dev->name); /* XXX: missing error handling here */ } saa7134_tvaudio_do_scan(dev); } saa7134_enable_i2s(dev); return 0; } int saa7134_tvaudio_fini(struct saa7134_dev *dev) { /* shutdown tvaudio thread */ if (dev->thread.thread && !dev->thread.stopped) kthread_stop(dev->thread.thread); saa_andorb(SAA7134_ANALOG_IO_SELECT, 0x07, 0x00); /* LINE1 */ return 0; } int saa7134_tvaudio_do_scan(struct saa7134_dev *dev) { if (dev->input->amux != TV) { dprintk("sound IF not in use, skipping scan\n"); dev->automute = 0; saa7134_tvaudio_setmute(dev); } else if (dev->thread.thread) { dev->thread.mode = UNSET; dev->thread.scan2++; if (!dev->insuspend && !dev->thread.stopped) 
wake_up_process(dev->thread.thread); } else { dev->automute = 0; saa7134_tvaudio_setmute(dev); } return 0; } EXPORT_SYMBOL(saa_dsp_writel); EXPORT_SYMBOL(saa7134_tvaudio_setmute); /* ----------------------------------------------------------- */ /* * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
christianjann/L4T_PREEMPT_RT
drivers/oprofile/buffer_sync.c
3508
13627
/** * @file buffer_sync.c * * @remark Copyright 2002-2009 OProfile authors * @remark Read the file COPYING * * @author John Levon <levon@movementarian.org> * @author Barry Kasindorf * @author Robert Richter <robert.richter@amd.com> * * This is the core of the buffer management. Each * CPU buffer is processed and entered into the * global event buffer. Such processing is necessary * in several circumstances, mentioned below. * * The processing does the job of converting the * transitory EIP value into a persistent dentry/offset * value that the profiler can record at its leisure. * * See fs/dcookies.c for a description of the dentry/offset * objects. */ #include <linux/mm.h> #include <linux/workqueue.h> #include <linux/notifier.h> #include <linux/dcookies.h> #include <linux/profile.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/oprofile.h> #include <linux/sched.h> #include <linux/gfp.h> #include "oprofile_stats.h" #include "event_buffer.h" #include "cpu_buffer.h" #include "buffer_sync.h" static LIST_HEAD(dying_tasks); static LIST_HEAD(dead_tasks); static cpumask_var_t marked_cpus; static DEFINE_SPINLOCK(task_mortuary); static void process_task_mortuary(void); /* Take ownership of the task struct and place it on the * list for processing. Only after two full buffer syncs * does the task eventually get freed, because by then * we are sure we will not reference it again. * Can be invoked from softirq via RCU callback due to * call_rcu() of the task struct, hence the _irqsave. */ static int task_free_notify(struct notifier_block *self, unsigned long val, void *data) { unsigned long flags; struct task_struct *task = data; spin_lock_irqsave(&task_mortuary, flags); list_add(&task->tasks, &dying_tasks); spin_unlock_irqrestore(&task_mortuary, flags); return NOTIFY_OK; } /* The task is on its way out. A sync of the buffer means we can catch * any remaining samples for this task. 
*/ static int task_exit_notify(struct notifier_block *self, unsigned long val, void *data) { /* To avoid latency problems, we only process the current CPU, * hoping that most samples for the task are on this CPU */ sync_buffer(raw_smp_processor_id()); return 0; } /* The task is about to try a do_munmap(). We peek at what it's going to * do, and if it's an executable region, process the samples first, so * we don't lose any. This does not have to be exact, it's a QoI issue * only. */ static int munmap_notify(struct notifier_block *self, unsigned long val, void *data) { unsigned long addr = (unsigned long)data; struct mm_struct *mm = current->mm; struct vm_area_struct *mpnt; down_read(&mm->mmap_sem); mpnt = find_vma(mm, addr); if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) { up_read(&mm->mmap_sem); /* To avoid latency problems, we only process the current CPU, * hoping that most samples for the task are on this CPU */ sync_buffer(raw_smp_processor_id()); return 0; } up_read(&mm->mmap_sem); return 0; } /* We need to be told about new modules so we don't attribute to a previously * loaded module, or drop the samples on the floor. */ static int module_load_notify(struct notifier_block *self, unsigned long val, void *data) { #ifdef CONFIG_MODULES if (val != MODULE_STATE_COMING) return 0; /* FIXME: should we process all CPU buffers ? 
*/ mutex_lock(&buffer_mutex); add_event_entry(ESCAPE_CODE); add_event_entry(MODULE_LOADED_CODE); mutex_unlock(&buffer_mutex); #endif return 0; } static struct notifier_block task_free_nb = { .notifier_call = task_free_notify, }; static struct notifier_block task_exit_nb = { .notifier_call = task_exit_notify, }; static struct notifier_block munmap_nb = { .notifier_call = munmap_notify, }; static struct notifier_block module_load_nb = { .notifier_call = module_load_notify, }; static void free_all_tasks(void) { /* make sure we don't leak task structs */ process_task_mortuary(); process_task_mortuary(); } int sync_start(void) { int err; if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) return -ENOMEM; err = task_handoff_register(&task_free_nb); if (err) goto out1; err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb); if (err) goto out2; err = profile_event_register(PROFILE_MUNMAP, &munmap_nb); if (err) goto out3; err = register_module_notifier(&module_load_nb); if (err) goto out4; start_cpu_work(); out: return err; out4: profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); out3: profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); out2: task_handoff_unregister(&task_free_nb); free_all_tasks(); out1: free_cpumask_var(marked_cpus); goto out; } void sync_stop(void) { end_cpu_work(); unregister_module_notifier(&module_load_nb); profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); task_handoff_unregister(&task_free_nb); barrier(); /* do all of the above first */ flush_cpu_work(); free_all_tasks(); free_cpumask_var(marked_cpus); } /* Optimisation. We can manage without taking the dcookie sem * because we cannot reach this code without at least one * dcookie user still being registered (namely, the reader * of the event buffer). 
*/ static inline unsigned long fast_get_dcookie(struct path *path) { unsigned long cookie; if (path->dentry->d_flags & DCACHE_COOKIE) return (unsigned long)path->dentry; get_dcookie(path, &cookie); return cookie; } /* Look up the dcookie for the task's mm->exe_file, * which corresponds loosely to "application name". This is * not strictly necessary but allows oprofile to associate * shared-library samples with particular applications */ static unsigned long get_exec_dcookie(struct mm_struct *mm) { unsigned long cookie = NO_COOKIE; if (mm && mm->exe_file) cookie = fast_get_dcookie(&mm->exe_file->f_path); return cookie; } /* Convert the EIP value of a sample into a persistent dentry/offset * pair that can then be added to the global event buffer. We make * sure to do this lookup before a mm->mmap modification happens so * we don't lose track. */ static unsigned long lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset) { unsigned long cookie = NO_COOKIE; struct vm_area_struct *vma; for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) { if (addr < vma->vm_start || addr >= vma->vm_end) continue; if (vma->vm_file) { cookie = fast_get_dcookie(&vma->vm_file->f_path); *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr - vma->vm_start; } else { /* must be an anonymous map */ *offset = addr; } break; } if (!vma) cookie = INVALID_COOKIE; return cookie; } static unsigned long last_cookie = INVALID_COOKIE; static void add_cpu_switch(int i) { add_event_entry(ESCAPE_CODE); add_event_entry(CPU_SWITCH_CODE); add_event_entry(i); last_cookie = INVALID_COOKIE; } static void add_kernel_ctx_switch(unsigned int in_kernel) { add_event_entry(ESCAPE_CODE); if (in_kernel) add_event_entry(KERNEL_ENTER_SWITCH_CODE); else add_event_entry(KERNEL_EXIT_SWITCH_CODE); } static void add_user_ctx_switch(struct task_struct const *task, unsigned long cookie) { add_event_entry(ESCAPE_CODE); add_event_entry(CTX_SWITCH_CODE); add_event_entry(task->pid); add_event_entry(cookie); /* Another 
code for daemon back-compat */ add_event_entry(ESCAPE_CODE); add_event_entry(CTX_TGID_CODE); add_event_entry(task->tgid); } static void add_cookie_switch(unsigned long cookie) { add_event_entry(ESCAPE_CODE); add_event_entry(COOKIE_SWITCH_CODE); add_event_entry(cookie); } static void add_trace_begin(void) { add_event_entry(ESCAPE_CODE); add_event_entry(TRACE_BEGIN_CODE); } static void add_data(struct op_entry *entry, struct mm_struct *mm) { unsigned long code, pc, val; unsigned long cookie; off_t offset; if (!op_cpu_buffer_get_data(entry, &code)) return; if (!op_cpu_buffer_get_data(entry, &pc)) return; if (!op_cpu_buffer_get_size(entry)) return; if (mm) { cookie = lookup_dcookie(mm, pc, &offset); if (cookie == NO_COOKIE) offset = pc; if (cookie == INVALID_COOKIE) { atomic_inc(&oprofile_stats.sample_lost_no_mapping); offset = pc; } if (cookie != last_cookie) { add_cookie_switch(cookie); last_cookie = cookie; } } else offset = pc; add_event_entry(ESCAPE_CODE); add_event_entry(code); add_event_entry(offset); /* Offset from Dcookie */ while (op_cpu_buffer_get_data(entry, &val)) add_event_entry(val); } static inline void add_sample_entry(unsigned long offset, unsigned long event) { add_event_entry(offset); add_event_entry(event); } /* * Add a sample to the global event buffer. If possible the * sample is converted into a persistent dentry/offset pair * for later lookup from userspace. Return 0 on failure. 
*/ static int add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel) { unsigned long cookie; off_t offset; if (in_kernel) { add_sample_entry(s->eip, s->event); return 1; } /* add userspace sample */ if (!mm) { atomic_inc(&oprofile_stats.sample_lost_no_mm); return 0; } cookie = lookup_dcookie(mm, s->eip, &offset); if (cookie == INVALID_COOKIE) { atomic_inc(&oprofile_stats.sample_lost_no_mapping); return 0; } if (cookie != last_cookie) { add_cookie_switch(cookie); last_cookie = cookie; } add_sample_entry(offset, s->event); return 1; } static void release_mm(struct mm_struct *mm) { if (!mm) return; up_read(&mm->mmap_sem); mmput(mm); } static struct mm_struct *take_tasks_mm(struct task_struct *task) { struct mm_struct *mm = get_task_mm(task); if (mm) down_read(&mm->mmap_sem); return mm; } static inline int is_code(unsigned long val) { return val == ESCAPE_CODE; } /* Move tasks along towards death. Any tasks on dead_tasks * will definitely have no remaining references in any * CPU buffers at this point, because we use two lists, * and to have reached the list, it must have gone through * one full sync already. 
*/ static void process_task_mortuary(void) { unsigned long flags; LIST_HEAD(local_dead_tasks); struct task_struct *task; struct task_struct *ttask; spin_lock_irqsave(&task_mortuary, flags); list_splice_init(&dead_tasks, &local_dead_tasks); list_splice_init(&dying_tasks, &dead_tasks); spin_unlock_irqrestore(&task_mortuary, flags); list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) { list_del(&task->tasks); free_task(task); } } static void mark_done(int cpu) { int i; cpumask_set_cpu(cpu, marked_cpus); for_each_online_cpu(i) { if (!cpumask_test_cpu(i, marked_cpus)) return; } /* All CPUs have been processed at least once, * we can process the mortuary once */ process_task_mortuary(); cpumask_clear(marked_cpus); } /* FIXME: this is not sufficient if we implement syscall barrier backtrace * traversal, the code switch to sb_sample_start at first kernel enter/exit * switch so we need a fifth state and some special handling in sync_buffer() */ typedef enum { sb_bt_ignore = -2, sb_buffer_start, sb_bt_start, sb_sample_start, } sync_buffer_state; /* Sync one of the CPU's buffers into the global event buffer. * Here we need to go through each batch of samples punctuated * by context switch notes, taking the task's mmap_sem and doing * lookup in task->mm->mmap to convert EIP into dcookie/offset * value. 
*/ void sync_buffer(int cpu) { struct mm_struct *mm = NULL; struct mm_struct *oldmm; unsigned long val; struct task_struct *new; unsigned long cookie = 0; int in_kernel = 1; sync_buffer_state state = sb_buffer_start; unsigned int i; unsigned long available; unsigned long flags; struct op_entry entry; struct op_sample *sample; mutex_lock(&buffer_mutex); add_cpu_switch(cpu); op_cpu_buffer_reset(cpu); available = op_cpu_buffer_entries(cpu); for (i = 0; i < available; ++i) { sample = op_cpu_buffer_read_entry(&entry, cpu); if (!sample) break; if (is_code(sample->eip)) { flags = sample->event; if (flags & TRACE_BEGIN) { state = sb_bt_start; add_trace_begin(); } if (flags & KERNEL_CTX_SWITCH) { /* kernel/userspace switch */ in_kernel = flags & IS_KERNEL; if (state == sb_buffer_start) state = sb_sample_start; add_kernel_ctx_switch(flags & IS_KERNEL); } if (flags & USER_CTX_SWITCH && op_cpu_buffer_get_data(&entry, &val)) { /* userspace context switch */ new = (struct task_struct *)val; oldmm = mm; release_mm(oldmm); mm = take_tasks_mm(new); if (mm != oldmm) cookie = get_exec_dcookie(mm); add_user_ctx_switch(new, cookie); } if (op_cpu_buffer_get_size(&entry)) add_data(&entry, mm); continue; } if (state < sb_bt_start) /* ignore sample */ continue; if (add_sample(mm, sample, in_kernel)) continue; /* ignore backtraces if failed to add a sample */ if (state == sb_bt_start) { state = sb_bt_ignore; atomic_inc(&oprofile_stats.bt_lost_no_mapping); } } release_mm(mm); mark_done(cpu); mutex_unlock(&buffer_mutex); } /* The function can be used to add a buffer worth of data directly to * the kernel buffer. The buffer is assumed to be a circular buffer. * Take the entries from index start and end at index end, wrapping * at max_entries. */ void oprofile_put_buff(unsigned long *buf, unsigned int start, unsigned int stop, unsigned int max) { int i; i = start; mutex_lock(&buffer_mutex); while (i != stop) { add_event_entry(buf[i++]); if (i >= max) i = 0; } mutex_unlock(&buffer_mutex); }
gpl-2.0
jstotero/Old_Cucciolone
arch/m68k/amiga/amiints.c
4532
6489
/* * linux/arch/m68k/amiga/amiints.c -- Amiga Linux interrupt handling code * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. * * 11/07/96: rewritten interrupt handling, irq lists are exists now only for * this sources where it makes sense (VERTB/PORTS/EXTER) and you must * be careful that dev_id for this sources is unique since this the * only possibility to distinguish between different handlers for * free_irq. irq lists also have different irq flags: * - IRQ_FLG_FAST: handler is inserted at top of list (after other * fast handlers) * - IRQ_FLG_SLOW: handler is inserted at bottom of list and before * they're executed irq level is set to the previous * one, but handlers don't need to be reentrant, if * reentrance occurred, slow handlers will be just * called again. * The whole interrupt handling for CIAs is moved to cia.c * /Roman Zippel * * 07/08/99: rewamp of the interrupt handling - we now have two types of * interrupts, normal and fast handlers, fast handlers being * marked with IRQF_DISABLED and runs with all other interrupts * disabled. Normal interrupts disable their own source but * run with all other interrupt sources enabled. * PORTS and EXTER interrupts are always shared even if the * drivers do not explicitly mark this when calling * request_irq which they really should do. * This is similar to the way interrupts are handled on all * other architectures and makes a ton of sense besides * having the advantage of making it easier to share * drivers. 
* /Jes */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <asm/irq.h> #include <asm/traps.h> #include <asm/amigahw.h> #include <asm/amigaints.h> #include <asm/amipcmcia.h> static void amiga_enable_irq(unsigned int irq); static void amiga_disable_irq(unsigned int irq); static irqreturn_t ami_int1(int irq, void *dev_id); static irqreturn_t ami_int3(int irq, void *dev_id); static irqreturn_t ami_int4(int irq, void *dev_id); static irqreturn_t ami_int5(int irq, void *dev_id); static struct irq_controller amiga_irq_controller = { .name = "amiga", .lock = __SPIN_LOCK_UNLOCKED(amiga_irq_controller.lock), .enable = amiga_enable_irq, .disable = amiga_disable_irq, }; /* * void amiga_init_IRQ(void) * * Parameters: None * * Returns: Nothing * * This function should be called during kernel startup to initialize * the amiga IRQ handling routines. */ void __init amiga_init_IRQ(void) { if (request_irq(IRQ_AUTO_1, ami_int1, 0, "int1", NULL)) pr_err("Couldn't register int%d\n", 1); if (request_irq(IRQ_AUTO_3, ami_int3, 0, "int3", NULL)) pr_err("Couldn't register int%d\n", 3); if (request_irq(IRQ_AUTO_4, ami_int4, 0, "int4", NULL)) pr_err("Couldn't register int%d\n", 4); if (request_irq(IRQ_AUTO_5, ami_int5, 0, "int5", NULL)) pr_err("Couldn't register int%d\n", 5); m68k_setup_irq_controller(&amiga_irq_controller, IRQ_USER, AMI_STD_IRQS); /* turn off PCMCIA interrupts */ if (AMIGAHW_PRESENT(PCMCIA)) gayle.inten = GAYLE_IRQ_IDE; /* turn off all interrupts and enable the master interrupt bit */ amiga_custom.intena = 0x7fff; amiga_custom.intreq = 0x7fff; amiga_custom.intena = IF_SETCLR | IF_INTEN; cia_init_IRQ(&ciaa_base); cia_init_IRQ(&ciab_base); } /* * Enable/disable a particular machine specific interrupt source. * Note that this may affect other interrupts in case of a shared interrupt. * This function should only be called for a _very_ short time to change some * internal data, that may not be changed by the interrupt at the same time. 
*/ static void amiga_enable_irq(unsigned int irq) { amiga_custom.intena = IF_SETCLR | (1 << (irq - IRQ_USER)); } static void amiga_disable_irq(unsigned int irq) { amiga_custom.intena = 1 << (irq - IRQ_USER); } /* * The builtin Amiga hardware interrupt handlers. */ static irqreturn_t ami_int1(int irq, void *dev_id) { unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; /* if serial transmit buffer empty, interrupt */ if (ints & IF_TBE) { amiga_custom.intreq = IF_TBE; m68k_handle_int(IRQ_AMIGA_TBE); } /* if floppy disk transfer complete, interrupt */ if (ints & IF_DSKBLK) { amiga_custom.intreq = IF_DSKBLK; m68k_handle_int(IRQ_AMIGA_DSKBLK); } /* if software interrupt set, interrupt */ if (ints & IF_SOFT) { amiga_custom.intreq = IF_SOFT; m68k_handle_int(IRQ_AMIGA_SOFT); } return IRQ_HANDLED; } static irqreturn_t ami_int3(int irq, void *dev_id) { unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; /* if a blitter interrupt */ if (ints & IF_BLIT) { amiga_custom.intreq = IF_BLIT; m68k_handle_int(IRQ_AMIGA_BLIT); } /* if a copper interrupt */ if (ints & IF_COPER) { amiga_custom.intreq = IF_COPER; m68k_handle_int(IRQ_AMIGA_COPPER); } /* if a vertical blank interrupt */ if (ints & IF_VERTB) { amiga_custom.intreq = IF_VERTB; m68k_handle_int(IRQ_AMIGA_VERTB); } return IRQ_HANDLED; } static irqreturn_t ami_int4(int irq, void *dev_id) { unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; /* if audio 0 interrupt */ if (ints & IF_AUD0) { amiga_custom.intreq = IF_AUD0; m68k_handle_int(IRQ_AMIGA_AUD0); } /* if audio 1 interrupt */ if (ints & IF_AUD1) { amiga_custom.intreq = IF_AUD1; m68k_handle_int(IRQ_AMIGA_AUD1); } /* if audio 2 interrupt */ if (ints & IF_AUD2) { amiga_custom.intreq = IF_AUD2; m68k_handle_int(IRQ_AMIGA_AUD2); } /* if audio 3 interrupt */ if (ints & IF_AUD3) { amiga_custom.intreq = IF_AUD3; m68k_handle_int(IRQ_AMIGA_AUD3); } return IRQ_HANDLED; } static irqreturn_t ami_int5(int irq, void *dev_id) { unsigned short ints = 
amiga_custom.intreqr & amiga_custom.intenar; /* if serial receive buffer full interrupt */ if (ints & IF_RBF) { /* acknowledge of IF_RBF must be done by the serial interrupt */ m68k_handle_int(IRQ_AMIGA_RBF); } /* if a disk sync interrupt */ if (ints & IF_DSKSYN) { amiga_custom.intreq = IF_DSKSYN; m68k_handle_int(IRQ_AMIGA_DSKSYN); } return IRQ_HANDLED; }
gpl-2.0
MilysTW/linux-sunxi-cb2
arch/mips/kernel/syscall.c
4532
8002
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1995, 1996, 1997, 2000, 2001, 05 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2001 MIPS Technologies, Inc. */ #include <linux/capability.h> #include <linux/errno.h> #include <linux/linkage.h> #include <linux/fs.h> #include <linux/smp.h> #include <linux/ptrace.h> #include <linux/string.h> #include <linux/syscalls.h> #include <linux/file.h> #include <linux/utsname.h> #include <linux/unistd.h> #include <linux/sem.h> #include <linux/msg.h> #include <linux/shm.h> #include <linux/compiler.h> #include <linux/ipc.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <linux/elf.h> #include <asm/asm.h> #include <asm/branch.h> #include <asm/cachectl.h> #include <asm/cacheflush.h> #include <asm/asm-offsets.h> #include <asm/signal.h> #include <asm/sim.h> #include <asm/shmparam.h> #include <asm/sysmips.h> #include <asm/uaccess.h> #include <asm/switch_to.h> /* * For historic reasons the pipe(2) syscall on MIPS has an unusual calling * convention. It returns results in registers $v0 / $v1 which means there * is no need for it to do verify the validity of a userspace pointer * argument. Historically that used to be expensive in Linux. These days * the performance advantage is negligible. 
*/ asmlinkage int sysm_pipe(nabi_no_regargs volatile struct pt_regs regs) { int fd[2]; int error, res; error = do_pipe_flags(fd, 0); if (error) { res = error; goto out; } regs.regs[3] = fd[1]; res = fd[0]; out: return res; } SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, off_t, offset) { unsigned long result; result = -EINVAL; if (offset & ~PAGE_MASK) goto out; result = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); out: return result; } SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, pgoff) { if (pgoff & (~PAGE_MASK >> 12)) return -EINVAL; return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12)); } save_static_function(sys_fork); static int __used noinline _sys_fork(nabi_no_regargs struct pt_regs regs) { return do_fork(SIGCHLD, regs.regs[29], &regs, 0, NULL, NULL); } save_static_function(sys_clone); static int __used noinline _sys_clone(nabi_no_regargs struct pt_regs regs) { unsigned long clone_flags; unsigned long newsp; int __user *parent_tidptr, *child_tidptr; clone_flags = regs.regs[4]; newsp = regs.regs[5]; if (!newsp) newsp = regs.regs[29]; parent_tidptr = (int __user *) regs.regs[6]; #ifdef CONFIG_32BIT /* We need to fetch the fifth argument off the stack. */ child_tidptr = NULL; if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) { int __user *__user *usp = (int __user *__user *) regs.regs[29]; if (regs.regs[2] == __NR_syscall) { if (get_user (child_tidptr, &usp[5])) return -EFAULT; } else if (get_user (child_tidptr, &usp[4])) return -EFAULT; } #else child_tidptr = (int __user *) regs.regs[8]; #endif return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr); } /* * sys_execve() executes a new program. 
*/
asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs)
{
	int error;
	char * filename;

	/* Pathname is in $a0 (regs[4]); argv/envp in $a1/$a2. */
	filename = getname((const char __user *) (long)regs.regs[4]);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			  (const char __user *const __user *) (long)regs.regs[5],
			  (const char __user *const __user *) (long)regs.regs[6],
			  &regs);
	putname(filename);

out:
	return error;
}

/* Set the TLS pointer; mirrored into the UserLocal register when present. */
SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
{
	struct thread_info *ti = task_thread_info(current);

	ti->tp_value = addr;
	if (cpu_has_userlocal)
		write_c0_userlocal(addr);

	return 0;
}

/*
 * Atomically replace the word at 'addr' with 'new' and hand the old
 * value back in $v0.  Uses ll/sc where the CPU has it (with the R10000
 * branch-likely workaround variant), otherwise falls back to the
 * ll_bit/ll_task software emulation.  On success it restores the user
 * frame and jumps straight to syscall_exit instead of returning.
 */
static inline int mips_atomic_set(struct pt_regs *regs,
	unsigned long addr, unsigned long new)
{
	unsigned long old, tmp;
	unsigned int err;

	/* Target must be a naturally aligned, writable user word. */
	if (unlikely(addr & 3))
		return -EINVAL;

	if (unlikely(!access_ok(VERIFY_WRITE, addr, 4)))
		return -EINVAL;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		" .set mips3 \n"
		" li %[err], 0 \n"
		"1: ll %[old], (%[addr]) \n"
		" move %[tmp], %[new] \n"
		"2: sc %[tmp], (%[addr]) \n"
		" beqzl %[tmp], 1b \n"
		"3: \n"
		" .section .fixup,\"ax\" \n"
		"4: li %[err], %[efault] \n"
		" j 3b \n"
		" .previous \n"
		" .section __ex_table,\"a\" \n"
		" "STR(PTR)" 1b, 4b \n"
		" "STR(PTR)" 2b, 4b \n"
		" .previous \n"
		" .set mips0 \n"
		: [old] "=&r" (old),
		  [err] "=&r" (err),
		  [tmp] "=&r" (tmp)
		: [addr] "r" (addr),
		  [new] "r" (new),
		  [efault] "i" (-EFAULT)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__ (
		" .set mips3 \n"
		" li %[err], 0 \n"
		"1: ll %[old], (%[addr]) \n"
		" move %[tmp], %[new] \n"
		"2: sc %[tmp], (%[addr]) \n"
		" bnez %[tmp], 4f \n"
		"3: \n"
		" .subsection 2 \n"
		"4: b 1b \n"
		" .previous \n"
		" \n"
		" .section .fixup,\"ax\" \n"
		"5: li %[err], %[efault] \n"
		" j 3b \n"
		" .previous \n"
		" .section __ex_table,\"a\" \n"
		" "STR(PTR)" 1b, 5b \n"
		" "STR(PTR)" 2b, 5b \n"
		" .previous \n"
		" .set mips0 \n"
		: [old] "=&r" (old),
		  [err] "=&r" (err),
		  [tmp] "=&r" (tmp)
		: [addr] "r" (addr),
		  [new] "r" (new),
		  [efault] "i" (-EFAULT)
		: "memory");
	} else {
		/*
		 * Software emulation: ll_bit is cleared by the interrupt
		 * paths, so retry until the read-modify-write was not
		 * interrupted.
		 */
		do {
			preempt_disable();
			ll_bit = 1;
			ll_task = current;
			preempt_enable();

			err = __get_user(old, (unsigned int *) addr);
			err |= __put_user(new, (unsigned int *) addr);
			if (err)
				break;
			rmb();
		} while (!ll_bit);
	}

	if (unlikely(err))
		return err;

	regs->regs[2] = old;
	regs->regs[7] = 0;	/* No error */

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
	" move $29, %0 \n"
	" j syscall_exit \n"
	: /* no outputs */
	: "r" (regs));

	/* unreached. Honestly. */
	while (1);
}

save_static_function(sys_sysmips);
/* MIPS-specific sysmips(2) multiplexer. */
static int __used noinline
_sys_sysmips(nabi_no_regargs struct pt_regs regs)
{
	long cmd, arg1, arg2;

	cmd = regs.regs[4];
	arg1 = regs.regs[5];
	arg2 = regs.regs[6];

	switch (cmd) {
	case MIPS_ATOMIC_SET:
		return mips_atomic_set(&regs, arg1, arg2);

	case MIPS_FIXADE:
		/* Bit 0: fix up unaligned accesses; bit 1: log them. */
		if (arg1 & ~3)
			return -EINVAL;

		if (arg1 & 1)
			set_thread_flag(TIF_FIXADE);
		else
			clear_thread_flag(TIF_FIXADE);
		if (arg1 & 2)
			set_thread_flag(TIF_LOGADE);
		else
			clear_thread_flag(TIF_LOGADE);

		return 0;

	case FLUSH_CACHE:
		__flush_cache_all();
		return 0;
	}

	return -EINVAL;
}

/*
 * Not implemented yet ...
 */
SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op)
{
	return -ENOSYS;
}

/*
 * If we ever come here the user sp is bad. Zap the process right away.
 * Due to the bad stack signaling wouldn't work.
 */
asmlinkage void bad_stack(void)
{
	do_exit(SIGSEGV);
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
*/
/*
 * In-kernel execve(): issues a real 'syscall' instruction so the new
 * program starts from a properly built pt_regs frame.  On MIPS, $a3
 * ($7, captured as __a3) is the kernel's error flag: zero means __v0
 * holds the result, non-zero means __v0 holds a positive errno.
 */
int kernel_execve(const char *filename,
		  const char *const argv[],
		  const char *const envp[])
{
	/* Arguments must be pinned in the o32 argument registers. */
	register unsigned long __a0 asm("$4") = (unsigned long) filename;
	register unsigned long __a1 asm("$5") = (unsigned long) argv;
	register unsigned long __a2 asm("$6") = (unsigned long) envp;
	register unsigned long __a3 asm("$7");
	unsigned long __v0;

	__asm__ volatile (" \n"
	" .set noreorder \n"
	" li $2, %5 # __NR_execve \n"
	" syscall \n"
	" move %0, $2 \n"
	" .set reorder \n"
	: "=&r" (__v0), "=r" (__a3)
	: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve)
	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24",
	  "memory");

	if (__a3 == 0)
		return __v0;

	return -__v0;
}
gpl-2.0
herod2k/buildroot-linux-kernel-m6
arch/x86/kernel/cpu/mcheck/p5.c
4788
1711
/*
 * P5 specific Machine Check Exception Reporting
 * (C) Copyright 2002 Alan Cox <alan@lxorguk.ukuu.org.uk>
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/smp.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mce.h>
#include <asm/msr.h>

/* By default disabled */
int mce_p5_enabled __read_mostly;

/* Machine check handler for Pentium class Intel CPUs: */
static void pentium_machine_check(struct pt_regs *regs, long error_code)
{
	u32 loaddr, hi, lotype;

	/* P5 latches fault address and cycle type in two MSRs; only the
	 * low halves are meaningful, 'hi' is read and discarded. */
	rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi);
	rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi);

	printk(KERN_EMERG
		"CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n",
		smp_processor_id(), loaddr, lotype);

	/* Bit 5 of the cycle type indicates a likely thermal event. */
	if (lotype & (1<<5)) {
		printk(KERN_EMERG
			"CPU#%d: Possible thermal failure (CPU on fire ?).\n",
			smp_processor_id());
	}

	add_taint(TAINT_MACHINE_CHECK);
}

/* Set up machine check reporting for processors with Intel style MCE: */
void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
{
	u32 l, h;

	/* Default P5 to off as its often misconnected: */
	if (!mce_p5_enabled)
		return;

	/* Check for MCE support: */
	if (!cpu_has(c, X86_FEATURE_MCE))
		return;

	machine_check_vector = pentium_machine_check;
	/* Make sure the vector pointer is visible before we enable MCEs: */
	wmb();

	/* Read registers before enabling (clears any stale latched state): */
	rdmsr(MSR_IA32_P5_MC_ADDR, l, h);
	rdmsr(MSR_IA32_P5_MC_TYPE, l, h);
	printk(KERN_INFO
	       "Intel old style machine check architecture supported.\n");

	/* Enable MCE: */
	set_in_cr4(X86_CR4_MCE);
	printk(KERN_INFO
	       "Intel old style machine check reporting enabled on CPU#%d.\n",
	       smp_processor_id());
}
gpl-2.0
Freack-v/android_kernel_eagle
drivers/ide/icside.c
5044
16655
/*
 * Copyright (c) 1996-2004 Russell King.
 *
 * Please note that this platform does not support 32-bit IDE IO.
 */
#include <linux/string.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/ide.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/io.h>

#include <asm/dma.h>
#include <asm/ecard.h>

#define DRV_NAME "icside"

#define ICS_IDENT_OFFSET		0x2280

#define ICS_ARCIN_V5_INTRSTAT		0x0000
#define ICS_ARCIN_V5_INTROFFSET		0x0004
#define ICS_ARCIN_V5_IDEOFFSET		0x2800
#define ICS_ARCIN_V5_IDEALTOFFSET	0x2b80
#define ICS_ARCIN_V5_IDESTEPPING	6

#define ICS_ARCIN_V6_IDEOFFSET_1	0x2000
#define ICS_ARCIN_V6_INTROFFSET_1	0x2200
#define ICS_ARCIN_V6_INTRSTAT_1		0x2290
#define ICS_ARCIN_V6_IDEALTOFFSET_1	0x2380
#define ICS_ARCIN_V6_IDEOFFSET_2	0x3000
#define ICS_ARCIN_V6_INTROFFSET_2	0x3200
#define ICS_ARCIN_V6_INTRSTAT_2		0x3290
#define ICS_ARCIN_V6_IDEALTOFFSET_2	0x3380
#define ICS_ARCIN_V6_IDESTEPPING	6

/* Per-PCB-variant register layout: taskfile base, control base, and the
 * shift between successive taskfile registers. */
struct cardinfo {
	unsigned int dataoffset;
	unsigned int ctrloffset;
	unsigned int stepping;
};

static struct cardinfo icside_cardinfo_v5 = {
	.dataoffset	= ICS_ARCIN_V5_IDEOFFSET,
	.ctrloffset	= ICS_ARCIN_V5_IDEALTOFFSET,
	.stepping	= ICS_ARCIN_V5_IDESTEPPING,
};

static struct cardinfo icside_cardinfo_v6_1 = {
	.dataoffset	= ICS_ARCIN_V6_IDEOFFSET_1,
	.ctrloffset	= ICS_ARCIN_V6_IDEALTOFFSET_1,
	.stepping	= ICS_ARCIN_V6_IDESTEPPING,
};

static struct cardinfo icside_cardinfo_v6_2 = {
	.dataoffset	= ICS_ARCIN_V6_IDEOFFSET_2,
	.ctrloffset	= ICS_ARCIN_V6_IDEALTOFFSET_2,
	.stepping	= ICS_ARCIN_V6_IDESTEPPING,
};

/* Driver state hung off the expansion card. */
struct icside_state {
	unsigned int channel;	/* channel last routed by icside_maskproc() */
	unsigned int enabled;	/* card interrupts currently enabled */
	void __iomem *irq_port;
	void __iomem *ioc_base;
	unsigned int sel;	/* EASI select bits written to ioc_base */
	unsigned int type;	/* ICS_TYPE_* probed from the ident ROM */
	struct ide_host *host;
};

#define ICS_TYPE_A3IN	0
#define ICS_TYPE_A3USER	1
#define ICS_TYPE_V6	3
#define ICS_TYPE_V5	15
#define ICS_TYPE_NOTYPE	((unsigned int)-1)

/* ---------------- Version 5 PCB Support Functions --------------------- */
/* Prototype: icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
 * Purpose  : enable interrupts from card
 */
static void icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;

	/* A write to the mask register enables the card interrupt ... */
	writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}

/* Prototype: icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
 * Purpose  : disable interrupts from card
 */
static void icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;

	/* ... and a read of the same register disables it. */
	readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}

static const expansioncard_ops_t icside_ops_arcin_v5 = {
	.irqenable	= icside_irqenable_arcin_v5,
	.irqdisable	= icside_irqdisable_arcin_v5,
};


/* ---------------- Version 6 PCB Support Functions --------------------- */
/* Prototype: icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
 * Purpose  : enable interrupts from card
 */
static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;
	void __iomem *base = state->irq_port;

	state->enabled = 1;

	/* Unmask (writeb) only the currently selected channel; the read
	 * masks the other channel's interrupt. */
	switch (state->channel) {
	case 0:
		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
		readb(base + ICS_ARCIN_V6_INTROFFSET_2);
		break;
	case 1:
		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
		readb(base + ICS_ARCIN_V6_INTROFFSET_1);
		break;
	}
}

/* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
 * Purpose  : disable interrupts from card
 */
static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
	struct icside_state *state = ec->irq_data;

	state->enabled = 0;

	/* Reads mask both channels. */
	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
}

/* Prototype: icside_irqprobe(struct expansion_card *ec)
 * Purpose  : detect an active interrupt from card
 */
static int icside_irqpending_arcin_v6(struct expansion_card *ec)
{
	struct icside_state *state = ec->irq_data;

	return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 ||
	       readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1;
}

static const expansioncard_ops_t icside_ops_arcin_v6 = {
	.irqenable	= icside_irqenable_arcin_v6,
	.irqdisable	= icside_irqdisable_arcin_v6,
	.irqpending	= icside_irqpending_arcin_v6,
};

/*
 * Handle routing of interrupts. This is called before
 * we write the command to the drive.
 */
static void icside_maskproc(ide_drive_t *drive, int mask)
{
	ide_hwif_t *hwif = drive->hwif;
	struct expansion_card *ec = ECARD_DEV(hwif->dev);
	struct icside_state *state = ecard_get_drvdata(ec);
	unsigned long flags;

	local_irq_save(flags);

	state->channel = hwif->channel;

	if (state->enabled && !mask) {
		/* Route the interrupt to the channel being commanded,
		 * masking the other one. */
		switch (hwif->channel) {
		case 0:
			writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
			readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
			break;
		case 1:
			writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
			readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
			break;
		}
	} else {
		/* Masked or globally disabled: mask both channels. */
		readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
		readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
	}

	local_irq_restore(flags);
}

static const struct ide_port_ops icside_v6_no_dma_port_ops = {
	.maskproc	= icside_maskproc,
};

#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
/*
 * SG-DMA support.
 *
 * Similar to the BM-DMA, but we use the RiscPCs IOMD DMA controllers.
 * There is only one DMA controller per card, which means that only
 * one drive can be accessed at one time. NOTE! We do not enforce that
 * here, but we rely on the main IDE driver spotting that both
 * interfaces use the same IRQ, which should guarantee this.
 */

/*
 * Configure the IOMD to give the appropriate timings for the transfer
 * mode being requested. We take the advice of the ATA standards, and
 * calculate the cycle time based on the transfer mode, and the EIDE
 * MW DMA specs that the drive provides in the IDENTIFY command.
 *
 * We have the following IOMD DMA modes to choose from:
 *
 *	Type	Active		Recovery	Cycle
 *	A	250 (250)	312 (550)	562 (800)
 *	B	187		250		437
 *	C	125 (125)	125 (375)	250 (500)
 *	D	62		125		187
 *
 * (figures in brackets are actual measured timings)
 *
 * However, we also need to take care of the read/write active and
 * recovery timings:
 *
 *			Read	Write
 *	Mode	Active	-- Recovery --	Cycle	IOMD type
 *	MW0	215	50	215	480	A
 *	MW1	80	50	50	150	C
 *	MW2	70	25	25	120	C
 */
static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	unsigned long cycle_time;
	int use_dma_info = 0;
	const u8 xfer_mode = drive->dma_mode;

	/* NOTE(review): cycle_time is only assigned in the cases below;
	 * presumably the mwdma/swdma masks restrict xfer_mode to these
	 * values — confirm against the core's mode selection. */
	switch (xfer_mode) {
	case XFER_MW_DMA_2:
		cycle_time = 250;
		use_dma_info = 1;
		break;

	case XFER_MW_DMA_1:
		cycle_time = 250;
		use_dma_info = 1;
		break;

	case XFER_MW_DMA_0:
		cycle_time = 480;
		break;

	case XFER_SW_DMA_2:
	case XFER_SW_DMA_1:
	case XFER_SW_DMA_0:
		cycle_time = 480;
		break;
	}

	/*
	 * If we're going to be doing MW_DMA_1 or MW_DMA_2, we should
	 * take care to note the values in the ID...
	 */
	if (use_dma_info && drive->id[ATA_ID_EIDE_DMA_TIME] > cycle_time)
		cycle_time = drive->id[ATA_ID_EIDE_DMA_TIME];

	/* Stash the chosen cycle time for icside_dma_setup(). */
	ide_set_drivedata(drive, (void *)cycle_time);

	printk("%s: %s selected (peak %dMB/s)\n", drive->name,
		ide_xfer_verbose(xfer_mode),
		2000 / (unsigned long)ide_get_drivedata(drive));
}

static const struct ide_port_ops icside_v6_port_ops = {
	.set_dma_mode	= icside_set_dma_mode,
	.maskproc	= icside_maskproc,
};

/* No per-drive DMA enable bit on this hardware — nothing to do. */
static void icside_dma_host_set(ide_drive_t *drive, int on)
{
}

static int icside_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct expansion_card *ec = ECARD_DEV(hwif->dev);

	disable_dma(ec->dma);

	/* Non-zero residue means the transfer did not complete. */
	return get_dma_residue(ec->dma) != 0;
}

static void icside_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct expansion_card *ec = ECARD_DEV(hwif->dev);

	/* We can not enable DMA on both channels simultaneously. */
	BUG_ON(dma_channel_active(ec->dma));
	enable_dma(ec->dma);
}

static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	struct expansion_card *ec = ECARD_DEV(hwif->dev);
	struct icside_state *state = ecard_get_drvdata(ec);
	unsigned int dma_mode;

	if (cmd->tf_flags & IDE_TFLAG_WRITE)
		dma_mode = DMA_MODE_WRITE;
	else
		dma_mode = DMA_MODE_READ;

	/*
	 * We can not enable DMA on both channels.
	 */
	BUG_ON(dma_channel_active(ec->dma));

	/*
	 * Ensure that we have the right interrupt routed.
	 */
	icside_maskproc(drive, 0);

	/*
	 * Route the DMA signals to the correct interface.
	 */
	writeb(state->sel | hwif->channel, state->ioc_base);

	/*
	 * Select the correct timing for this drive.
	 */
	set_dma_speed(ec->dma, (unsigned long)ide_get_drivedata(drive));

	/*
	 * Tell the DMA engine about the SG table and
	 * data direction.
	 */
	set_dma_sg(ec->dma, hwif->sg_table, cmd->sg_nents);
	set_dma_mode(ec->dma, dma_mode);

	return 0;
}

static int icside_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct expansion_card *ec = ECARD_DEV(hwif->dev);
	struct icside_state *state = ecard_get_drvdata(ec);

	/* Check the interrupt status bit of this hwif's channel. */
	return readb(state->irq_port +
		     (hwif->channel ?
			ICS_ARCIN_V6_INTRSTAT_2 :
			ICS_ARCIN_V6_INTRSTAT_1)) & 1;
}

/* The IOMD DMA engine does not use a PRD table. */
static int icside_dma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	hwif->dmatable_cpu = NULL;
	hwif->dmatable_dma = 0;

	return 0;
}

static const struct ide_dma_ops icside_v6_dma_ops = {
	.dma_host_set	= icside_dma_host_set,
	.dma_setup	= icside_dma_setup,
	.dma_start	= icside_dma_start,
	.dma_end	= icside_dma_end,
	.dma_test_irq	= icside_dma_test_irq,
	.dma_lost_irq	= ide_dma_lost_irq,
};
#else
#define icside_v6_dma_ops NULL
#endif

static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	return -EOPNOTSUPP;
}

/* Fill in the taskfile register addresses from the card's layout info. */
static void icside_setup_ports(struct ide_hw *hw, void __iomem *base,
			       struct cardinfo *info, struct expansion_card *ec)
{
	unsigned long port = (unsigned long)base + info->dataoffset;

	hw->io_ports.data_addr	 = port;
	hw->io_ports.error_addr	 = port + (1 << info->stepping);
	hw->io_ports.nsect_addr	 = port + (2 << info->stepping);
	hw->io_ports.lbal_addr	 = port + (3 << info->stepping);
	hw->io_ports.lbam_addr	 = port + (4 << info->stepping);
	hw->io_ports.lbah_addr	 = port + (5 << info->stepping);
	hw->io_ports.device_addr = port + (6 << info->stepping);
	hw->io_ports.status_addr = port + (7 << info->stepping);
	hw->io_ports.ctl_addr	 = (unsigned long)base + info->ctrloffset;

	hw->irq = ec->irq;
	hw->dev = &ec->dev;
}

static const struct ide_port_info icside_v5_port_info = {
	.host_flags		= IDE_HFLAG_NO_DMA,
	.chipset		= ide_acorn,
};

static int __devinit icside_register_v5(struct icside_state *state,
					struct expansion_card *ec)
{
	void __iomem *base;
	struct ide_host *host;
	struct ide_hw hw, *hws[] = { &hw };
	int ret;

	base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
	if (!base)
		return -ENOMEM;

	state->irq_port = base;

	ec->irqaddr = base + ICS_ARCIN_V5_INTRSTAT;
	ec->irqmask = 1;

	ecard_setirq(ec, &icside_ops_arcin_v5, state);

	/*
	 * Be on the safe side - disable interrupts
	 */
	icside_irqdisable_arcin_v5(ec, 0);

	icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec);

	host = ide_host_alloc(&icside_v5_port_info, hws, 1);
	if (host == NULL)
		return -ENODEV;

	state->host = host;

	ecard_set_drvdata(ec, state);

	ret = ide_host_register(host, &icside_v5_port_info, hws);
	if (ret)
		goto err_free;

	return 0;
err_free:
	ide_host_free(host);
	ecard_set_drvdata(ec, NULL);
	return ret;
}

static const struct ide_port_info icside_v6_port_info __initdata = {
	.init_dma		= icside_dma_off_init,
	.port_ops		= &icside_v6_no_dma_port_ops,
	.dma_ops		= &icside_v6_dma_ops,
	.host_flags		= IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
	.mwdma_mask		= ATA_MWDMA2,
	.swdma_mask		= ATA_SWDMA2,
	.chipset		= ide_acorn,
};

static int __devinit icside_register_v6(struct icside_state *state,
					struct expansion_card *ec)
{
	void __iomem *ioc_base, *easi_base;
	struct ide_host *host;
	unsigned int sel = 0;
	int ret;
	struct ide_hw hw[2], *hws[] = { &hw[0], &hw[1] };
	struct ide_port_info d = icside_v6_port_info;

	ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
	if (!ioc_base) {
		ret = -ENOMEM;
		goto out;
	}

	easi_base = ioc_base;

	if (ecard_resource_flags(ec, ECARD_RES_EASI)) {
		easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0);
		if (!easi_base) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * Enable access to the EASI region.
		 */
		sel = 1 << 5;
	}

	writeb(sel, ioc_base);

	ecard_setirq(ec, &icside_ops_arcin_v6, state);

	state->irq_port = easi_base;
	state->ioc_base = ioc_base;
	state->sel = sel;

	/*
	 * Be on the safe side - disable interrupts
	 */
	icside_irqdisable_arcin_v6(ec, 0);

	icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec);
	icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec);

	host = ide_host_alloc(&d, hws, 2);
	/* NOTE(review): this early return bypasses the 'out' label; the
	 * iomappings look ecardm-managed so nothing should leak — confirm. */
	if (host == NULL)
		return -ENODEV;

	state->host = host;

	ecard_set_drvdata(ec, state);

	/* Enable DMA ops only if the card has a DMA channel we can claim. */
	if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
		d.init_dma = icside_dma_init;
		d.port_ops = &icside_v6_port_ops;
	} else
		d.dma_ops = NULL;

	ret = ide_host_register(host, &d, hws);
	if (ret)
		goto err_free;

	return 0;
err_free:
	ide_host_free(host);
	if (d.dma_ops)
		free_dma(ec->dma);
	ecard_set_drvdata(ec, NULL);
out:
	return ret;
}

static int __devinit icside_probe(struct expansion_card *ec,
				  const struct ecard_id *id)
{
	struct icside_state *state;
	void __iomem *idmem;
	int ret;

	ret = ecard_request_resources(ec);
	if (ret)
		goto out;

	state = kzalloc(sizeof(struct icside_state), GFP_KERNEL);
	if (!state) {
		ret = -ENOMEM;
		goto release;
	}

	state->type = ICS_TYPE_NOTYPE;

	/* Read the 4-bit card type from the ident ROM, one bit per word. */
	idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
	if (idmem) {
		unsigned int type;

		type = readb(idmem + ICS_IDENT_OFFSET) & 1;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
		ecardm_iounmap(ec, idmem);

		state->type = type;
	}

	switch (state->type) {
	case ICS_TYPE_A3IN:
		dev_warn(&ec->dev, "A3IN unsupported\n");
		ret = -ENODEV;
		break;

	case ICS_TYPE_A3USER:
		dev_warn(&ec->dev, "A3USER unsupported\n");
		ret = -ENODEV;
		break;

	case ICS_TYPE_V5:
		ret = icside_register_v5(state, ec);
		break;

	case ICS_TYPE_V6:
		ret = icside_register_v6(state, ec);
		break;

	default:
		dev_warn(&ec->dev, "unknown interface type\n");
		ret = -ENODEV;
		break;
	}

	if (ret == 0)
		goto out;

	kfree(state);
release:
	ecard_release_resources(ec);
out:
	return ret;
}

static void __devexit icside_remove(struct expansion_card *ec)
{
	struct icside_state *state = ecard_get_drvdata(ec);

	switch (state->type) {
	case ICS_TYPE_V5:
		/* FIXME: tell IDE to stop using the interface */

		/* Disable interrupts */
		icside_irqdisable_arcin_v5(ec, 0);
		break;

	case ICS_TYPE_V6:
		/* FIXME: tell IDE to stop using the interface */
		if (ec->dma != NO_DMA)
			free_dma(ec->dma);

		/* Disable interrupts */
		icside_irqdisable_arcin_v6(ec, 0);

		/* Reset the ROM pointer/EASI selection */
		writeb(0, state->ioc_base);
		break;
	}

	ecard_set_drvdata(ec, NULL);

	kfree(state);
	ecard_release_resources(ec);
}

static void icside_shutdown(struct expansion_card *ec)
{
	struct icside_state *state = ecard_get_drvdata(ec);
	unsigned long flags;

	/*
	 * Disable interrupts from this card. We need to do
	 * this before disabling EASI since we may be accessing
	 * this register via that region.
	 */
	local_irq_save(flags);
	ec->ops->irqdisable(ec, 0);
	local_irq_restore(flags);

	/*
	 * Reset the ROM pointer so that we can read the ROM
	 * after a soft reboot. This also disables access to
	 * the IDE taskfile via the EASI region.
	 */
	if (state->ioc_base)
		writeb(0, state->ioc_base);
}

static const struct ecard_id icside_ids[] = {
	{ MANU_ICS, PROD_ICS_IDE },
	{ MANU_ICS2, PROD_ICS2_IDE },
	{ 0xffff, 0xffff }
};

static struct ecard_driver icside_driver = {
	.probe		= icside_probe,
	.remove		= __devexit_p(icside_remove),
	.shutdown	= icside_shutdown,
	.id_table	= icside_ids,
	.drv = {
		.name	= "icside",
	},
};

static int __init icside_init(void)
{
	return ecard_register_driver(&icside_driver);
}

static void __exit icside_exit(void)
{
	ecard_remove_driver(&icside_driver);
}

MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ICS IDE driver");

module_init(icside_init);
module_exit(icside_exit);
gpl-2.0
Jovy23/M919_Kernel
arch/arm/mach-kirkwood/mpp.c
5044
1064
/* * arch/arm/mach-kirkwood/mpp.c * * MPP functions for Marvell Kirkwood SoCs * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <mach/hardware.h> #include <plat/mpp.h> #include "common.h" #include "mpp.h" static unsigned int __init kirkwood_variant(void) { u32 dev, rev; kirkwood_pcie_id(&dev, &rev); if ((dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0) || (dev == MV88F6282_DEV_ID)) return MPP_F6281_MASK; if (dev == MV88F6192_DEV_ID && rev >= MV88F6192_REV_A0) return MPP_F6192_MASK; if (dev == MV88F6180_DEV_ID) return MPP_F6180_MASK; printk(KERN_ERR "MPP setup: unknown kirkwood variant " "(dev %#x rev %#x)\n", dev, rev); return 0; } void __init kirkwood_mpp_conf(unsigned int *mpp_list) { orion_mpp_conf(mpp_list, kirkwood_variant(), MPP_MAX, DEV_BUS_VIRT_BASE); }
gpl-2.0
Beeko/android_kernel_samsung_d2
drivers/net/ethernet/cisco/enic/enic_res.c
7860
9562
/*
 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_resource.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_nic.h"
#include "vnic_rss.h"
#include "enic_res.h"
#include "enic.h"

/* Read the vNIC configuration (MAC, ring sizes, MTU, interrupt setup)
 * from the adapter and clamp the values to the driver's limits. */
int enic_get_vnic_config(struct enic *enic)
{
	struct vnic_enet_config *c = &enic->config;
	int err;

	err = vnic_dev_get_mac_addr(enic->vdev, enic->mac_addr);
	if (err) {
		dev_err(enic_get_dev(enic),
			"Error getting MAC addr, %d\n", err);
		return err;
	}

/* Fetch one field of struct vnic_enet_config from the device;
 * NOTE: returns from the enclosing function on error. */
#define GET_CONFIG(m) \
	do { \
		err = vnic_dev_spec(enic->vdev, \
			offsetof(struct vnic_enet_config, m), \
			sizeof(c->m), &c->m); \
		if (err) { \
			dev_err(enic_get_dev(enic), \
				"Error getting %s, %d\n", #m, err); \
			return err; \
		} \
	} while (0)

	GET_CONFIG(flags);
	GET_CONFIG(wq_desc_count);
	GET_CONFIG(rq_desc_count);
	GET_CONFIG(mtu);
	GET_CONFIG(intr_timer_type);
	GET_CONFIG(intr_mode);
	GET_CONFIG(intr_timer_usec);
	GET_CONFIG(loop_tag);

	c->wq_desc_count = min_t(u32, ENIC_MAX_WQ_DESCS,
		max_t(u32, ENIC_MIN_WQ_DESCS, c->wq_desc_count));
	c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */

	c->rq_desc_count = min_t(u32, ENIC_MAX_RQ_DESCS,
		max_t(u32, ENIC_MIN_RQ_DESCS, c->rq_desc_count));
	c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */

	/* A zero MTU from firmware means "use the Ethernet default". */
	if (c->mtu == 0)
		c->mtu = 1500;
	c->mtu = min_t(u16, ENIC_MAX_MTU, max_t(u16, ENIC_MIN_MTU, c->mtu));

	c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
		vnic_dev_get_intr_coal_timer_max(enic->vdev));

	dev_info(enic_get_dev(enic),
		"vNIC MAC addr %pM wq/rq %d/%d mtu %d\n",
		enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu);

	dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
		"tso/lro %s/%s rss %s intr mode %s type %s timer %d usec "
		"loopback tag 0x%04x\n",
		ENIC_SETTING(enic, TXCSUM) ? "yes" : "no",
		ENIC_SETTING(enic, RXCSUM) ? "yes" : "no",
		ENIC_SETTING(enic, TSO) ? "yes" : "no",
		ENIC_SETTING(enic, LRO) ? "yes" : "no",
		ENIC_SETTING(enic, RSS) ? "yes" : "no",
		c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" :
		c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" :
		c->intr_mode == VENET_INTR_MODE_ANY ? "any" :
		"unknown",
		c->intr_timer_type == VENET_INTR_TYPE_MIN ? "min" :
		c->intr_timer_type == VENET_INTR_TYPE_IDLE ? "idle" :
		"unknown",
		c->intr_timer_usec,
		c->loop_tag);

	return 0;
}

int enic_add_vlan(struct enic *enic, u16 vlanid)
{
	u64 a0 = vlanid, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait);
	if (err)
		dev_err(enic_get_dev(enic), "Can't add vlan id, %d\n", err);

	return err;
}

int enic_del_vlan(struct enic *enic, u16 vlanid)
{
	u64 a0 = vlanid, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait);
	if (err)
		dev_err(enic_get_dev(enic), "Can't delete vlan id, %d\n", err);

	return err;
}

/* Pack the NIC/RSS parameters into a config word and push it to firmware. */
int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
	u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
	u8 ig_vlan_strip_en)
{
	u64 a0, a1;
	u32 nic_cfg;
	int wait = 1000;

	vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
		rss_hash_type, rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en, ig_vlan_strip_en);

	a0 = nic_cfg;
	a1 = 0;

	return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
}

/* Hand the DMA address/length of the RSS hash key to firmware. */
int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len)
{
	u64 a0 = (u64)key_pa, a1 = len;
	int wait = 1000;

	return vnic_dev_cmd(enic->vdev, CMD_RSS_KEY, &a0, &a1, wait);
}

/* Hand the DMA address/length of the RSS indirection table to firmware. */
int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len)
{
	u64 a0 = (u64)cpu_pa, a1 = len;
	int wait = 1000;

	return vnic_dev_cmd(enic->vdev, CMD_RSS_CPU, &a0, &a1, wait);
}

void enic_free_vnic_resources(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_free(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_free(&enic->rq[i]);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_free(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_free(&enic->intr[i]);
}

void enic_get_res_counts(struct enic *enic)
{
	enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
	enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
	enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
	enic->intr_count = vnic_dev_get_res_count(enic->vdev,
		RES_TYPE_INTR_CTRL);

	dev_info(enic_get_dev(enic),
		"vNIC resources avail: wq %d rq %d cq %d intr %d\n",
		enic->wq_count, enic->rq_count,
		enic->cq_count, enic->intr_count);
}

void enic_init_vnic_resources(struct enic *enic)
{
	enum vnic_dev_intr_mode intr_mode;
	unsigned int mask_on_assertion;
	unsigned int interrupt_offset;
	unsigned int error_interrupt_enable;
	unsigned int error_interrupt_offset;
	unsigned int cq_index;
	unsigned int i;

	intr_mode = vnic_dev_get_intr_mode(enic->vdev);

	/* Init RQ/WQ resources.
	 *
	 * RQ[0 - n-1] point to CQ[0 - n-1]
	 * WQ[0 - m-1] point to CQ[n - n+m-1]
	 *
	 * Error interrupt is not enabled for MSI.
	 */

	switch (intr_mode) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSIX:
		error_interrupt_enable = 1;
		error_interrupt_offset = enic->intr_count - 2;
		break;
	default:
		error_interrupt_enable = 0;
		error_interrupt_offset = 0;
		break;
	}

	for (i = 0; i < enic->rq_count; i++) {
		cq_index = i;
		vnic_rq_init(&enic->rq[i],
			cq_index,
			error_interrupt_enable,
			error_interrupt_offset);
	}

	for (i = 0; i < enic->wq_count; i++) {
		cq_index = enic->rq_count + i;
		vnic_wq_init(&enic->wq[i],
			cq_index,
			error_interrupt_enable,
			error_interrupt_offset);
	}

	/* Init CQ resources
	 *
	 * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI
	 * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X
	 */

	for (i = 0; i < enic->cq_count; i++) {

		switch (intr_mode) {
		case VNIC_DEV_INTR_MODE_MSIX:
			interrupt_offset = i;
			break;
		default:
			interrupt_offset = 0;
			break;
		}

		vnic_cq_init(&enic->cq[i],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			1 /* interrupt_enable */,
			1 /* cq_entry_enable */,
			0 /* cq_message_enable */,
			interrupt_offset,
			0 /* cq_message_addr */);
	}

	/* Init INTR resources
	 *
	 * mask_on_assertion is not used for INTx due to the level-
	 * triggered nature of INTx
	 */

	switch (intr_mode) {
	case VNIC_DEV_INTR_MODE_MSI:
	case VNIC_DEV_INTR_MODE_MSIX:
		mask_on_assertion = 1;
		break;
	default:
		mask_on_assertion = 0;
		break;
	}

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_init(&enic->intr[i],
			enic->config.intr_timer_usec,
			enic->config.intr_timer_type,
			mask_on_assertion);
	}
}

/* Allocate all WQ/RQ/CQ/INTR resources; on any failure everything
 * already allocated is freed again before returning the error. */
int enic_alloc_vnic_resources(struct enic *enic)
{
	enum vnic_dev_intr_mode intr_mode;
	unsigned int i;
	int err;

	intr_mode = vnic_dev_get_intr_mode(enic->vdev);

	dev_info(enic_get_dev(enic), "vNIC resources used:  "
		"wq %d rq %d cq %d intr %d intr mode %s\n",
		enic->wq_count, enic->rq_count,
		enic->cq_count, enic->intr_count,
		intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
		intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
		intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
		"unknown");

	/* Allocate queue resources
	 */

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,
			enic->config.wq_desc_count,
			sizeof(struct wq_enet_desc));
		if (err)
			goto err_out_cleanup;
	}

	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i,
			enic->config.rq_desc_count,
			sizeof(struct rq_enet_desc));
		if (err)
			goto err_out_cleanup;
	}

	/* RQ completions come first, then WQ completions. */
	for (i = 0; i < enic->cq_count; i++) {
		if (i < enic->rq_count)
			err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
				enic->config.rq_desc_count,
				sizeof(struct cq_enet_rq_desc));
		else
			err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
				enic->config.wq_desc_count,
				sizeof(struct cq_enet_wq_desc));
		if (err)
			goto err_out_cleanup;
	}

	for (i = 0; i < enic->intr_count; i++) {
		err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
		if (err)
			goto err_out_cleanup;
	}

	/* Hook remaining resource
	 */

	enic->legacy_pba = vnic_dev_get_res(enic->vdev,
		RES_TYPE_INTR_PBA_LEGACY, 0);
	if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
		dev_err(enic_get_dev(enic),
			"Failed to hook legacy pba resource\n");
		err = -ENODEV;
		goto err_out_cleanup;
	}

	return 0;

err_out_cleanup:
	enic_free_vnic_resources(enic);

	return err;
}
gpl-2.0
n3ocort3x/endeavoru_2.17
drivers/net/stmmac/stmmac_timer.c
9140
3490
/******************************************************************************* STMMAC external timer support. Copyright (C) 2007-2009 STMicroelectronics Ltd This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ #include <linux/kernel.h> #include <linux/etherdevice.h> #include "stmmac_timer.h" static void stmmac_timer_handler(void *data) { struct net_device *dev = (struct net_device *)data; stmmac_schedule(dev); } #define STMMAC_TIMER_MSG(timer, freq) \ printk(KERN_INFO "stmmac_timer: %s Timer ON (freq %dHz)\n", timer, freq); #if defined(CONFIG_STMMAC_RTC_TIMER) #include <linux/rtc.h> static struct rtc_device *stmmac_rtc; static rtc_task_t stmmac_task; static void stmmac_rtc_start(unsigned int new_freq) { rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq); rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1); } static void stmmac_rtc_stop(void) { rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0); } int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm) { stmmac_task.private_data = dev; stmmac_task.func = stmmac_timer_handler; stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); if (stmmac_rtc == NULL) { pr_err("open rtc device failed\n"); return -ENODEV; } 
rtc_irq_register(stmmac_rtc, &stmmac_task); /* Periodic mode is not supported */ if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) { pr_err("set periodic failed\n"); rtc_irq_unregister(stmmac_rtc, &stmmac_task); rtc_class_close(stmmac_rtc); return -1; } STMMAC_TIMER_MSG(CONFIG_RTC_HCTOSYS_DEVICE, tm->freq); tm->timer_start = stmmac_rtc_start; tm->timer_stop = stmmac_rtc_stop; return 0; } int stmmac_close_ext_timer(void) { rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0); rtc_irq_unregister(stmmac_rtc, &stmmac_task); rtc_class_close(stmmac_rtc); return 0; } #elif defined(CONFIG_STMMAC_TMU_TIMER) #include <linux/clk.h> #define TMU_CHANNEL "tmu2_clk" static struct clk *timer_clock; static void stmmac_tmu_start(unsigned int new_freq) { clk_set_rate(timer_clock, new_freq); clk_enable(timer_clock); } static void stmmac_tmu_stop(void) { clk_disable(timer_clock); } int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm) { timer_clock = clk_get(NULL, TMU_CHANNEL); if (timer_clock == NULL) return -1; if (tmu2_register_user(stmmac_timer_handler, (void *)dev) < 0) { timer_clock = NULL; return -1; } STMMAC_TIMER_MSG("TMU2", tm->freq); tm->timer_start = stmmac_tmu_start; tm->timer_stop = stmmac_tmu_stop; return 0; } int stmmac_close_ext_timer(void) { clk_disable(timer_clock); tmu2_unregister_user(); clk_put(timer_clock); return 0; } #endif
gpl-2.0
kimjh-sane/imx6sane-linux-3.14.28
arch/s390/crypto/sha512_s390.c
9652
4157
/* * Cryptographic API. * * s390 implementation of the SHA512 and SHA38 Secure Hash Algorithm. * * Copyright IBM Corp. 2007 * Author(s): Jan Glauber (jang@de.ibm.com) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/internal/hash.h> #include <crypto/sha.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include "sha.h" #include "crypt_s390.h" static int sha512_init(struct shash_desc *desc) { struct s390_sha_ctx *ctx = shash_desc_ctx(desc); *(__u64 *)&ctx->state[0] = 0x6a09e667f3bcc908ULL; *(__u64 *)&ctx->state[2] = 0xbb67ae8584caa73bULL; *(__u64 *)&ctx->state[4] = 0x3c6ef372fe94f82bULL; *(__u64 *)&ctx->state[6] = 0xa54ff53a5f1d36f1ULL; *(__u64 *)&ctx->state[8] = 0x510e527fade682d1ULL; *(__u64 *)&ctx->state[10] = 0x9b05688c2b3e6c1fULL; *(__u64 *)&ctx->state[12] = 0x1f83d9abfb41bd6bULL; *(__u64 *)&ctx->state[14] = 0x5be0cd19137e2179ULL; ctx->count = 0; ctx->func = KIMD_SHA_512; return 0; } static int sha512_export(struct shash_desc *desc, void *out) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); struct sha512_state *octx = out; octx->count[0] = sctx->count; octx->count[1] = 0; memcpy(octx->state, sctx->state, sizeof(octx->state)); memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); return 0; } static int sha512_import(struct shash_desc *desc, const void *in) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); const struct sha512_state *ictx = in; if (unlikely(ictx->count[1])) return -ERANGE; sctx->count = ictx->count[0]; memcpy(sctx->state, ictx->state, sizeof(ictx->state)); memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); sctx->func = KIMD_SHA_512; return 0; } static struct shash_alg sha512_alg = { .digestsize = SHA512_DIGEST_SIZE, .init = sha512_init, .update = s390_sha_update, .final = 
s390_sha_final, .export = sha512_export, .import = sha512_import, .descsize = sizeof(struct s390_sha_ctx), .statesize = sizeof(struct sha512_state), .base = { .cra_name = "sha512", .cra_driver_name= "sha512-s390", .cra_priority = CRYPT_S390_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; MODULE_ALIAS("sha512"); static int sha384_init(struct shash_desc *desc) { struct s390_sha_ctx *ctx = shash_desc_ctx(desc); *(__u64 *)&ctx->state[0] = 0xcbbb9d5dc1059ed8ULL; *(__u64 *)&ctx->state[2] = 0x629a292a367cd507ULL; *(__u64 *)&ctx->state[4] = 0x9159015a3070dd17ULL; *(__u64 *)&ctx->state[6] = 0x152fecd8f70e5939ULL; *(__u64 *)&ctx->state[8] = 0x67332667ffc00b31ULL; *(__u64 *)&ctx->state[10] = 0x8eb44a8768581511ULL; *(__u64 *)&ctx->state[12] = 0xdb0c2e0d64f98fa7ULL; *(__u64 *)&ctx->state[14] = 0x47b5481dbefa4fa4ULL; ctx->count = 0; ctx->func = KIMD_SHA_512; return 0; } static struct shash_alg sha384_alg = { .digestsize = SHA384_DIGEST_SIZE, .init = sha384_init, .update = s390_sha_update, .final = s390_sha_final, .export = sha512_export, .import = sha512_import, .descsize = sizeof(struct s390_sha_ctx), .statesize = sizeof(struct sha512_state), .base = { .cra_name = "sha384", .cra_driver_name= "sha384-s390", .cra_priority = CRYPT_S390_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct s390_sha_ctx), .cra_module = THIS_MODULE, } }; MODULE_ALIAS("sha384"); static int __init init(void) { int ret; if (!crypt_s390_func_available(KIMD_SHA_512, CRYPT_S390_MSA)) return -EOPNOTSUPP; if ((ret = crypto_register_shash(&sha512_alg)) < 0) goto out; if ((ret = crypto_register_shash(&sha384_alg)) < 0) crypto_unregister_shash(&sha512_alg); out: return ret; } static void __exit fini(void) { crypto_unregister_shash(&sha512_alg); crypto_unregister_shash(&sha384_alg); } module_init(init); module_exit(fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA512 and SHA-384 Secure 
Hash Algorithm");
gpl-2.0
ChronoMonochrome/android_kernel_lenovo_msm8916
arch/cris/mm/ioremap.c
12468
2367
/* * arch/cris/mm/ioremap.c * * Re-map IO memory to kernel address space so that we can access it. * Needed for memory-mapped I/O devices mapped outside our normal DRAM * window (that is, all memory-mapped I/O devices). * * (C) Copyright 1995 1996 Linus Torvalds * CRIS-port by Axis Communications AB */ #include <linux/vmalloc.h> #include <linux/io.h> #include <asm/pgalloc.h> #include <arch/memmap.h> /* * Generic mapping function (not visible outside): */ /* * Remap an arbitrary physical address space into the kernel virtual * address space. Needed when the kernel wants to access high addresses * directly. * * NOTE! We need to allow non-page-aligned mappings too: we will obviously * have to convert them into an offset in a page-aligned mapping, but the * caller shouldn't need to know that small detail. */ void __iomem * __ioremap_prot(unsigned long phys_addr, unsigned long size, pgprot_t prot) { void __iomem * addr; struct vm_struct * area; unsigned long offset, last_addr; /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr) return NULL; /* * Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr+1) - phys_addr; /* * Ok, go for it.. */ area = get_vm_area(size, VM_IOREMAP); if (!area) return NULL; addr = (void __iomem *)area->addr; if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size, phys_addr, prot)) { vfree((void __force *)addr); return NULL; } return (void __iomem *) (offset + (char __iomem *)addr); } void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) { return __ioremap_prot(phys_addr, size, __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | _PAGE_GLOBAL | _PAGE_KERNEL | flags)); } /** * ioremap_nocache - map bus memory into CPU space * @offset: bus address of the memory * @size: size of the resource to map * * Must be freed with iounmap. 
*/ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size) { return __ioremap(phys_addr | MEM_NON_CACHEABLE, size, 0); } void iounmap(volatile void __iomem *addr) { if (addr > high_memory) return vfree((void *) (PAGE_MASK & (unsigned long) addr)); }
gpl-2.0
HandyMenny/android_kernel_sony_u8500
arch/cris/mm/ioremap.c
12468
2367
/* * arch/cris/mm/ioremap.c * * Re-map IO memory to kernel address space so that we can access it. * Needed for memory-mapped I/O devices mapped outside our normal DRAM * window (that is, all memory-mapped I/O devices). * * (C) Copyright 1995 1996 Linus Torvalds * CRIS-port by Axis Communications AB */ #include <linux/vmalloc.h> #include <linux/io.h> #include <asm/pgalloc.h> #include <arch/memmap.h> /* * Generic mapping function (not visible outside): */ /* * Remap an arbitrary physical address space into the kernel virtual * address space. Needed when the kernel wants to access high addresses * directly. * * NOTE! We need to allow non-page-aligned mappings too: we will obviously * have to convert them into an offset in a page-aligned mapping, but the * caller shouldn't need to know that small detail. */ void __iomem * __ioremap_prot(unsigned long phys_addr, unsigned long size, pgprot_t prot) { void __iomem * addr; struct vm_struct * area; unsigned long offset, last_addr; /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr) return NULL; /* * Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr+1) - phys_addr; /* * Ok, go for it.. */ area = get_vm_area(size, VM_IOREMAP); if (!area) return NULL; addr = (void __iomem *)area->addr; if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size, phys_addr, prot)) { vfree((void __force *)addr); return NULL; } return (void __iomem *) (offset + (char __iomem *)addr); } void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) { return __ioremap_prot(phys_addr, size, __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | _PAGE_GLOBAL | _PAGE_KERNEL | flags)); } /** * ioremap_nocache - map bus memory into CPU space * @offset: bus address of the memory * @size: size of the resource to map * * Must be freed with iounmap. 
*/ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size) { return __ioremap(phys_addr | MEM_NON_CACHEABLE, size, 0); } void iounmap(volatile void __iomem *addr) { if (addr > high_memory) return vfree((void *) (PAGE_MASK & (unsigned long) addr)); }
gpl-2.0
Root-Box/kernel_samsung_smdk4412
mm/vmalloc.c
181
67315
/* * linux/mm/vmalloc.c * * Copyright (C) 1993 Linus Torvalds * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002 * Numa awareness, Christoph Lameter, SGI, June 2005 */ #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/highmem.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/debugobjects.h> #include <linux/kallsyms.h> #include <linux/list.h> #include <linux/rbtree.h> #include <linux/radix-tree.h> #include <linux/rcupdate.h> #include <linux/pfn.h> #include <linux/kmemleak.h> #include <asm/atomic.h> #include <asm/uaccess.h> #include <asm/tlbflush.h> #include <asm/shmparam.h> /*** Page table manipulation functions ***/ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) { pte_t *pte; pte = pte_offset_kernel(pmd, addr); do { pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); WARN_ON(!pte_none(ptent) && !pte_present(ptent)); } while (pte++, addr += PAGE_SIZE, addr != end); } static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end) { pmd_t *pmd; unsigned long next; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); if (pmd_none_or_clear_bad(pmd)) continue; vunmap_pte_range(pmd, addr, next); } while (pmd++, addr = next, addr != end); } static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end) { pud_t *pud; unsigned long next; pud = pud_offset(pgd, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; vunmap_pmd_range(pud, addr, next); } while (pud++, addr = next, addr != end); } static void vunmap_page_range(unsigned long addr, unsigned long end) { pgd_t *pgd; unsigned long next; BUG_ON(addr >= end); pgd = 
pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; vunmap_pud_range(pgd, addr, next); } while (pgd++, addr = next, addr != end); } static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr) { pte_t *pte; /* * nr is a running index into the array which helps higher level * callers keep track of where we're up to. */ pte = pte_alloc_kernel(pmd, addr); if (!pte) return -ENOMEM; do { struct page *page = pages[*nr]; if (WARN_ON(!pte_none(*pte))) return -EBUSY; if (WARN_ON(!page)) return -ENOMEM; set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); (*nr)++; } while (pte++, addr += PAGE_SIZE, addr != end); return 0; } static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr) { pmd_t *pmd; unsigned long next; pmd = pmd_alloc(&init_mm, pud, addr); if (!pmd) return -ENOMEM; do { next = pmd_addr_end(addr, end); if (vmap_pte_range(pmd, addr, next, prot, pages, nr)) return -ENOMEM; } while (pmd++, addr = next, addr != end); return 0; } static int vmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr) { pud_t *pud; unsigned long next; pud = pud_alloc(&init_mm, pgd, addr); if (!pud) return -ENOMEM; do { next = pud_addr_end(addr, end); if (vmap_pmd_range(pud, addr, next, prot, pages, nr)) return -ENOMEM; } while (pud++, addr = next, addr != end); return 0; } /* * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and * will have pfns corresponding to the "pages" array. * * Ie. 
pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N] */ static int vmap_page_range_noflush(unsigned long start, unsigned long end, pgprot_t prot, struct page **pages) { pgd_t *pgd; unsigned long next; unsigned long addr = start; int err = 0; int nr = 0; BUG_ON(addr >= end); pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); err = vmap_pud_range(pgd, addr, next, prot, pages, &nr); if (err) return err; } while (pgd++, addr = next, addr != end); return nr; } static int vmap_page_range(unsigned long start, unsigned long end, pgprot_t prot, struct page **pages) { int ret; ret = vmap_page_range_noflush(start, end, prot, pages); flush_cache_vmap(start, end); return ret; } int is_vmalloc_or_module_addr(const void *x) { /* * ARM, x86-64 and sparc64 put modules in a special place, * and fall back on vmalloc() if that fails. Others * just put it in the vmalloc space. */ #if defined(CONFIG_MODULES) && defined(MODULES_VADDR) unsigned long addr = (unsigned long)x; if (addr >= MODULES_VADDR && addr < MODULES_END) return 1; #endif return is_vmalloc_addr(x); } /* * Walk a vmap address to the struct page it maps. */ struct page *vmalloc_to_page(const void *vmalloc_addr) { unsigned long addr = (unsigned long) vmalloc_addr; struct page *page = NULL; pgd_t *pgd = pgd_offset_k(addr); /* * XXX we might need to change this if we add VIRTUAL_BUG_ON for * architectures that do not vmalloc module space */ VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr)); if (!pgd_none(*pgd)) { pud_t *pud = pud_offset(pgd, addr); if (!pud_none(*pud)) { pmd_t *pmd = pmd_offset(pud, addr); if (!pmd_none(*pmd)) { pte_t *ptep, pte; ptep = pte_offset_map(pmd, addr); pte = *ptep; if (pte_present(pte)) page = pte_page(pte); pte_unmap(ptep); } } } return page; } EXPORT_SYMBOL(vmalloc_to_page); /* * Map a vmalloc()-space virtual address to the physical page frame number. 
*/ unsigned long vmalloc_to_pfn(const void *vmalloc_addr) { return page_to_pfn(vmalloc_to_page(vmalloc_addr)); } EXPORT_SYMBOL(vmalloc_to_pfn); /*** Global kva allocator ***/ #define VM_LAZY_FREE 0x01 #define VM_LAZY_FREEING 0x02 #define VM_VM_AREA 0x04 struct vmap_area { unsigned long va_start; unsigned long va_end; unsigned long flags; struct rb_node rb_node; /* address sorted rbtree */ struct list_head list; /* address sorted list */ struct list_head purge_list; /* "lazy purge" list */ struct vm_struct *vm; struct rcu_head rcu_head; }; static DEFINE_SPINLOCK(vmap_area_lock); static LIST_HEAD(vmap_area_list); static struct rb_root vmap_area_root = RB_ROOT; /* The vmap cache globals are protected by vmap_area_lock */ static struct rb_node *free_vmap_cache; static unsigned long cached_hole_size; static unsigned long cached_vstart; static unsigned long cached_align; static unsigned long vmap_area_pcpu_hole; static struct vmap_area *__find_vmap_area(unsigned long addr) { struct rb_node *n = vmap_area_root.rb_node; while (n) { struct vmap_area *va; va = rb_entry(n, struct vmap_area, rb_node); if (addr < va->va_start) n = n->rb_left; else if (addr > va->va_start) n = n->rb_right; else return va; } return NULL; } static void __insert_vmap_area(struct vmap_area *va) { struct rb_node **p = &vmap_area_root.rb_node; struct rb_node *parent = NULL; struct rb_node *tmp; while (*p) { struct vmap_area *tmp_va; parent = *p; tmp_va = rb_entry(parent, struct vmap_area, rb_node); if (va->va_start < tmp_va->va_end) p = &(*p)->rb_left; else if (va->va_end > tmp_va->va_start) p = &(*p)->rb_right; else BUG(); } rb_link_node(&va->rb_node, parent, p); rb_insert_color(&va->rb_node, &vmap_area_root); /* address-sort this list so it is usable like the vmlist */ tmp = rb_prev(&va->rb_node); if (tmp) { struct vmap_area *prev; prev = rb_entry(tmp, struct vmap_area, rb_node); list_add_rcu(&va->list, &prev->list); } else list_add_rcu(&va->list, &vmap_area_list); } static void 
purge_vmap_area_lazy(void); /* * Allocate a region of KVA of the specified size and alignment, within the * vstart and vend. */ static struct vmap_area *alloc_vmap_area(unsigned long size, unsigned long align, unsigned long vstart, unsigned long vend, int node, gfp_t gfp_mask) { struct vmap_area *va; struct rb_node *n; unsigned long addr; int purged = 0; struct vmap_area *first; BUG_ON(!size); BUG_ON(size & ~PAGE_MASK); BUG_ON(!is_power_of_2(align)); va = kmalloc_node(sizeof(struct vmap_area), gfp_mask & GFP_RECLAIM_MASK, node); if (unlikely(!va)) return ERR_PTR(-ENOMEM); retry: spin_lock(&vmap_area_lock); /* * Invalidate cache if we have more permissive parameters. * cached_hole_size notes the largest hole noticed _below_ * the vmap_area cached in free_vmap_cache: if size fits * into that hole, we want to scan from vstart to reuse * the hole instead of allocating above free_vmap_cache. * Note that __free_vmap_area may update free_vmap_cache * without updating cached_hole_size or cached_align. 
*/ if (!free_vmap_cache || size < cached_hole_size || vstart < cached_vstart || align < cached_align) { nocache: cached_hole_size = 0; free_vmap_cache = NULL; } /* record if we encounter less permissive parameters */ cached_vstart = vstart; cached_align = align; /* find starting point for our search */ if (free_vmap_cache) { first = rb_entry(free_vmap_cache, struct vmap_area, rb_node); addr = ALIGN(first->va_end, align); if (addr < vstart) goto nocache; if (addr + size - 1 < addr) goto overflow; } else { addr = ALIGN(vstart, align); if (addr + size - 1 < addr) goto overflow; n = vmap_area_root.rb_node; first = NULL; while (n) { struct vmap_area *tmp; tmp = rb_entry(n, struct vmap_area, rb_node); if (tmp->va_end >= addr) { first = tmp; if (tmp->va_start <= addr) break; n = n->rb_left; } else n = n->rb_right; } if (!first) goto found; } /* from the starting point, walk areas until a suitable hole is found */ while (addr + size > first->va_start && addr + size <= vend) { if (addr + cached_hole_size < first->va_start) cached_hole_size = first->va_start - addr; addr = ALIGN(first->va_end, align); if (addr + size - 1 < addr) goto overflow; n = rb_next(&first->rb_node); if (n) first = rb_entry(n, struct vmap_area, rb_node); else goto found; } found: if (addr + size > vend) goto overflow; va->va_start = addr; va->va_end = addr + size; va->flags = 0; __insert_vmap_area(va); free_vmap_cache = &va->rb_node; spin_unlock(&vmap_area_lock); BUG_ON(va->va_start & (align-1)); BUG_ON(va->va_start < vstart); BUG_ON(va->va_end > vend); return va; overflow: spin_unlock(&vmap_area_lock); if (!purged) { purge_vmap_area_lazy(); purged = 1; goto retry; } if (printk_ratelimit()) printk(KERN_WARNING "vmap allocation for size %lu failed: " "use vmalloc=<size> to increase size.\n", size); kfree(va); return ERR_PTR(-EBUSY); } static void rcu_free_va(struct rcu_head *head) { struct vmap_area *va = container_of(head, struct vmap_area, rcu_head); kfree(va); } static void __free_vmap_area(struct 
vmap_area *va) { BUG_ON(RB_EMPTY_NODE(&va->rb_node)); if (free_vmap_cache) { if (va->va_end < cached_vstart) { free_vmap_cache = NULL; } else { struct vmap_area *cache; cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node); if (va->va_start <= cache->va_start) { free_vmap_cache = rb_prev(&va->rb_node); /* * We don't try to update cached_hole_size or * cached_align, but it won't go very wrong. */ } } } rb_erase(&va->rb_node, &vmap_area_root); RB_CLEAR_NODE(&va->rb_node); list_del_rcu(&va->list); /* * Track the highest possible candidate for pcpu area * allocation. Areas outside of vmalloc area can be returned * here too, consider only end addresses which fall inside * vmalloc area proper. */ if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END) vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end); call_rcu(&va->rcu_head, rcu_free_va); } /* * Free a region of KVA allocated by alloc_vmap_area */ static void free_vmap_area(struct vmap_area *va) { spin_lock(&vmap_area_lock); __free_vmap_area(va); spin_unlock(&vmap_area_lock); } /* * Clear the pagetable entries of a given vmap_area */ static void unmap_vmap_area(struct vmap_area *va) { vunmap_page_range(va->va_start, va->va_end); } static void vmap_debug_free_range(unsigned long start, unsigned long end) { /* * Unmap page tables and force a TLB flush immediately if * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free * bugs similarly to those in linear kernel virtual address * space after a page has been freed. * * All the lazy freeing logic is still retained, in order to * minimise intrusiveness of this debugging feature. * * This is going to be *slow* (linear kernel virtual address * debugging doesn't do a broadcast TLB flush so it is a lot * faster). */ #ifdef CONFIG_DEBUG_PAGEALLOC vunmap_page_range(start, end); flush_tlb_kernel_range(start, end); #endif } /* * lazy_max_pages is the maximum amount of virtual address space we gather up * before attempting to purge with a TLB flush. 
* * There is a tradeoff here: a larger number will cover more kernel page tables * and take slightly longer to purge, but it will linearly reduce the number of * global TLB flushes that must be performed. It would seem natural to scale * this number up linearly with the number of CPUs (because vmapping activity * could also scale linearly with the number of CPUs), however it is likely * that in practice, workloads might be constrained in other ways that mean * vmap activity will not scale linearly with CPUs. Also, I want to be * conservative and not introduce a big latency on huge systems, so go with * a less aggressive log scale. It will still be an improvement over the old * code, and it will be simple to change the scale factor if we find that it * becomes a problem on bigger systems. */ static unsigned long lazy_max_pages(void) { unsigned int log; log = fls(num_online_cpus()); return log * (32UL * 1024 * 1024 / PAGE_SIZE); } static atomic_t vmap_lazy_nr = ATOMIC_INIT(0); /* for per-CPU blocks */ static void purge_fragmented_blocks_allcpus(void); /* * called before a call to iounmap() if the caller wants vm_area_struct's * immediately freed. */ void set_iounmap_nonlazy(void) { atomic_set(&vmap_lazy_nr, lazy_max_pages()+1); } /* * Purges all lazily-freed vmap areas. * * If sync is 0 then don't purge if there is already a purge in progress. * If force_flush is 1, then flush kernel TLBs between *start and *end even * if we found no lazy vmap areas to unmap (callers can use this to optimise * their own TLB flushing). * Returns with *start = min(*start, lowest purged address) * *end = max(*end, highest purged address) */ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end, int sync, int force_flush) { static DEFINE_SPINLOCK(purge_lock); LIST_HEAD(valist); struct vmap_area *va; struct vmap_area *n_va; int nr = 0; /* * If sync is 0 but force_flush is 1, we'll go sync anyway but callers * should not expect such behaviour. 
This just simplifies locking for * the case that isn't actually used at the moment anyway. */ if (!sync && !force_flush) { if (!spin_trylock(&purge_lock)) return; } else spin_lock(&purge_lock); if (sync) purge_fragmented_blocks_allcpus(); rcu_read_lock(); list_for_each_entry_rcu(va, &vmap_area_list, list) { if (va->flags & VM_LAZY_FREE) { if (va->va_start < *start) *start = va->va_start; if (va->va_end > *end) *end = va->va_end; nr += (va->va_end - va->va_start) >> PAGE_SHIFT; list_add_tail(&va->purge_list, &valist); va->flags |= VM_LAZY_FREEING; va->flags &= ~VM_LAZY_FREE; } } rcu_read_unlock(); if (nr) atomic_sub(nr, &vmap_lazy_nr); if (nr || force_flush) flush_tlb_kernel_range(*start, *end); if (nr) { spin_lock(&vmap_area_lock); list_for_each_entry_safe(va, n_va, &valist, purge_list) __free_vmap_area(va); spin_unlock(&vmap_area_lock); } spin_unlock(&purge_lock); } /* * Kick off a purge of the outstanding lazy areas. Don't bother if somebody * is already purging. */ static void try_purge_vmap_area_lazy(void) { unsigned long start = ULONG_MAX, end = 0; __purge_vmap_area_lazy(&start, &end, 0, 0); } /* * Kick off a purge of the outstanding lazy areas. */ static void purge_vmap_area_lazy(void) { unsigned long start = ULONG_MAX, end = 0; __purge_vmap_area_lazy(&start, &end, 1, 0); } /* * Free a vmap area, caller ensuring that the area has been unmapped * and flush_cache_vunmap had been called for the correct range * previously. */ static void free_vmap_area_noflush(struct vmap_area *va) { va->flags |= VM_LAZY_FREE; atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr); if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages())) try_purge_vmap_area_lazy(); } /* * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been * called for the correct range previously. 
*/ static void free_unmap_vmap_area_noflush(struct vmap_area *va) { unmap_vmap_area(va); free_vmap_area_noflush(va); } /* * Free and unmap a vmap area */ static void free_unmap_vmap_area(struct vmap_area *va) { flush_cache_vunmap(va->va_start, va->va_end); free_unmap_vmap_area_noflush(va); } static struct vmap_area *find_vmap_area(unsigned long addr) { struct vmap_area *va; spin_lock(&vmap_area_lock); va = __find_vmap_area(addr); spin_unlock(&vmap_area_lock); return va; } static void free_unmap_vmap_area_addr(unsigned long addr) { struct vmap_area *va; va = find_vmap_area(addr); BUG_ON(!va); free_unmap_vmap_area(va); } /*** Per cpu kva allocator ***/ /* * vmap space is limited especially on 32 bit architectures. Ensure there is * room for at least 16 percpu vmap blocks per CPU. */ /* * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess * instead (we just need a rough idea) */ #if BITS_PER_LONG == 32 #define VMALLOC_SPACE (128UL*1024*1024) #else #define VMALLOC_SPACE (128UL*1024*1024*1024) #endif #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ #define VMAP_MAX(x, y) ((x) > (y) ? 
(x) : (y)) /* can't use max() */ #define VMAP_BBMAP_BITS \ VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) static bool vmap_initialized __read_mostly = false; struct vmap_block_queue { spinlock_t lock; struct list_head free; }; struct vmap_block { spinlock_t lock; struct vmap_area *va; struct vmap_block_queue *vbq; unsigned long free, dirty; DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS); DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS); struct list_head free_list; struct rcu_head rcu_head; struct list_head purge; }; /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); /* * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block * in the free path. Could get rid of this if we change the API to return a * "cookie" from alloc, to be passed to free. But no big deal yet. */ static DEFINE_SPINLOCK(vmap_block_tree_lock); static RADIX_TREE(vmap_block_tree, GFP_ATOMIC); /* * We should probably have a fallback mechanism to allocate virtual memory * out of partially filled vmap blocks. However vmap block sizing should be * fairly reasonable according to the vmalloc size, so it shouldn't be a * big problem. 
*/ static unsigned long addr_to_vb_idx(unsigned long addr) { addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); addr /= VMAP_BLOCK_SIZE; return addr; } static struct vmap_block *new_vmap_block(gfp_t gfp_mask) { struct vmap_block_queue *vbq; struct vmap_block *vb; struct vmap_area *va; unsigned long vb_idx; int node, err; node = numa_node_id(); vb = kmalloc_node(sizeof(struct vmap_block), gfp_mask & GFP_RECLAIM_MASK, node); if (unlikely(!vb)) return ERR_PTR(-ENOMEM); va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, VMALLOC_START, VMALLOC_END, node, gfp_mask); if (IS_ERR(va)) { kfree(vb); return ERR_CAST(va); } err = radix_tree_preload(gfp_mask); if (unlikely(err)) { kfree(vb); free_vmap_area(va); return ERR_PTR(err); } spin_lock_init(&vb->lock); vb->va = va; vb->free = VMAP_BBMAP_BITS; vb->dirty = 0; bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS); bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS); INIT_LIST_HEAD(&vb->free_list); vb_idx = addr_to_vb_idx(va->va_start); spin_lock(&vmap_block_tree_lock); err = radix_tree_insert(&vmap_block_tree, vb_idx, vb); spin_unlock(&vmap_block_tree_lock); BUG_ON(err); radix_tree_preload_end(); vbq = &get_cpu_var(vmap_block_queue); vb->vbq = vbq; spin_lock(&vbq->lock); list_add_rcu(&vb->free_list, &vbq->free); spin_unlock(&vbq->lock); put_cpu_var(vmap_block_queue); return vb; } static void rcu_free_vb(struct rcu_head *head) { struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head); kfree(vb); } static void free_vmap_block(struct vmap_block *vb) { struct vmap_block *tmp; unsigned long vb_idx; vb_idx = addr_to_vb_idx(vb->va->va_start); spin_lock(&vmap_block_tree_lock); tmp = radix_tree_delete(&vmap_block_tree, vb_idx); spin_unlock(&vmap_block_tree_lock); BUG_ON(tmp != vb); free_vmap_area_noflush(vb->va); call_rcu(&vb->rcu_head, rcu_free_vb); } static void purge_fragmented_blocks(int cpu) { LIST_HEAD(purge); struct vmap_block *vb; struct vmap_block *n_vb; struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 
	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		/* cheap unlocked check first; re-checked under vb->lock below */
		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
			bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	/* free the collected blocks outside the RCU read section */
	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

/* Purge fragmented blocks on the current CPU only (caller disables preemption). */
static void purge_fragmented_blocks_thiscpu(void)
{
	purge_fragmented_blocks(smp_processor_id());
}

/* Purge fragmented blocks on every possible CPU. */
static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

/*
 * Allocate @size bytes (page-multiple, at most VMAP_MAX_ALLOC pages) of
 * vmap space out of this CPU's vmap blocks, creating a new block if none
 * has room. Returns the virtual address or an ERR_PTR().
 */
static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;
	int purge = 0;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		if (vb->free < 1UL << order)
			goto next;

		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i < 0) {
			if (vb->free + vb->dirty == VMAP_BBMAP_BITS) {
				/* fragmented and no outstanding allocations */
				BUG_ON(vb->dirty != VMAP_BBMAP_BITS);
				purge = 1;
			}
			goto next;
		}
		addr = vb->va->va_start + (i << PAGE_SHIFT);
		BUG_ON(addr_to_vb_idx(addr) !=
				addr_to_vb_idx(vb->va->va_start));
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			/* nothing left: drop from the free queue */
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}
		spin_unlock(&vb->lock);
		break;
next:
		spin_unlock(&vb->lock);
	}

	if (purge)
		purge_fragmented_blocks_thiscpu();

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		/* no block had room: make a new one and retry the scan */
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}

/*
 * Return a vb_alloc()ed range. The pages are marked dirty (pending TLB
 * flush) rather than made free again; a fully dirty block is released.
 */
static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

	spin_lock(&vb->lock);
	/* the region must not already be marked dirty */
	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	/* accumulate the union of all dirty ranges across every CPU's blocks */
	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				/* skip past this dirty run, find the next one */
				i = j;
				i = find_next_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	/* small allocations came from a vmap block, large ones from a vmap_area */
	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use.
 PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		/* small request: carve it out of a per-CPU vmap block */
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		/* large request: take a dedicated vmap_area */
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);

/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called. @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero. On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;	/* running boot-time cursor */
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm->next = vmlist;
	vmlist = vm;
}

/* Boot-time setup of the vmap allocator: per-CPU queues + existing vmlist. */
void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
	}

	/* Import existing vmlist entries.
 */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		/* NOTE(review): kzalloc return is not checked here — a boot-time
		 * GFP_NOWAIT failure would oops; presumably deemed acceptable. */
		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
		va->flags = VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = VMALLOC_END;

	vmap_initialized = true;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vunmap() on to-be-mapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);

/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
*/ void unmap_kernel_range(unsigned long addr, unsigned long size) { unsigned long end = addr + size; flush_cache_vunmap(addr, end); vunmap_page_range(addr, end); flush_tlb_kernel_range(addr, end); } int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages) { unsigned long addr = (unsigned long)area->addr; unsigned long end = addr + area->size - PAGE_SIZE; int err; err = vmap_page_range(addr, end, prot, *pages); if (err > 0) { *pages += err; err = 0; } return err; } EXPORT_SYMBOL_GPL(map_vm_area); /*** Old vmalloc interfaces ***/ DEFINE_RWLOCK(vmlist_lock); struct vm_struct *vmlist; static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, unsigned long flags, void *caller) { vm->flags = flags; vm->addr = (void *)va->va_start; vm->size = va->va_end - va->va_start; vm->caller = caller; #ifdef CONFIG_DEBUG_VMALLOC vm->pid = current->pid; vm->task_name = current->comm; #endif va->vm = vm; va->flags |= VM_VM_AREA; } static void insert_vmalloc_vmlist(struct vm_struct *vm) { struct vm_struct *tmp, **p; vm->flags &= ~VM_UNLIST; write_lock(&vmlist_lock); for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { if (tmp->addr >= vm->addr) break; } vm->next = *p; *p = vm; write_unlock(&vmlist_lock); } static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, unsigned long flags, void *caller) { setup_vmalloc_vm(vm, va, flags, caller); insert_vmalloc_vmlist(vm); } static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long align, unsigned long flags, unsigned long start, unsigned long end, int node, gfp_t gfp_mask, void *caller) { static struct vmap_area *va; struct vm_struct *area; BUG_ON(in_interrupt()); if (flags & VM_IOREMAP) { int bit = fls(size); if (bit > IOREMAP_MAX_ORDER) bit = IOREMAP_MAX_ORDER; else if (bit < PAGE_SHIFT) bit = PAGE_SHIFT; align = 1ul << bit; } size = PAGE_ALIGN(size); if (unlikely(!size)) return NULL; area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); if 
(unlikely(!area)) return NULL; /* * We always allocate a guard page. */ size += PAGE_SIZE; va = alloc_vmap_area(size, align, start, end, node, gfp_mask); if (IS_ERR(va)) { kfree(area); return NULL; } /* * When this function is called from __vmalloc_node_range, * we do not add vm_struct to vmlist here to avoid * accessing uninitialized members of vm_struct such as * pages and nr_pages fields. They will be set later. * To distinguish it from others, we use a VM_UNLIST flag. */ if (flags & VM_UNLIST) setup_vmalloc_vm(area, va, flags, caller); else insert_vmalloc_vm(area, va, flags, caller); return area; } struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, unsigned long start, unsigned long end) { return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL, __builtin_return_address(0)); } EXPORT_SYMBOL_GPL(__get_vm_area); struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, unsigned long start, unsigned long end, void *caller) { return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL, caller); } /** * get_vm_area - reserve a contiguous kernel virtual area * @size: size of the area * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC * * Search an area of @size in the kernel virtual mapping area, * and reserved it for out purposes. Returns the area descriptor * on success or %NULL on failure. 
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, __builtin_return_address(0));
}

/* Same as get_vm_area() but records @caller instead of the return address. */
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				void *caller)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
						-1, GFP_KERNEL, caller);
}

/* Look up the vm_struct whose area starts at @addr, or NULL. */
static struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->vm;

	return NULL;
}

/**
 * remove_vm_area - find and remove a continuous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->vm;

		if (!(vm->flags & VM_UNLIST)) {
			struct vm_struct *tmp, **p;
			/*
			 * remove from list and disallow access to
			 * this vm_struct before unmap. (address range
			 * confliction is maintained by vmap.)
			 */
			write_lock(&vmlist_lock);
			for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
				;
			*p = tmp->next;
			write_unlock(&vmlist_lock);
		}

		vmap_debug_free_range(va->va_start, va->va_end);
		free_unmap_vmap_area(va);
		/* report the usable size, without the guard page */
		vm->size -= PAGE_SIZE;

		return vm;
	}
	return NULL;
}

/*
 * Common teardown for vfree()/vunmap(): remove the area and, when
 * @deallocate_pages is set, free the backing pages and page array too.
 */
static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		/* the page array itself may have been vmalloc'ed (VM_VPAGES) */
		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually continuous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());

	kmemleak_free(addr);
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	__vunmap(addr, 0);	/* unmap only; caller owns the pages */
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	might_sleep();

	if (count > totalram_pages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);

static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller);

/*
 * Allocate and map the backing pages for @area one page at a time.
 * The page-pointer array is itself vmalloc'ed when it exceeds a page.
 */
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, void *caller)
{
	const int order = 0;	/* single pages only */
	struct page **pages;
	unsigned int nr_pages, array_size, i;
	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;	/* minus guard page */
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded.
 */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;	/* page array is vmalloc'ed */
	} else {
		pages = kmalloc_node(array_size, nested_gfp, node);
	}
	area->pages = pages;
	area->caller = caller;
#ifdef CONFIG_DEBUG_VMALLOC
	area->pid = current->pid;
	area->task_name = current->comm;
#endif
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;
		gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;

		if (node < 0)
			page = alloc_page(tmp_mask);
		else
			page = alloc_pages_node(node, tmp_mask, order);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	warn_alloc_failed(gfp_mask, order, "vmalloc: allocation failure, "
			  "allocated %ld of %ld bytes\n",
			  (area->nr_pages*PAGE_SIZE), area->size);
	vfree(area->addr);
	return NULL;
}

/**
 * __vmalloc_node_range - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @start: vm area range start
 * @end: vm area range end
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;	/* pre-alignment size for kmemleak */

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
		return NULL;

	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
				  start, end, node, gfp_mask, caller);

	if (!area)
		return NULL;

	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
	if (!addr)
		return NULL;

	/*
	 * In this function, newly allocated vm_struct is not added
	 * to vmlist at __get_vm_area_node(). so, it is added here.
	 */
	insert_vmalloc_vmlist(area);

	/*
	 * A ref_count = 3 is needed because the vm_struct and vmap_area
	 * structures allocated in the __get_vm_area_node() function contain
	 * references to the virtual address of the vmalloc'ed block.
	 */
	kmemleak_alloc(addr, real_size, 3, gfp_mask);

	return addr;
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller)
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
				gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, 1, gfp_mask, prot, -1,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/* Convenience wrapper: PAGE_KERNEL protection, caller-supplied gfp flags. */
static inline void *__vmalloc_node_flags(unsigned long size,
					int node, gfp_t flags)
{
	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
					node, __builtin_return_address(0));
}

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
}
EXPORT_SYMBOL(vmalloc);

/**
 * vzalloc - allocate virtually contiguous memory with zero fill
 * @size: allocation size
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, -1,
				GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	/* SHMLBA alignment so the area can be mmap'ed without cache aliasing */
	ret = __vmalloc_node(size, SHMLBA,
			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;	/* allow remap_vmalloc_range() */
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc_node() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return __vmalloc_node_flags(size, node,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM,
			      PAGE_KERNEL_EXEC, -1, __builtin_return_address(0));
}

/* gfp mask yielding pages addressable with 32 bits of physical address */
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
			      -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;	/* allow remap_vmalloc_range() */
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

/*
 * Small helper routine: copy contents to buf from addr.
 * If the page is not present, fill zero.
 */
static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		/* clamp to the rest of the current page */
		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p, KM_USER0);
			memcpy(buf, map + offset, length);
			kunmap_atomic(map, KM_USER0);
		} else
			/* unmapped page: report zeroes */
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

/*
 * Counterpart of aligned_vread(): copy @count bytes from @buf into the
 * vmalloc area at @addr, page by page; unmapped pages are skipped.
 */
static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p, KM_USER0);
			memcpy(map + offset, buf, length);
			kunmap_atomic(map, KM_USER0);
		}
		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

/**
 * vread() - read vmalloc area in a safe way.
 * @buf: buffer for reading data
 * @addr: vm address.
 * @count: number of bytes to be read.
 *
 * Returns # of bytes which addr and buf should be increased.
 * (same number to @count). Returns 0 if [addr...addr+count) doesn't
 * includes any intersect with alive vmalloc area.
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * copy data from that area to a given buffer. If the given memory range
 * of [addr...addr+count) includes some valid address, data is copied to
 * proper area of @buf. If there are memory holes, they'll be zero-filled.
 * IOREMAP area is treated as memory hole and no copy is done.
 *
 * If [addr...addr+count) doesn't includes any intersects with alive
 * vm_struct area, returns 0.
 * @buf should be kernel's buffer.
 Because this function uses KM_USER0,
 * the caller should guarantee KM_USER0 is not used.
 *
 * Note: In usual ops, vread() is never necessary because the caller
 * should know vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access vmalloc area without
 * any information, as /dev/kmem.
 *
 */
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long buflen = count;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; count && tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		/* zero-fill the gap before this area */
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		if (n > count)
			n = count;
		if (!(tmp->flags & VM_IOREMAP))
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	read_unlock(&vmlist_lock);

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
}

/**
 * vwrite() - write vmalloc area in a safe way.
 * @buf: buffer for source data
 * @addr: vm address.
 * @count: number of bytes to be read.
 *
 * Returns # of bytes which addr and buf should be increased.
 * (same number to @count).
 * If [addr...addr+count) doesn't includes any intersect with valid
 * vmalloc area, returns 0.
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * copy data from a buffer to the given addr. If specified range of
 * [addr...addr+count) includes some valid address, data is copied from
 * proper area of @buf. If there are memory holes, no copy to hole.
 * IOREMAP area is treated as memory hole and no copy is done.
 *
 * If [addr...addr+count) doesn't includes any intersects with alive
 * vm_struct area, returns 0.
 * @buf should be kernel's buffer. Because this function uses KM_USER0,
 * the caller should guarantee KM_USER0 is not used.
 *
 * Note: In usual ops, vwrite() is never necessary because the caller
 * should know vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access vmalloc area without
 * any information, as /dev/kmem.
 */
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	buflen = count;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; count && tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		/* skip the gap before this area (nothing to write into) */
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		if (n > count)
			n = count;
		if (!(tmp->flags & VM_IOREMAP)) {
			aligned_vwrite(buf, addr, n);
			copied++;
		}
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	read_unlock(&vmlist_lock);
	if (!copied)
		return 0;
	return buflen;
}

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criteria isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	area = find_vm_area(addr);
	if (!area)
		return -EINVAL;

	/* only areas explicitly created for userspace mapping qualify */
	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		return -EINVAL;

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void  __attribute__((weak)) vmalloc_sync_all(void)
{
}


/* Callback for apply_to_page_range(); populating the tables is the point. */
static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 *
 * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range. No actual mappings
 * are created. If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
*/ if (apply_to_page_range(&init_mm, (unsigned long)area->addr, area->size, f, NULL)) { free_vm_area(area); return NULL; } /* * If the allocated address space is passed to a hypercall * before being used then we cannot rely on a page fault to * trigger an update of the page tables. So sync all the page * tables here. */ vmalloc_sync_all(); return area; } EXPORT_SYMBOL_GPL(alloc_vm_area); void free_vm_area(struct vm_struct *area) { struct vm_struct *ret; ret = remove_vm_area(area->addr); BUG_ON(ret != area); kfree(area); } EXPORT_SYMBOL_GPL(free_vm_area); #ifdef CONFIG_SMP static struct vmap_area *node_to_va(struct rb_node *n) { return n ? rb_entry(n, struct vmap_area, rb_node) : NULL; } /** * pvm_find_next_prev - find the next and prev vmap_area surrounding @end * @end: target address * @pnext: out arg for the next vmap_area * @pprev: out arg for the previous vmap_area * * Returns: %true if either or both of next and prev are found, * %false if no vmap_area exists * * Find vmap_areas end addresses of which enclose @end. ie. if not * NULL, *pnext->va_end > @end and *pprev->va_end <= @end. */ static bool pvm_find_next_prev(unsigned long end, struct vmap_area **pnext, struct vmap_area **pprev) { struct rb_node *n = vmap_area_root.rb_node; struct vmap_area *va = NULL; while (n) { va = rb_entry(n, struct vmap_area, rb_node); if (end < va->va_end) n = n->rb_left; else if (end > va->va_end) n = n->rb_right; else break; } if (!va) return false; if (va->va_end > end) { *pnext = va; *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); } else { *pprev = va; *pnext = node_to_va(rb_next(&(*pprev)->rb_node)); } return true; } /** * pvm_determine_end - find the highest aligned address between two vmap_areas * @pnext: in/out arg for the next vmap_area * @pprev: in/out arg for the previous vmap_area * @align: alignment * * Returns: determined end address * * Find the highest aligned address between *@pnext and *@pprev below * VMALLOC_END. 
*@pnext and *@pprev are adjusted so that the aligned * down address is between the end addresses of the two vmap_areas. * * Please note that the address returned by this function may fall * inside *@pnext vmap_area. The caller is responsible for checking * that. */ static unsigned long pvm_determine_end(struct vmap_area **pnext, struct vmap_area **pprev, unsigned long align) { const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); unsigned long addr; if (*pnext) addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end); else addr = vmalloc_end; while (*pprev && (*pprev)->va_end > addr) { *pnext = *pprev; *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); } return addr; } /** * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator * @offsets: array containing offset of each area * @sizes: array containing size of each area * @nr_vms: the number of areas to allocate * @align: alignment, all entries in @offsets and @sizes must be aligned to this * * Returns: kmalloc'd vm_struct pointer array pointing to allocated * vm_structs on success, %NULL on failure * * Percpu allocator wants to use congruent vm areas so that it can * maintain the offsets among percpu areas. This function allocates * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to * be scattered pretty far, distance between two areas easily going up * to gigabytes. To avoid interacting with regular vmallocs, these * areas are allocated from top. * * Despite its complicated look, this allocator is rather simple. It * does everything top-down and scans areas from the end looking for * matching slot. While scanning, if any of the areas overlaps with * existing vmap_area, the base address is pulled down to fit the * area. Scanning is repeated till all the areas fit and then all * necessary data structres are inserted and the result is returned. 
*/ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, const size_t *sizes, int nr_vms, size_t align) { const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); struct vmap_area **vas, *prev, *next; struct vm_struct **vms; int area, area2, last_area, term_area; unsigned long base, start, end, last_end; bool purged = false; /* verify parameters and allocate data structures */ BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align)); for (last_area = 0, area = 0; area < nr_vms; area++) { start = offsets[area]; end = start + sizes[area]; /* is everything aligned properly? */ BUG_ON(!IS_ALIGNED(offsets[area], align)); BUG_ON(!IS_ALIGNED(sizes[area], align)); /* detect the area with the highest address */ if (start > offsets[last_area]) last_area = area; for (area2 = 0; area2 < nr_vms; area2++) { unsigned long start2 = offsets[area2]; unsigned long end2 = start2 + sizes[area2]; if (area2 == area) continue; BUG_ON(start2 >= start && start2 < end); BUG_ON(end2 <= end && end2 > start); } } last_end = offsets[last_area] + sizes[last_area]; if (vmalloc_end - vmalloc_start < last_end) { WARN_ON(true); return NULL; } vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL); vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL); if (!vas || !vms) goto err_free; for (area = 0; area < nr_vms; area++) { vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL); vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); if (!vas[area] || !vms[area]) goto err_free; } retry: spin_lock(&vmap_area_lock); /* start scanning - we scan from the top, begin with the last area */ area = term_area = last_area; start = offsets[area]; end = start + sizes[area]; if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) { base = vmalloc_end - last_end; goto found; } base = pvm_determine_end(&next, &prev, align) - end; while (true) { BUG_ON(next && next->va_end <= base + end); BUG_ON(prev && prev->va_end > base + end); /* * 
base might have underflowed, add last_end before * comparing. */ if (base + last_end < vmalloc_start + last_end) { spin_unlock(&vmap_area_lock); if (!purged) { purge_vmap_area_lazy(); purged = true; goto retry; } goto err_free; } /* * If next overlaps, move base downwards so that it's * right below next and then recheck. */ if (next && next->va_start < base + end) { base = pvm_determine_end(&next, &prev, align) - end; term_area = area; continue; } /* * If prev overlaps, shift down next and prev and move * base so that it's right below new next and then * recheck. */ if (prev && prev->va_end > base + start) { next = prev; prev = node_to_va(rb_prev(&next->rb_node)); base = pvm_determine_end(&next, &prev, align) - end; term_area = area; continue; } /* * This area fits, move on to the previous one. If * the previous one is the terminal one, we're done. */ area = (area + nr_vms - 1) % nr_vms; if (area == term_area) break; start = offsets[area]; end = start + sizes[area]; pvm_find_next_prev(base + end, &next, &prev); } found: /* we've found a fitting base, insert all va's */ for (area = 0; area < nr_vms; area++) { struct vmap_area *va = vas[area]; va->va_start = base + offsets[area]; va->va_end = va->va_start + sizes[area]; __insert_vmap_area(va); } vmap_area_pcpu_hole = base + offsets[last_area]; spin_unlock(&vmap_area_lock); /* insert all vm's */ for (area = 0; area < nr_vms; area++) insert_vmalloc_vm(vms[area], vas[area], VM_ALLOC, pcpu_get_vm_areas); kfree(vas); return vms; err_free: for (area = 0; area < nr_vms; area++) { if (vas) kfree(vas[area]); if (vms) kfree(vms[area]); } kfree(vas); kfree(vms); return NULL; } /** * pcpu_free_vm_areas - free vmalloc areas for percpu allocator * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() * @nr_vms: the number of allocated areas * * Free vm_structs and the array allocated by pcpu_get_vm_areas(). 
*/ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) { int i; for (i = 0; i < nr_vms; i++) free_vm_area(vms[i]); kfree(vms); } #endif /* CONFIG_SMP */ #ifdef CONFIG_PROC_FS static void *s_start(struct seq_file *m, loff_t *pos) __acquires(&vmlist_lock) { loff_t n = *pos; struct vm_struct *v; read_lock(&vmlist_lock); v = vmlist; while (n > 0 && v) { n--; v = v->next; } if (!n) return v; return NULL; } static void *s_next(struct seq_file *m, void *p, loff_t *pos) { struct vm_struct *v = p; ++*pos; return v->next; } static void s_stop(struct seq_file *m, void *p) __releases(&vmlist_lock) { read_unlock(&vmlist_lock); } static void show_numa_info(struct seq_file *m, struct vm_struct *v) { if (NUMA_BUILD) { unsigned int nr, *counters = m->private; if (!counters) return; memset(counters, 0, nr_node_ids * sizeof(unsigned int)); for (nr = 0; nr < v->nr_pages; nr++) counters[page_to_nid(v->pages[nr])]++; for_each_node_state(nr, N_HIGH_MEMORY) if (counters[nr]) seq_printf(m, " N%u=%u", nr, counters[nr]); } } static int s_show(struct seq_file *m, void *p) { struct vm_struct *v = p; seq_printf(m, "0x%p-0x%p %7ld", v->addr, v->addr + v->size, v->size); if (v->caller) seq_printf(m, " %pS", v->caller); if (v->nr_pages) seq_printf(m, " pages=%d", v->nr_pages); if (v->phys_addr) seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr); if (v->flags & VM_IOREMAP) seq_printf(m, " ioremap"); if (v->flags & VM_ALLOC) seq_printf(m, " vmalloc"); if (v->flags & VM_MAP) seq_printf(m, " vmap"); if (v->flags & VM_USERMAP) seq_printf(m, " user"); if (v->flags & VM_VPAGES) seq_printf(m, " vpages"); #ifdef CONFIG_DEBUG_VMALLOC if (v->pid) seq_printf(m, " pid=%d", v->pid); if (v->task_name) seq_printf(m, " task name=%s", v->task_name); #endif show_numa_info(m, v); seq_putc(m, '\n'); return 0; } static const struct seq_operations vmalloc_op = { .start = s_start, .next = s_next, .stop = s_stop, .show = s_show, }; static int vmalloc_open(struct inode *inode, struct file *file) { 
unsigned int *ptr = NULL; int ret; if (NUMA_BUILD) { ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL); if (ptr == NULL) return -ENOMEM; } ret = seq_open(file, &vmalloc_op); if (!ret) { struct seq_file *m = file->private_data; m->private = ptr; } else kfree(ptr); return ret; } static const struct file_operations proc_vmalloc_operations = { .open = vmalloc_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; static int __init proc_vmalloc_init(void) { proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations); return 0; } module_init(proc_vmalloc_init); #endif
gpl-2.0
ska/linux-fsl
drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
181
6778
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include <subdev/fb.h> #include <subdev/bios.h> struct nvc0_fb_priv { struct nouveau_fb base; struct page *r100c10_page; dma_addr_t r100c10; }; /* 0 = unsupported * 1 = non-compressed * 3 = compressed */ static const u8 types[256] = { 1, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0 }; static bool nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags) { u8 memtype = (tile_flags & 0x0000ff00) >> 8; return likely((types[memtype] == 1)); } static int nvc0_fb_vram_init(struct nouveau_fb *pfb) { struct nouveau_bios *bios = nouveau_bios(pfb); const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ u32 parts = nv_rd32(pfb, 0x022438); u32 pmask = nv_rd32(pfb, 0x022554); u32 bsize = nv_rd32(pfb, 0x10f20c); u32 offset, length; bool uniform = true; int ret, part; nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800)); nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask); pfb->ram.type = nouveau_fb_bios_memtype(bios); pfb->ram.ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 
2 : 1; /* read amount of vram attached to each memory controller */ for (part = 0; part < parts; part++) { if (!(pmask & (1 << part))) { u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000)); if (psize != bsize) { if (psize < bsize) bsize = psize; uniform = false; } nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize); pfb->ram.size += (u64)psize << 20; } } /* if all controllers have the same amount attached, there's no holes */ if (uniform) { offset = rsvd_head; length = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail; return nouveau_mm_init(&pfb->vram, offset, length, 1); } /* otherwise, address lowest common amount from 0GiB */ ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1); if (ret) return ret; /* and the rest starting from (8GiB + common_size) */ offset = (0x0200000000ULL >> 12) + (bsize << 8); length = (pfb->ram.size >> 12) - (bsize << 8) - rsvd_tail; ret = nouveau_mm_init(&pfb->vram, offset, length, 0); if (ret) { nouveau_mm_fini(&pfb->vram); return ret; } return 0; } static int nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin, u32 memtype, struct nouveau_mem **pmem) { struct nouveau_mm *mm = &pfb->vram; struct nouveau_mm_node *r; struct nouveau_mem *mem; int type = (memtype & 0x0ff); int back = (memtype & 0x800); int ret; size >>= 12; align >>= 12; ncmin >>= 12; if (!ncmin) ncmin = size; mem = kzalloc(sizeof(*mem), GFP_KERNEL); if (!mem) return -ENOMEM; INIT_LIST_HEAD(&mem->regions); mem->memtype = type; mem->size = size; mutex_lock(&pfb->base.mutex); do { if (back) ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r); else ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r); if (ret) { mutex_unlock(&pfb->base.mutex); pfb->ram.put(pfb, &mem); return ret; } list_add_tail(&r->rl_entry, &mem->regions); size -= r->length; } while (size); mutex_unlock(&pfb->base.mutex); r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry); mem->offset = (u64)r->offset << 12; *pmem = mem; return 0; } static int 
nvc0_fb_init(struct nouveau_object *object) { struct nvc0_fb_priv *priv = (void *)object; int ret; ret = nouveau_fb_init(&priv->base); if (ret) return ret; nv_wr32(priv, 0x100c10, priv->r100c10 >> 8); return 0; } static void nvc0_fb_dtor(struct nouveau_object *object) { struct nouveau_device *device = nv_device(object); struct nvc0_fb_priv *priv = (void *)object; if (priv->r100c10_page) { pci_unmap_page(device->pdev, priv->r100c10, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); __free_page(priv->r100c10_page); } nouveau_fb_destroy(&priv->base); } static int nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nouveau_device *device = nv_device(parent); struct nvc0_fb_priv *priv; int ret; ret = nouveau_fb_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; priv->base.memtype_valid = nvc0_fb_memtype_valid; priv->base.ram.init = nvc0_fb_vram_init; priv->base.ram.get = nvc0_fb_vram_new; priv->base.ram.put = nv50_fb_vram_del; priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!priv->r100c10_page) return -ENOMEM; priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); if (pci_dma_mapping_error(device->pdev, priv->r100c10)) return -EFAULT; return nouveau_fb_preinit(&priv->base); } struct nouveau_oclass nvc0_fb_oclass = { .handle = NV_SUBDEV(FB, 0xc0), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nvc0_fb_ctor, .dtor = nvc0_fb_dtor, .init = nvc0_fb_init, .fini = _nouveau_fb_fini, }, };
gpl-2.0
jogger0703/linux
arch/arm/common/locomo.c
437
24141
/* * linux/arch/arm/common/locomo.c * * Sharp LoCoMo support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This file contains all generic LoCoMo support. * * All initialization functions provided here are intended to be called * from machine specific code with proper arguments when required. * * Based on sa1111.c */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mach/irq.h> #include <asm/hardware/locomo.h> /* LoCoMo Interrupts */ #define IRQ_LOCOMO_KEY (0) #define IRQ_LOCOMO_GPIO (1) #define IRQ_LOCOMO_LT (2) #define IRQ_LOCOMO_SPI (3) /* M62332 output channel selection */ #define M62332_EVR_CH 1 /* M62332 volume channel number */ /* 0 : CH.1 , 1 : CH. 
2 */ /* DAC send data */ #define M62332_SLAVE_ADDR 0x4e /* Slave address */ #define M62332_W_BIT 0x00 /* W bit (0 only) */ #define M62332_SUB_ADDR 0x00 /* Sub address */ #define M62332_A_BIT 0x00 /* A bit (0 only) */ /* DAC setup and hold times (expressed in us) */ #define DAC_BUS_FREE_TIME 5 /* 4.7 us */ #define DAC_START_SETUP_TIME 5 /* 4.7 us */ #define DAC_STOP_SETUP_TIME 4 /* 4.0 us */ #define DAC_START_HOLD_TIME 5 /* 4.7 us */ #define DAC_SCL_LOW_HOLD_TIME 5 /* 4.7 us */ #define DAC_SCL_HIGH_HOLD_TIME 4 /* 4.0 us */ #define DAC_DATA_SETUP_TIME 1 /* 250 ns */ #define DAC_DATA_HOLD_TIME 1 /* 300 ns */ #define DAC_LOW_SETUP_TIME 1 /* 300 ns */ #define DAC_HIGH_SETUP_TIME 1 /* 1000 ns */ /* the following is the overall data for the locomo chip */ struct locomo { struct device *dev; unsigned long phys; unsigned int irq; int irq_base; spinlock_t lock; void __iomem *base; #ifdef CONFIG_PM void *saved_state; #endif }; struct locomo_dev_info { unsigned long offset; unsigned long length; unsigned int devid; unsigned int irq[1]; const char * name; }; /* All the locomo devices. If offset is non-zero, the mapbase for the * locomo_dev will be set to the chip base plus offset. If offset is * zero, then the mapbase for the locomo_dev will be set to zero. 
An * offset of zero means the device only uses GPIOs or other helper * functions inside this file */ static struct locomo_dev_info locomo_devices[] = { { .devid = LOCOMO_DEVID_KEYBOARD, .irq = { IRQ_LOCOMO_KEY }, .name = "locomo-keyboard", .offset = LOCOMO_KEYBOARD, .length = 16, }, { .devid = LOCOMO_DEVID_FRONTLIGHT, .irq = {}, .name = "locomo-frontlight", .offset = LOCOMO_FRONTLIGHT, .length = 8, }, { .devid = LOCOMO_DEVID_BACKLIGHT, .irq = {}, .name = "locomo-backlight", .offset = LOCOMO_BACKLIGHT, .length = 8, }, { .devid = LOCOMO_DEVID_AUDIO, .irq = {}, .name = "locomo-audio", .offset = LOCOMO_AUDIO, .length = 4, }, { .devid = LOCOMO_DEVID_LED, .irq = {}, .name = "locomo-led", .offset = LOCOMO_LED, .length = 8, }, { .devid = LOCOMO_DEVID_UART, .irq = {}, .name = "locomo-uart", .offset = 0, .length = 0, }, { .devid = LOCOMO_DEVID_SPI, .irq = {}, .name = "locomo-spi", .offset = LOCOMO_SPI, .length = 0x30, }, }; static void locomo_handler(struct irq_desc *desc) { struct locomo *lchip = irq_desc_get_chip_data(desc); int req, i; /* Acknowledge the parent IRQ */ desc->irq_data.chip->irq_ack(&desc->irq_data); /* check why this interrupt was generated */ req = locomo_readl(lchip->base + LOCOMO_ICR) & 0x0f00; if (req) { unsigned int irq; /* generate the next interrupt(s) */ irq = lchip->irq_base; for (i = 0; i <= 3; i++, irq++) { if (req & (0x0100 << i)) { generic_handle_irq(irq); } } } } static void locomo_ack_irq(struct irq_data *d) { } static void locomo_mask_irq(struct irq_data *d) { struct locomo *lchip = irq_data_get_irq_chip_data(d); unsigned int r; r = locomo_readl(lchip->base + LOCOMO_ICR); r &= ~(0x0010 << (d->irq - lchip->irq_base)); locomo_writel(r, lchip->base + LOCOMO_ICR); } static void locomo_unmask_irq(struct irq_data *d) { struct locomo *lchip = irq_data_get_irq_chip_data(d); unsigned int r; r = locomo_readl(lchip->base + LOCOMO_ICR); r |= (0x0010 << (d->irq - lchip->irq_base)); locomo_writel(r, lchip->base + LOCOMO_ICR); } static struct irq_chip 
locomo_chip = { .name = "LOCOMO", .irq_ack = locomo_ack_irq, .irq_mask = locomo_mask_irq, .irq_unmask = locomo_unmask_irq, }; static void locomo_setup_irq(struct locomo *lchip) { int irq = lchip->irq_base; /* * Install handler for IRQ_LOCOMO_HW. */ irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING); irq_set_chip_data(lchip->irq, lchip); irq_set_chained_handler(lchip->irq, locomo_handler); /* Install handlers for IRQ_LOCOMO_* */ for ( ; irq <= lchip->irq_base + 3; irq++) { irq_set_chip_and_handler(irq, &locomo_chip, handle_level_irq); irq_set_chip_data(irq, lchip); irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); } } static void locomo_dev_release(struct device *_dev) { struct locomo_dev *dev = LOCOMO_DEV(_dev); kfree(dev); } static int locomo_init_one_child(struct locomo *lchip, struct locomo_dev_info *info) { struct locomo_dev *dev; int ret; dev = kzalloc(sizeof(struct locomo_dev), GFP_KERNEL); if (!dev) { ret = -ENOMEM; goto out; } /* * If the parent device has a DMA mask associated with it, * propagate it down to the children. */ if (lchip->dev->dma_mask) { dev->dma_mask = *lchip->dev->dma_mask; dev->dev.dma_mask = &dev->dma_mask; } dev_set_name(&dev->dev, "%s", info->name); dev->devid = info->devid; dev->dev.parent = lchip->dev; dev->dev.bus = &locomo_bus_type; dev->dev.release = locomo_dev_release; dev->dev.coherent_dma_mask = lchip->dev->coherent_dma_mask; if (info->offset) dev->mapbase = lchip->base + info->offset; else dev->mapbase = 0; dev->length = info->length; dev->irq[0] = (lchip->irq_base == NO_IRQ) ? 
NO_IRQ : lchip->irq_base + info->irq[0]; ret = device_register(&dev->dev); if (ret) { out: kfree(dev); } return ret; } #ifdef CONFIG_PM struct locomo_save_data { u16 LCM_GPO; u16 LCM_SPICT; u16 LCM_GPE; u16 LCM_ASD; u16 LCM_SPIMD; }; static int locomo_suspend(struct platform_device *dev, pm_message_t state) { struct locomo *lchip = platform_get_drvdata(dev); struct locomo_save_data *save; unsigned long flags; save = kmalloc(sizeof(struct locomo_save_data), GFP_KERNEL); if (!save) return -ENOMEM; lchip->saved_state = save; spin_lock_irqsave(&lchip->lock, flags); save->LCM_GPO = locomo_readl(lchip->base + LOCOMO_GPO); /* GPIO */ locomo_writel(0x00, lchip->base + LOCOMO_GPO); save->LCM_SPICT = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPICT); /* SPI */ locomo_writel(0x40, lchip->base + LOCOMO_SPI + LOCOMO_SPICT); save->LCM_GPE = locomo_readl(lchip->base + LOCOMO_GPE); /* GPIO */ locomo_writel(0x00, lchip->base + LOCOMO_GPE); save->LCM_ASD = locomo_readl(lchip->base + LOCOMO_ASD); /* ADSTART */ locomo_writel(0x00, lchip->base + LOCOMO_ASD); save->LCM_SPIMD = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); /* SPI */ locomo_writel(0x3C14, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); locomo_writel(0x00, lchip->base + LOCOMO_PAIF); locomo_writel(0x00, lchip->base + LOCOMO_DAC); locomo_writel(0x00, lchip->base + LOCOMO_BACKLIGHT + LOCOMO_TC); if ((locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT0) & 0x88) && (locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT1) & 0x88)) locomo_writel(0x00, lchip->base + LOCOMO_C32K); /* CLK32 off */ else /* 18MHz already enabled, so no wait */ locomo_writel(0xc1, lchip->base + LOCOMO_C32K); /* CLK32 on */ locomo_writel(0x00, lchip->base + LOCOMO_TADC); /* 18MHz clock off*/ locomo_writel(0x00, lchip->base + LOCOMO_AUDIO + LOCOMO_ACC); /* 22MHz/24MHz clock off */ locomo_writel(0x00, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); /* FL */ spin_unlock_irqrestore(&lchip->lock, flags); return 0; } static int locomo_resume(struct 
platform_device *dev) { struct locomo *lchip = platform_get_drvdata(dev); struct locomo_save_data *save; unsigned long r; unsigned long flags; save = lchip->saved_state; if (!save) return 0; spin_lock_irqsave(&lchip->lock, flags); locomo_writel(save->LCM_GPO, lchip->base + LOCOMO_GPO); locomo_writel(save->LCM_SPICT, lchip->base + LOCOMO_SPI + LOCOMO_SPICT); locomo_writel(save->LCM_GPE, lchip->base + LOCOMO_GPE); locomo_writel(save->LCM_ASD, lchip->base + LOCOMO_ASD); locomo_writel(save->LCM_SPIMD, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); locomo_writel(0x00, lchip->base + LOCOMO_C32K); locomo_writel(0x90, lchip->base + LOCOMO_TADC); locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KSC); r = locomo_readl(lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); r &= 0xFEFF; locomo_writel(r, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); locomo_writel(0x1, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KCMD); spin_unlock_irqrestore(&lchip->lock, flags); lchip->saved_state = NULL; kfree(save); return 0; } #endif /** * locomo_probe - probe for a single LoCoMo chip. * @phys_addr: physical address of device. * * Probe for a LoCoMo chip. This must be called * before any other locomo-specific code. * * Returns: * %-ENODEV device not found. * %-EBUSY physical address already marked in-use. * %0 successful. */ static int __locomo_probe(struct device *me, struct resource *mem, int irq) { struct locomo_platform_data *pdata = me->platform_data; struct locomo *lchip; unsigned long r; int i, ret = -ENODEV; lchip = kzalloc(sizeof(struct locomo), GFP_KERNEL); if (!lchip) return -ENOMEM; spin_lock_init(&lchip->lock); lchip->dev = me; dev_set_drvdata(lchip->dev, lchip); lchip->phys = mem->start; lchip->irq = irq; lchip->irq_base = (pdata) ? pdata->irq_base : NO_IRQ; /* * Map the whole region. This also maps the * registers for our children. 
*/ lchip->base = ioremap(mem->start, PAGE_SIZE); if (!lchip->base) { ret = -ENOMEM; goto out; } /* locomo initialize */ locomo_writel(0, lchip->base + LOCOMO_ICR); /* KEYBOARD */ locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); /* GPIO */ locomo_writel(0, lchip->base + LOCOMO_GPO); locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14)) , lchip->base + LOCOMO_GPE); locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14)) , lchip->base + LOCOMO_GPD); locomo_writel(0, lchip->base + LOCOMO_GIE); /* Frontlight */ locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD); /* Longtime timer */ locomo_writel(0, lchip->base + LOCOMO_LTINT); /* SPI */ locomo_writel(0, lchip->base + LOCOMO_SPI + LOCOMO_SPIIE); locomo_writel(6 + 8 + 320 + 30 - 10, lchip->base + LOCOMO_ASD); r = locomo_readl(lchip->base + LOCOMO_ASD); r |= 0x8000; locomo_writel(r, lchip->base + LOCOMO_ASD); locomo_writel(6 + 8 + 320 + 30 - 10 - 128 + 4, lchip->base + LOCOMO_HSD); r = locomo_readl(lchip->base + LOCOMO_HSD); r |= 0x8000; locomo_writel(r, lchip->base + LOCOMO_HSD); locomo_writel(128 / 8, lchip->base + LOCOMO_HSC); /* XON */ locomo_writel(0x80, lchip->base + LOCOMO_TADC); udelay(1000); /* CLK9MEN */ r = locomo_readl(lchip->base + LOCOMO_TADC); r |= 0x10; locomo_writel(r, lchip->base + LOCOMO_TADC); udelay(100); /* init DAC */ r = locomo_readl(lchip->base + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, lchip->base + LOCOMO_DAC); r = locomo_readl(lchip->base + LOCOMO_VER); printk(KERN_INFO "LoCoMo Chip: %lu%lu\n", (r >> 8), (r & 0xff)); /* * The interrupt controller must be initialised before any * other device to ensure that the interrupts are available. 
*/ if (lchip->irq != NO_IRQ && lchip->irq_base != NO_IRQ) locomo_setup_irq(lchip); for (i = 0; i < ARRAY_SIZE(locomo_devices); i++) locomo_init_one_child(lchip, &locomo_devices[i]); return 0; out: kfree(lchip); return ret; } static int locomo_remove_child(struct device *dev, void *data) { device_unregister(dev); return 0; } static void __locomo_remove(struct locomo *lchip) { device_for_each_child(lchip->dev, NULL, locomo_remove_child); if (lchip->irq != NO_IRQ) { irq_set_chained_handler_and_data(lchip->irq, NULL, NULL); } iounmap(lchip->base); kfree(lchip); } static int locomo_probe(struct platform_device *dev) { struct resource *mem; int irq; mem = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!mem) return -EINVAL; irq = platform_get_irq(dev, 0); if (irq < 0) return -ENXIO; return __locomo_probe(&dev->dev, mem, irq); } static int locomo_remove(struct platform_device *dev) { struct locomo *lchip = platform_get_drvdata(dev); if (lchip) { __locomo_remove(lchip); platform_set_drvdata(dev, NULL); } return 0; } /* * Not sure if this should be on the system bus or not yet. * We really want some way to register a system device at * the per-machine level, and then have this driver pick * up the registered devices. 
*/ static struct platform_driver locomo_device_driver = { .probe = locomo_probe, .remove = locomo_remove, #ifdef CONFIG_PM .suspend = locomo_suspend, .resume = locomo_resume, #endif .driver = { .name = "locomo", }, }; /* * Get the parent device driver (us) structure * from a child function device */ static inline struct locomo *locomo_chip_driver(struct locomo_dev *ldev) { return (struct locomo *)dev_get_drvdata(ldev->dev.parent); } void locomo_gpio_set_dir(struct device *dev, unsigned int bits, unsigned int dir) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int r; if (!lchip) return; spin_lock_irqsave(&lchip->lock, flags); r = locomo_readl(lchip->base + LOCOMO_GPD); if (dir) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPD); r = locomo_readl(lchip->base + LOCOMO_GPE); if (dir) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPE); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_gpio_set_dir); int locomo_gpio_read_level(struct device *dev, unsigned int bits) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int ret; if (!lchip) return -ENODEV; spin_lock_irqsave(&lchip->lock, flags); ret = locomo_readl(lchip->base + LOCOMO_GPL); spin_unlock_irqrestore(&lchip->lock, flags); ret &= bits; return ret; } EXPORT_SYMBOL(locomo_gpio_read_level); int locomo_gpio_read_output(struct device *dev, unsigned int bits) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int ret; if (!lchip) return -ENODEV; spin_lock_irqsave(&lchip->lock, flags); ret = locomo_readl(lchip->base + LOCOMO_GPO); spin_unlock_irqrestore(&lchip->lock, flags); ret &= bits; return ret; } EXPORT_SYMBOL(locomo_gpio_read_output); void locomo_gpio_write(struct device *dev, unsigned int bits, unsigned int set) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int r; if (!lchip) return; spin_lock_irqsave(&lchip->lock, flags); r = 
locomo_readl(lchip->base + LOCOMO_GPO); if (set) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPO); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_gpio_write); static void locomo_m62332_sendbit(void *mapbase, int bit) { unsigned int r; r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ if (bit & 1) { r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ } else { r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ } udelay(DAC_DATA_SETUP_TIME); /* 250 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */ } void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int channel) { struct locomo *lchip = locomo_chip_driver(ldev); int i; unsigned char data; unsigned int r; void *mapbase = lchip->base; unsigned long flags; spin_lock_irqsave(&lchip->lock, flags); /* Start */ udelay(DAC_BUS_FREE_TIME); /* 5.0 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_START_HOLD_TIME); /* 5.0 usec */ udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */ /* Send slave address and W 
bit (LSB is W bit) */ data = (M62332_SLAVE_ADDR << 1) | M62332_W_BIT; for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 1\n"); goto out; } /* Send Sub address (LSB is channel select) */ /* channel = 0 : ch1 select */ /* = 1 : ch2 select */ data = M62332_SUB_ADDR + channel; for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 2\n"); goto out; } /* Send DAC data */ for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, dac_data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + 
LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 3\n"); } out: /* stop */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_m62332_senddata); /* * Frontlight control */ void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf) { unsigned long flags; struct locomo *lchip = locomo_chip_driver(dev); if (vr) locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 1); else locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 0); spin_lock_irqsave(&lchip->lock, flags); locomo_writel(bpwf, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); udelay(100); locomo_writel(duty, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD); 
locomo_writel(bpwf | LOCOMO_ALC_EN, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_frontlight_set); /* * LoCoMo "Register Access Bus." * * We model this as a regular bus type, and hang devices directly * off this. */ static int locomo_match(struct device *_dev, struct device_driver *_drv) { struct locomo_dev *dev = LOCOMO_DEV(_dev); struct locomo_driver *drv = LOCOMO_DRV(_drv); return dev->devid == drv->devid; } static int locomo_bus_suspend(struct device *dev, pm_message_t state) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = 0; if (drv && drv->suspend) ret = drv->suspend(ldev, state); return ret; } static int locomo_bus_resume(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = 0; if (drv && drv->resume) ret = drv->resume(ldev); return ret; } static int locomo_bus_probe(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = -ENODEV; if (drv->probe) ret = drv->probe(ldev); return ret; } static int locomo_bus_remove(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = 0; if (drv->remove) ret = drv->remove(ldev); return ret; } struct bus_type locomo_bus_type = { .name = "locomo-bus", .match = locomo_match, .probe = locomo_bus_probe, .remove = locomo_bus_remove, .suspend = locomo_bus_suspend, .resume = locomo_bus_resume, }; int locomo_driver_register(struct locomo_driver *driver) { driver->drv.bus = &locomo_bus_type; return driver_register(&driver->drv); } EXPORT_SYMBOL(locomo_driver_register); void locomo_driver_unregister(struct locomo_driver *driver) { driver_unregister(&driver->drv); } EXPORT_SYMBOL(locomo_driver_unregister); static int __init locomo_init(void) { int ret = bus_register(&locomo_bus_type); if (ret == 
0) platform_driver_register(&locomo_device_driver); return ret; } static void __exit locomo_exit(void) { platform_driver_unregister(&locomo_device_driver); bus_unregister(&locomo_bus_type); } module_init(locomo_init); module_exit(locomo_exit); MODULE_DESCRIPTION("Sharp LoCoMo core driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>");
gpl-2.0
shakalaca/ASUS_ZenFone_ZE600KL
kernel/mm/page_io.c
949
9468
/* * linux/mm/page_io.c * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Swap reorganised 29.12.95, * Asynchronous swapping added 30.12.95. Stephen Tweedie * Removed race in async swapping. 14.4.1996. Bruno Haible * Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie * Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman */ #include <linux/mm.h> #include <linux/kernel_stat.h> #include <linux/gfp.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/bio.h> #include <linux/swapops.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include <linux/frontswap.h> #include <linux/aio.h> #include <linux/blkdev.h> #include <linux/ratelimit.h> #include <asm/pgtable.h> /* * We don't need to see swap errors more than once every 1 second to know * that a problem is occurring. */ #define SWAP_ERROR_LOG_RATE_MS 1000 static struct bio *get_swap_bio(gfp_t gfp_flags, struct page *page, bio_end_io_t end_io) { struct bio *bio; bio = bio_alloc(gfp_flags, 1); if (bio) { bio->bi_sector = map_swap_page(page, &bio->bi_bdev); bio->bi_sector <<= PAGE_SHIFT - 9; bio->bi_io_vec[0].bv_page = page; bio->bi_io_vec[0].bv_len = PAGE_SIZE; bio->bi_io_vec[0].bv_offset = 0; bio->bi_vcnt = 1; bio->bi_size = PAGE_SIZE; bio->bi_end_io = end_io; } return bio; } void end_swap_bio_write(struct bio *bio, int err) { const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct page *page = bio->bi_io_vec[0].bv_page; static unsigned long swap_error_rs_time; if (!uptodate) { SetPageError(page); /* * We failed to write the page out to swap-space. * Re-dirty the page in order to avoid it being reclaimed. * Also print a dire warning that things will go BAD (tm) * very quickly. 
* * Also clear PG_reclaim to avoid rotate_reclaimable_page() */ set_page_dirty(page); if (printk_timed_ratelimit(&swap_error_rs_time, SWAP_ERROR_LOG_RATE_MS)) printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n", imajor(bio->bi_bdev->bd_inode), iminor(bio->bi_bdev->bd_inode), (unsigned long long)bio->bi_sector); ClearPageReclaim(page); } end_page_writeback(page); bio_put(bio); } void end_swap_bio_read(struct bio *bio, int err) { const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct page *page = bio->bi_io_vec[0].bv_page; if (!uptodate) { SetPageError(page); ClearPageUptodate(page); printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n", imajor(bio->bi_bdev->bd_inode), iminor(bio->bi_bdev->bd_inode), (unsigned long long)bio->bi_sector); goto out; } SetPageUptodate(page); /* * There is no guarantee that the page is in swap cache - the software * suspend code (at least) uses end_swap_bio_read() against a non- * swapcache page. So we must check PG_swapcache before proceeding with * this optimization. */ if (likely(PageSwapCache(page))) { struct swap_info_struct *sis; sis = page_swap_info(page); if (sis->flags & SWP_BLKDEV) { /* * The swap subsystem performs lazy swap slot freeing, * expecting that the page will be swapped out again. * So we can avoid an unnecessary write if the page * isn't redirtied. * This is good for real swap storage because we can * reduce unnecessary I/O and enhance wear-leveling * if an SSD is used as the as swap device. * But if in-memory swap device (eg zram) is used, * this causes a duplicated copy between uncompressed * data in VM-owned memory and compressed data in * zram-owned memory. So let's free zram-owned memory * and make the VM-owned decompressed page *dirty*, * so the page should be swapped out somewhere again if * we again wish to reclaim it. 
*/ struct gendisk *disk = sis->bdev->bd_disk; if (disk->fops->swap_slot_free_notify) { swp_entry_t entry; unsigned long offset; entry.val = page_private(page); offset = swp_offset(entry); SetPageDirty(page); disk->fops->swap_slot_free_notify(sis->bdev, offset); } } } out: unlock_page(page); bio_put(bio); } int generic_swapfile_activate(struct swap_info_struct *sis, struct file *swap_file, sector_t *span) { struct address_space *mapping = swap_file->f_mapping; struct inode *inode = mapping->host; unsigned blocks_per_page; unsigned long page_no; unsigned blkbits; sector_t probe_block; sector_t last_block; sector_t lowest_block = -1; sector_t highest_block = 0; int nr_extents = 0; int ret; blkbits = inode->i_blkbits; blocks_per_page = PAGE_SIZE >> blkbits; /* * Map all the blocks into the extent list. This code doesn't try * to be very smart. */ probe_block = 0; page_no = 0; last_block = i_size_read(inode) >> blkbits; while ((probe_block + blocks_per_page) <= last_block && page_no < sis->max) { unsigned block_in_page; sector_t first_block; first_block = bmap(inode, probe_block); if (first_block == 0) goto bad_bmap; /* * It must be PAGE_SIZE aligned on-disk */ if (first_block & (blocks_per_page - 1)) { probe_block++; goto reprobe; } for (block_in_page = 1; block_in_page < blocks_per_page; block_in_page++) { sector_t block; block = bmap(inode, probe_block + block_in_page); if (block == 0) goto bad_bmap; if (block != first_block + block_in_page) { /* Discontiguity */ probe_block++; goto reprobe; } } first_block >>= (PAGE_SHIFT - blkbits); if (page_no) { /* exclude the header page */ if (first_block < lowest_block) lowest_block = first_block; if (first_block > highest_block) highest_block = first_block; } /* * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks */ ret = add_swap_extent(sis, page_no, 1, first_block); if (ret < 0) goto out; nr_extents += ret; page_no++; probe_block += blocks_per_page; reprobe: continue; } ret = nr_extents; *span = 1 + highest_block 
- lowest_block; if (page_no == 0) page_no = 1; /* force Empty message */ sis->max = page_no; sis->pages = page_no - 1; sis->highest_bit = page_no - 1; out: return ret; bad_bmap: printk(KERN_ERR "swapon: swapfile has holes\n"); ret = -EINVAL; goto out; } /* * We may have stale swap cache pages in memory: notice * them here and get rid of the unnecessary final write. */ int swap_writepage(struct page *page, struct writeback_control *wbc) { int ret = 0; if (try_to_free_swap(page)) { unlock_page(page); goto out; } if (frontswap_store(page) == 0) { set_page_writeback(page); unlock_page(page); end_page_writeback(page); goto out; } ret = __swap_writepage(page, wbc, end_swap_bio_write); out: return ret; } int __swap_writepage(struct page *page, struct writeback_control *wbc, void (*end_write_func)(struct bio *, int)) { struct bio *bio; int ret = 0, rw = WRITE; struct swap_info_struct *sis = page_swap_info(page); if (sis->flags & SWP_FILE) { struct kiocb kiocb; struct file *swap_file = sis->swap_file; struct address_space *mapping = swap_file->f_mapping; struct iovec iov = { .iov_base = kmap(page), .iov_len = PAGE_SIZE, }; init_sync_kiocb(&kiocb, swap_file); kiocb.ki_pos = page_file_offset(page); kiocb.ki_left = PAGE_SIZE; kiocb.ki_nbytes = PAGE_SIZE; set_page_writeback(page); unlock_page(page); ret = mapping->a_ops->direct_IO(KERNEL_WRITE, &kiocb, &iov, kiocb.ki_pos, 1); kunmap(page); if (ret == PAGE_SIZE) { count_vm_event(PSWPOUT); ret = 0; } else { /* * In the case of swap-over-nfs, this can be a * temporary failure if the system has limited * memory for allocating transmit buffers. * Mark the page dirty and avoid * rotate_reclaimable_page but rate-limit the * messages but do not flag PageError like * the normal direct-to-bio case as it could * be temporary. 
*/ set_page_dirty(page); ClearPageReclaim(page); pr_err_ratelimited("Write error on dio swapfile (%Lu)\n", page_file_offset(page)); } end_page_writeback(page); return ret; } bio = get_swap_bio(GFP_NOIO, page, end_write_func); if (bio == NULL) { set_page_dirty(page); unlock_page(page); ret = -ENOMEM; goto out; } if (wbc->sync_mode == WB_SYNC_ALL) rw |= REQ_SYNC; count_vm_event(PSWPOUT); set_page_writeback(page); unlock_page(page); submit_bio(rw, bio); out: return ret; } int swap_readpage(struct page *page) { struct bio *bio; int ret = 0; struct swap_info_struct *sis = page_swap_info(page); VM_BUG_ON(!PageLocked(page)); VM_BUG_ON(PageUptodate(page)); if (frontswap_load(page) == 0) { SetPageUptodate(page); unlock_page(page); goto out; } if (sis->flags & SWP_FILE) { struct file *swap_file = sis->swap_file; struct address_space *mapping = swap_file->f_mapping; ret = mapping->a_ops->readpage(swap_file, page); if (!ret) count_vm_event(PSWPIN); return ret; } bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read); if (bio == NULL) { unlock_page(page); ret = -ENOMEM; goto out; } count_vm_event(PSWPIN); submit_bio(READ, bio); out: return ret; } int swap_set_page_dirty(struct page *page) { struct swap_info_struct *sis = page_swap_info(page); if (sis->flags & SWP_FILE) { struct address_space *mapping = sis->swap_file->f_mapping; return mapping->a_ops->set_page_dirty(page); } else { return __set_page_dirty_no_writeback(page); } }
gpl-2.0
toyota86/xoompus
drivers/mtd/maps/bfin-async-flash.c
1205
6037
/* * drivers/mtd/maps/bfin-async-flash.c * * Handle the case where flash memory and ethernet mac/phy are * mapped onto the same async bank. The BF533-STAMP does this * for example. All board-specific configuration goes in your * board resources file. * * Copyright 2000 Nicolas Pitre <nico@fluxnic.net> * Copyright 2005-2008 Analog Devices Inc. * * Enter bugs at http://blackfin.uclinux.org/ * * Licensed under the GPL-2 or later. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/types.h> #include <asm/blackfin.h> #include <linux/gpio.h> #include <linux/io.h> #include <asm/unaligned.h> #define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); }) #define DRIVER_NAME "bfin-async-flash" struct async_state { struct mtd_info *mtd; struct map_info map; int enet_flash_pin; uint32_t flash_ambctl0, flash_ambctl1; uint32_t save_ambctl0, save_ambctl1; unsigned long irq_flags; #ifdef CONFIG_MTD_PARTITIONS struct mtd_partition *parts; #endif }; static void switch_to_flash(struct async_state *state) { local_irq_save(state->irq_flags); gpio_set_value(state->enet_flash_pin, 0); state->save_ambctl0 = bfin_read_EBIU_AMBCTL0(); state->save_ambctl1 = bfin_read_EBIU_AMBCTL1(); bfin_write_EBIU_AMBCTL0(state->flash_ambctl0); bfin_write_EBIU_AMBCTL1(state->flash_ambctl1); SSYNC(); } static void switch_back(struct async_state *state) { bfin_write_EBIU_AMBCTL0(state->save_ambctl0); bfin_write_EBIU_AMBCTL1(state->save_ambctl1); SSYNC(); gpio_set_value(state->enet_flash_pin, 1); local_irq_restore(state->irq_flags); } static map_word bfin_flash_read(struct map_info *map, unsigned long ofs) { struct async_state *state = (struct async_state *)map->map_priv_1; uint16_t word; map_word test; switch_to_flash(state); word = 
readw(map->virt + ofs); switch_back(state); test.x[0] = word; return test; } static void bfin_flash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) { struct async_state *state = (struct async_state *)map->map_priv_1; switch_to_flash(state); memcpy(to, map->virt + from, len); switch_back(state); } static void bfin_flash_write(struct map_info *map, map_word d1, unsigned long ofs) { struct async_state *state = (struct async_state *)map->map_priv_1; uint16_t d; d = d1.x[0]; switch_to_flash(state); writew(d, map->virt + ofs); SSYNC(); switch_back(state); } static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) { struct async_state *state = (struct async_state *)map->map_priv_1; switch_to_flash(state); memcpy(map->virt + to, from, len); SSYNC(); switch_back(state); } #ifdef CONFIG_MTD_PARTITIONS static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; #endif static int __devinit bfin_flash_probe(struct platform_device *pdev) { int ret; struct physmap_flash_data *pdata = pdev->dev.platform_data; struct resource *memory = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct resource *flash_ambctl = platform_get_resource(pdev, IORESOURCE_MEM, 1); struct async_state *state; state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) return -ENOMEM; state->map.name = DRIVER_NAME; state->map.read = bfin_flash_read; state->map.copy_from = bfin_flash_copy_from; state->map.write = bfin_flash_write; state->map.copy_to = bfin_flash_copy_to; state->map.bankwidth = pdata->width; state->map.size = memory->end - memory->start + 1; state->map.virt = (void __iomem *)memory->start; state->map.phys = memory->start; state->map.map_priv_1 = (unsigned long)state; state->enet_flash_pin = platform_get_irq(pdev, 0); state->flash_ambctl0 = flash_ambctl->start; state->flash_ambctl1 = flash_ambctl->end; if (gpio_request(state->enet_flash_pin, DRIVER_NAME)) { pr_devinit(KERN_ERR DRIVER_NAME ": Failed to 
request gpio %d\n", state->enet_flash_pin); kfree(state); return -EBUSY; } gpio_direction_output(state->enet_flash_pin, 1); pr_devinit(KERN_NOTICE DRIVER_NAME ": probing %d-bit flash bus\n", state->map.bankwidth * 8); state->mtd = do_map_probe(memory->name, &state->map); if (!state->mtd) { gpio_free(state->enet_flash_pin); kfree(state); return -ENXIO; } #ifdef CONFIG_MTD_PARTITIONS ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0); if (ret > 0) { pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n"); add_mtd_partitions(state->mtd, pdata->parts, ret); state->parts = pdata->parts; } else if (pdata->nr_parts) { pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n"); add_mtd_partitions(state->mtd, pdata->parts, pdata->nr_parts); } else #endif { pr_devinit(KERN_NOTICE DRIVER_NAME ": no partition info available, registering whole flash at once\n"); add_mtd_device(state->mtd); } platform_set_drvdata(pdev, state); return 0; } static int __devexit bfin_flash_remove(struct platform_device *pdev) { struct async_state *state = platform_get_drvdata(pdev); gpio_free(state->enet_flash_pin); #ifdef CONFIG_MTD_PARTITIONS del_mtd_partitions(state->mtd); kfree(state->parts); #endif map_destroy(state->mtd); kfree(state); return 0; } static struct platform_driver bfin_flash_driver = { .probe = bfin_flash_probe, .remove = __devexit_p(bfin_flash_remove), .driver = { .name = DRIVER_NAME, }, }; static int __init bfin_flash_init(void) { return platform_driver_register(&bfin_flash_driver); } module_init(bfin_flash_init); static void __exit bfin_flash_exit(void) { platform_driver_unregister(&bfin_flash_driver); } module_exit(bfin_flash_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MTD map driver for Blackfins with flash/ethernet on same async bank");
gpl-2.0
emuikernel/WNR2000v4
git_home/linux-2.6.git/arch/m32r/platforms/m32700ut/setup.c
1461
14295
/* * linux/arch/m32r/platforms/m32700ut/setup.c * * Setup routines for Renesas M32700UT Board * * Copyright (c) 2002-2005 Hiroyuki Kondo, Hirokazu Takata, * Hitoshi Yamamoto, Takeo Takahashi * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of this * archive for more details. */ #include <linux/irq.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <asm/system.h> #include <asm/m32r.h> #include <asm/io.h> /* * M32700 Interrupt Control Unit (Level 1) */ #define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) icu_data_t icu_data[M32700UT_NUM_CPU_IRQ]; static void disable_m32700ut_irq(unsigned int irq) { unsigned long port, data; port = irq2port(irq); data = icu_data[irq].icucr|M32R_ICUCR_ILEVEL7; outl(data, port); } static void enable_m32700ut_irq(unsigned int irq) { unsigned long port, data; port = irq2port(irq); data = icu_data[irq].icucr|M32R_ICUCR_IEN|M32R_ICUCR_ILEVEL6; outl(data, port); } static void mask_and_ack_m32700ut(unsigned int irq) { disable_m32700ut_irq(irq); } static void end_m32700ut_irq(unsigned int irq) { enable_m32700ut_irq(irq); } static unsigned int startup_m32700ut_irq(unsigned int irq) { enable_m32700ut_irq(irq); return (0); } static void shutdown_m32700ut_irq(unsigned int irq) { unsigned long port; port = irq2port(irq); outl(M32R_ICUCR_ILEVEL7, port); } static struct irq_chip m32700ut_irq_type = { .typename = "M32700UT-IRQ", .startup = startup_m32700ut_irq, .shutdown = shutdown_m32700ut_irq, .enable = enable_m32700ut_irq, .disable = disable_m32700ut_irq, .ack = mask_and_ack_m32700ut, .end = end_m32700ut_irq }; /* * Interrupt Control Unit of PLD on M32700UT (Level 2) */ #define irq2pldirq(x) ((x) - M32700UT_PLD_IRQ_BASE) #define pldirq2port(x) (unsigned long)((int)PLD_ICUCR1 + \ (((x) - 1) * sizeof(unsigned short))) typedef struct { unsigned short icucr; /* ICU Control Register */ } 
pld_icu_data_t; static pld_icu_data_t pld_icu_data[M32700UT_NUM_PLD_IRQ]; static void disable_m32700ut_pld_irq(unsigned int irq) { unsigned long port, data; unsigned int pldirq; pldirq = irq2pldirq(irq); // disable_m32700ut_irq(M32R_IRQ_INT1); port = pldirq2port(pldirq); data = pld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7; outw(data, port); } static void enable_m32700ut_pld_irq(unsigned int irq) { unsigned long port, data; unsigned int pldirq; pldirq = irq2pldirq(irq); // enable_m32700ut_irq(M32R_IRQ_INT1); port = pldirq2port(pldirq); data = pld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6; outw(data, port); } static void mask_and_ack_m32700ut_pld(unsigned int irq) { disable_m32700ut_pld_irq(irq); // mask_and_ack_m32700ut(M32R_IRQ_INT1); } static void end_m32700ut_pld_irq(unsigned int irq) { enable_m32700ut_pld_irq(irq); end_m32700ut_irq(M32R_IRQ_INT1); } static unsigned int startup_m32700ut_pld_irq(unsigned int irq) { enable_m32700ut_pld_irq(irq); return (0); } static void shutdown_m32700ut_pld_irq(unsigned int irq) { unsigned long port; unsigned int pldirq; pldirq = irq2pldirq(irq); // shutdown_m32700ut_irq(M32R_IRQ_INT1); port = pldirq2port(pldirq); outw(PLD_ICUCR_ILEVEL7, port); } static struct irq_chip m32700ut_pld_irq_type = { .typename = "M32700UT-PLD-IRQ", .startup = startup_m32700ut_pld_irq, .shutdown = shutdown_m32700ut_pld_irq, .enable = enable_m32700ut_pld_irq, .disable = disable_m32700ut_pld_irq, .ack = mask_and_ack_m32700ut_pld, .end = end_m32700ut_pld_irq }; /* * Interrupt Control Unit of PLD on M32700UT-LAN (Level 2) */ #define irq2lanpldirq(x) ((x) - M32700UT_LAN_PLD_IRQ_BASE) #define lanpldirq2port(x) (unsigned long)((int)M32700UT_LAN_ICUCR1 + \ (((x) - 1) * sizeof(unsigned short))) static pld_icu_data_t lanpld_icu_data[M32700UT_NUM_LAN_PLD_IRQ]; static void disable_m32700ut_lanpld_irq(unsigned int irq) { unsigned long port, data; unsigned int pldirq; pldirq = irq2lanpldirq(irq); port = lanpldirq2port(pldirq); data = 
lanpld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7; outw(data, port); } static void enable_m32700ut_lanpld_irq(unsigned int irq) { unsigned long port, data; unsigned int pldirq; pldirq = irq2lanpldirq(irq); port = lanpldirq2port(pldirq); data = lanpld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6; outw(data, port); } static void mask_and_ack_m32700ut_lanpld(unsigned int irq) { disable_m32700ut_lanpld_irq(irq); } static void end_m32700ut_lanpld_irq(unsigned int irq) { enable_m32700ut_lanpld_irq(irq); end_m32700ut_irq(M32R_IRQ_INT0); } static unsigned int startup_m32700ut_lanpld_irq(unsigned int irq) { enable_m32700ut_lanpld_irq(irq); return (0); } static void shutdown_m32700ut_lanpld_irq(unsigned int irq) { unsigned long port; unsigned int pldirq; pldirq = irq2lanpldirq(irq); port = lanpldirq2port(pldirq); outw(PLD_ICUCR_ILEVEL7, port); } static struct irq_chip m32700ut_lanpld_irq_type = { .typename = "M32700UT-PLD-LAN-IRQ", .startup = startup_m32700ut_lanpld_irq, .shutdown = shutdown_m32700ut_lanpld_irq, .enable = enable_m32700ut_lanpld_irq, .disable = disable_m32700ut_lanpld_irq, .ack = mask_and_ack_m32700ut_lanpld, .end = end_m32700ut_lanpld_irq }; /* * Interrupt Control Unit of PLD on M32700UT-LCD (Level 2) */ #define irq2lcdpldirq(x) ((x) - M32700UT_LCD_PLD_IRQ_BASE) #define lcdpldirq2port(x) (unsigned long)((int)M32700UT_LCD_ICUCR1 + \ (((x) - 1) * sizeof(unsigned short))) static pld_icu_data_t lcdpld_icu_data[M32700UT_NUM_LCD_PLD_IRQ]; static void disable_m32700ut_lcdpld_irq(unsigned int irq) { unsigned long port, data; unsigned int pldirq; pldirq = irq2lcdpldirq(irq); port = lcdpldirq2port(pldirq); data = lcdpld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7; outw(data, port); } static void enable_m32700ut_lcdpld_irq(unsigned int irq) { unsigned long port, data; unsigned int pldirq; pldirq = irq2lcdpldirq(irq); port = lcdpldirq2port(pldirq); data = lcdpld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6; outw(data, port); } static void 
mask_and_ack_m32700ut_lcdpld(unsigned int irq) { disable_m32700ut_lcdpld_irq(irq); } static void end_m32700ut_lcdpld_irq(unsigned int irq) { enable_m32700ut_lcdpld_irq(irq); end_m32700ut_irq(M32R_IRQ_INT2); } static unsigned int startup_m32700ut_lcdpld_irq(unsigned int irq) { enable_m32700ut_lcdpld_irq(irq); return (0); } static void shutdown_m32700ut_lcdpld_irq(unsigned int irq) { unsigned long port; unsigned int pldirq; pldirq = irq2lcdpldirq(irq); port = lcdpldirq2port(pldirq); outw(PLD_ICUCR_ILEVEL7, port); } static struct irq_chip m32700ut_lcdpld_irq_type = { .typename = "M32700UT-PLD-LCD-IRQ", .startup = startup_m32700ut_lcdpld_irq, .shutdown = shutdown_m32700ut_lcdpld_irq, .enable = enable_m32700ut_lcdpld_irq, .disable = disable_m32700ut_lcdpld_irq, .ack = mask_and_ack_m32700ut_lcdpld, .end = end_m32700ut_lcdpld_irq }; void __init init_IRQ(void) { #if defined(CONFIG_SMC91X) /* INT#0: LAN controller on M32700UT-LAN (SMC91C111)*/ irq_desc[M32700UT_LAN_IRQ_LAN].status = IRQ_DISABLED; irq_desc[M32700UT_LAN_IRQ_LAN].chip = &m32700ut_lanpld_irq_type; irq_desc[M32700UT_LAN_IRQ_LAN].action = 0; irq_desc[M32700UT_LAN_IRQ_LAN].depth = 1; /* disable nested irq */ lanpld_icu_data[irq2lanpldirq(M32700UT_LAN_IRQ_LAN)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02; /* "H" edge sense */ disable_m32700ut_lanpld_irq(M32700UT_LAN_IRQ_LAN); #endif /* CONFIG_SMC91X */ /* MFT2 : system timer */ irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED; irq_desc[M32R_IRQ_MFT2].chip = &m32700ut_irq_type; irq_desc[M32R_IRQ_MFT2].action = 0; irq_desc[M32R_IRQ_MFT2].depth = 1; icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN; disable_m32700ut_irq(M32R_IRQ_MFT2); /* SIO0 : receive */ irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED; irq_desc[M32R_IRQ_SIO0_R].chip = &m32700ut_irq_type; irq_desc[M32R_IRQ_SIO0_R].action = 0; irq_desc[M32R_IRQ_SIO0_R].depth = 1; icu_data[M32R_IRQ_SIO0_R].icucr = 0; disable_m32700ut_irq(M32R_IRQ_SIO0_R); /* SIO0 : send */ irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED; 
irq_desc[M32R_IRQ_SIO0_S].chip = &m32700ut_irq_type; irq_desc[M32R_IRQ_SIO0_S].action = 0; irq_desc[M32R_IRQ_SIO0_S].depth = 1; icu_data[M32R_IRQ_SIO0_S].icucr = 0; disable_m32700ut_irq(M32R_IRQ_SIO0_S); /* SIO1 : receive */ irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED; irq_desc[M32R_IRQ_SIO1_R].chip = &m32700ut_irq_type; irq_desc[M32R_IRQ_SIO1_R].action = 0; irq_desc[M32R_IRQ_SIO1_R].depth = 1; icu_data[M32R_IRQ_SIO1_R].icucr = 0; disable_m32700ut_irq(M32R_IRQ_SIO1_R); /* SIO1 : send */ irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED; irq_desc[M32R_IRQ_SIO1_S].chip = &m32700ut_irq_type; irq_desc[M32R_IRQ_SIO1_S].action = 0; irq_desc[M32R_IRQ_SIO1_S].depth = 1; icu_data[M32R_IRQ_SIO1_S].icucr = 0; disable_m32700ut_irq(M32R_IRQ_SIO1_S); /* DMA1 : */ irq_desc[M32R_IRQ_DMA1].status = IRQ_DISABLED; irq_desc[M32R_IRQ_DMA1].chip = &m32700ut_irq_type; irq_desc[M32R_IRQ_DMA1].action = 0; irq_desc[M32R_IRQ_DMA1].depth = 1; icu_data[M32R_IRQ_DMA1].icucr = 0; disable_m32700ut_irq(M32R_IRQ_DMA1); #ifdef CONFIG_SERIAL_M32R_PLDSIO /* INT#1: SIO0 Receive on PLD */ irq_desc[PLD_IRQ_SIO0_RCV].status = IRQ_DISABLED; irq_desc[PLD_IRQ_SIO0_RCV].chip = &m32700ut_pld_irq_type; irq_desc[PLD_IRQ_SIO0_RCV].action = 0; irq_desc[PLD_IRQ_SIO0_RCV].depth = 1; /* disable nested irq */ pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_RCV)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03; disable_m32700ut_pld_irq(PLD_IRQ_SIO0_RCV); /* INT#1: SIO0 Send on PLD */ irq_desc[PLD_IRQ_SIO0_SND].status = IRQ_DISABLED; irq_desc[PLD_IRQ_SIO0_SND].chip = &m32700ut_pld_irq_type; irq_desc[PLD_IRQ_SIO0_SND].action = 0; irq_desc[PLD_IRQ_SIO0_SND].depth = 1; /* disable nested irq */ pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_SND)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03; disable_m32700ut_pld_irq(PLD_IRQ_SIO0_SND); #endif /* CONFIG_SERIAL_M32R_PLDSIO */ /* INT#1: CFC IREQ on PLD */ irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED; irq_desc[PLD_IRQ_CFIREQ].chip = &m32700ut_pld_irq_type; irq_desc[PLD_IRQ_CFIREQ].action = 0; 
irq_desc[PLD_IRQ_CFIREQ].depth = 1; /* disable nested irq */ pld_icu_data[irq2pldirq(PLD_IRQ_CFIREQ)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01; /* 'L' level sense */ disable_m32700ut_pld_irq(PLD_IRQ_CFIREQ); /* INT#1: CFC Insert on PLD */ irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED; irq_desc[PLD_IRQ_CFC_INSERT].chip = &m32700ut_pld_irq_type; irq_desc[PLD_IRQ_CFC_INSERT].action = 0; irq_desc[PLD_IRQ_CFC_INSERT].depth = 1; /* disable nested irq */ pld_icu_data[irq2pldirq(PLD_IRQ_CFC_INSERT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD00; /* 'L' edge sense */ disable_m32700ut_pld_irq(PLD_IRQ_CFC_INSERT); /* INT#1: CFC Eject on PLD */ irq_desc[PLD_IRQ_CFC_EJECT].status = IRQ_DISABLED; irq_desc[PLD_IRQ_CFC_EJECT].chip = &m32700ut_pld_irq_type; irq_desc[PLD_IRQ_CFC_EJECT].action = 0; irq_desc[PLD_IRQ_CFC_EJECT].depth = 1; /* disable nested irq */ pld_icu_data[irq2pldirq(PLD_IRQ_CFC_EJECT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02; /* 'H' edge sense */ disable_m32700ut_pld_irq(PLD_IRQ_CFC_EJECT); /* * INT0# is used for LAN, DIO * We enable it here. */ icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD11; enable_m32700ut_irq(M32R_IRQ_INT0); /* * INT1# is used for UART, MMC, CF Controller in FPGA. * We enable it here. */ icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD11; enable_m32700ut_irq(M32R_IRQ_INT1); #if defined(CONFIG_USB) outw(USBCR_OTGS, USBCR); /* USBCR: non-OTG */ irq_desc[M32700UT_LCD_IRQ_USB_INT1].status = IRQ_DISABLED; irq_desc[M32700UT_LCD_IRQ_USB_INT1].chip = &m32700ut_lcdpld_irq_type; irq_desc[M32700UT_LCD_IRQ_USB_INT1].action = 0; irq_desc[M32700UT_LCD_IRQ_USB_INT1].depth = 1; lcdpld_icu_data[irq2lcdpldirq(M32700UT_LCD_IRQ_USB_INT1)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01; /* "L" level sense */ disable_m32700ut_lcdpld_irq(M32700UT_LCD_IRQ_USB_INT1); #endif /* * INT2# is used for BAT, USB, AUDIO * We enable it here. 
*/ icu_data[M32R_IRQ_INT2].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD01; enable_m32700ut_irq(M32R_IRQ_INT2); #if defined(CONFIG_VIDEO_M32R_AR) /* * INT3# is used for AR */ irq_desc[M32R_IRQ_INT3].status = IRQ_DISABLED; irq_desc[M32R_IRQ_INT3].chip = &m32700ut_irq_type; irq_desc[M32R_IRQ_INT3].action = 0; irq_desc[M32R_IRQ_INT3].depth = 1; icu_data[M32R_IRQ_INT3].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10; disable_m32700ut_irq(M32R_IRQ_INT3); #endif /* CONFIG_VIDEO_M32R_AR */ } #if defined(CONFIG_SMC91X) #define LAN_IOSTART 0x300 #define LAN_IOEND 0x320 static struct resource smc91x_resources[] = { [0] = { .start = (LAN_IOSTART), .end = (LAN_IOEND), .flags = IORESOURCE_MEM, }, [1] = { .start = M32700UT_LAN_IRQ_LAN, .end = M32700UT_LAN_IRQ_LAN, .flags = IORESOURCE_IRQ, } }; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, }; #endif #if defined(CONFIG_FB_S1D13XXX) #include <video/s1d13xxxfb.h> #include <asm/s1d13806.h> static struct s1d13xxxfb_pdata s1d13xxxfb_data = { .initregs = s1d13xxxfb_initregs, .initregssize = ARRAY_SIZE(s1d13xxxfb_initregs), .platform_init_video = NULL, #ifdef CONFIG_PM .platform_suspend_video = NULL, .platform_resume_video = NULL, #endif }; static struct resource s1d13xxxfb_resources[] = { [0] = { .start = 0x10600000UL, .end = 0x1073FFFFUL, .flags = IORESOURCE_MEM, }, [1] = { .start = 0x10400000UL, .end = 0x104001FFUL, .flags = IORESOURCE_MEM, } }; static struct platform_device s1d13xxxfb_device = { .name = S1D_DEVICENAME, .id = 0, .dev = { .platform_data = &s1d13xxxfb_data, }, .num_resources = ARRAY_SIZE(s1d13xxxfb_resources), .resource = s1d13xxxfb_resources, }; #endif static int __init platform_init(void) { #if defined(CONFIG_SMC91X) platform_device_register(&smc91x_device); #endif #if defined(CONFIG_FB_S1D13XXX) platform_device_register(&s1d13xxxfb_device); #endif return 0; } arch_initcall(platform_init);
gpl-2.0
Y300-0100/android_kernel_samsung_SM-G355HN_XEC
drivers/mmc/host/sdhci-pci.c
1973
36732
/* linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface * * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * Thanks to the following companies for their support: * * - JMicron (hardware and technical support) */ #include <linux/delay.h> #include <linux/highmem.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/mmc/host.h> #include <linux/scatterlist.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/pm_runtime.h> #include <linux/mmc/sdhci-pci-data.h> #include "sdhci.h" /* * PCI device IDs */ #define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809 #define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a #define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14 #define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15 #define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16 /* * PCI registers */ #define PCI_SDHCI_IFPIO 0x00 #define PCI_SDHCI_IFDMA 0x01 #define PCI_SDHCI_IFVENDOR 0x02 #define PCI_SLOT_INFO 0x40 /* 8 bits */ #define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7) #define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07 #define MAX_SLOTS 8 struct sdhci_pci_chip; struct sdhci_pci_slot; struct sdhci_pci_fixes { unsigned int quirks; unsigned int quirks2; bool allow_runtime_pm; int (*probe) (struct sdhci_pci_chip *); int (*probe_slot) (struct sdhci_pci_slot *); void (*remove_slot) (struct sdhci_pci_slot *, int); int (*suspend) (struct sdhci_pci_chip *); int (*resume) (struct sdhci_pci_chip *); }; struct sdhci_pci_slot { struct sdhci_pci_chip *chip; struct sdhci_host *host; struct sdhci_pci_data *data; int pci_bar; int rst_n_gpio; int cd_gpio; int cd_irq; }; struct sdhci_pci_chip { struct pci_dev *pdev; unsigned int quirks; unsigned int quirks2; bool 
allow_runtime_pm; const struct sdhci_pci_fixes *fixes; int num_slots; /* Slots on controller */ struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */ }; /*****************************************************************************\ * * * Hardware specific quirk handling * * * \*****************************************************************************/ static int ricoh_probe(struct sdhci_pci_chip *chip) { if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG || chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY) chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET; return 0; } static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot) { slot->host->caps = ((0x21 << SDHCI_TIMEOUT_CLK_SHIFT) & SDHCI_TIMEOUT_CLK_MASK) | ((0x21 << SDHCI_CLOCK_BASE_SHIFT) & SDHCI_CLOCK_BASE_MASK) | SDHCI_TIMEOUT_CLK_UNIT | SDHCI_CAN_VDD_330 | SDHCI_CAN_DO_HISPD | SDHCI_CAN_DO_SDMA; return 0; } static int ricoh_mmc_resume(struct sdhci_pci_chip *chip) { /* Apply a delay to allow controller to settle */ /* Otherwise it becomes confused if card state changed during suspend */ msleep(500); return 0; } static const struct sdhci_pci_fixes sdhci_ricoh = { .probe = ricoh_probe, .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_FORCE_DMA | SDHCI_QUIRK_CLOCK_BEFORE_RESET, }; static const struct sdhci_pci_fixes sdhci_ricoh_mmc = { .probe_slot = ricoh_mmc_probe_slot, .resume = ricoh_mmc_resume, .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_CLOCK_BEFORE_RESET | SDHCI_QUIRK_NO_CARD_NO_RESET | SDHCI_QUIRK_MISSING_CAPS }; static const struct sdhci_pci_fixes sdhci_ene_712 = { .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE | SDHCI_QUIRK_BROKEN_DMA, }; static const struct sdhci_pci_fixes sdhci_ene_714 = { .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE | SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS | SDHCI_QUIRK_BROKEN_DMA, }; static const struct sdhci_pci_fixes sdhci_cafe = { .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | SDHCI_QUIRK_NO_BUSY_IRQ | SDHCI_QUIRK_BROKEN_CARD_DETECTION | 
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, }; static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot) { slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; return 0; } /* * ADMA operation is disabled for Moorestown platform due to * hardware bugs. */ static int mrst_hc_probe(struct sdhci_pci_chip *chip) { /* * slots number is fixed here for MRST as SDIO3/5 are never used and * have hardware bugs. */ chip->num_slots = 1; return 0; } static int pch_hc_probe_slot(struct sdhci_pci_slot *slot) { slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; return 0; } #ifdef CONFIG_PM_RUNTIME static irqreturn_t sdhci_pci_sd_cd(int irq, void *dev_id) { struct sdhci_pci_slot *slot = dev_id; struct sdhci_host *host = slot->host; mmc_detect_change(host->mmc, msecs_to_jiffies(200)); return IRQ_HANDLED; } static void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot) { int err, irq, gpio = slot->cd_gpio; slot->cd_gpio = -EINVAL; slot->cd_irq = -EINVAL; if (!gpio_is_valid(gpio)) return; err = gpio_request(gpio, "sd_cd"); if (err < 0) goto out; err = gpio_direction_input(gpio); if (err < 0) goto out_free; irq = gpio_to_irq(gpio); if (irq < 0) goto out_free; err = request_irq(irq, sdhci_pci_sd_cd, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "sd_cd", slot); if (err) goto out_free; slot->cd_gpio = gpio; slot->cd_irq = irq; return; out_free: gpio_free(gpio); out: dev_warn(&slot->chip->pdev->dev, "failed to setup card detect wake up\n"); } static void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot) { if (slot->cd_irq >= 0) free_irq(slot->cd_irq, slot); if (gpio_is_valid(slot->cd_gpio)) gpio_free(slot->cd_gpio); } #else static inline void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot) { } static inline void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot) { } #endif static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot) { slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE; slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC | MMC_CAP2_HC_ERASE_SZ; return 0; } static int 
mfd_sdio_probe_slot(struct sdhci_pci_slot *slot) { slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE; return 0; } static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = { .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, .probe_slot = mrst_hc_probe_slot, }; static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = { .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, .probe = mrst_hc_probe, }; static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = { .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .allow_runtime_pm = true, }; static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = { .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, .allow_runtime_pm = true, .probe_slot = mfd_sdio_probe_slot, }; static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = { .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .allow_runtime_pm = true, .probe_slot = mfd_emmc_probe_slot, }; static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = { .quirks = SDHCI_QUIRK_BROKEN_ADMA, .probe_slot = pch_hc_probe_slot, }; static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) { slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE; slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ; return 0; } static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) { slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE; return 0; } static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { .allow_runtime_pm = true, .probe_slot = byt_emmc_probe_slot, }; static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, .allow_runtime_pm = true, .probe_slot = byt_sdio_probe_slot, }; static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { }; /* O2Micro extra registers */ #define O2_SD_LOCK_WP 0xD3 #define O2_SD_MULTI_VCC3V 0xEE #define O2_SD_CLKREQ 0xEC #define O2_SD_CAPS 0xE0 #define O2_SD_ADMA1 0xE2 #define O2_SD_ADMA2 0xE7 #define O2_SD_INF_MOD 
0xF1 static int o2_probe(struct sdhci_pci_chip *chip) { int ret; u8 scratch; switch (chip->pdev->device) { case PCI_DEVICE_ID_O2_8220: case PCI_DEVICE_ID_O2_8221: case PCI_DEVICE_ID_O2_8320: case PCI_DEVICE_ID_O2_8321: /* This extra setup is required due to broken ADMA. */ ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); if (ret) return ret; scratch &= 0x7f; pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); /* Set Multi 3 to VCC3V# */ pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08); /* Disable CLK_REQ# support after media DET */ ret = pci_read_config_byte(chip->pdev, O2_SD_CLKREQ, &scratch); if (ret) return ret; scratch |= 0x20; pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch); /* Choose capabilities, enable SDMA. We have to write 0x01 * to the capabilities register first to unlock it. */ ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch); if (ret) return ret; scratch |= 0x01; pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch); pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73); /* Disable ADMA1/2 */ pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39); pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08); /* Disable the infinite transfer mode */ ret = pci_read_config_byte(chip->pdev, O2_SD_INF_MOD, &scratch); if (ret) return ret; scratch |= 0x08; pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch); /* Lock WP */ ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); if (ret) return ret; scratch |= 0x80; pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); } return 0; } static int jmicron_pmos(struct sdhci_pci_chip *chip, int on) { u8 scratch; int ret; ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch); if (ret) return ret; /* * Turn PMOS on [bit 0], set over current detection to 2.4 V * [bit 1:2] and enable over current debouncing [bit 6]. 
*/ if (on) scratch |= 0x47; else scratch &= ~0x47; ret = pci_write_config_byte(chip->pdev, 0xAE, scratch); if (ret) return ret; return 0; } static int jmicron_probe(struct sdhci_pci_chip *chip) { int ret; u16 mmcdev = 0; if (chip->pdev->revision == 0) { chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE | SDHCI_QUIRK_32BIT_ADMA_SIZE | SDHCI_QUIRK_RESET_AFTER_REQUEST | SDHCI_QUIRK_BROKEN_SMALL_PIO; } /* * JMicron chips can have two interfaces to the same hardware * in order to work around limitations in Microsoft's driver. * We need to make sure we only bind to one of them. * * This code assumes two things: * * 1. The PCI code adds subfunctions in order. * * 2. The MMC interface has a lower subfunction number * than the SD interface. */ if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC; else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD) mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD; if (mmcdev) { struct pci_dev *sd_dev; sd_dev = NULL; while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON, mmcdev, sd_dev)) != NULL) { if ((PCI_SLOT(chip->pdev->devfn) == PCI_SLOT(sd_dev->devfn)) && (chip->pdev->bus == sd_dev->bus)) break; } if (sd_dev) { pci_dev_put(sd_dev); dev_info(&chip->pdev->dev, "Refusing to bind to " "secondary interface.\n"); return -ENODEV; } } /* * JMicron chips need a bit of a nudge to enable the power * output pins. 
*/ ret = jmicron_pmos(chip, 1); if (ret) { dev_err(&chip->pdev->dev, "Failure enabling card power\n"); return ret; } /* quirk for unsable RO-detection on JM388 chips */ if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD || chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT; return 0; } static void jmicron_enable_mmc(struct sdhci_host *host, int on) { u8 scratch; scratch = readb(host->ioaddr + 0xC0); if (on) scratch |= 0x01; else scratch &= ~0x01; writeb(scratch, host->ioaddr + 0xC0); } static int jmicron_probe_slot(struct sdhci_pci_slot *slot) { if (slot->chip->pdev->revision == 0) { u16 version; version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION); version = (version & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; /* * Older versions of the chip have lots of nasty glitches * in the ADMA engine. It's best just to avoid it * completely. */ if (version < 0xAC) slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; } /* JM388 MMC doesn't support 1.8V while SD supports it */ if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_29_30 | MMC_VDD_30_31 | MMC_VDD_165_195; /* allow 1.8V */ slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */ } /* * The secondary interface requires a bit set to get the * interrupts. 
*/ if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) jmicron_enable_mmc(slot->host, 1); slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST; return 0; } static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead) { if (dead) return; if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) jmicron_enable_mmc(slot->host, 0); } static int jmicron_suspend(struct sdhci_pci_chip *chip) { int i; if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { for (i = 0; i < chip->num_slots; i++) jmicron_enable_mmc(chip->slots[i]->host, 0); } return 0; } static int jmicron_resume(struct sdhci_pci_chip *chip) { int ret, i; if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { for (i = 0; i < chip->num_slots; i++) jmicron_enable_mmc(chip->slots[i]->host, 1); } ret = jmicron_pmos(chip, 1); if (ret) { dev_err(&chip->pdev->dev, "Failure enabling card power\n"); return ret; } return 0; } static const struct sdhci_pci_fixes sdhci_o2 = { .probe = o2_probe, }; static const struct sdhci_pci_fixes sdhci_jmicron = { .probe = jmicron_probe, .probe_slot = jmicron_probe_slot, .remove_slot = jmicron_remove_slot, .suspend = jmicron_suspend, .resume = jmicron_resume, }; /* SysKonnect CardBus2SDIO extra registers */ #define SYSKT_CTRL 0x200 #define SYSKT_RDFIFO_STAT 0x204 #define SYSKT_WRFIFO_STAT 0x208 #define SYSKT_POWER_DATA 0x20c #define SYSKT_POWER_330 0xef #define SYSKT_POWER_300 0xf8 #define SYSKT_POWER_184 0xcc #define SYSKT_POWER_CMD 0x20d #define SYSKT_POWER_START (1 << 7) #define SYSKT_POWER_STATUS 0x20e #define SYSKT_POWER_STATUS_OK (1 << 0) #define SYSKT_BOARD_REV 0x210 #define SYSKT_CHIP_REV 0x211 #define SYSKT_CONF_DATA 0x212 #define SYSKT_CONF_DATA_1V8 (1 << 2) #define SYSKT_CONF_DATA_2V5 (1 << 1) 
#define SYSKT_CONF_DATA_3V3 (1 << 0) static int syskt_probe(struct sdhci_pci_chip *chip) { if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { chip->pdev->class &= ~0x0000FF; chip->pdev->class |= PCI_SDHCI_IFDMA; } return 0; } static int syskt_probe_slot(struct sdhci_pci_slot *slot) { int tm, ps; u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV); u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV); dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, " "board rev %d.%d, chip rev %d.%d\n", board_rev >> 4, board_rev & 0xf, chip_rev >> 4, chip_rev & 0xf); if (chip_rev >= 0x20) slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA; writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA); writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD); udelay(50); tm = 10; /* Wait max 1 ms */ do { ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS); if (ps & SYSKT_POWER_STATUS_OK) break; udelay(100); } while (--tm); if (!tm) { dev_err(&slot->chip->pdev->dev, "power regulator never stabilized"); writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD); return -ENODEV; } return 0; } static const struct sdhci_pci_fixes sdhci_syskt = { .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER, .probe = syskt_probe, .probe_slot = syskt_probe_slot, }; static int via_probe(struct sdhci_pci_chip *chip) { if (chip->pdev->revision == 0x10) chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER; return 0; } static const struct sdhci_pci_fixes sdhci_via = { .probe = via_probe, }; static const struct pci_device_id pci_ids[] = { { .vendor = PCI_VENDOR_ID_RICOH, .device = PCI_DEVICE_ID_RICOH_R5C822, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_ricoh, }, { .vendor = PCI_VENDOR_ID_RICOH, .device = 0x843, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc, }, { .vendor = PCI_VENDOR_ID_RICOH, .device = 0xe822, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = 
(kernel_ulong_t)&sdhci_ricoh_mmc, }, { .vendor = PCI_VENDOR_ID_RICOH, .device = 0xe823, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc, }, { .vendor = PCI_VENDOR_ID_ENE, .device = PCI_DEVICE_ID_ENE_CB712_SD, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_ene_712, }, { .vendor = PCI_VENDOR_ID_ENE, .device = PCI_DEVICE_ID_ENE_CB712_SD_2, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_ene_712, }, { .vendor = PCI_VENDOR_ID_ENE, .device = PCI_DEVICE_ID_ENE_CB714_SD, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_ene_714, }, { .vendor = PCI_VENDOR_ID_ENE, .device = PCI_DEVICE_ID_ENE_CB714_SD_2, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_ene_714, }, { .vendor = PCI_VENDOR_ID_MARVELL, .device = PCI_DEVICE_ID_MARVELL_88ALP01_SD, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_cafe, }, { .vendor = PCI_VENDOR_ID_JMICRON, .device = PCI_DEVICE_ID_JMICRON_JMB38X_SD, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_jmicron, }, { .vendor = PCI_VENDOR_ID_JMICRON, .device = PCI_DEVICE_ID_JMICRON_JMB38X_MMC, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_jmicron, }, { .vendor = PCI_VENDOR_ID_JMICRON, .device = PCI_DEVICE_ID_JMICRON_JMB388_SD, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_jmicron, }, { .vendor = PCI_VENDOR_ID_JMICRON, .device = PCI_DEVICE_ID_JMICRON_JMB388_ESD, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_jmicron, }, { .vendor = PCI_VENDOR_ID_SYSKONNECT, .device = 0x8000, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_syskt, }, { .vendor = PCI_VENDOR_ID_VIA, .device = 0x95d0, .subvendor = PCI_ANY_ID, .subdevice 
= PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_via, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_MRST_SD0, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc0, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_MRST_SD1, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_MRST_SD2, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_MFD_SD, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sd, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_MFD_SDIO1, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_MFD_SDIO2, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_MFD_EMMC0, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_MFD_EMMC1, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_PCH_SDIO0, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_pch_sdio, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_PCH_SDIO1, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_pch_sdio, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_BYT_EMMC, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_BYT_SDIO, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_BYT_SD, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd, }, { .vendor = PCI_VENDOR_ID_O2, .device = PCI_DEVICE_ID_O2_8120, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_o2, }, { .vendor = PCI_VENDOR_ID_O2, .device = PCI_DEVICE_ID_O2_8220, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_o2, }, { .vendor = PCI_VENDOR_ID_O2, .device = PCI_DEVICE_ID_O2_8221, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_o2, }, { .vendor = PCI_VENDOR_ID_O2, .device = PCI_DEVICE_ID_O2_8320, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_o2, }, { .vendor = PCI_VENDOR_ID_O2, .device = PCI_DEVICE_ID_O2_8321, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_o2, }, { /* Generic SD host controller */ PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) }, { /* end: all zeroes */ }, }; MODULE_DEVICE_TABLE(pci, pci_ids); /*****************************************************************************\ * * * SDHCI core callbacks * * * \*****************************************************************************/ static int sdhci_pci_enable_dma(struct sdhci_host *host) { struct sdhci_pci_slot *slot; struct pci_dev *pdev; int ret; slot = sdhci_priv(host); pdev = slot->chip->pdev; if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) && ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) && (host->flags & SDHCI_USE_SDMA)) { dev_warn(&pdev->dev, "Will use DMA mode even though HW " "doesn't fully claim to support it.\n"); } ret = 
pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) return ret; pci_set_master(pdev); return 0; } static int sdhci_pci_bus_width(struct sdhci_host *host, int width) { u8 ctrl; ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); switch (width) { case MMC_BUS_WIDTH_8: ctrl |= SDHCI_CTRL_8BITBUS; ctrl &= ~SDHCI_CTRL_4BITBUS; break; case MMC_BUS_WIDTH_4: ctrl |= SDHCI_CTRL_4BITBUS; ctrl &= ~SDHCI_CTRL_8BITBUS; break; default: ctrl &= ~(SDHCI_CTRL_8BITBUS | SDHCI_CTRL_4BITBUS); break; } sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); return 0; } static void sdhci_pci_hw_reset(struct sdhci_host *host) { struct sdhci_pci_slot *slot = sdhci_priv(host); int rst_n_gpio = slot->rst_n_gpio; if (!gpio_is_valid(rst_n_gpio)) return; gpio_set_value_cansleep(rst_n_gpio, 0); /* For eMMC, minimum is 1us but give it 10us for good measure */ udelay(10); gpio_set_value_cansleep(rst_n_gpio, 1); /* For eMMC, minimum is 200us but give it 300us for good measure */ usleep_range(300, 1000); } static const struct sdhci_ops sdhci_pci_ops = { .enable_dma = sdhci_pci_enable_dma, .platform_bus_width = sdhci_pci_bus_width, .hw_reset = sdhci_pci_hw_reset, }; /*****************************************************************************\ * * * Suspend/resume * * * \*****************************************************************************/ #ifdef CONFIG_PM static int sdhci_pci_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct sdhci_pci_chip *chip; struct sdhci_pci_slot *slot; mmc_pm_flag_t slot_pm_flags; mmc_pm_flag_t pm_flags = 0; int i, ret; chip = pci_get_drvdata(pdev); if (!chip) return 0; for (i = 0; i < chip->num_slots; i++) { slot = chip->slots[i]; if (!slot) continue; ret = sdhci_suspend_host(slot->host); if (ret) goto err_pci_suspend; slot_pm_flags = slot->host->mmc->pm_flags; if (slot_pm_flags & MMC_PM_WAKE_SDIO_IRQ) sdhci_enable_irq_wakeups(slot->host); pm_flags |= slot_pm_flags; } if (chip->fixes && chip->fixes->suspend) { ret = chip->fixes->suspend(chip); if (ret) 
goto err_pci_suspend; } pci_save_state(pdev); if (pm_flags & MMC_PM_KEEP_POWER) { if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) { pci_pme_active(pdev, true); pci_enable_wake(pdev, PCI_D3hot, 1); } pci_set_power_state(pdev, PCI_D3hot); } else { pci_enable_wake(pdev, PCI_D3hot, 0); pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); } return 0; err_pci_suspend: while (--i >= 0) sdhci_resume_host(chip->slots[i]->host); return ret; } static int sdhci_pci_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct sdhci_pci_chip *chip; struct sdhci_pci_slot *slot; int i, ret; chip = pci_get_drvdata(pdev); if (!chip) return 0; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); ret = pci_enable_device(pdev); if (ret) return ret; if (chip->fixes && chip->fixes->resume) { ret = chip->fixes->resume(chip); if (ret) return ret; } for (i = 0; i < chip->num_slots; i++) { slot = chip->slots[i]; if (!slot) continue; ret = sdhci_resume_host(slot->host); if (ret) return ret; } return 0; } #else /* CONFIG_PM */ #define sdhci_pci_suspend NULL #define sdhci_pci_resume NULL #endif /* CONFIG_PM */ #ifdef CONFIG_PM_RUNTIME static int sdhci_pci_runtime_suspend(struct device *dev) { struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); struct sdhci_pci_chip *chip; struct sdhci_pci_slot *slot; int i, ret; chip = pci_get_drvdata(pdev); if (!chip) return 0; for (i = 0; i < chip->num_slots; i++) { slot = chip->slots[i]; if (!slot) continue; ret = sdhci_runtime_suspend_host(slot->host); if (ret) goto err_pci_runtime_suspend; } if (chip->fixes && chip->fixes->suspend) { ret = chip->fixes->suspend(chip); if (ret) goto err_pci_runtime_suspend; } return 0; err_pci_runtime_suspend: while (--i >= 0) sdhci_runtime_resume_host(chip->slots[i]->host); return ret; } static int sdhci_pci_runtime_resume(struct device *dev) { struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); struct sdhci_pci_chip *chip; struct sdhci_pci_slot *slot; int i, ret; chip = 
pci_get_drvdata(pdev); if (!chip) return 0; if (chip->fixes && chip->fixes->resume) { ret = chip->fixes->resume(chip); if (ret) return ret; } for (i = 0; i < chip->num_slots; i++) { slot = chip->slots[i]; if (!slot) continue; ret = sdhci_runtime_resume_host(slot->host); if (ret) return ret; } return 0; } static int sdhci_pci_runtime_idle(struct device *dev) { return 0; } #else #define sdhci_pci_runtime_suspend NULL #define sdhci_pci_runtime_resume NULL #define sdhci_pci_runtime_idle NULL #endif static const struct dev_pm_ops sdhci_pci_pm_ops = { .suspend = sdhci_pci_suspend, .resume = sdhci_pci_resume, .runtime_suspend = sdhci_pci_runtime_suspend, .runtime_resume = sdhci_pci_runtime_resume, .runtime_idle = sdhci_pci_runtime_idle, }; /*****************************************************************************\ * * * Device probing/removal * * * \*****************************************************************************/ static struct sdhci_pci_slot *sdhci_pci_probe_slot( struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar, int slotno) { struct sdhci_pci_slot *slot; struct sdhci_host *host; int ret, bar = first_bar + slotno; if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar); return ERR_PTR(-ENODEV); } if (pci_resource_len(pdev, bar) < 0x100) { dev_err(&pdev->dev, "Invalid iomem size. You may " "experience problems.\n"); } if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n"); return ERR_PTR(-ENODEV); } if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) { dev_err(&pdev->dev, "Unknown interface. 
Aborting.\n"); return ERR_PTR(-ENODEV); } host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot)); if (IS_ERR(host)) { dev_err(&pdev->dev, "cannot allocate host\n"); return ERR_CAST(host); } slot = sdhci_priv(host); slot->chip = chip; slot->host = host; slot->pci_bar = bar; slot->rst_n_gpio = -EINVAL; slot->cd_gpio = -EINVAL; /* Retrieve platform data if there is any */ if (*sdhci_pci_get_data) slot->data = sdhci_pci_get_data(pdev, slotno); if (slot->data) { if (slot->data->setup) { ret = slot->data->setup(slot->data); if (ret) { dev_err(&pdev->dev, "platform setup failed\n"); goto free; } } slot->rst_n_gpio = slot->data->rst_n_gpio; slot->cd_gpio = slot->data->cd_gpio; } host->hw_name = "PCI"; host->ops = &sdhci_pci_ops; host->quirks = chip->quirks; host->quirks2 = chip->quirks2; host->irq = pdev->irq; ret = pci_request_region(pdev, bar, mmc_hostname(host->mmc)); if (ret) { dev_err(&pdev->dev, "cannot request region\n"); goto cleanup; } host->ioaddr = pci_ioremap_bar(pdev, bar); if (!host->ioaddr) { dev_err(&pdev->dev, "failed to remap registers\n"); ret = -ENOMEM; goto release; } if (chip->fixes && chip->fixes->probe_slot) { ret = chip->fixes->probe_slot(slot); if (ret) goto unmap; } if (gpio_is_valid(slot->rst_n_gpio)) { if (!gpio_request(slot->rst_n_gpio, "eMMC_reset")) { gpio_direction_output(slot->rst_n_gpio, 1); slot->host->mmc->caps |= MMC_CAP_HW_RESET; } else { dev_warn(&pdev->dev, "failed to request rst_n_gpio\n"); slot->rst_n_gpio = -EINVAL; } } host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ; host->mmc->slotno = slotno; host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP; ret = sdhci_add_host(host); if (ret) goto remove; sdhci_pci_add_own_cd(slot); return slot; remove: if (gpio_is_valid(slot->rst_n_gpio)) gpio_free(slot->rst_n_gpio); if (chip->fixes && chip->fixes->remove_slot) chip->fixes->remove_slot(slot, 0); unmap: iounmap(host->ioaddr); release: pci_release_region(pdev, bar); cleanup: if (slot->data && slot->data->cleanup) 
slot->data->cleanup(slot->data); free: sdhci_free_host(host); return ERR_PTR(ret); } static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot) { int dead; u32 scratch; sdhci_pci_remove_own_cd(slot); dead = 0; scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS); if (scratch == (u32)-1) dead = 1; sdhci_remove_host(slot->host, dead); if (gpio_is_valid(slot->rst_n_gpio)) gpio_free(slot->rst_n_gpio); if (slot->chip->fixes && slot->chip->fixes->remove_slot) slot->chip->fixes->remove_slot(slot, dead); if (slot->data && slot->data->cleanup) slot->data->cleanup(slot->data); pci_release_region(slot->chip->pdev, slot->pci_bar); sdhci_free_host(slot->host); } static void sdhci_pci_runtime_pm_allow(struct device *dev) { pm_runtime_put_noidle(dev); pm_runtime_allow(dev); pm_runtime_set_autosuspend_delay(dev, 50); pm_runtime_use_autosuspend(dev); pm_suspend_ignore_children(dev, 1); } static void sdhci_pci_runtime_pm_forbid(struct device *dev) { pm_runtime_forbid(dev); pm_runtime_get_noresume(dev); } static int sdhci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct sdhci_pci_chip *chip; struct sdhci_pci_slot *slot; u8 slots, first_bar; int ret, i; BUG_ON(pdev == NULL); BUG_ON(ent == NULL); dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n", (int)pdev->vendor, (int)pdev->device, (int)pdev->revision); ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); if (ret) return ret; slots = PCI_SLOT_INFO_SLOTS(slots) + 1; dev_dbg(&pdev->dev, "found %d slot(s)\n", slots); if (slots == 0) return -ENODEV; BUG_ON(slots > MAX_SLOTS); ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar); if (ret) return ret; first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK; if (first_bar > 5) { dev_err(&pdev->dev, "Invalid first BAR. 
Aborting.\n"); return -ENODEV; } ret = pci_enable_device(pdev); if (ret) return ret; chip = kzalloc(sizeof(struct sdhci_pci_chip), GFP_KERNEL); if (!chip) { ret = -ENOMEM; goto err; } chip->pdev = pdev; chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data; if (chip->fixes) { chip->quirks = chip->fixes->quirks; chip->quirks2 = chip->fixes->quirks2; chip->allow_runtime_pm = chip->fixes->allow_runtime_pm; } chip->num_slots = slots; pci_set_drvdata(pdev, chip); if (chip->fixes && chip->fixes->probe) { ret = chip->fixes->probe(chip); if (ret) goto free; } slots = chip->num_slots; /* Quirk may have changed this */ for (i = 0; i < slots; i++) { slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i); if (IS_ERR(slot)) { for (i--; i >= 0; i--) sdhci_pci_remove_slot(chip->slots[i]); ret = PTR_ERR(slot); goto free; } chip->slots[i] = slot; } if (chip->allow_runtime_pm) sdhci_pci_runtime_pm_allow(&pdev->dev); return 0; free: pci_set_drvdata(pdev, NULL); kfree(chip); err: pci_disable_device(pdev); return ret; } static void sdhci_pci_remove(struct pci_dev *pdev) { int i; struct sdhci_pci_chip *chip; chip = pci_get_drvdata(pdev); if (chip) { if (chip->allow_runtime_pm) sdhci_pci_runtime_pm_forbid(&pdev->dev); for (i = 0; i < chip->num_slots; i++) sdhci_pci_remove_slot(chip->slots[i]); pci_set_drvdata(pdev, NULL); kfree(chip); } pci_disable_device(pdev); } static struct pci_driver sdhci_driver = { .name = "sdhci-pci", .id_table = pci_ids, .probe = sdhci_pci_probe, .remove = sdhci_pci_remove, .driver = { .pm = &sdhci_pci_pm_ops }, }; module_pci_driver(sdhci_driver); MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver"); MODULE_LICENSE("GPL");
gpl-2.0
Andrew-Gazizov/linux-beagle-npi
net/phonet/af_phonet.c
2229
13015
/* * File: af_phonet.c * * Phonet protocols family * * Copyright (C) 2008 Nokia Corporation. * * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com> * Original author: Sakari Ailus <sakari.ailus@nokia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <asm/unaligned.h> #include <net/sock.h> #include <linux/if_phonet.h> #include <linux/phonet.h> #include <net/phonet/phonet.h> #include <net/phonet/pn_dev.h> /* Transport protocol registration */ static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly; static struct phonet_protocol *phonet_proto_get(unsigned int protocol) { struct phonet_protocol *pp; if (protocol >= PHONET_NPROTO) return NULL; rcu_read_lock(); pp = rcu_dereference(proto_tab[protocol]); if (pp && !try_module_get(pp->prot->owner)) pp = NULL; rcu_read_unlock(); return pp; } static inline void phonet_proto_put(struct phonet_protocol *pp) { module_put(pp->prot->owner); } /* protocol family functions */ static int pn_socket_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct pn_sock *pn; struct phonet_protocol *pnp; int err; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (protocol == 0) { /* Default protocol selection */ switch (sock->type) { case SOCK_DGRAM: protocol = PN_PROTO_PHONET; break; case SOCK_SEQPACKET: protocol = 
PN_PROTO_PIPE; break; default: return -EPROTONOSUPPORT; } } pnp = phonet_proto_get(protocol); if (pnp == NULL && request_module("net-pf-%d-proto-%d", PF_PHONET, protocol) == 0) pnp = phonet_proto_get(protocol); if (pnp == NULL) return -EPROTONOSUPPORT; if (sock->type != pnp->sock_type) { err = -EPROTONOSUPPORT; goto out; } sk = sk_alloc(net, PF_PHONET, GFP_KERNEL, pnp->prot); if (sk == NULL) { err = -ENOMEM; goto out; } sock_init_data(sock, sk); sock->state = SS_UNCONNECTED; sock->ops = pnp->ops; sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; sk->sk_protocol = protocol; pn = pn_sk(sk); pn->sobject = 0; pn->dobject = 0; pn->resource = 0; sk->sk_prot->init(sk); err = 0; out: phonet_proto_put(pnp); return err; } static const struct net_proto_family phonet_proto_family = { .family = PF_PHONET, .create = pn_socket_create, .owner = THIS_MODULE, }; /* Phonet device header operations */ static int pn_header_create(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { u8 *media = skb_push(skb, 1); if (type != ETH_P_PHONET) return -1; if (!saddr) saddr = dev->dev_addr; *media = *(const u8 *)saddr; return 1; } static int pn_header_parse(const struct sk_buff *skb, unsigned char *haddr) { const u8 *media = skb_mac_header(skb); *haddr = *media; return 1; } struct header_ops phonet_header_ops = { .create = pn_header_create, .parse = pn_header_parse, }; EXPORT_SYMBOL(phonet_header_ops); /* * Prepends an ISI header and sends a datagram. 
*/ static int pn_send(struct sk_buff *skb, struct net_device *dev, u16 dst, u16 src, u8 res, u8 irq) { struct phonethdr *ph; int err; if (skb->len + 2 > 0xffff /* Phonet length field limit */ || skb->len + sizeof(struct phonethdr) > dev->mtu) { err = -EMSGSIZE; goto drop; } /* Broadcast sending is not implemented */ if (pn_addr(dst) == PNADDR_BROADCAST) { err = -EOPNOTSUPP; goto drop; } skb_reset_transport_header(skb); WARN_ON(skb_headroom(skb) & 1); /* HW assumes word alignment */ skb_push(skb, sizeof(struct phonethdr)); skb_reset_network_header(skb); ph = pn_hdr(skb); ph->pn_rdev = pn_dev(dst); ph->pn_sdev = pn_dev(src); ph->pn_res = res; ph->pn_length = __cpu_to_be16(skb->len + 2 - sizeof(*ph)); ph->pn_robj = pn_obj(dst); ph->pn_sobj = pn_obj(src); skb->protocol = htons(ETH_P_PHONET); skb->priority = 0; skb->dev = dev; if (skb->pkt_type == PACKET_LOOPBACK) { skb_reset_mac_header(skb); skb_orphan(skb); err = (irq ? netif_rx(skb) : netif_rx_ni(skb)) ? -ENOBUFS : 0; } else { err = dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, skb->len); if (err < 0) { err = -EHOSTUNREACH; goto drop; } err = dev_queue_xmit(skb); if (unlikely(err > 0)) err = net_xmit_errno(err); } return err; drop: kfree_skb(skb); return err; } static int pn_raw_send(const void *data, int len, struct net_device *dev, u16 dst, u16 src, u8 res) { struct sk_buff *skb = alloc_skb(MAX_PHONET_HEADER + len, GFP_ATOMIC); if (skb == NULL) return -ENOMEM; if (phonet_address_lookup(dev_net(dev), pn_addr(dst)) == 0) skb->pkt_type = PACKET_LOOPBACK; skb_reserve(skb, MAX_PHONET_HEADER); __skb_put(skb, len); skb_copy_to_linear_data(skb, data, len); return pn_send(skb, dev, dst, src, res, 1); } /* * Create a Phonet header for the skb and send it out. Returns * non-zero error code if failed. The skb is freed then. 
*/ int pn_skb_send(struct sock *sk, struct sk_buff *skb, const struct sockaddr_pn *target) { struct net *net = sock_net(sk); struct net_device *dev; struct pn_sock *pn = pn_sk(sk); int err; u16 src, dst; u8 daddr, saddr, res; src = pn->sobject; if (target != NULL) { dst = pn_sockaddr_get_object(target); res = pn_sockaddr_get_resource(target); } else { dst = pn->dobject; res = pn->resource; } daddr = pn_addr(dst); err = -EHOSTUNREACH; if (sk->sk_bound_dev_if) dev = dev_get_by_index(net, sk->sk_bound_dev_if); else if (phonet_address_lookup(net, daddr) == 0) { dev = phonet_device_get(net); skb->pkt_type = PACKET_LOOPBACK; } else if (dst == 0) { /* Resource routing (small race until phonet_rcv()) */ struct sock *sk = pn_find_sock_by_res(net, res); if (sk) { sock_put(sk); dev = phonet_device_get(net); skb->pkt_type = PACKET_LOOPBACK; } else dev = phonet_route_output(net, daddr); } else dev = phonet_route_output(net, daddr); if (!dev || !(dev->flags & IFF_UP)) goto drop; saddr = phonet_address_get(dev, daddr); if (saddr == PN_NO_ADDR) goto drop; if (!pn_addr(src)) src = pn_object(saddr, pn_obj(src)); err = pn_send(skb, dev, dst, src, res, 0); dev_put(dev); return err; drop: kfree_skb(skb); if (dev) dev_put(dev); return err; } EXPORT_SYMBOL(pn_skb_send); /* Do not send an error message in response to an error message */ static inline int can_respond(struct sk_buff *skb) { const struct phonethdr *ph; const struct phonetmsg *pm; u8 submsg_id; if (!pskb_may_pull(skb, 3)) return 0; ph = pn_hdr(skb); if (ph->pn_res == PN_PREFIX && !pskb_may_pull(skb, 5)) return 0; if (ph->pn_res == PN_COMMGR) /* indications */ return 0; ph = pn_hdr(skb); /* re-acquires the pointer */ pm = pn_msg(skb); if (pm->pn_msg_id != PN_COMMON_MESSAGE) return 1; submsg_id = (ph->pn_res == PN_PREFIX) ? 
pm->pn_e_submsg_id : pm->pn_submsg_id; if (submsg_id != PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP && pm->pn_e_submsg_id != PN_COMM_SERVICE_NOT_IDENTIFIED_RESP) return 1; return 0; } static int send_obj_unreachable(struct sk_buff *rskb) { const struct phonethdr *oph = pn_hdr(rskb); const struct phonetmsg *opm = pn_msg(rskb); struct phonetmsg resp; memset(&resp, 0, sizeof(resp)); resp.pn_trans_id = opm->pn_trans_id; resp.pn_msg_id = PN_COMMON_MESSAGE; if (oph->pn_res == PN_PREFIX) { resp.pn_e_res_id = opm->pn_e_res_id; resp.pn_e_submsg_id = PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP; resp.pn_e_orig_msg_id = opm->pn_msg_id; resp.pn_e_status = 0; } else { resp.pn_submsg_id = PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP; resp.pn_orig_msg_id = opm->pn_msg_id; resp.pn_status = 0; } return pn_raw_send(&resp, sizeof(resp), rskb->dev, pn_object(oph->pn_sdev, oph->pn_sobj), pn_object(oph->pn_rdev, oph->pn_robj), oph->pn_res); } static int send_reset_indications(struct sk_buff *rskb) { struct phonethdr *oph = pn_hdr(rskb); static const u8 data[4] = { 0x00 /* trans ID */, 0x10 /* subscribe msg */, 0x00 /* subscription count */, 0x00 /* dummy */ }; return pn_raw_send(data, sizeof(data), rskb->dev, pn_object(oph->pn_sdev, 0x00), pn_object(oph->pn_rdev, oph->pn_robj), PN_COMMGR); } /* packet type functions */ /* * Stuff received packets to associated sockets. * On error, returns non-zero and releases the skb. 
*/ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pkttype, struct net_device *orig_dev) { struct net *net = dev_net(dev); struct phonethdr *ph; struct sockaddr_pn sa; u16 len; /* check we have at least a full Phonet header */ if (!pskb_pull(skb, sizeof(struct phonethdr))) goto out; /* check that the advertised length is correct */ ph = pn_hdr(skb); len = get_unaligned_be16(&ph->pn_length); if (len < 2) goto out; len -= 2; if ((len > skb->len) || pskb_trim(skb, len)) goto out; skb_reset_transport_header(skb); pn_skb_get_dst_sockaddr(skb, &sa); /* check if this is broadcasted */ if (pn_sockaddr_get_addr(&sa) == PNADDR_BROADCAST) { pn_deliver_sock_broadcast(net, skb); goto out; } /* resource routing */ if (pn_sockaddr_get_object(&sa) == 0) { struct sock *sk = pn_find_sock_by_res(net, sa.spn_resource); if (sk) return sk_receive_skb(sk, skb, 0); } /* check if we are the destination */ if (phonet_address_lookup(net, pn_sockaddr_get_addr(&sa)) == 0) { /* Phonet packet input */ struct sock *sk = pn_find_sock_by_sa(net, &sa); if (sk) return sk_receive_skb(sk, skb, 0); if (can_respond(skb)) { send_obj_unreachable(skb); send_reset_indications(skb); } } else if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) goto out; /* Race between address deletion and loopback */ else { /* Phonet packet routing */ struct net_device *out_dev; out_dev = phonet_route_output(net, pn_sockaddr_get_addr(&sa)); if (!out_dev) { LIMIT_NETDEBUG(KERN_WARNING"No Phonet route to %02X\n", pn_sockaddr_get_addr(&sa)); goto out; } __skb_push(skb, sizeof(struct phonethdr)); skb->dev = out_dev; if (out_dev == dev) { LIMIT_NETDEBUG(KERN_ERR"Phonet loop to %02X on %s\n", pn_sockaddr_get_addr(&sa), dev->name); goto out_dev; } /* Some drivers (e.g. 
TUN) do not allocate HW header space */ if (skb_cow_head(skb, out_dev->hard_header_len)) goto out_dev; if (dev_hard_header(skb, out_dev, ETH_P_PHONET, NULL, NULL, skb->len) < 0) goto out_dev; dev_queue_xmit(skb); dev_put(out_dev); return NET_RX_SUCCESS; out_dev: dev_put(out_dev); } out: kfree_skb(skb); return NET_RX_DROP; } static struct packet_type phonet_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_PHONET), .func = phonet_rcv, }; static DEFINE_MUTEX(proto_tab_lock); int __init_or_module phonet_proto_register(unsigned int protocol, struct phonet_protocol *pp) { int err = 0; if (protocol >= PHONET_NPROTO) return -EINVAL; err = proto_register(pp->prot, 1); if (err) return err; mutex_lock(&proto_tab_lock); if (proto_tab[protocol]) err = -EBUSY; else rcu_assign_pointer(proto_tab[protocol], pp); mutex_unlock(&proto_tab_lock); return err; } EXPORT_SYMBOL(phonet_proto_register); void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp) { mutex_lock(&proto_tab_lock); BUG_ON(proto_tab[protocol] != pp); rcu_assign_pointer(proto_tab[protocol], NULL); mutex_unlock(&proto_tab_lock); synchronize_rcu(); proto_unregister(pp->prot); } EXPORT_SYMBOL(phonet_proto_unregister); /* Module registration */ static int __init phonet_init(void) { int err; err = phonet_device_init(); if (err) return err; pn_sock_init(); err = sock_register(&phonet_proto_family); if (err) { printk(KERN_ALERT "phonet protocol family initialization failed\n"); goto err_sock; } dev_add_pack(&phonet_packet_type); phonet_sysctl_init(); err = isi_register(); if (err) goto err; return 0; err: phonet_sysctl_exit(); sock_unregister(PF_PHONET); dev_remove_pack(&phonet_packet_type); err_sock: phonet_device_exit(); return err; } static void __exit phonet_exit(void) { isi_unregister(); phonet_sysctl_exit(); sock_unregister(PF_PHONET); dev_remove_pack(&phonet_packet_type); phonet_device_exit(); } module_init(phonet_init); module_exit(phonet_exit); MODULE_DESCRIPTION("Phonet protocol stack 
for Linux"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_PHONET);
gpl-2.0
FEDEVEL/imx6tinyrex-linux-3.0.35
drivers/mmc/host/sdhci-tegra.c
2229
6569
/* * Copyright (C) 2010 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/err.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> #include <mach/gpio.h> #include <mach/sdhci.h> #include "sdhci.h" #include "sdhci-pltfm.h" static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg) { u32 val; if (unlikely(reg == SDHCI_PRESENT_STATE)) { /* Use wp_gpio here instead? */ val = readl(host->ioaddr + reg); return val | SDHCI_WRITE_PROTECT; } return readl(host->ioaddr + reg); } static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg) { if (unlikely(reg == SDHCI_HOST_VERSION)) { /* Erratum: Version register is invalid in HW. */ return SDHCI_SPEC_200; } return readw(host->ioaddr + reg); } static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg) { /* Seems like we're getting spurious timeout and crc errors, so * disable signalling of them. In case of real errors software * timers should take care of eventually detecting them. 
*/ if (unlikely(reg == SDHCI_SIGNAL_ENABLE)) val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC); writel(val, host->ioaddr + reg); if (unlikely(reg == SDHCI_INT_ENABLE)) { /* Erratum: Must enable block gap interrupt detection */ u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL); if (val & SDHCI_INT_CARD_INT) gap_ctrl |= 0x8; else gap_ctrl &= ~0x8; writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL); } } static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci) { struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc)); struct tegra_sdhci_platform_data *plat; plat = pdev->dev.platform_data; if (!gpio_is_valid(plat->wp_gpio)) return -1; return gpio_get_value(plat->wp_gpio); } static irqreturn_t carddetect_irq(int irq, void *data) { struct sdhci_host *sdhost = (struct sdhci_host *)data; tasklet_schedule(&sdhost->card_tasklet); return IRQ_HANDLED; }; static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width) { struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); struct tegra_sdhci_platform_data *plat; u32 ctrl; plat = pdev->dev.platform_data; ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) { ctrl &= ~SDHCI_CTRL_4BITBUS; ctrl |= SDHCI_CTRL_8BITBUS; } else { ctrl &= ~SDHCI_CTRL_8BITBUS; if (bus_width == MMC_BUS_WIDTH_4) ctrl |= SDHCI_CTRL_4BITBUS; else ctrl &= ~SDHCI_CTRL_4BITBUS; } sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); return 0; } static int tegra_sdhci_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); struct tegra_sdhci_platform_data *plat; struct clk *clk; int rc; plat = pdev->dev.platform_data; if (plat == NULL) { dev_err(mmc_dev(host->mmc), "missing platform data\n"); return -ENXIO; } if (gpio_is_valid(plat->power_gpio)) { rc = gpio_request(plat->power_gpio, "sdhci_power"); if (rc) { dev_err(mmc_dev(host->mmc), 
"failed to allocate power gpio\n"); goto out; } tegra_gpio_enable(plat->power_gpio); gpio_direction_output(plat->power_gpio, 1); } if (gpio_is_valid(plat->cd_gpio)) { rc = gpio_request(plat->cd_gpio, "sdhci_cd"); if (rc) { dev_err(mmc_dev(host->mmc), "failed to allocate cd gpio\n"); goto out_power; } tegra_gpio_enable(plat->cd_gpio); gpio_direction_input(plat->cd_gpio); rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq, IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, mmc_hostname(host->mmc), host); if (rc) { dev_err(mmc_dev(host->mmc), "request irq error\n"); goto out_cd; } } if (gpio_is_valid(plat->wp_gpio)) { rc = gpio_request(plat->wp_gpio, "sdhci_wp"); if (rc) { dev_err(mmc_dev(host->mmc), "failed to allocate wp gpio\n"); goto out_irq; } tegra_gpio_enable(plat->wp_gpio); gpio_direction_input(plat->wp_gpio); } clk = clk_get(mmc_dev(host->mmc), NULL); if (IS_ERR(clk)) { dev_err(mmc_dev(host->mmc), "clk err\n"); rc = PTR_ERR(clk); goto out_wp; } clk_enable(clk); pltfm_host->clk = clk; host->mmc->pm_caps = plat->pm_flags; if (plat->is_8bit) host->mmc->caps |= MMC_CAP_8_BIT_DATA; return 0; out_wp: if (gpio_is_valid(plat->wp_gpio)) { tegra_gpio_disable(plat->wp_gpio); gpio_free(plat->wp_gpio); } out_irq: if (gpio_is_valid(plat->cd_gpio)) free_irq(gpio_to_irq(plat->cd_gpio), host); out_cd: if (gpio_is_valid(plat->cd_gpio)) { tegra_gpio_disable(plat->cd_gpio); gpio_free(plat->cd_gpio); } out_power: if (gpio_is_valid(plat->power_gpio)) { tegra_gpio_disable(plat->power_gpio); gpio_free(plat->power_gpio); } out: return rc; } static void tegra_sdhci_pltfm_exit(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); struct tegra_sdhci_platform_data *plat; plat = pdev->dev.platform_data; if (gpio_is_valid(plat->wp_gpio)) { tegra_gpio_disable(plat->wp_gpio); gpio_free(plat->wp_gpio); } if (gpio_is_valid(plat->cd_gpio)) { free_irq(gpio_to_irq(plat->cd_gpio), host); 
tegra_gpio_disable(plat->cd_gpio); gpio_free(plat->cd_gpio); } if (gpio_is_valid(plat->power_gpio)) { tegra_gpio_disable(plat->power_gpio); gpio_free(plat->power_gpio); } clk_disable(pltfm_host->clk); clk_put(pltfm_host->clk); } static struct sdhci_ops tegra_sdhci_ops = { .get_ro = tegra_sdhci_get_ro, .read_l = tegra_sdhci_readl, .read_w = tegra_sdhci_readw, .write_l = tegra_sdhci_writel, .platform_8bit_width = tegra_sdhci_8bit, }; struct sdhci_pltfm_data sdhci_tegra_pdata = { .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_SINGLE_POWER_WRITE | SDHCI_QUIRK_NO_HISPD_BIT | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC, .ops = &tegra_sdhci_ops, .init = tegra_sdhci_pltfm_init, .exit = tegra_sdhci_pltfm_exit, };
gpl-2.0
gem5/linux-arm64-gem5
drivers/hwmon/sch5636.c
3253
17129
/*************************************************************************** * Copyright (C) 2011-2012 Hans de Goede <hdegoede@redhat.com> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the * * Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * ***************************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/platform_device.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include "sch56xx-common.h" #define DRVNAME "sch5636" #define DEVNAME "theseus" /* We only support one model for now */ #define SCH5636_REG_FUJITSU_ID 0x780 #define SCH5636_REG_FUJITSU_REV 0x783 #define SCH5636_NO_INS 5 #define SCH5636_NO_TEMPS 16 #define SCH5636_NO_FANS 8 static const u16 SCH5636_REG_IN_VAL[SCH5636_NO_INS] = { 0x22, 0x23, 0x24, 0x25, 0x189 }; static const u16 SCH5636_REG_IN_FACTORS[SCH5636_NO_INS] = { 4400, 1500, 4000, 4400, 16000 }; static const char * const SCH5636_IN_LABELS[SCH5636_NO_INS] = { "3.3V", "VREF", "VBAT", "3.3AUX", "12V" }; static const u16 SCH5636_REG_TEMP_VAL[SCH5636_NO_TEMPS] = { 0x2B, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x180, 0x181, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C }; #define SCH5636_REG_TEMP_CTRL(i) (0x790 
+ (i)) #define SCH5636_TEMP_WORKING 0x01 #define SCH5636_TEMP_ALARM 0x02 #define SCH5636_TEMP_DEACTIVATED 0x80 static const u16 SCH5636_REG_FAN_VAL[SCH5636_NO_FANS] = { 0x2C, 0x2E, 0x30, 0x32, 0x62, 0x64, 0x66, 0x68 }; #define SCH5636_REG_FAN_CTRL(i) (0x880 + (i)) /* FAULT in datasheet, but acts as an alarm */ #define SCH5636_FAN_ALARM 0x04 #define SCH5636_FAN_NOT_PRESENT 0x08 #define SCH5636_FAN_DEACTIVATED 0x80 struct sch5636_data { unsigned short addr; struct device *hwmon_dev; struct sch56xx_watchdog_data *watchdog; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u8 in[SCH5636_NO_INS]; u8 temp_val[SCH5636_NO_TEMPS]; u8 temp_ctrl[SCH5636_NO_TEMPS]; u16 fan_val[SCH5636_NO_FANS]; u8 fan_ctrl[SCH5636_NO_FANS]; }; static struct sch5636_data *sch5636_update_device(struct device *dev) { struct sch5636_data *data = dev_get_drvdata(dev); struct sch5636_data *ret = data; int i, val; mutex_lock(&data->update_lock); /* Cache the values for 1 second */ if (data->valid && !time_after(jiffies, data->last_updated + HZ)) goto abort; for (i = 0; i < SCH5636_NO_INS; i++) { val = sch56xx_read_virtual_reg(data->addr, SCH5636_REG_IN_VAL[i]); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->in[i] = val; } for (i = 0; i < SCH5636_NO_TEMPS; i++) { if (data->temp_ctrl[i] & SCH5636_TEMP_DEACTIVATED) continue; val = sch56xx_read_virtual_reg(data->addr, SCH5636_REG_TEMP_VAL[i]); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->temp_val[i] = val; val = sch56xx_read_virtual_reg(data->addr, SCH5636_REG_TEMP_CTRL(i)); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->temp_ctrl[i] = val; /* Alarms need to be explicitly write-cleared */ if (val & SCH5636_TEMP_ALARM) { sch56xx_write_virtual_reg(data->addr, SCH5636_REG_TEMP_CTRL(i), val); } } for (i = 0; i < SCH5636_NO_FANS; i++) { if (data->fan_ctrl[i] & SCH5636_FAN_DEACTIVATED) continue; val = 
sch56xx_read_virtual_reg16(data->addr, SCH5636_REG_FAN_VAL[i]); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->fan_val[i] = val; val = sch56xx_read_virtual_reg(data->addr, SCH5636_REG_FAN_CTRL(i)); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->fan_ctrl[i] = val; /* Alarms need to be explicitly write-cleared */ if (val & SCH5636_FAN_ALARM) { sch56xx_write_virtual_reg(data->addr, SCH5636_REG_FAN_CTRL(i), val); } } data->last_updated = jiffies; data->valid = 1; abort: mutex_unlock(&data->update_lock); return ret; } static int reg_to_rpm(u16 reg) { if (reg == 0) return -EIO; if (reg == 0xffff) return 0; return 5400540 / reg; } static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { return snprintf(buf, PAGE_SIZE, "%s\n", DEVNAME); } static ssize_t show_in_value(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct sch5636_data *data = sch5636_update_device(dev); int val; if (IS_ERR(data)) return PTR_ERR(data); val = DIV_ROUND_CLOSEST( data->in[attr->index] * SCH5636_REG_IN_FACTORS[attr->index], 255); return snprintf(buf, PAGE_SIZE, "%d\n", val); } static ssize_t show_in_label(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); return snprintf(buf, PAGE_SIZE, "%s\n", SCH5636_IN_LABELS[attr->index]); } static ssize_t show_temp_value(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct sch5636_data *data = sch5636_update_device(dev); int val; if (IS_ERR(data)) return PTR_ERR(data); val = (data->temp_val[attr->index] - 64) * 1000; return snprintf(buf, PAGE_SIZE, "%d\n", val); } static ssize_t show_temp_fault(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 
struct sch5636_data *data = sch5636_update_device(dev); int val; if (IS_ERR(data)) return PTR_ERR(data); val = (data->temp_ctrl[attr->index] & SCH5636_TEMP_WORKING) ? 0 : 1; return snprintf(buf, PAGE_SIZE, "%d\n", val); } static ssize_t show_temp_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct sch5636_data *data = sch5636_update_device(dev); int val; if (IS_ERR(data)) return PTR_ERR(data); val = (data->temp_ctrl[attr->index] & SCH5636_TEMP_ALARM) ? 1 : 0; return snprintf(buf, PAGE_SIZE, "%d\n", val); } static ssize_t show_fan_value(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct sch5636_data *data = sch5636_update_device(dev); int val; if (IS_ERR(data)) return PTR_ERR(data); val = reg_to_rpm(data->fan_val[attr->index]); if (val < 0) return val; return snprintf(buf, PAGE_SIZE, "%d\n", val); } static ssize_t show_fan_fault(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct sch5636_data *data = sch5636_update_device(dev); int val; if (IS_ERR(data)) return PTR_ERR(data); val = (data->fan_ctrl[attr->index] & SCH5636_FAN_NOT_PRESENT) ? 1 : 0; return snprintf(buf, PAGE_SIZE, "%d\n", val); } static ssize_t show_fan_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct sch5636_data *data = sch5636_update_device(dev); int val; if (IS_ERR(data)) return PTR_ERR(data); val = (data->fan_ctrl[attr->index] & SCH5636_FAN_ALARM) ? 
1 : 0; return snprintf(buf, PAGE_SIZE, "%d\n", val); } static struct sensor_device_attribute sch5636_attr[] = { SENSOR_ATTR(name, 0444, show_name, NULL, 0), SENSOR_ATTR(in0_input, 0444, show_in_value, NULL, 0), SENSOR_ATTR(in0_label, 0444, show_in_label, NULL, 0), SENSOR_ATTR(in1_input, 0444, show_in_value, NULL, 1), SENSOR_ATTR(in1_label, 0444, show_in_label, NULL, 1), SENSOR_ATTR(in2_input, 0444, show_in_value, NULL, 2), SENSOR_ATTR(in2_label, 0444, show_in_label, NULL, 2), SENSOR_ATTR(in3_input, 0444, show_in_value, NULL, 3), SENSOR_ATTR(in3_label, 0444, show_in_label, NULL, 3), SENSOR_ATTR(in4_input, 0444, show_in_value, NULL, 4), SENSOR_ATTR(in4_label, 0444, show_in_label, NULL, 4), }; static struct sensor_device_attribute sch5636_temp_attr[] = { SENSOR_ATTR(temp1_input, 0444, show_temp_value, NULL, 0), SENSOR_ATTR(temp1_fault, 0444, show_temp_fault, NULL, 0), SENSOR_ATTR(temp1_alarm, 0444, show_temp_alarm, NULL, 0), SENSOR_ATTR(temp2_input, 0444, show_temp_value, NULL, 1), SENSOR_ATTR(temp2_fault, 0444, show_temp_fault, NULL, 1), SENSOR_ATTR(temp2_alarm, 0444, show_temp_alarm, NULL, 1), SENSOR_ATTR(temp3_input, 0444, show_temp_value, NULL, 2), SENSOR_ATTR(temp3_fault, 0444, show_temp_fault, NULL, 2), SENSOR_ATTR(temp3_alarm, 0444, show_temp_alarm, NULL, 2), SENSOR_ATTR(temp4_input, 0444, show_temp_value, NULL, 3), SENSOR_ATTR(temp4_fault, 0444, show_temp_fault, NULL, 3), SENSOR_ATTR(temp4_alarm, 0444, show_temp_alarm, NULL, 3), SENSOR_ATTR(temp5_input, 0444, show_temp_value, NULL, 4), SENSOR_ATTR(temp5_fault, 0444, show_temp_fault, NULL, 4), SENSOR_ATTR(temp5_alarm, 0444, show_temp_alarm, NULL, 4), SENSOR_ATTR(temp6_input, 0444, show_temp_value, NULL, 5), SENSOR_ATTR(temp6_fault, 0444, show_temp_fault, NULL, 5), SENSOR_ATTR(temp6_alarm, 0444, show_temp_alarm, NULL, 5), SENSOR_ATTR(temp7_input, 0444, show_temp_value, NULL, 6), SENSOR_ATTR(temp7_fault, 0444, show_temp_fault, NULL, 6), SENSOR_ATTR(temp7_alarm, 0444, show_temp_alarm, NULL, 6), 
SENSOR_ATTR(temp8_input, 0444, show_temp_value, NULL, 7), SENSOR_ATTR(temp8_fault, 0444, show_temp_fault, NULL, 7), SENSOR_ATTR(temp8_alarm, 0444, show_temp_alarm, NULL, 7), SENSOR_ATTR(temp9_input, 0444, show_temp_value, NULL, 8), SENSOR_ATTR(temp9_fault, 0444, show_temp_fault, NULL, 8), SENSOR_ATTR(temp9_alarm, 0444, show_temp_alarm, NULL, 8), SENSOR_ATTR(temp10_input, 0444, show_temp_value, NULL, 9), SENSOR_ATTR(temp10_fault, 0444, show_temp_fault, NULL, 9), SENSOR_ATTR(temp10_alarm, 0444, show_temp_alarm, NULL, 9), SENSOR_ATTR(temp11_input, 0444, show_temp_value, NULL, 10), SENSOR_ATTR(temp11_fault, 0444, show_temp_fault, NULL, 10), SENSOR_ATTR(temp11_alarm, 0444, show_temp_alarm, NULL, 10), SENSOR_ATTR(temp12_input, 0444, show_temp_value, NULL, 11), SENSOR_ATTR(temp12_fault, 0444, show_temp_fault, NULL, 11), SENSOR_ATTR(temp12_alarm, 0444, show_temp_alarm, NULL, 11), SENSOR_ATTR(temp13_input, 0444, show_temp_value, NULL, 12), SENSOR_ATTR(temp13_fault, 0444, show_temp_fault, NULL, 12), SENSOR_ATTR(temp13_alarm, 0444, show_temp_alarm, NULL, 12), SENSOR_ATTR(temp14_input, 0444, show_temp_value, NULL, 13), SENSOR_ATTR(temp14_fault, 0444, show_temp_fault, NULL, 13), SENSOR_ATTR(temp14_alarm, 0444, show_temp_alarm, NULL, 13), SENSOR_ATTR(temp15_input, 0444, show_temp_value, NULL, 14), SENSOR_ATTR(temp15_fault, 0444, show_temp_fault, NULL, 14), SENSOR_ATTR(temp15_alarm, 0444, show_temp_alarm, NULL, 14), SENSOR_ATTR(temp16_input, 0444, show_temp_value, NULL, 15), SENSOR_ATTR(temp16_fault, 0444, show_temp_fault, NULL, 15), SENSOR_ATTR(temp16_alarm, 0444, show_temp_alarm, NULL, 15), }; static struct sensor_device_attribute sch5636_fan_attr[] = { SENSOR_ATTR(fan1_input, 0444, show_fan_value, NULL, 0), SENSOR_ATTR(fan1_fault, 0444, show_fan_fault, NULL, 0), SENSOR_ATTR(fan1_alarm, 0444, show_fan_alarm, NULL, 0), SENSOR_ATTR(fan2_input, 0444, show_fan_value, NULL, 1), SENSOR_ATTR(fan2_fault, 0444, show_fan_fault, NULL, 1), SENSOR_ATTR(fan2_alarm, 0444, show_fan_alarm, 
NULL, 1), SENSOR_ATTR(fan3_input, 0444, show_fan_value, NULL, 2), SENSOR_ATTR(fan3_fault, 0444, show_fan_fault, NULL, 2), SENSOR_ATTR(fan3_alarm, 0444, show_fan_alarm, NULL, 2), SENSOR_ATTR(fan4_input, 0444, show_fan_value, NULL, 3), SENSOR_ATTR(fan4_fault, 0444, show_fan_fault, NULL, 3), SENSOR_ATTR(fan4_alarm, 0444, show_fan_alarm, NULL, 3), SENSOR_ATTR(fan5_input, 0444, show_fan_value, NULL, 4), SENSOR_ATTR(fan5_fault, 0444, show_fan_fault, NULL, 4), SENSOR_ATTR(fan5_alarm, 0444, show_fan_alarm, NULL, 4), SENSOR_ATTR(fan6_input, 0444, show_fan_value, NULL, 5), SENSOR_ATTR(fan6_fault, 0444, show_fan_fault, NULL, 5), SENSOR_ATTR(fan6_alarm, 0444, show_fan_alarm, NULL, 5), SENSOR_ATTR(fan7_input, 0444, show_fan_value, NULL, 6), SENSOR_ATTR(fan7_fault, 0444, show_fan_fault, NULL, 6), SENSOR_ATTR(fan7_alarm, 0444, show_fan_alarm, NULL, 6), SENSOR_ATTR(fan8_input, 0444, show_fan_value, NULL, 7), SENSOR_ATTR(fan8_fault, 0444, show_fan_fault, NULL, 7), SENSOR_ATTR(fan8_alarm, 0444, show_fan_alarm, NULL, 7), }; static int sch5636_remove(struct platform_device *pdev) { struct sch5636_data *data = platform_get_drvdata(pdev); int i; if (data->watchdog) sch56xx_watchdog_unregister(data->watchdog); if (data->hwmon_dev) hwmon_device_unregister(data->hwmon_dev); for (i = 0; i < ARRAY_SIZE(sch5636_attr); i++) device_remove_file(&pdev->dev, &sch5636_attr[i].dev_attr); for (i = 0; i < SCH5636_NO_TEMPS * 3; i++) device_remove_file(&pdev->dev, &sch5636_temp_attr[i].dev_attr); for (i = 0; i < SCH5636_NO_FANS * 3; i++) device_remove_file(&pdev->dev, &sch5636_fan_attr[i].dev_attr); return 0; } static int sch5636_probe(struct platform_device *pdev) { struct sch5636_data *data; int i, err, val, revision[2]; char id[4]; data = devm_kzalloc(&pdev->dev, sizeof(struct sch5636_data), GFP_KERNEL); if (!data) return -ENOMEM; data->addr = platform_get_resource(pdev, IORESOURCE_IO, 0)->start; mutex_init(&data->update_lock); platform_set_drvdata(pdev, data); for (i = 0; i < 3; i++) { val = 
sch56xx_read_virtual_reg(data->addr, SCH5636_REG_FUJITSU_ID + i); if (val < 0) { pr_err("Could not read Fujitsu id byte at %#x\n", SCH5636_REG_FUJITSU_ID + i); err = val; goto error; } id[i] = val; } id[i] = '\0'; if (strcmp(id, "THS")) { pr_err("Unknown Fujitsu id: %02x%02x%02x\n", id[0], id[1], id[2]); err = -ENODEV; goto error; } for (i = 0; i < 2; i++) { val = sch56xx_read_virtual_reg(data->addr, SCH5636_REG_FUJITSU_REV + i); if (val < 0) { err = val; goto error; } revision[i] = val; } pr_info("Found %s chip at %#hx, revison: %d.%02d\n", DEVNAME, data->addr, revision[0], revision[1]); /* Read all temp + fan ctrl registers to determine which are active */ for (i = 0; i < SCH5636_NO_TEMPS; i++) { val = sch56xx_read_virtual_reg(data->addr, SCH5636_REG_TEMP_CTRL(i)); if (unlikely(val < 0)) { err = val; goto error; } data->temp_ctrl[i] = val; } for (i = 0; i < SCH5636_NO_FANS; i++) { val = sch56xx_read_virtual_reg(data->addr, SCH5636_REG_FAN_CTRL(i)); if (unlikely(val < 0)) { err = val; goto error; } data->fan_ctrl[i] = val; } for (i = 0; i < ARRAY_SIZE(sch5636_attr); i++) { err = device_create_file(&pdev->dev, &sch5636_attr[i].dev_attr); if (err) goto error; } for (i = 0; i < (SCH5636_NO_TEMPS * 3); i++) { if (data->temp_ctrl[i/3] & SCH5636_TEMP_DEACTIVATED) continue; err = device_create_file(&pdev->dev, &sch5636_temp_attr[i].dev_attr); if (err) goto error; } for (i = 0; i < (SCH5636_NO_FANS * 3); i++) { if (data->fan_ctrl[i/3] & SCH5636_FAN_DEACTIVATED) continue; err = device_create_file(&pdev->dev, &sch5636_fan_attr[i].dev_attr); if (err) goto error; } data->hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); data->hwmon_dev = NULL; goto error; } /* Note failing to register the watchdog is not a fatal error */ data->watchdog = sch56xx_watchdog_register(&pdev->dev, data->addr, (revision[0] << 8) | revision[1], &data->update_lock, 0); return 0; error: sch5636_remove(pdev); return err; } static struct 
platform_driver sch5636_driver = { .driver = { .owner = THIS_MODULE, .name = DRVNAME, }, .probe = sch5636_probe, .remove = sch5636_remove, }; module_platform_driver(sch5636_driver); MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver"); MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_LICENSE("GPL");
gpl-2.0
somcom3x/kernel_u8800pro
drivers/media/dvb/frontends/dib3000mb.c
3253
23765
/* * Frontend driver for mobile DVB-T demodulator DiBcom 3000M-B * DiBcom (http://www.dibcom.fr/) * * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) * * based on GPL code from DibCom, which has * * Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * * Acknowledgements * * Amaury Demol (ademol@dibcom.fr) from DiBcom for providing specs and driver * sources, on which this driver (and the dvb-dibusb) are based. * * see Documentation/dvb/README.dibusb for more information * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "dib3000.h" #include "dib3000mb_priv.h" /* Version information */ #define DRIVER_VERSION "0.1" #define DRIVER_DESC "DiBcom 3000M-B DVB-T demodulator" #define DRIVER_AUTHOR "Patrick Boettcher, patrick.boettcher@desy.de" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info,2=xfer,4=setfe,8=getfe (|-able))."); #define deb_info(args...) dprintk(0x01,args) #define deb_i2c(args...) dprintk(0x02,args) #define deb_srch(args...) dprintk(0x04,args) #define deb_info(args...) dprintk(0x01,args) #define deb_xfer(args...) dprintk(0x02,args) #define deb_setf(args...) dprintk(0x04,args) #define deb_getf(args...) 
dprintk(0x08,args) static int dib3000_read_reg(struct dib3000_state *state, u16 reg) { u8 wb[] = { ((reg >> 8) | 0x80) & 0xff, reg & 0xff }; u8 rb[2]; struct i2c_msg msg[] = { { .addr = state->config.demod_address, .flags = 0, .buf = wb, .len = 2 }, { .addr = state->config.demod_address, .flags = I2C_M_RD, .buf = rb, .len = 2 }, }; if (i2c_transfer(state->i2c, msg, 2) != 2) deb_i2c("i2c read error\n"); deb_i2c("reading i2c bus (reg: %5d 0x%04x, val: %5d 0x%04x)\n",reg,reg, (rb[0] << 8) | rb[1],(rb[0] << 8) | rb[1]); return (rb[0] << 8) | rb[1]; } static int dib3000_write_reg(struct dib3000_state *state, u16 reg, u16 val) { u8 b[] = { (reg >> 8) & 0xff, reg & 0xff, (val >> 8) & 0xff, val & 0xff, }; struct i2c_msg msg[] = { { .addr = state->config.demod_address, .flags = 0, .buf = b, .len = 4 } }; deb_i2c("writing i2c bus (reg: %5d 0x%04x, val: %5d 0x%04x)\n",reg,reg,val,val); return i2c_transfer(state->i2c,msg, 1) != 1 ? -EREMOTEIO : 0; } static int dib3000_search_status(u16 irq,u16 lock) { if (irq & 0x02) { if (lock & 0x01) { deb_srch("auto search succeeded\n"); return 1; // auto search succeeded } else { deb_srch("auto search not successful\n"); return 0; // auto search failed } } else if (irq & 0x01) { deb_srch("auto search failed\n"); return 0; // auto search failed } return -1; // try again } /* for auto search */ static u16 dib3000_seq[2][2][2] = /* fft,gua, inv */ { /* fft */ { /* gua */ { 0, 1 }, /* 0 0 { 0,1 } */ { 3, 9 }, /* 0 1 { 0,1 } */ }, { { 2, 5 }, /* 1 0 { 0,1 } */ { 6, 11 }, /* 1 1 { 0,1 } */ } }; static int dib3000mb_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *fep); static int dib3000mb_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *fep, int tuner) { struct dib3000_state* state = fe->demodulator_priv; struct dvb_ofdm_parameters *ofdm = &fep->u.ofdm; fe_code_rate_t fe_cr = FEC_NONE; int search_state, seq; if (tuner && fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe, fep); if 
(fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); deb_setf("bandwidth: "); switch (ofdm->bandwidth) { case BANDWIDTH_8_MHZ: deb_setf("8 MHz\n"); wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[2]); wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_8mhz); break; case BANDWIDTH_7_MHZ: deb_setf("7 MHz\n"); wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[1]); wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_7mhz); break; case BANDWIDTH_6_MHZ: deb_setf("6 MHz\n"); wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[0]); wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_6mhz); break; case BANDWIDTH_AUTO: return -EOPNOTSUPP; default: err("unknown bandwidth value."); return -EINVAL; } } wr(DIB3000MB_REG_LOCK1_MASK, DIB3000MB_LOCK1_SEARCH_4); deb_setf("transmission mode: "); switch (ofdm->transmission_mode) { case TRANSMISSION_MODE_2K: deb_setf("2k\n"); wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_2K); break; case TRANSMISSION_MODE_8K: deb_setf("8k\n"); wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_8K); break; case TRANSMISSION_MODE_AUTO: deb_setf("auto\n"); break; default: return -EINVAL; } deb_setf("guard: "); switch (ofdm->guard_interval) { case GUARD_INTERVAL_1_32: deb_setf("1_32\n"); wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_32); break; case GUARD_INTERVAL_1_16: deb_setf("1_16\n"); wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_16); break; case GUARD_INTERVAL_1_8: deb_setf("1_8\n"); wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_8); break; case GUARD_INTERVAL_1_4: deb_setf("1_4\n"); wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_4); break; case GUARD_INTERVAL_AUTO: deb_setf("auto\n"); break; default: return -EINVAL; } deb_setf("inversion: "); switch (fep->inversion) { case INVERSION_OFF: deb_setf("off\n"); wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_OFF); break; case INVERSION_AUTO: deb_setf("auto "); break; case INVERSION_ON: deb_setf("on\n"); wr(DIB3000MB_REG_DDS_INV, 
DIB3000_DDS_INVERSION_ON); break; default: return -EINVAL; } deb_setf("constellation: "); switch (ofdm->constellation) { case QPSK: deb_setf("qpsk\n"); wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_QPSK); break; case QAM_16: deb_setf("qam16\n"); wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_16QAM); break; case QAM_64: deb_setf("qam64\n"); wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_64QAM); break; case QAM_AUTO: break; default: return -EINVAL; } deb_setf("hierarchy: "); switch (ofdm->hierarchy_information) { case HIERARCHY_NONE: deb_setf("none "); /* fall through */ case HIERARCHY_1: deb_setf("alpha=1\n"); wr(DIB3000MB_REG_VIT_ALPHA, DIB3000_ALPHA_1); break; case HIERARCHY_2: deb_setf("alpha=2\n"); wr(DIB3000MB_REG_VIT_ALPHA, DIB3000_ALPHA_2); break; case HIERARCHY_4: deb_setf("alpha=4\n"); wr(DIB3000MB_REG_VIT_ALPHA, DIB3000_ALPHA_4); break; case HIERARCHY_AUTO: deb_setf("alpha=auto\n"); break; default: return -EINVAL; } deb_setf("hierarchy: "); if (ofdm->hierarchy_information == HIERARCHY_NONE) { deb_setf("none\n"); wr(DIB3000MB_REG_VIT_HRCH, DIB3000_HRCH_OFF); wr(DIB3000MB_REG_VIT_HP, DIB3000_SELECT_HP); fe_cr = ofdm->code_rate_HP; } else if (ofdm->hierarchy_information != HIERARCHY_AUTO) { deb_setf("on\n"); wr(DIB3000MB_REG_VIT_HRCH, DIB3000_HRCH_ON); wr(DIB3000MB_REG_VIT_HP, DIB3000_SELECT_LP); fe_cr = ofdm->code_rate_LP; } deb_setf("fec: "); switch (fe_cr) { case FEC_1_2: deb_setf("1_2\n"); wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_1_2); break; case FEC_2_3: deb_setf("2_3\n"); wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_2_3); break; case FEC_3_4: deb_setf("3_4\n"); wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_3_4); break; case FEC_5_6: deb_setf("5_6\n"); wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_5_6); break; case FEC_7_8: deb_setf("7_8\n"); wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_7_8); break; case FEC_NONE: deb_setf("none "); break; case FEC_AUTO: deb_setf("auto\n"); break; default: return -EINVAL; } seq = dib3000_seq [ofdm->transmission_mode == 
TRANSMISSION_MODE_AUTO] [ofdm->guard_interval == GUARD_INTERVAL_AUTO] [fep->inversion == INVERSION_AUTO]; deb_setf("seq? %d\n", seq); wr(DIB3000MB_REG_SEQ, seq); wr(DIB3000MB_REG_ISI, seq ? DIB3000MB_ISI_INHIBIT : DIB3000MB_ISI_ACTIVATE); if (ofdm->transmission_mode == TRANSMISSION_MODE_2K) { if (ofdm->guard_interval == GUARD_INTERVAL_1_8) { wr(DIB3000MB_REG_SYNC_IMPROVEMENT, DIB3000MB_SYNC_IMPROVE_2K_1_8); } else { wr(DIB3000MB_REG_SYNC_IMPROVEMENT, DIB3000MB_SYNC_IMPROVE_DEFAULT); } wr(DIB3000MB_REG_UNK_121, DIB3000MB_UNK_121_2K); } else { wr(DIB3000MB_REG_UNK_121, DIB3000MB_UNK_121_DEFAULT); } wr(DIB3000MB_REG_MOBILE_ALGO, DIB3000MB_MOBILE_ALGO_OFF); wr(DIB3000MB_REG_MOBILE_MODE_QAM, DIB3000MB_MOBILE_MODE_QAM_OFF); wr(DIB3000MB_REG_MOBILE_MODE, DIB3000MB_MOBILE_MODE_OFF); wr_foreach(dib3000mb_reg_agc_bandwidth, dib3000mb_agc_bandwidth_high); wr(DIB3000MB_REG_ISI, DIB3000MB_ISI_ACTIVATE); wr(DIB3000MB_REG_RESTART, DIB3000MB_RESTART_AGC + DIB3000MB_RESTART_CTRL); wr(DIB3000MB_REG_RESTART, DIB3000MB_RESTART_OFF); /* wait for AGC lock */ msleep(70); wr_foreach(dib3000mb_reg_agc_bandwidth, dib3000mb_agc_bandwidth_low); /* something has to be auto searched */ if (ofdm->constellation == QAM_AUTO || ofdm->hierarchy_information == HIERARCHY_AUTO || fe_cr == FEC_AUTO || fep->inversion == INVERSION_AUTO) { int as_count=0; deb_setf("autosearch enabled.\n"); wr(DIB3000MB_REG_ISI, DIB3000MB_ISI_INHIBIT); wr(DIB3000MB_REG_RESTART, DIB3000MB_RESTART_AUTO_SEARCH); wr(DIB3000MB_REG_RESTART, DIB3000MB_RESTART_OFF); while ((search_state = dib3000_search_status( rd(DIB3000MB_REG_AS_IRQ_PENDING), rd(DIB3000MB_REG_LOCK2_VALUE))) < 0 && as_count++ < 100) msleep(1); deb_setf("search_state after autosearch %d after %d checks\n",search_state,as_count); if (search_state == 1) { struct dvb_frontend_parameters feps; if (dib3000mb_get_frontend(fe, &feps) == 0) { deb_setf("reading tuning data from frontend succeeded.\n"); return dib3000mb_set_frontend(fe, &feps, 0); } } } else { 
wr(DIB3000MB_REG_RESTART, DIB3000MB_RESTART_CTRL); wr(DIB3000MB_REG_RESTART, DIB3000MB_RESTART_OFF); } return 0; } static int dib3000mb_fe_init(struct dvb_frontend* fe, int mobile_mode) { struct dib3000_state* state = fe->demodulator_priv; deb_info("dib3000mb is getting up.\n"); wr(DIB3000MB_REG_POWER_CONTROL, DIB3000MB_POWER_UP); wr(DIB3000MB_REG_RESTART, DIB3000MB_RESTART_AGC); wr(DIB3000MB_REG_RESET_DEVICE, DIB3000MB_RESET_DEVICE); wr(DIB3000MB_REG_RESET_DEVICE, DIB3000MB_RESET_DEVICE_RST); wr(DIB3000MB_REG_CLOCK, DIB3000MB_CLOCK_DEFAULT); wr(DIB3000MB_REG_ELECT_OUT_MODE, DIB3000MB_ELECT_OUT_MODE_ON); wr(DIB3000MB_REG_DDS_FREQ_MSB, DIB3000MB_DDS_FREQ_MSB); wr(DIB3000MB_REG_DDS_FREQ_LSB, DIB3000MB_DDS_FREQ_LSB); wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[2]); wr_foreach(dib3000mb_reg_impulse_noise, dib3000mb_impulse_noise_values[DIB3000MB_IMPNOISE_OFF]); wr_foreach(dib3000mb_reg_agc_gain, dib3000mb_default_agc_gain); wr(DIB3000MB_REG_PHASE_NOISE, DIB3000MB_PHASE_NOISE_DEFAULT); wr_foreach(dib3000mb_reg_phase_noise, dib3000mb_default_noise_phase); wr_foreach(dib3000mb_reg_lock_duration, dib3000mb_default_lock_duration); wr_foreach(dib3000mb_reg_agc_bandwidth, dib3000mb_agc_bandwidth_low); wr(DIB3000MB_REG_LOCK0_MASK, DIB3000MB_LOCK0_DEFAULT); wr(DIB3000MB_REG_LOCK1_MASK, DIB3000MB_LOCK1_SEARCH_4); wr(DIB3000MB_REG_LOCK2_MASK, DIB3000MB_LOCK2_DEFAULT); wr(DIB3000MB_REG_SEQ, dib3000_seq[1][1][1]); wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_8mhz); wr(DIB3000MB_REG_UNK_68, DIB3000MB_UNK_68); wr(DIB3000MB_REG_UNK_69, DIB3000MB_UNK_69); wr(DIB3000MB_REG_UNK_71, DIB3000MB_UNK_71); wr(DIB3000MB_REG_UNK_77, DIB3000MB_UNK_77); wr(DIB3000MB_REG_UNK_78, DIB3000MB_UNK_78); wr(DIB3000MB_REG_ISI, DIB3000MB_ISI_INHIBIT); wr(DIB3000MB_REG_UNK_92, DIB3000MB_UNK_92); wr(DIB3000MB_REG_UNK_96, DIB3000MB_UNK_96); wr(DIB3000MB_REG_UNK_97, DIB3000MB_UNK_97); wr(DIB3000MB_REG_UNK_106, DIB3000MB_UNK_106); wr(DIB3000MB_REG_UNK_107, DIB3000MB_UNK_107); 
wr(DIB3000MB_REG_UNK_108, DIB3000MB_UNK_108); wr(DIB3000MB_REG_UNK_122, DIB3000MB_UNK_122); wr(DIB3000MB_REG_MOBILE_MODE_QAM, DIB3000MB_MOBILE_MODE_QAM_OFF); wr(DIB3000MB_REG_BERLEN, DIB3000MB_BERLEN_DEFAULT); wr_foreach(dib3000mb_reg_filter_coeffs, dib3000mb_filter_coeffs); wr(DIB3000MB_REG_MOBILE_ALGO, DIB3000MB_MOBILE_ALGO_ON); wr(DIB3000MB_REG_MULTI_DEMOD_MSB, DIB3000MB_MULTI_DEMOD_MSB); wr(DIB3000MB_REG_MULTI_DEMOD_LSB, DIB3000MB_MULTI_DEMOD_LSB); wr(DIB3000MB_REG_OUTPUT_MODE, DIB3000MB_OUTPUT_MODE_SLAVE); wr(DIB3000MB_REG_FIFO_142, DIB3000MB_FIFO_142); wr(DIB3000MB_REG_MPEG2_OUT_MODE, DIB3000MB_MPEG2_OUT_MODE_188); wr(DIB3000MB_REG_PID_PARSE, DIB3000MB_PID_PARSE_ACTIVATE); wr(DIB3000MB_REG_FIFO, DIB3000MB_FIFO_INHIBIT); wr(DIB3000MB_REG_FIFO_146, DIB3000MB_FIFO_146); wr(DIB3000MB_REG_FIFO_147, DIB3000MB_FIFO_147); wr(DIB3000MB_REG_DATA_IN_DIVERSITY, DIB3000MB_DATA_DIVERSITY_IN_OFF); return 0; } static int dib3000mb_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *fep) { struct dib3000_state* state = fe->demodulator_priv; struct dvb_ofdm_parameters *ofdm = &fep->u.ofdm; fe_code_rate_t *cr; u16 tps_val; int inv_test1,inv_test2; u32 dds_val, threshold = 0x800000; if (!rd(DIB3000MB_REG_TPS_LOCK)) return 0; dds_val = ((rd(DIB3000MB_REG_DDS_VALUE_MSB) & 0xff) << 16) + rd(DIB3000MB_REG_DDS_VALUE_LSB); deb_getf("DDS_VAL: %x %x %x",dds_val, rd(DIB3000MB_REG_DDS_VALUE_MSB), rd(DIB3000MB_REG_DDS_VALUE_LSB)); if (dds_val < threshold) inv_test1 = 0; else if (dds_val == threshold) inv_test1 = 1; else inv_test1 = 2; dds_val = ((rd(DIB3000MB_REG_DDS_FREQ_MSB) & 0xff) << 16) + rd(DIB3000MB_REG_DDS_FREQ_LSB); deb_getf("DDS_FREQ: %x %x %x",dds_val, rd(DIB3000MB_REG_DDS_FREQ_MSB), rd(DIB3000MB_REG_DDS_FREQ_LSB)); if (dds_val < threshold) inv_test2 = 0; else if (dds_val == threshold) inv_test2 = 1; else inv_test2 = 2; fep->inversion = ((inv_test2 == 2) && (inv_test1==1 || inv_test1==0)) || ((inv_test2 == 0) && (inv_test1==1 || inv_test1==2)) ? 
INVERSION_ON : INVERSION_OFF; deb_getf("inversion %d %d, %d\n", inv_test2, inv_test1, fep->inversion); switch ((tps_val = rd(DIB3000MB_REG_TPS_QAM))) { case DIB3000_CONSTELLATION_QPSK: deb_getf("QPSK "); ofdm->constellation = QPSK; break; case DIB3000_CONSTELLATION_16QAM: deb_getf("QAM16 "); ofdm->constellation = QAM_16; break; case DIB3000_CONSTELLATION_64QAM: deb_getf("QAM64 "); ofdm->constellation = QAM_64; break; default: err("Unexpected constellation returned by TPS (%d)", tps_val); break; } deb_getf("TPS: %d\n", tps_val); if (rd(DIB3000MB_REG_TPS_HRCH)) { deb_getf("HRCH ON\n"); cr = &ofdm->code_rate_LP; ofdm->code_rate_HP = FEC_NONE; switch ((tps_val = rd(DIB3000MB_REG_TPS_VIT_ALPHA))) { case DIB3000_ALPHA_0: deb_getf("HIERARCHY_NONE "); ofdm->hierarchy_information = HIERARCHY_NONE; break; case DIB3000_ALPHA_1: deb_getf("HIERARCHY_1 "); ofdm->hierarchy_information = HIERARCHY_1; break; case DIB3000_ALPHA_2: deb_getf("HIERARCHY_2 "); ofdm->hierarchy_information = HIERARCHY_2; break; case DIB3000_ALPHA_4: deb_getf("HIERARCHY_4 "); ofdm->hierarchy_information = HIERARCHY_4; break; default: err("Unexpected ALPHA value returned by TPS (%d)", tps_val); break; } deb_getf("TPS: %d\n", tps_val); tps_val = rd(DIB3000MB_REG_TPS_CODE_RATE_LP); } else { deb_getf("HRCH OFF\n"); cr = &ofdm->code_rate_HP; ofdm->code_rate_LP = FEC_NONE; ofdm->hierarchy_information = HIERARCHY_NONE; tps_val = rd(DIB3000MB_REG_TPS_CODE_RATE_HP); } switch (tps_val) { case DIB3000_FEC_1_2: deb_getf("FEC_1_2 "); *cr = FEC_1_2; break; case DIB3000_FEC_2_3: deb_getf("FEC_2_3 "); *cr = FEC_2_3; break; case DIB3000_FEC_3_4: deb_getf("FEC_3_4 "); *cr = FEC_3_4; break; case DIB3000_FEC_5_6: deb_getf("FEC_5_6 "); *cr = FEC_4_5; break; case DIB3000_FEC_7_8: deb_getf("FEC_7_8 "); *cr = FEC_7_8; break; default: err("Unexpected FEC returned by TPS (%d)", tps_val); break; } deb_getf("TPS: %d\n",tps_val); switch ((tps_val = rd(DIB3000MB_REG_TPS_GUARD_TIME))) { case DIB3000_GUARD_TIME_1_32: 
deb_getf("GUARD_INTERVAL_1_32 "); ofdm->guard_interval = GUARD_INTERVAL_1_32; break; case DIB3000_GUARD_TIME_1_16: deb_getf("GUARD_INTERVAL_1_16 "); ofdm->guard_interval = GUARD_INTERVAL_1_16; break; case DIB3000_GUARD_TIME_1_8: deb_getf("GUARD_INTERVAL_1_8 "); ofdm->guard_interval = GUARD_INTERVAL_1_8; break; case DIB3000_GUARD_TIME_1_4: deb_getf("GUARD_INTERVAL_1_4 "); ofdm->guard_interval = GUARD_INTERVAL_1_4; break; default: err("Unexpected Guard Time returned by TPS (%d)", tps_val); break; } deb_getf("TPS: %d\n", tps_val); switch ((tps_val = rd(DIB3000MB_REG_TPS_FFT))) { case DIB3000_TRANSMISSION_MODE_2K: deb_getf("TRANSMISSION_MODE_2K "); ofdm->transmission_mode = TRANSMISSION_MODE_2K; break; case DIB3000_TRANSMISSION_MODE_8K: deb_getf("TRANSMISSION_MODE_8K "); ofdm->transmission_mode = TRANSMISSION_MODE_8K; break; default: err("unexpected transmission mode return by TPS (%d)", tps_val); break; } deb_getf("TPS: %d\n", tps_val); return 0; } static int dib3000mb_read_status(struct dvb_frontend* fe, fe_status_t *stat) { struct dib3000_state* state = fe->demodulator_priv; *stat = 0; if (rd(DIB3000MB_REG_AGC_LOCK)) *stat |= FE_HAS_SIGNAL; if (rd(DIB3000MB_REG_CARRIER_LOCK)) *stat |= FE_HAS_CARRIER; if (rd(DIB3000MB_REG_VIT_LCK)) *stat |= FE_HAS_VITERBI; if (rd(DIB3000MB_REG_TS_SYNC_LOCK)) *stat |= (FE_HAS_SYNC | FE_HAS_LOCK); deb_getf("actual status is %2x\n",*stat); deb_getf("autoval: tps: %d, qam: %d, hrch: %d, alpha: %d, hp: %d, lp: %d, guard: %d, fft: %d cell: %d\n", rd(DIB3000MB_REG_TPS_LOCK), rd(DIB3000MB_REG_TPS_QAM), rd(DIB3000MB_REG_TPS_HRCH), rd(DIB3000MB_REG_TPS_VIT_ALPHA), rd(DIB3000MB_REG_TPS_CODE_RATE_HP), rd(DIB3000MB_REG_TPS_CODE_RATE_LP), rd(DIB3000MB_REG_TPS_GUARD_TIME), rd(DIB3000MB_REG_TPS_FFT), rd(DIB3000MB_REG_TPS_CELL_ID)); //*stat = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; return 0; } static int dib3000mb_read_ber(struct dvb_frontend* fe, u32 *ber) { struct dib3000_state* state = fe->demodulator_priv; 
*ber = ((rd(DIB3000MB_REG_BER_MSB) << 16) | rd(DIB3000MB_REG_BER_LSB)); return 0; } /* see dib3000-watch dvb-apps for exact calcuations of signal_strength and snr */ static int dib3000mb_read_signal_strength(struct dvb_frontend* fe, u16 *strength) { struct dib3000_state* state = fe->demodulator_priv; *strength = rd(DIB3000MB_REG_SIGNAL_POWER) * 0xffff / 0x170; return 0; } static int dib3000mb_read_snr(struct dvb_frontend* fe, u16 *snr) { struct dib3000_state* state = fe->demodulator_priv; short sigpow = rd(DIB3000MB_REG_SIGNAL_POWER); int icipow = ((rd(DIB3000MB_REG_NOISE_POWER_MSB) & 0xff) << 16) | rd(DIB3000MB_REG_NOISE_POWER_LSB); *snr = (sigpow << 8) / ((icipow > 0) ? icipow : 1); return 0; } static int dib3000mb_read_unc_blocks(struct dvb_frontend* fe, u32 *unc) { struct dib3000_state* state = fe->demodulator_priv; *unc = rd(DIB3000MB_REG_PACKET_ERROR_RATE); return 0; } static int dib3000mb_sleep(struct dvb_frontend* fe) { struct dib3000_state* state = fe->demodulator_priv; deb_info("dib3000mb is going to bed.\n"); wr(DIB3000MB_REG_POWER_CONTROL, DIB3000MB_POWER_DOWN); return 0; } static int dib3000mb_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings *tune) { tune->min_delay_ms = 800; return 0; } static int dib3000mb_fe_init_nonmobile(struct dvb_frontend* fe) { return dib3000mb_fe_init(fe, 0); } static int dib3000mb_set_frontend_and_tuner(struct dvb_frontend* fe, struct dvb_frontend_parameters *fep) { return dib3000mb_set_frontend(fe, fep, 1); } static void dib3000mb_release(struct dvb_frontend* fe) { struct dib3000_state *state = fe->demodulator_priv; kfree(state); } /* pid filter and transfer stuff */ static int dib3000mb_pid_control(struct dvb_frontend *fe,int index, int pid,int onoff) { struct dib3000_state *state = fe->demodulator_priv; pid = (onoff ? 
pid | DIB3000_ACTIVATE_PID_FILTERING : 0); wr(index+DIB3000MB_REG_FIRST_PID,pid); return 0; } static int dib3000mb_fifo_control(struct dvb_frontend *fe, int onoff) { struct dib3000_state *state = fe->demodulator_priv; deb_xfer("%s fifo\n",onoff ? "enabling" : "disabling"); if (onoff) { wr(DIB3000MB_REG_FIFO, DIB3000MB_FIFO_ACTIVATE); } else { wr(DIB3000MB_REG_FIFO, DIB3000MB_FIFO_INHIBIT); } return 0; } static int dib3000mb_pid_parse(struct dvb_frontend *fe, int onoff) { struct dib3000_state *state = fe->demodulator_priv; deb_xfer("%s pid parsing\n",onoff ? "enabling" : "disabling"); wr(DIB3000MB_REG_PID_PARSE,onoff); return 0; } static int dib3000mb_tuner_pass_ctrl(struct dvb_frontend *fe, int onoff, u8 pll_addr) { struct dib3000_state *state = fe->demodulator_priv; if (onoff) { wr(DIB3000MB_REG_TUNER, DIB3000_TUNER_WRITE_ENABLE(pll_addr)); } else { wr(DIB3000MB_REG_TUNER, DIB3000_TUNER_WRITE_DISABLE(pll_addr)); } return 0; } static struct dvb_frontend_ops dib3000mb_ops; struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config, struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops) { struct dib3000_state* state = NULL; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct dib3000_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ state->i2c = i2c; memcpy(&state->config,config,sizeof(struct dib3000_config)); /* check for the correct demod */ if (rd(DIB3000_REG_MANUFACTOR_ID) != DIB3000_I2C_ID_DIBCOM) goto error; if (rd(DIB3000_REG_DEVICE_ID) != DIB3000MB_DEVICE_ID) goto error; /* create dvb_frontend */ memcpy(&state->frontend.ops, &dib3000mb_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; /* set the xfer operations */ xfer_ops->pid_parse = dib3000mb_pid_parse; xfer_ops->fifo_ctrl = dib3000mb_fifo_control; xfer_ops->pid_ctrl = dib3000mb_pid_control; xfer_ops->tuner_pass_ctrl = dib3000mb_tuner_pass_ctrl; return &state->frontend; error: kfree(state); return NULL; } static 
struct dvb_frontend_ops dib3000mb_ops = { .info = { .name = "DiBcom 3000M-B DVB-T", .type = FE_OFDM, .frequency_min = 44250000, .frequency_max = 867250000, .frequency_stepsize = 62500, .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_RECOVER | FE_CAN_HIERARCHY_AUTO, }, .release = dib3000mb_release, .init = dib3000mb_fe_init_nonmobile, .sleep = dib3000mb_sleep, .set_frontend = dib3000mb_set_frontend_and_tuner, .get_frontend = dib3000mb_get_frontend, .get_tune_settings = dib3000mb_fe_get_tune_settings, .read_status = dib3000mb_read_status, .read_ber = dib3000mb_read_ber, .read_signal_strength = dib3000mb_read_signal_strength, .read_snr = dib3000mb_read_snr, .read_ucblocks = dib3000mb_read_unc_blocks, }; MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(dib3000mb_attach);
gpl-2.0
CyanHacker-Lollipop/kernel_lge_hammerhead
arch/powerpc/kernel/crash_dump.c
4021
4336
/* * Routines for doing kexec-based kdump. * * Copyright (C) 2005, IBM Corp. * * Created by: Michael Ellerman * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ #undef DEBUG #include <linux/crash_dump.h> #include <linux/bootmem.h> #include <linux/memblock.h> #include <asm/code-patching.h> #include <asm/kdump.h> #include <asm/prom.h> #include <asm/firmware.h> #include <asm/uaccess.h> #include <asm/rtas.h> #ifdef DEBUG #include <asm/udbg.h> #define DBG(fmt...) udbg_printf(fmt) #else #define DBG(fmt...) #endif #ifndef CONFIG_NONSTATIC_KERNEL void __init reserve_kdump_trampoline(void) { memblock_reserve(0, KDUMP_RESERVE_LIMIT); } static void __init create_trampoline(unsigned long addr) { unsigned int *p = (unsigned int *)addr; /* The maximum range of a single instruction branch, is the current * instruction's address + (32 MB - 4) bytes. For the trampoline we * need to branch to current address + 32 MB. So we insert a nop at * the trampoline address, then the next instruction (+ 4 bytes) * does a branch to (32 MB - 4). The net effect is that when we * branch to "addr" we jump to ("addr" + 32 MB). Although it requires * two instructions it doesn't require any registers. 
*/ patch_instruction(p, PPC_INST_NOP); patch_branch(++p, addr + PHYSICAL_START, 0); } void __init setup_kdump_trampoline(void) { unsigned long i; DBG(" -> setup_kdump_trampoline()\n"); for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) { create_trampoline(i); } #ifdef CONFIG_PPC_PSERIES create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START); create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START); #endif /* CONFIG_PPC_PSERIES */ DBG(" <- setup_kdump_trampoline()\n"); } #endif /* CONFIG_NONSTATIC_KERNEL */ static int __init parse_savemaxmem(char *p) { if (p) saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1; return 1; } __setup("savemaxmem=", parse_savemaxmem); static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize, unsigned long offset, int userbuf) { if (userbuf) { if (copy_to_user((char __user *)buf, (vaddr + offset), csize)) return -EFAULT; } else memcpy(buf, (vaddr + offset), csize); return csize; } /** * copy_oldmem_page - copy one page from "oldmem" * @pfn: page frame number to be copied * @buf: target memory address for the copy; this can be in kernel address * space or user address space (see @userbuf) * @csize: number of bytes to copy * @offset: offset in bytes into the page (based on pfn) to begin the copy * @userbuf: if set, @buf is in user address space, use copy_to_user(), * otherwise @buf is in kernel address space, use memcpy(). * * Copy a page from "oldmem". For this page, there is no pte mapped * in the current kernel. We stitch up a pte, similar to kmap_atomic. 
*/ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, unsigned long offset, int userbuf) { void *vaddr; if (!csize) return 0; csize = min_t(size_t, csize, PAGE_SIZE); if ((min_low_pfn < pfn) && (pfn < max_pfn)) { vaddr = __va(pfn << PAGE_SHIFT); csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); } else { vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0); csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); iounmap(vaddr); } return csize; } #ifdef CONFIG_PPC_RTAS /* * The crashkernel region will almost always overlap the RTAS region, so * we have to be careful when shrinking the crashkernel region. */ void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) { unsigned long addr; const u32 *basep, *sizep; unsigned int rtas_start = 0, rtas_end = 0; basep = of_get_property(rtas.dev, "linux,rtas-base", NULL); sizep = of_get_property(rtas.dev, "rtas-size", NULL); if (basep && sizep) { rtas_start = *basep; rtas_end = *basep + *sizep; } for (addr = begin; addr < end; addr += PAGE_SIZE) { /* Does this page overlap with the RTAS region? */ if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start)) continue; ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT)); init_page_count(pfn_to_page(addr >> PAGE_SHIFT)); free_page((unsigned long)__va(addr)); totalram_pages++; } } #endif
gpl-2.0
TheKang/kernel_lge_hammerhead
tools/perf/util/hist.c
4789
32896
#include "annotate.h" #include "util.h" #include "build-id.h" #include "hist.h" #include "session.h" #include "sort.h" #include <math.h> static bool hists__filter_entry_by_dso(struct hists *hists, struct hist_entry *he); static bool hists__filter_entry_by_thread(struct hists *hists, struct hist_entry *he); static bool hists__filter_entry_by_symbol(struct hists *hists, struct hist_entry *he); enum hist_filter { HIST_FILTER__DSO, HIST_FILTER__THREAD, HIST_FILTER__PARENT, HIST_FILTER__SYMBOL, }; struct callchain_param callchain_param = { .mode = CHAIN_GRAPH_REL, .min_percent = 0.5, .order = ORDER_CALLEE }; u16 hists__col_len(struct hists *hists, enum hist_column col) { return hists->col_len[col]; } void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len) { hists->col_len[col] = len; } bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len) { if (len > hists__col_len(hists, col)) { hists__set_col_len(hists, col, len); return true; } return false; } static void hists__reset_col_len(struct hists *hists) { enum hist_column col; for (col = 0; col < HISTC_NR_COLS; ++col) hists__set_col_len(hists, col, 0); } static void hists__set_unres_dso_col_len(struct hists *hists, int dso) { const unsigned int unresolved_col_width = BITS_PER_LONG / 4; if (hists__col_len(hists, dso) < unresolved_col_width && !symbol_conf.col_width_list_str && !symbol_conf.field_sep && !symbol_conf.dso_list) hists__set_col_len(hists, dso, unresolved_col_width); } static void hists__calc_col_len(struct hists *hists, struct hist_entry *h) { const unsigned int unresolved_col_width = BITS_PER_LONG / 4; u16 len; if (h->ms.sym) hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4); else hists__set_unres_dso_col_len(hists, HISTC_DSO); len = thread__comm_len(h->thread); if (hists__new_col_len(hists, HISTC_COMM, len)) hists__set_col_len(hists, HISTC_THREAD, len + 6); if (h->ms.map) { len = dso__name_len(h->ms.map->dso); hists__new_col_len(hists, HISTC_DSO, len); } 
if (h->branch_info) { int symlen; /* * +4 accounts for '[x] ' priv level info * +2 account of 0x prefix on raw addresses */ if (h->branch_info->from.sym) { symlen = (int)h->branch_info->from.sym->namelen + 4; hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); symlen = dso__name_len(h->branch_info->from.map->dso); hists__new_col_len(hists, HISTC_DSO_FROM, symlen); } else { symlen = unresolved_col_width + 4 + 2; hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM); } if (h->branch_info->to.sym) { symlen = (int)h->branch_info->to.sym->namelen + 4; hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); symlen = dso__name_len(h->branch_info->to.map->dso); hists__new_col_len(hists, HISTC_DSO_TO, symlen); } else { symlen = unresolved_col_width + 4 + 2; hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); hists__set_unres_dso_col_len(hists, HISTC_DSO_TO); } } } static void hist_entry__add_cpumode_period(struct hist_entry *he, unsigned int cpumode, u64 period) { switch (cpumode) { case PERF_RECORD_MISC_KERNEL: he->period_sys += period; break; case PERF_RECORD_MISC_USER: he->period_us += period; break; case PERF_RECORD_MISC_GUEST_KERNEL: he->period_guest_sys += period; break; case PERF_RECORD_MISC_GUEST_USER: he->period_guest_us += period; break; default: break; } } static void hist_entry__decay(struct hist_entry *he) { he->period = (he->period * 7) / 8; he->nr_events = (he->nr_events * 7) / 8; } static bool hists__decay_entry(struct hists *hists, struct hist_entry *he) { u64 prev_period = he->period; if (prev_period == 0) return true; hist_entry__decay(he); if (!he->filtered) hists->stats.total_period -= prev_period - he->period; return he->period == 0; } static void __hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel, bool threaded) { struct rb_node *next = rb_first(&hists->entries); struct hist_entry *n; while (next) { n = rb_entry(next, struct hist_entry, rb_node); next = rb_next(&n->rb_node); /* 
* We may be annotating this, for instance, so keep it here in * case some it gets new samples, we'll eventually free it when * the user stops browsing and it agains gets fully decayed. */ if (((zap_user && n->level == '.') || (zap_kernel && n->level != '.') || hists__decay_entry(hists, n)) && !n->used) { rb_erase(&n->rb_node, &hists->entries); if (sort__need_collapse || threaded) rb_erase(&n->rb_node_in, &hists->entries_collapsed); hist_entry__free(n); --hists->nr_entries; } } } void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel) { return __hists__decay_entries(hists, zap_user, zap_kernel, false); } void hists__decay_entries_threaded(struct hists *hists, bool zap_user, bool zap_kernel) { return __hists__decay_entries(hists, zap_user, zap_kernel, true); } /* * histogram, sorted on item, collects periods */ static struct hist_entry *hist_entry__new(struct hist_entry *template) { size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0; struct hist_entry *he = malloc(sizeof(*he) + callchain_size); if (he != NULL) { *he = *template; he->nr_events = 1; if (he->ms.map) he->ms.map->referenced = true; if (symbol_conf.use_callchain) callchain_init(he->callchain); } return he; } static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h) { if (!h->filtered) { hists__calc_col_len(hists, h); ++hists->nr_entries; hists->stats.total_period += h->period; } } static u8 symbol__parent_filter(const struct symbol *parent) { if (symbol_conf.exclude_other && parent == NULL) return 1 << HIST_FILTER__PARENT; return 0; } static struct hist_entry *add_hist_entry(struct hists *hists, struct hist_entry *entry, struct addr_location *al, u64 period) { struct rb_node **p; struct rb_node *parent = NULL; struct hist_entry *he; int cmp; pthread_mutex_lock(&hists->lock); p = &hists->entries_in->rb_node; while (*p != NULL) { parent = *p; he = rb_entry(parent, struct hist_entry, rb_node_in); cmp = hist_entry__cmp(entry, he); if 
(!cmp) { he->period += period; ++he->nr_events; /* If the map of an existing hist_entry has * become out-of-date due to an exec() or * similar, update it. Otherwise we will * mis-adjust symbol addresses when computing * the history counter to increment. */ if (he->ms.map != entry->ms.map) { he->ms.map = entry->ms.map; if (he->ms.map) he->ms.map->referenced = true; } goto out; } if (cmp < 0) p = &(*p)->rb_left; else p = &(*p)->rb_right; } he = hist_entry__new(entry); if (!he) goto out_unlock; rb_link_node(&he->rb_node_in, parent, p); rb_insert_color(&he->rb_node_in, hists->entries_in); out: hist_entry__add_cpumode_period(he, al->cpumode, period); out_unlock: pthread_mutex_unlock(&hists->lock); return he; } struct hist_entry *__hists__add_branch_entry(struct hists *self, struct addr_location *al, struct symbol *sym_parent, struct branch_info *bi, u64 period) { struct hist_entry entry = { .thread = al->thread, .ms = { .map = bi->to.map, .sym = bi->to.sym, }, .cpu = al->cpu, .ip = bi->to.addr, .level = al->level, .period = period, .parent = sym_parent, .filtered = symbol__parent_filter(sym_parent), .branch_info = bi, }; return add_hist_entry(self, &entry, al, period); } struct hist_entry *__hists__add_entry(struct hists *self, struct addr_location *al, struct symbol *sym_parent, u64 period) { struct hist_entry entry = { .thread = al->thread, .ms = { .map = al->map, .sym = al->sym, }, .cpu = al->cpu, .ip = al->addr, .level = al->level, .period = period, .parent = sym_parent, .filtered = symbol__parent_filter(sym_parent), }; return add_hist_entry(self, &entry, al, period); } int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) { struct sort_entry *se; int64_t cmp = 0; list_for_each_entry(se, &hist_entry__sort_list, list) { cmp = se->se_cmp(left, right); if (cmp) break; } return cmp; } int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) { struct sort_entry *se; int64_t cmp = 0; list_for_each_entry(se, 
&hist_entry__sort_list, list) { int64_t (*f)(struct hist_entry *, struct hist_entry *); f = se->se_collapse ?: se->se_cmp; cmp = f(left, right); if (cmp) break; } return cmp; } void hist_entry__free(struct hist_entry *he) { free(he); } /* * collapse the histogram */ static bool hists__collapse_insert_entry(struct hists *hists, struct rb_root *root, struct hist_entry *he) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct hist_entry *iter; int64_t cmp; while (*p != NULL) { parent = *p; iter = rb_entry(parent, struct hist_entry, rb_node_in); cmp = hist_entry__collapse(iter, he); if (!cmp) { iter->period += he->period; iter->nr_events += he->nr_events; if (symbol_conf.use_callchain) { callchain_cursor_reset(&hists->callchain_cursor); callchain_merge(&hists->callchain_cursor, iter->callchain, he->callchain); } hist_entry__free(he); return false; } if (cmp < 0) p = &(*p)->rb_left; else p = &(*p)->rb_right; } rb_link_node(&he->rb_node_in, parent, p); rb_insert_color(&he->rb_node_in, root); return true; } static struct rb_root *hists__get_rotate_entries_in(struct hists *hists) { struct rb_root *root; pthread_mutex_lock(&hists->lock); root = hists->entries_in; if (++hists->entries_in > &hists->entries_in_array[1]) hists->entries_in = &hists->entries_in_array[0]; pthread_mutex_unlock(&hists->lock); return root; } static void hists__apply_filters(struct hists *hists, struct hist_entry *he) { hists__filter_entry_by_dso(hists, he); hists__filter_entry_by_thread(hists, he); hists__filter_entry_by_symbol(hists, he); } static void __hists__collapse_resort(struct hists *hists, bool threaded) { struct rb_root *root; struct rb_node *next; struct hist_entry *n; if (!sort__need_collapse && !threaded) return; root = hists__get_rotate_entries_in(hists); next = rb_first(root); while (next) { n = rb_entry(next, struct hist_entry, rb_node_in); next = rb_next(&n->rb_node_in); rb_erase(&n->rb_node_in, root); if (hists__collapse_insert_entry(hists, 
&hists->entries_collapsed, n)) { /* * If it wasn't combined with one of the entries already * collapsed, we need to apply the filters that may have * been set by, say, the hist_browser. */ hists__apply_filters(hists, n); } } } void hists__collapse_resort(struct hists *hists) { return __hists__collapse_resort(hists, false); } void hists__collapse_resort_threaded(struct hists *hists) { return __hists__collapse_resort(hists, true); } /* * reverse the map, sort on period. */ static void __hists__insert_output_entry(struct rb_root *entries, struct hist_entry *he, u64 min_callchain_hits) { struct rb_node **p = &entries->rb_node; struct rb_node *parent = NULL; struct hist_entry *iter; if (symbol_conf.use_callchain) callchain_param.sort(&he->sorted_chain, he->callchain, min_callchain_hits, &callchain_param); while (*p != NULL) { parent = *p; iter = rb_entry(parent, struct hist_entry, rb_node); if (he->period > iter->period) p = &(*p)->rb_left; else p = &(*p)->rb_right; } rb_link_node(&he->rb_node, parent, p); rb_insert_color(&he->rb_node, entries); } static void __hists__output_resort(struct hists *hists, bool threaded) { struct rb_root *root; struct rb_node *next; struct hist_entry *n; u64 min_callchain_hits; min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100); if (sort__need_collapse || threaded) root = &hists->entries_collapsed; else root = hists->entries_in; next = rb_first(root); hists->entries = RB_ROOT; hists->nr_entries = 0; hists->stats.total_period = 0; hists__reset_col_len(hists); while (next) { n = rb_entry(next, struct hist_entry, rb_node_in); next = rb_next(&n->rb_node_in); __hists__insert_output_entry(&hists->entries, n, min_callchain_hits); hists__inc_nr_entries(hists, n); } } void hists__output_resort(struct hists *hists) { return __hists__output_resort(hists, false); } void hists__output_resort_threaded(struct hists *hists) { return __hists__output_resort(hists, true); } static size_t callchain__fprintf_left_margin(FILE 
*fp, int left_margin) { int i; int ret = fprintf(fp, " "); for (i = 0; i < left_margin; i++) ret += fprintf(fp, " "); return ret; } static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask, int left_margin) { int i; size_t ret = callchain__fprintf_left_margin(fp, left_margin); for (i = 0; i < depth; i++) if (depth_mask & (1 << i)) ret += fprintf(fp, "| "); else ret += fprintf(fp, " "); ret += fprintf(fp, "\n"); return ret; } static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth, int depth_mask, int period, u64 total_samples, u64 hits, int left_margin) { int i; size_t ret = 0; ret += callchain__fprintf_left_margin(fp, left_margin); for (i = 0; i < depth; i++) { if (depth_mask & (1 << i)) ret += fprintf(fp, "|"); else ret += fprintf(fp, " "); if (!period && i == depth - 1) { double percent; percent = hits * 100.0 / total_samples; ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent); } else ret += fprintf(fp, "%s", " "); } if (chain->ms.sym) ret += fprintf(fp, "%s\n", chain->ms.sym->name); else ret += fprintf(fp, "%p\n", (void *)(long)chain->ip); return ret; } static struct symbol *rem_sq_bracket; static struct callchain_list rem_hits; static void init_rem_hits(void) { rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6); if (!rem_sq_bracket) { fprintf(stderr, "Not enough memory to display remaining hits\n"); return; } strcpy(rem_sq_bracket->name, "[...]"); rem_hits.ms.sym = rem_sq_bracket; } static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root, u64 total_samples, int depth, int depth_mask, int left_margin) { struct rb_node *node, *next; struct callchain_node *child; struct callchain_list *chain; int new_depth_mask = depth_mask; u64 remaining; size_t ret = 0; int i; uint entries_printed = 0; remaining = total_samples; node = rb_first(root); while (node) { u64 new_total; u64 cumul; child = rb_entry(node, struct callchain_node, rb_node); cumul = callchain_cumul_hits(child); remaining -= cumul; 
/* * The depth mask manages the output of pipes that show * the depth. We don't want to keep the pipes of the current * level for the last child of this depth. * Except if we have remaining filtered hits. They will * supersede the last child */ next = rb_next(node); if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining)) new_depth_mask &= ~(1 << (depth - 1)); /* * But we keep the older depth mask for the line separator * to keep the level link until we reach the last child */ ret += ipchain__fprintf_graph_line(fp, depth, depth_mask, left_margin); i = 0; list_for_each_entry(chain, &child->val, list) { ret += ipchain__fprintf_graph(fp, chain, depth, new_depth_mask, i++, total_samples, cumul, left_margin); } if (callchain_param.mode == CHAIN_GRAPH_REL) new_total = child->children_hit; else new_total = total_samples; ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total, depth + 1, new_depth_mask | (1 << depth), left_margin); node = next; if (++entries_printed == callchain_param.print_limit) break; } if (callchain_param.mode == CHAIN_GRAPH_REL && remaining && remaining != total_samples) { if (!rem_sq_bracket) return ret; new_depth_mask &= ~(1 << (depth - 1)); ret += ipchain__fprintf_graph(fp, &rem_hits, depth, new_depth_mask, 0, total_samples, remaining, left_margin); } return ret; } static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root, u64 total_samples, int left_margin) { struct callchain_node *cnode; struct callchain_list *chain; u32 entries_printed = 0; bool printed = false; struct rb_node *node; int i = 0; int ret; /* * If have one single callchain root, don't bother printing * its percentage (100 % in fractal mode and the same percentage * than the hist in graph mode). This also avoid one level of column. 
*/ node = rb_first(root); if (node && !rb_next(node)) { cnode = rb_entry(node, struct callchain_node, rb_node); list_for_each_entry(chain, &cnode->val, list) { /* * If we sort by symbol, the first entry is the same than * the symbol. No need to print it otherwise it appears as * displayed twice. */ if (!i++ && sort__first_dimension == SORT_SYM) continue; if (!printed) { ret += callchain__fprintf_left_margin(fp, left_margin); ret += fprintf(fp, "|\n"); ret += callchain__fprintf_left_margin(fp, left_margin); ret += fprintf(fp, "---"); left_margin += 3; printed = true; } else ret += callchain__fprintf_left_margin(fp, left_margin); if (chain->ms.sym) ret += fprintf(fp, " %s\n", chain->ms.sym->name); else ret += fprintf(fp, " %p\n", (void *)(long)chain->ip); if (++entries_printed == callchain_param.print_limit) break; } root = &cnode->rb_root; } return __callchain__fprintf_graph(fp, root, total_samples, 1, 1, left_margin); } static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *self, u64 total_samples) { struct callchain_list *chain; size_t ret = 0; if (!self) return 0; ret += __callchain__fprintf_flat(fp, self->parent, total_samples); list_for_each_entry(chain, &self->val, list) { if (chain->ip >= PERF_CONTEXT_MAX) continue; if (chain->ms.sym) ret += fprintf(fp, " %s\n", chain->ms.sym->name); else ret += fprintf(fp, " %p\n", (void *)(long)chain->ip); } return ret; } static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self, u64 total_samples) { size_t ret = 0; u32 entries_printed = 0; struct rb_node *rb_node; struct callchain_node *chain; rb_node = rb_first(self); while (rb_node) { double percent; chain = rb_entry(rb_node, struct callchain_node, rb_node); percent = chain->hit * 100.0 / total_samples; ret = percent_color_fprintf(fp, " %6.2f%%\n", percent); ret += __callchain__fprintf_flat(fp, chain, total_samples); ret += fprintf(fp, "\n"); if (++entries_printed == callchain_param.print_limit) break; rb_node = rb_next(rb_node); } return 
ret; } static size_t hist_entry_callchain__fprintf(struct hist_entry *he, u64 total_samples, int left_margin, FILE *fp) { switch (callchain_param.mode) { case CHAIN_GRAPH_REL: return callchain__fprintf_graph(fp, &he->sorted_chain, he->period, left_margin); break; case CHAIN_GRAPH_ABS: return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples, left_margin); break; case CHAIN_FLAT: return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples); break; case CHAIN_NONE: break; default: pr_err("Bad callchain mode\n"); } return 0; } void hists__output_recalc_col_len(struct hists *hists, int max_rows) { struct rb_node *next = rb_first(&hists->entries); struct hist_entry *n; int row = 0; hists__reset_col_len(hists); while (next && row++ < max_rows) { n = rb_entry(next, struct hist_entry, rb_node); if (!n->filtered) hists__calc_col_len(hists, n); next = rb_next(&n->rb_node); } } static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s, size_t size, struct hists *pair_hists, bool show_displacement, long displacement, bool color, u64 total_period) { u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us; u64 nr_events; const char *sep = symbol_conf.field_sep; int ret; if (symbol_conf.exclude_other && !he->parent) return 0; if (pair_hists) { period = he->pair ? he->pair->period : 0; nr_events = he->pair ? he->pair->nr_events : 0; total = pair_hists->stats.total_period; period_sys = he->pair ? he->pair->period_sys : 0; period_us = he->pair ? he->pair->period_us : 0; period_guest_sys = he->pair ? he->pair->period_guest_sys : 0; period_guest_us = he->pair ? he->pair->period_guest_us : 0; } else { period = he->period; nr_events = he->nr_events; total = total_period; period_sys = he->period_sys; period_us = he->period_us; period_guest_sys = he->period_guest_sys; period_guest_us = he->period_guest_us; } if (total) { if (color) ret = percent_color_snprintf(s, size, sep ? 
"%.2f" : " %6.2f%%", (period * 100.0) / total); else ret = scnprintf(s, size, sep ? "%.2f" : " %6.2f%%", (period * 100.0) / total); if (symbol_conf.show_cpu_utilization) { ret += percent_color_snprintf(s + ret, size - ret, sep ? "%.2f" : " %6.2f%%", (period_sys * 100.0) / total); ret += percent_color_snprintf(s + ret, size - ret, sep ? "%.2f" : " %6.2f%%", (period_us * 100.0) / total); if (perf_guest) { ret += percent_color_snprintf(s + ret, size - ret, sep ? "%.2f" : " %6.2f%%", (period_guest_sys * 100.0) / total); ret += percent_color_snprintf(s + ret, size - ret, sep ? "%.2f" : " %6.2f%%", (period_guest_us * 100.0) / total); } } } else ret = scnprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period); if (symbol_conf.show_nr_samples) { if (sep) ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events); else ret += scnprintf(s + ret, size - ret, "%11" PRIu64, nr_events); } if (symbol_conf.show_total_period) { if (sep) ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period); else ret += scnprintf(s + ret, size - ret, " %12" PRIu64, period); } if (pair_hists) { char bf[32]; double old_percent = 0, new_percent = 0, diff; if (total > 0) old_percent = (period * 100.0) / total; if (total_period > 0) new_percent = (he->period * 100.0) / total_period; diff = new_percent - old_percent; if (fabs(diff) >= 0.01) scnprintf(bf, sizeof(bf), "%+4.2F%%", diff); else scnprintf(bf, sizeof(bf), " "); if (sep) ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf); else ret += scnprintf(s + ret, size - ret, "%11.11s", bf); if (show_displacement) { if (displacement) scnprintf(bf, sizeof(bf), "%+4ld", displacement); else scnprintf(bf, sizeof(bf), " "); if (sep) ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf); else ret += scnprintf(s + ret, size - ret, "%6.6s", bf); } } return ret; } int hist_entry__snprintf(struct hist_entry *he, char *s, size_t size, struct hists *hists) { const char *sep = symbol_conf.field_sep; struct sort_entry *se; int ret = 0; 
list_for_each_entry(se, &hist_entry__sort_list, list) { if (se->elide) continue; ret += scnprintf(s + ret, size - ret, "%s", sep ?: " "); ret += se->se_snprintf(he, s + ret, size - ret, hists__col_len(hists, se->se_width_idx)); } return ret; } static int hist_entry__fprintf(struct hist_entry *he, size_t size, struct hists *hists, struct hists *pair_hists, bool show_displacement, long displacement, u64 total_period, FILE *fp) { char bf[512]; int ret; if (size == 0 || size > sizeof(bf)) size = sizeof(bf); ret = hist_entry__pcnt_snprintf(he, bf, size, pair_hists, show_displacement, displacement, true, total_period); hist_entry__snprintf(he, bf + ret, size - ret, hists); return fprintf(fp, "%s\n", bf); } static size_t hist_entry__fprintf_callchain(struct hist_entry *he, struct hists *hists, u64 total_period, FILE *fp) { int left_margin = 0; if (sort__first_dimension == SORT_COMM) { struct sort_entry *se = list_first_entry(&hist_entry__sort_list, typeof(*se), list); left_margin = hists__col_len(hists, se->se_width_idx); left_margin -= thread__comm_len(he->thread); } return hist_entry_callchain__fprintf(he, total_period, left_margin, fp); } size_t hists__fprintf(struct hists *hists, struct hists *pair, bool show_displacement, bool show_header, int max_rows, int max_cols, FILE *fp) { struct sort_entry *se; struct rb_node *nd; size_t ret = 0; u64 total_period; unsigned long position = 1; long displacement = 0; unsigned int width; const char *sep = symbol_conf.field_sep; const char *col_width = symbol_conf.col_width_list_str; int nr_rows = 0; init_rem_hits(); if (!show_header) goto print_entries; fprintf(fp, "# %s", pair ? 
"Baseline" : "Overhead"); if (symbol_conf.show_cpu_utilization) { if (sep) { ret += fprintf(fp, "%csys", *sep); ret += fprintf(fp, "%cus", *sep); if (perf_guest) { ret += fprintf(fp, "%cguest sys", *sep); ret += fprintf(fp, "%cguest us", *sep); } } else { ret += fprintf(fp, " sys "); ret += fprintf(fp, " us "); if (perf_guest) { ret += fprintf(fp, " guest sys "); ret += fprintf(fp, " guest us "); } } } if (symbol_conf.show_nr_samples) { if (sep) fprintf(fp, "%cSamples", *sep); else fputs(" Samples ", fp); } if (symbol_conf.show_total_period) { if (sep) ret += fprintf(fp, "%cPeriod", *sep); else ret += fprintf(fp, " Period "); } if (pair) { if (sep) ret += fprintf(fp, "%cDelta", *sep); else ret += fprintf(fp, " Delta "); if (show_displacement) { if (sep) ret += fprintf(fp, "%cDisplacement", *sep); else ret += fprintf(fp, " Displ"); } } list_for_each_entry(se, &hist_entry__sort_list, list) { if (se->elide) continue; if (sep) { fprintf(fp, "%c%s", *sep, se->se_header); continue; } width = strlen(se->se_header); if (symbol_conf.col_width_list_str) { if (col_width) { hists__set_col_len(hists, se->se_width_idx, atoi(col_width)); col_width = strchr(col_width, ','); if (col_width) ++col_width; } } if (!hists__new_col_len(hists, se->se_width_idx, width)) width = hists__col_len(hists, se->se_width_idx); fprintf(fp, " %*s", width, se->se_header); } fprintf(fp, "\n"); if (max_rows && ++nr_rows >= max_rows) goto out; if (sep) goto print_entries; fprintf(fp, "# ........"); if (symbol_conf.show_cpu_utilization) fprintf(fp, " ....... 
......."); if (symbol_conf.show_nr_samples) fprintf(fp, " .........."); if (symbol_conf.show_total_period) fprintf(fp, " ............"); if (pair) { fprintf(fp, " .........."); if (show_displacement) fprintf(fp, " ....."); } list_for_each_entry(se, &hist_entry__sort_list, list) { unsigned int i; if (se->elide) continue; fprintf(fp, " "); width = hists__col_len(hists, se->se_width_idx); if (width == 0) width = strlen(se->se_header); for (i = 0; i < width; i++) fprintf(fp, "."); } fprintf(fp, "\n"); if (max_rows && ++nr_rows >= max_rows) goto out; fprintf(fp, "#\n"); if (max_rows && ++nr_rows >= max_rows) goto out; print_entries: total_period = hists->stats.total_period; for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); if (h->filtered) continue; if (show_displacement) { if (h->pair != NULL) displacement = ((long)h->pair->position - (long)position); else displacement = 0; ++position; } ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement, displacement, total_period, fp); if (symbol_conf.use_callchain) ret += hist_entry__fprintf_callchain(h, hists, total_period, fp); if (max_rows && ++nr_rows >= max_rows) goto out; if (h->ms.map == NULL && verbose > 1) { __map_groups__fprintf_maps(&h->thread->mg, MAP__FUNCTION, verbose, fp); fprintf(fp, "%.10s end\n", graph_dotted_line); } } out: free(rem_sq_bracket); return ret; } /* * See hists__fprintf to match the column widths */ unsigned int hists__sort_list_width(struct hists *hists) { struct sort_entry *se; int ret = 9; /* total % */ if (symbol_conf.show_cpu_utilization) { ret += 7; /* count_sys % */ ret += 6; /* count_us % */ if (perf_guest) { ret += 13; /* count_guest_sys % */ ret += 12; /* count_guest_us % */ } } if (symbol_conf.show_nr_samples) ret += 11; if (symbol_conf.show_total_period) ret += 13; list_for_each_entry(se, &hist_entry__sort_list, list) if (!se->elide) ret += 2 + hists__col_len(hists, se->se_width_idx); if 
(verbose) /* Addr + origin */ ret += 3 + BITS_PER_LONG / 4; return ret; } static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h, enum hist_filter filter) { h->filtered &= ~(1 << filter); if (h->filtered) return; ++hists->nr_entries; if (h->ms.unfolded) hists->nr_entries += h->nr_rows; h->row_offset = 0; hists->stats.total_period += h->period; hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events; hists__calc_col_len(hists, h); } static bool hists__filter_entry_by_dso(struct hists *hists, struct hist_entry *he) { if (hists->dso_filter != NULL && (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) { he->filtered |= (1 << HIST_FILTER__DSO); return true; } return false; } void hists__filter_by_dso(struct hists *hists) { struct rb_node *nd; hists->nr_entries = hists->stats.total_period = 0; hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0; hists__reset_col_len(hists); for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); if (symbol_conf.exclude_other && !h->parent) continue; if (hists__filter_entry_by_dso(hists, h)) continue; hists__remove_entry_filter(hists, h, HIST_FILTER__DSO); } } static bool hists__filter_entry_by_thread(struct hists *hists, struct hist_entry *he) { if (hists->thread_filter != NULL && he->thread != hists->thread_filter) { he->filtered |= (1 << HIST_FILTER__THREAD); return true; } return false; } void hists__filter_by_thread(struct hists *hists) { struct rb_node *nd; hists->nr_entries = hists->stats.total_period = 0; hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0; hists__reset_col_len(hists); for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); if (hists__filter_entry_by_thread(hists, h)) continue; hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD); } } static bool hists__filter_entry_by_symbol(struct hists *hists, struct hist_entry *he) { if 
(hists->symbol_filter_str != NULL && (!he->ms.sym || strstr(he->ms.sym->name, hists->symbol_filter_str) == NULL)) { he->filtered |= (1 << HIST_FILTER__SYMBOL); return true; } return false; } void hists__filter_by_symbol(struct hists *hists) { struct rb_node *nd; hists->nr_entries = hists->stats.total_period = 0; hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0; hists__reset_col_len(hists); for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); if (hists__filter_entry_by_symbol(hists, h)) continue; hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL); } } int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip) { return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip); } int hist_entry__annotate(struct hist_entry *he, size_t privsize) { return symbol__annotate(he->ms.sym, he->ms.map, privsize); } void hists__inc_nr_events(struct hists *hists, u32 type) { ++hists->stats.nr_events[0]; ++hists->stats.nr_events[type]; } size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp) { int i; size_t ret = 0; for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) { const char *name; if (hists->stats.nr_events[i] == 0) continue; name = perf_event__name(i); if (!strcmp(name, "UNKNOWN")) continue; ret += fprintf(fp, "%16s events: %10d\n", name, hists->stats.nr_events[i]); } return ret; }
gpl-2.0
arcardinal/kernel_lge_g3
fs/ext4/hash.c
4789
4462
/*
 * linux/fs/ext4/hash.c
 *
 * Copyright (C) 2002 by Theodore Ts'o
 *
 * This file is released under the GPL v2.
 *
 * This file may be redistributed under the terms of the GNU Public
 * License.
 */

#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/cryptohash.h>
#include "ext4.h"

/* TEA magic constant: 2^32 / golden ratio, the standard TEA key schedule delta */
#define DELTA 0x9E3779B9

/*
 * One 16-round TEA-like transform: mixes the 4-word input block 'in'
 * into buf[0]/buf[1].  buf[2]/buf[3] are left untouched by this
 * function (they act as state for the caller across blocks).
 */
static void TEA_transform(__u32 buf[4], __u32 const in[])
{
	__u32 sum = 0;
	__u32 b0 = buf[0], b1 = buf[1];
	__u32 a = in[0], b = in[1], c = in[2], d = in[3];
	int n = 16;	/* 16 rounds (half the 32 rounds of full TEA) */

	do {
		sum += DELTA;
		b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
		b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
	} while (--n);

	/* Feed-forward: add the round output back into the running state */
	buf[0] += b0;
	buf[1] += b1;
}


/* The old legacy hash */
/*
 * Historical ext2/ext3 dirhash, byte-at-a-time.  This variant promotes
 * each byte as *unsigned* char before the multiply; see the _signed
 * twin below.  Both exist because the original code's behavior depended
 * on the platform signedness of plain 'char', and on-disk hashes must
 * be reproducible either way.
 */
static __u32 dx_hack_hash_unsigned(const char *name, int len)
{
	__u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
	const unsigned char *ucp = (const unsigned char *) name;

	while (len--) {
		hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373));

		if (hash & 0x80000000)
			hash -= 0x7fffffff;
		hash1 = hash0;
		hash0 = hash;
	}
	return hash0 << 1;
}

/* Same as dx_hack_hash_unsigned(), but bytes are sign-extended */
static __u32 dx_hack_hash_signed(const char *name, int len)
{
	__u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
	const signed char *scp = (const signed char *) name;

	while (len--) {
		hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373));

		if (hash & 0x80000000)
			hash -= 0x7fffffff;
		hash1 = hash0;
		hash0 = hash;
	}
	return hash0 << 1;
}

/*
 * Pack up to num*4 bytes of 'msg' into the __u32 array 'buf',
 * sign-extending each byte.  Short input is padded with a pattern
 * derived from 'len' so different lengths hash differently.
 */
static void str2hashbuf_signed(const char *msg, int len, __u32 *buf, int num)
{
	__u32 pad, val;
	int i;
	const signed char *scp = (const signed char *) msg;

	/* Pad word: the length byte replicated into all four byte lanes */
	pad = (__u32)len | ((__u32)len << 8);
	pad |= pad << 16;

	val = pad;
	if (len > num*4)
		len = num * 4;
	for (i = 0; i < len; i++) {
		if ((i % 4) == 0)
			val = pad;
		val = ((int) scp[i]) + (val << 8);
		if ((i % 4) == 3) {
			*buf++ = val;
			val = pad;
			num--;
		}
	}
	/* Flush a partially-filled word, then fill the rest with padding */
	if (--num >= 0)
		*buf++ = val;
	while (--num >= 0)
		*buf++ = pad;
}

/* Unsigned-char twin of str2hashbuf_signed(); see comment there */
static void str2hashbuf_unsigned(const char *msg, int len, __u32 *buf, int num)
{
	__u32 pad, val;
	int i;
	const unsigned char *ucp = (const unsigned char *) msg;

	pad = (__u32)len | ((__u32)len << 8);
	pad |= pad << 16;

	val = pad;
	if (len > num*4)
		len = num * 4;
	for (i = 0; i < len; i++) {
		if ((i % 4) == 0)
			val = pad;
		val = ((int) ucp[i]) + (val << 8);
		if ((i % 4) == 3) {
			*buf++ = val;
			val = pad;
			num--;
		}
	}
	if (--num >= 0)
		*buf++ = val;
	while (--num >= 0)
		*buf++ = pad;
}

/*
 * Returns the hash of a filename.  If len is 0 and name is NULL, then
 * this function can be used to test whether or not a hash version is
 * supported.
 *
 * The seed is an 4 longword (32 bits) "secret" which can be used to
 * uniquify a hash.  If the seed is all zero's, then some default seed
 * may be used.
 *
 * A particular hash version specifies whether or not the seed is
 * represented, and whether or not the returned hash is 32 bits or 64
 * bits.  32 bit hashes will return 0 for the minor hash.
 *
 * On success stores the result in hinfo->hash / hinfo->minor_hash and
 * returns 0; returns -1 (with hinfo->hash zeroed) for an unknown
 * hash_version.
 */
int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
{
	__u32 hash;
	__u32 minor_hash = 0;
	const char *p;
	int i;
	__u32 in[8], buf[4];
	/* Default byte-packing treats chars as signed; the *_UNSIGNED
	 * hash versions switch this pointer below. */
	void (*str2hashbuf)(const char *, int, __u32 *, int) =
		str2hashbuf_signed;

	/* Initialize the default seed for the hash checksum functions
	 * (the standard MD4 initialization vector) */
	buf[0] = 0x67452301;
	buf[1] = 0xefcdab89;
	buf[2] = 0x98badcfe;
	buf[3] = 0x10325476;

	/* Check to see if the seed is all zero's */
	if (hinfo->seed) {
		for (i = 0; i < 4; i++) {
			if (hinfo->seed[i])
				break;
		}
		/* Only a non-zero seed overrides the default IV */
		if (i < 4)
			memcpy(buf, hinfo->seed, sizeof(buf));
	}

	switch (hinfo->hash_version) {
	case DX_HASH_LEGACY_UNSIGNED:
		hash = dx_hack_hash_unsigned(name, len);
		break;
	case DX_HASH_LEGACY:
		hash = dx_hack_hash_signed(name, len);
		break;
	case DX_HASH_HALF_MD4_UNSIGNED:
		str2hashbuf = str2hashbuf_unsigned;
		/* fall through: same algorithm, unsigned byte packing */
	case DX_HASH_HALF_MD4:
		p = name;
		/* Consume the name in 32-byte chunks (8 words each);
		 * str2hashbuf pads the final short chunk. */
		while (len > 0) {
			(*str2hashbuf)(p, len, in, 8);
			half_md4_transform(buf, in);
			len -= 32;
			p += 32;
		}
		minor_hash = buf[2];
		hash = buf[1];
		break;
	case DX_HASH_TEA_UNSIGNED:
		str2hashbuf = str2hashbuf_unsigned;
		/* fall through: same algorithm, unsigned byte packing */
	case DX_HASH_TEA:
		p = name;
		/* TEA variant consumes 16-byte chunks (4 words each) */
		while (len > 0) {
			(*str2hashbuf)(p, len, in, 4);
			TEA_transform(buf, in);
			len -= 16;
			p += 16;
		}
		hash = buf[0];
		minor_hash = buf[1];
		break;
	default:
		hinfo->hash = 0;
		return -1;
	}
	/* Low bit is reserved (used as a continuation flag in the htree
	 * code), and the EOF sentinel value must never be produced as a
	 * real hash, so remap it to the previous even value. */
	hash = hash & ~1;
	if (hash == (EXT4_HTREE_EOF_32BIT << 1))
		hash = (EXT4_HTREE_EOF_32BIT - 1) << 1;
	hinfo->hash = hash;
	hinfo->minor_hash = minor_hash;
	return 0;
}
gpl-2.0
samuaz/kernel_msm_gee
arch/powerpc/kernel/module.c
7093
2464
/*  Kernel module help for powerpc.
    Copyright (C) 2001, 2003 Rusty Russell IBM Corporation.
    Copyright (C) 2008 Freescale Semiconductor, Inc.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include <asm/module.h>
#include <asm/uaccess.h>
#include <asm/firmware.h>
#include <linux/sort.h>

#include "setup.h"

/* List of modules' BUG table sections, consumed by the bug-reporting code */
LIST_HEAD(module_bug_list);

/*
 * Look up an ELF section by name in a module image.
 *
 * @hdr:     ELF file header of the module being loaded
 * @sechdrs: array of section headers belonging to @hdr
 * @name:    section name to search for (e.g. "__ftr_fixup")
 *
 * Returns the matching section header, or NULL if not present.
 * Section 0 (SHN_UNDEF) is deliberately skipped.
 */
static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
				    const Elf_Shdr *sechdrs,
				    const char *name)
{
	char *secstrings;
	unsigned int i;

	/* Section names live in the section-header string table */
	secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	for (i = 1; i < hdr->e_shnum; i++)
		if (strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
			return &sechdrs[i];
	return NULL;
}

/*
 * Final per-module fixup pass, run after relocation: patch the module's
 * code according to the CPU/MMU/firmware features detected at boot, the
 * same way the kernel image itself is patched.  Each fixup section is
 * optional; a module simply may not contain it.
 */
int module_finalize(const Elf_Ehdr *hdr,
		const Elf_Shdr *sechdrs, struct module *me)
{
	const Elf_Shdr *sect;

	/* Apply feature fixups */
	sect = find_section(hdr, sechdrs, "__ftr_fixup");
	if (sect != NULL)
		do_feature_fixups(cur_cpu_spec->cpu_features,
				  (void *)sect->sh_addr,
				  (void *)sect->sh_addr + sect->sh_size);

	sect = find_section(hdr, sechdrs, "__mmu_ftr_fixup");
	if (sect != NULL)
		do_feature_fixups(cur_cpu_spec->mmu_features,
				  (void *)sect->sh_addr,
				  (void *)sect->sh_addr + sect->sh_size);

#ifdef CONFIG_PPC64
	/* Firmware feature sections only exist on 64-bit */
	sect = find_section(hdr, sechdrs, "__fw_ftr_fixup");
	if (sect != NULL)
		do_feature_fixups(powerpc_firmware_features,
				  (void *)sect->sh_addr,
				  (void *)sect->sh_addr + sect->sh_size);
#endif

	/* Patch lwsync/sync choice for the running CPU */
	sect = find_section(hdr, sechdrs, "__lwsync_fixup");
	if (sect != NULL)
		do_lwsync_fixups(cur_cpu_spec->cpu_features,
				 (void *)sect->sh_addr,
				 (void *)sect->sh_addr + sect->sh_size);

	return 0;
}
gpl-2.0
AndroidGX/SimpleGX-MM-6.0_H815
arch/arm/mach-sa1100/clock.c
7349
1998
/* * linux/arch/arm/mach-sa1100/clock.c */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/string.h> #include <linux/clk.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/io.h> #include <linux/clkdev.h> #include <mach/hardware.h> struct clkops { void (*enable)(struct clk *); void (*disable)(struct clk *); }; struct clk { const struct clkops *ops; unsigned int enabled; }; #define DEFINE_CLK(_name, _ops) \ struct clk clk_##_name = { \ .ops = _ops, \ } static DEFINE_SPINLOCK(clocks_lock); static void clk_gpio27_enable(struct clk *clk) { /* * First, set up the 3.6864MHz clock on GPIO 27 for the SA-1111: * (SA-1110 Developer's Manual, section 9.1.2.1) */ GAFR |= GPIO_32_768kHz; GPDR |= GPIO_32_768kHz; TUCR = TUCR_3_6864MHz; } static void clk_gpio27_disable(struct clk *clk) { TUCR = 0; GPDR &= ~GPIO_32_768kHz; GAFR &= ~GPIO_32_768kHz; } int clk_enable(struct clk *clk) { unsigned long flags; if (clk) { spin_lock_irqsave(&clocks_lock, flags); if (clk->enabled++ == 0) clk->ops->enable(clk); spin_unlock_irqrestore(&clocks_lock, flags); } return 0; } EXPORT_SYMBOL(clk_enable); void clk_disable(struct clk *clk) { unsigned long flags; if (clk) { WARN_ON(clk->enabled == 0); spin_lock_irqsave(&clocks_lock, flags); if (--clk->enabled == 0) clk->ops->disable(clk); spin_unlock_irqrestore(&clocks_lock, flags); } } EXPORT_SYMBOL(clk_disable); const struct clkops clk_gpio27_ops = { .enable = clk_gpio27_enable, .disable = clk_gpio27_disable, }; static DEFINE_CLK(gpio27, &clk_gpio27_ops); static struct clk_lookup sa11xx_clkregs[] = { CLKDEV_INIT("sa1111.0", NULL, &clk_gpio27), CLKDEV_INIT("sa1100-rtc", NULL, NULL), }; static int __init sa11xx_clk_init(void) { clkdev_add_table(sa11xx_clkregs, ARRAY_SIZE(sa11xx_clkregs)); return 0; } core_initcall(sa11xx_clk_init);
gpl-2.0
skeevy420/android_kernel_lge_d850
arch/cris/arch-v32/drivers/mach-fs/nandflash.c
8885
4260
/*
 * arch/cris/arch-v32/drivers/nandflash.c
 *
 * Copyright (c) 2004
 *
 * Derived from drivers/mtd/nand/spia.c
 *	 Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <arch/memmap.h>
#include <hwregs/reg_map.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/gio_defs.h>
#include <hwregs/bif_core_defs.h>
#include <asm/io.h>

/* GIO port A bit positions for the NAND control/status lines */
#define CE_BIT 4	/* chip enable (active low)  */
#define CLE_BIT 5	/* command latch enable      */
#define ALE_BIT 6	/* address latch enable      */
#define BY_BIT 7	/* ready/busy input          */

/* mtd_info and its nand_chip allocated together in one kzalloc */
struct mtd_info_wrapper {
	struct mtd_info info;
	struct nand_chip chip;
};

/* Bitmask for control pins */
#define PIN_BITMASK ((1 << CE_BIT) | (1 << CLE_BIT) | (1 << ALE_BIT))

/* Bitmask for mtd nand control bits */
#define CTRL_BITMASK (NAND_NCE | NAND_CLE | NAND_ALE)

static struct mtd_info *crisv32_mtd;

/*
 * hardware specific access to control-lines
 *
 * Translates the MTD NAND control bits into GIO port A pin levels and,
 * when a command byte is supplied, writes it to the chip's write
 * chip-select window.  Runs with local IRQs off so the read-modify-write
 * of the shared dout register is atomic.
 */
static void crisv32_hwcontrol(struct mtd_info *mtd, int cmd,
			      unsigned int ctrl)
{
	unsigned long flags;
	reg_gio_rw_pa_dout dout;
	struct nand_chip *this = mtd->priv;

	local_irq_save(flags);

	/* control bits change */
	if (ctrl & NAND_CTRL_CHANGE) {
		dout = REG_RD(gio, regi_gio, rw_pa_dout);
		dout.data &= ~PIN_BITMASK;

#if (CE_BIT == 4 && NAND_NCE == 1 &&  \
     CLE_BIT == 5 && NAND_CLE == 2 && \
     ALE_BIT == 6 && NAND_ALE == 4)
		/* Pins in same order as control bits, but shifted.
		 * Optimize for this case; works for 2.6.18 */
		dout.data |= ((ctrl & CTRL_BITMASK) ^ NAND_NCE) << CE_BIT;
#else
		/* the slow way */
		if (!(ctrl & NAND_NCE))
			dout.data |= (1 << CE_BIT);
		if (ctrl & NAND_CLE)
			dout.data |= (1 << CLE_BIT);
		if (ctrl & NAND_ALE)
			dout.data |= (1 << ALE_BIT);
#endif
		REG_WR(gio, regi_gio, rw_pa_dout, dout);
	}

	/* command to chip */
	if (cmd != NAND_CMD_NONE)
		writeb(cmd, this->IO_ADDR_W);

	local_irq_restore(flags);
}

/*
 * read device ready pin
 *
 * Returns 1 when the chip's R/B# line reports ready, 0 while busy.
 */
static int crisv32_device_ready(struct mtd_info *mtd)
{
	reg_gio_r_pa_din din = REG_RD(gio, regi_gio, r_pa_din);
	return ((din.data & (1 << BY_BIT)) >> BY_BIT);
}

/*
 * Main initialization routine
 *
 * Allocates the mtd/nand pair, maps the read and write chip-select
 * windows, configures the GIO pins and bus interface, and probes for
 * the chip with nand_scan().  Returns the mtd_info on success or NULL
 * on failure; all partially-acquired resources are released on the
 * error paths.
 */
struct mtd_info *__init crisv32_nand_flash_probe(void)
{
	void __iomem *read_cs;
	void __iomem *write_cs;

	reg_bif_core_rw_grp3_cfg bif_cfg = REG_RD(bif_core, regi_bif_core,
		rw_grp3_cfg);
	reg_gio_rw_pa_oe pa_oe = REG_RD(gio, regi_gio, rw_pa_oe);
	struct mtd_info_wrapper *wrapper;
	struct nand_chip *this;

	/* Allocate memory for MTD device structure and private data */
	wrapper = kzalloc(sizeof(struct mtd_info_wrapper), GFP_KERNEL);
	if (!wrapper) {
		printk(KERN_ERR "Unable to allocate CRISv32 NAND MTD "
			"device structure.\n");
		return NULL;
	}

	read_cs = ioremap(MEM_CSP0_START | MEM_NON_CACHEABLE, 8192);
	write_cs = ioremap(MEM_CSP1_START | MEM_NON_CACHEABLE, 8192);

	if (!read_cs || !write_cs) {
		printk(KERN_ERR "CRISv32 NAND ioremap failed\n");
		/* Fix: go through out_ior so a half-successful pair of
		 * mappings is unmapped instead of leaked. */
		goto out_ior;
	}

	/* Get pointer to private data */
	this = &wrapper->chip;
	crisv32_mtd = &wrapper->info;

	/* CE, ALE, CLE are outputs; the ready/busy line is an input */
	pa_oe.oe |= 1 << CE_BIT;
	pa_oe.oe |= 1 << ALE_BIT;
	pa_oe.oe |= 1 << CLE_BIT;
	pa_oe.oe &= ~(1 << BY_BIT);
	REG_WR(gio, regi_gio, rw_pa_oe, pa_oe);

	/* Gate chip-select 0 for reads and chip-select 1 for writes */
	bif_cfg.gated_csp0 = regk_bif_core_rd;
	bif_cfg.gated_csp1 = regk_bif_core_wr;
	REG_WR(bif_core, regi_bif_core, rw_grp3_cfg, bif_cfg);

	/* Link the private data with the MTD structure */
	crisv32_mtd->priv = this;

	/* Set address of NAND IO lines */
	this->IO_ADDR_R = read_cs;
	this->IO_ADDR_W = write_cs;
	this->cmd_ctrl = crisv32_hwcontrol;
	this->dev_ready = crisv32_device_ready;
	/* 20 us command delay time */
	this->chip_delay = 20;
	this->ecc.mode = NAND_ECC_SOFT;

	/* Enable the following for a flash based bad block table */
	/* this->bbt_options = NAND_BBT_USE_FLASH; */

	/* Scan to find existence of the device */
	if (nand_scan(crisv32_mtd, 1))
		goto out_ior;

	return crisv32_mtd;

out_ior:
	/* Either mapping may have failed; only unmap what succeeded */
	if (read_cs)
		iounmap((void *)read_cs);
	if (write_cs)
		iounmap((void *)write_cs);
	kfree(wrapper);
	return NULL;
}
gpl-2.0
ptriller/dcpu-gcc
gcc/testsuite/gcc.c-torture/compile/pr34091.c
182
5566
typedef unsigned int GLenum; typedef unsigned char GLboolean; typedef int GLint; typedef unsigned short GLushort; typedef unsigned int GLuint; typedef float GLfloat; typedef GLushort GLchan; struct gl_texture_image; typedef struct __GLcontextRec GLcontext; typedef void (*FetchTexelFuncC) (const struct gl_texture_image * texImage, GLint col, GLint row, GLint img, GLchan * texelOut); struct gl_texture_format { }; struct gl_texture_image { GLenum _BaseFormat; GLboolean _IsPowerOfTwo; FetchTexelFuncC FetchTexelc; }; struct gl_texture_object { GLenum Target; GLenum WrapS; GLenum MinFilter; GLenum MagFilter; GLint BaseLevel; GLint _MaxLevel; struct gl_texture_image *Image[6][12]; }; enum _format { MESA_FORMAT_RGBA_DXT3, MESA_FORMAT_RGBA_DXT5, MESA_FORMAT_RGBA, MESA_FORMAT_RGB, MESA_FORMAT_ALPHA, MESA_FORMAT_LUMINANCE, }; typedef void (*texture_sample_func) (GLcontext * ctx, const struct gl_texture_object * tObj, GLuint n, const GLfloat texcoords[][4], const GLfloat lambda[], GLchan rgba[][4]); lerp_2d (GLfloat a, GLfloat b, GLfloat v00, GLfloat v10, GLfloat v01, GLfloat v11) { const GLfloat temp0 = ((v00) + (a) * ((v10) - (v00))); const GLfloat temp1 = ((v01) + (a) * ((v11) - (v01))); return ((temp0) + (b) * ((temp1) - (temp0))); } static __inline__ void lerp_rgba (GLchan result[4], GLfloat t, const GLchan a[4], const GLchan b[4]) { result[0] = (GLchan) (((a[0]) + (t) * ((b[0]) - (a[0]))) + 0.5); result[1] = (GLchan) (((a[1]) + (t) * ((b[1]) - (a[1]))) + 0.5); result[2] = (GLchan) (((a[2]) + (t) * ((b[2]) - (a[2]))) + 0.5); } static __inline__ void lerp_rgba_2d (GLchan result[4], GLfloat a, GLfloat b, const GLchan t00[4], const GLchan t10[4], const GLchan t01[4], const GLchan t11[4]) { result[0] = (GLchan) (lerp_2d (a, b, t00[0], t10[0], t01[0], t11[0]) + 0.5); result[1] = (GLchan) (lerp_2d (a, b, t00[1], t10[1], t01[1], t11[1]) + 0.5); result[2] = (GLchan) (lerp_2d (a, b, t00[2], t10[2], t01[2], t11[2]) + 0.5); } static __inline__ void sample_2d_linear_repeat (GLcontext 
* ctx, const struct gl_texture_object *tObj, const struct gl_texture_image *img, const GLfloat texcoord[4], GLchan rgba[]) { GLint i0, j0, i1, j1; GLfloat a, b; GLchan t00[4], t10[4], t01[4], t11[4]; { }; img->FetchTexelc (img, i1, j1, 0, t11); lerp_rgba_2d (rgba, a, b, t00, t10, t01, t11); } sample_2d_nearest_mipmap_linear (GLcontext * ctx, const struct gl_texture_object *tObj, GLuint n, const GLfloat texcoord[][4], const GLfloat lambda[], GLchan rgba[][4]) { GLuint i; GLint level = linear_mipmap_level (tObj, lambda[i]); sample_2d_nearest (ctx, tObj, tObj->Image[0][tObj->_MaxLevel], texcoord[i], rgba[i]); GLchan t0[4], t1[4]; sample_2d_nearest (ctx, tObj, tObj->Image[0][level], texcoord[i], t0); sample_2d_nearest (ctx, tObj, tObj->Image[0][level + 1], texcoord[i], t1); } static void sample_2d_linear_mipmap_linear_repeat (GLcontext * ctx, const struct gl_texture_object *tObj, GLuint n, const GLfloat texcoord[][4], const GLfloat lambda[], GLchan rgba[][4]) { GLuint i; for (i = 0; i < n; i++) { GLint level = linear_mipmap_level (tObj, lambda[i]); if (level >= tObj->_MaxLevel) { GLchan t0[4], t1[4]; const GLfloat f = ((lambda[i]) - ifloor (lambda[i])); sample_2d_linear_repeat (ctx, tObj, tObj->Image[0][level], texcoord[i], t0); sample_2d_linear_repeat (ctx, tObj, tObj->Image[0][level + 1], texcoord[i], t1); lerp_rgba (rgba[i], f, t0, t1); } } } static void sample_lambda_2d (GLcontext * ctx, const struct gl_texture_object *tObj, GLuint n, const GLfloat texcoords[][4], const GLfloat lambda[], GLchan rgba[][4]) { const struct gl_texture_image *tImg = tObj->Image[0][tObj->BaseLevel]; GLuint minStart, minEnd; GLuint magStart, magEnd; const GLboolean repeatNoBorderPOT = (tObj->WrapS == 0x2901) && (tImg->_BaseFormat != 0x1900) && tImg->_IsPowerOfTwo; compute_min_mag_ranges (tObj, n, lambda, &minStart, &minEnd, &magStart, &magEnd); if (minStart < minEnd) { const GLuint m = minEnd - minStart; switch (tObj->MinFilter) { case 0x2600: if (repeatNoBorderPOT) { case 
MESA_FORMAT_RGB: opt_sample_rgb_2d (ctx, tObj, m, texcoords + minStart, ((void *) 0), rgba + minStart); case MESA_FORMAT_RGBA: opt_sample_rgba_2d (ctx, tObj, m, texcoords + minStart, ((void *) 0), rgba + minStart); } { sample_nearest_2d (ctx, tObj, m, texcoords + minStart, ((void *) 0), rgba + minStart); } break; sample_2d_nearest_mipmap_linear (ctx, tObj, m, texcoords + minStart, lambda + minStart, rgba + minStart); case 0x2703: if (repeatNoBorderPOT) sample_2d_linear_mipmap_linear_repeat (ctx, tObj, m, texcoords + minStart, lambda + minStart, rgba + minStart); } switch (tObj->MagFilter) { case MESA_FORMAT_RGB: opt_sample_rgb_2d (ctx, tObj, m, texcoords + magStart, ((void *) 0), rgba + magStart); opt_sample_rgba_2d (ctx, tObj, m, texcoords + magStart, ((void *) 0), rgba + magStart); sample_nearest_2d (ctx, tObj, m, texcoords + magStart, ((void *) 0), rgba + magStart); } } } texture_sample_func _swrast_choose_texture_sample_func (const struct gl_texture_object *t) { switch (t->Target) { case 0x0DE0: return &sample_lambda_2d; } }
gpl-2.0
Orion116/kernel_samsung_lt03wifi_rebase
arch/um/kernel/smp.c
438
4873
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/percpu.h"
#include "asm/pgalloc.h"
#include "asm/tlb.h"

#ifdef CONFIG_SMP

#include "linux/sched.h"
#include "linux/module.h"
#include "linux/threads.h"
#include "linux/interrupt.h"
#include "linux/err.h"
#include "linux/hardirq.h"
#include "asm/smp.h"
#include "asm/processor.h"
#include "asm/spinlock.h"
#include "kern.h"
#include "irq_user.h"
#include "os.h"

/* Per CPU bogomips and other parameters
 * The only piece used here is the ipi pipe, which is set before SMP is
 * started and never changed.
 */
struct cpuinfo_um cpu_data[NR_CPUS];

/* A statistic, can be a little off */
int num_reschedules_sent = 0;

/* Not changed after boot */
struct task_struct *idle_threads[NR_CPUS];

/* IPIs in UML are single characters written to a per-CPU pipe:
 * 'R' = reschedule, 'S' = stop, 'C' = cross-call (see IPI_handler). */
void smp_send_reschedule(int cpu)
{
	os_write_file(cpu_data[cpu].ipi_pipe[1], "R", 1);
	num_reschedules_sent++;
}

/* Ask every other online CPU to spin in its stop loop */
void smp_send_stop(void)
{
	int i;

	printk(KERN_INFO "Stopping all CPUs...");
	for (i = 0; i < num_online_cpus(); i++) {
		if (i == current_thread->cpu)
			continue;
		os_write_file(cpu_data[i].ipi_pipe[1], "S", 1);
	}
	printk(KERN_CONT "done\n");
}

/* CPUs released by __cpu_up() to start scheduling */
static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
/* CPUs that have checked in from idle_proc() */
static cpumask_t cpu_callin_map = CPU_MASK_NONE;

/*
 * Entry point of each secondary CPU's idle thread: set up its IPI
 * pipe, report in via cpu_callin_map, wait for the release signal,
 * mark itself online and drop into the idle loop.
 */
static int idle_proc(void *cpup)
{
	int cpu = (int) cpup, err;

	err = os_pipe(cpu_data[cpu].ipi_pipe, 1, 1);
	if (err < 0)
		panic("CPU#%d failed to create IPI pipe, err = %d", cpu, -err);

	os_set_fd_async(cpu_data[cpu].ipi_pipe[0]);

	wmb();
	if (cpu_test_and_set(cpu, cpu_callin_map)) {
		printk(KERN_ERR "huh, CPU#%d already present??\n", cpu);
		BUG();
	}

	while (!cpu_isset(cpu, smp_commenced_mask))
		cpu_relax();

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);
	default_idle();
	return 0;
}

/* Fork the idle task for a secondary CPU (tt-mode leftover; skas
 * mode panics below because it doesn't support SMP). */
static struct task_struct *idle_thread(int cpu)
{
	struct task_struct *new_task;

	current->thread.request.u.thread.proc = idle_proc;
	current->thread.request.u.thread.arg = (void *) cpu;
	new_task = fork_idle(cpu);
	if (IS_ERR(new_task))
		panic("copy_process failed in idle_thread, error = %ld",
		      PTR_ERR(new_task));

	cpu_tasks[cpu] = ((struct cpu_task)
		          { .pid = 	new_task->thread.mode.tt.extern_pid,
			    .task = 	new_task } );
	idle_threads[cpu] = new_task;
	panic("skas mode doesn't support SMP");
	return new_task;
}

/*
 * Boot-CPU side of bringup: create the boot CPU's IPI pipe, spawn an
 * idle thread per secondary CPU and wait (bounded spin) for each to
 * check in via cpu_callin_map.
 */
void smp_prepare_cpus(unsigned int maxcpus)
{
	struct task_struct *idle;
	unsigned long waittime;
	int err, cpu, me = smp_processor_id();
	int i;

	for (i = 0; i < ncpus; ++i)
		set_cpu_possible(i, true);

	set_cpu_online(me, true);
	cpu_set(me, cpu_callin_map);

	err = os_pipe(cpu_data[me].ipi_pipe, 1, 1);
	if (err < 0)
		panic("CPU#0 failed to create IPI pipe, errno = %d", -err);

	os_set_fd_async(cpu_data[me].ipi_pipe[0]);

	for (cpu = 1; cpu < ncpus; cpu++) {
		printk(KERN_INFO "Booting processor %d...\n", cpu);

		idle = idle_thread(cpu);

		init_idle(idle, cpu);

		waittime = 200000000;
		while (waittime-- && !cpu_isset(cpu, cpu_callin_map))
			cpu_relax();

		/* Fix: was 'cpu_calling_map', an undeclared identifier;
		 * the mask being polled above is cpu_callin_map. */
		printk(KERN_INFO "%s\n",
		       cpu_isset(cpu, cpu_callin_map) ? "done" : "failed");
	}
}

void smp_prepare_boot_cpu(void)
{
	set_cpu_online(smp_processor_id(), true);
}

/* Release one secondary CPU and wait until it marks itself online */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	cpu_set(cpu, smp_commenced_mask);
	while (!cpu_online(cpu))
		mb();
	return 0;
}

int setup_profiling_timer(unsigned int multiplier)
{
	printk(KERN_INFO "setup_profiling_timer\n");
	return 0;
}

void smp_call_function_slave(int cpu);

/* Drain this CPU's IPI pipe and act on each single-character message */
void IPI_handler(int cpu)
{
	unsigned char c;
	int fd;

	fd = cpu_data[cpu].ipi_pipe[0];
	while (os_read_file(fd, &c, 1) == 1) {
		switch (c) {
		case 'C':
			smp_call_function_slave(cpu);
			break;

		case 'R':
			scheduler_ipi();
			break;

		case 'S':
			printk(KERN_INFO "CPU#%d stopping\n", cpu);
			while (1)
				pause();
			break;

		default:
			printk(KERN_ERR "CPU#%d received unknown IPI [%c]!\n",
			       cpu, c);
			break;
		}
	}
}

int hard_smp_processor_id(void)
{
	return pid_to_processor_id(os_getpid());
}

/* Serializes smp_call_function(); func/info are the pending cross-call */
static DEFINE_SPINLOCK(call_lock);
static atomic_t scf_started;
static atomic_t scf_finished;
static void (*func)(void *info);
static void *info;

/* Slave side of a cross-call: run the pending function and signal
 * start/finish so the initiator can synchronize */
void smp_call_function_slave(int cpu)
{
	atomic_inc(&scf_started);
	(*func)(info);
	atomic_inc(&scf_finished);
}

/*
 * Run _func(_info) on every other online CPU via 'C' IPIs.  Spins
 * until all slaves have started, and (if wait) until all have
 * finished.  Must not be called with interrupts disabled.
 */
int smp_call_function(void (*_func)(void *info), void *_info, int wait)
{
	int cpus = num_online_cpus() - 1;
	int i;

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	spin_lock_bh(&call_lock);
	atomic_set(&scf_started, 0);
	atomic_set(&scf_finished, 0);
	func = _func;
	info = _info;

	for_each_online_cpu(i)
		os_write_file(cpu_data[i].ipi_pipe[1], "C", 1);

	while (atomic_read(&scf_started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&scf_finished) != cpus)
			barrier();

	spin_unlock_bh(&call_lock);
	return 0;
}

#endif
gpl-2.0
nightscape/yoga-900-kernel
drivers/media/pci/solo6x10/solo6x10-core.c
694
17897
/* * Copyright (C) 2010-2013 Bluecherry, LLC <http://www.bluecherrydvr.com> * * Original author: * Ben Collins <bcollins@ubuntu.com> * * Additional work by: * John Brooks <john.brooks@bluecherry.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/videodev2.h> #include <linux/delay.h> #include <linux/sysfs.h> #include <linux/ktime.h> #include <linux/slab.h> #include "solo6x10.h" #include "solo6x10-tw28.h" MODULE_DESCRIPTION("Softlogic 6x10 MPEG4/H.264/G.723 CODEC V4L2/ALSA Driver"); MODULE_AUTHOR("Bluecherry <maintainers@bluecherrydvr.com>"); MODULE_VERSION(SOLO6X10_VERSION); MODULE_LICENSE("GPL"); static unsigned video_nr = -1; module_param(video_nr, uint, 0644); MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect (default)"); static int full_eeprom; /* default is only top 64B */ module_param(full_eeprom, uint, 0644); MODULE_PARM_DESC(full_eeprom, "Allow access to full 128B EEPROM (dangerous)"); static void solo_set_time(struct solo_dev *solo_dev) { struct timespec ts; ktime_get_ts(&ts); solo_reg_write(solo_dev, SOLO_TIMER_SEC, ts.tv_sec); solo_reg_write(solo_dev, SOLO_TIMER_USEC, ts.tv_nsec / NSEC_PER_USEC); } static void solo_timer_sync(struct solo_dev *solo_dev) { u32 sec, usec; struct timespec ts; long diff; if (solo_dev->type != SOLO_DEV_6110) return; if (++solo_dev->time_sync < 60) return; solo_dev->time_sync = 0; sec = solo_reg_read(solo_dev, SOLO_TIMER_SEC); usec = 
solo_reg_read(solo_dev, SOLO_TIMER_USEC); ktime_get_ts(&ts); diff = (long)ts.tv_sec - (long)sec; diff = (diff * 1000000) + ((long)(ts.tv_nsec / NSEC_PER_USEC) - (long)usec); if (diff > 1000 || diff < -1000) { solo_set_time(solo_dev); } else if (diff) { long usec_lsb = solo_dev->usec_lsb; usec_lsb -= diff / 4; if (usec_lsb < 0) usec_lsb = 0; else if (usec_lsb > 255) usec_lsb = 255; solo_dev->usec_lsb = usec_lsb; solo_reg_write(solo_dev, SOLO_TIMER_USEC_LSB, solo_dev->usec_lsb); } } static irqreturn_t solo_isr(int irq, void *data) { struct solo_dev *solo_dev = data; u32 status; int i; status = solo_reg_read(solo_dev, SOLO_IRQ_STAT); if (!status) return IRQ_NONE; /* Acknowledge all interrupts immediately */ solo_reg_write(solo_dev, SOLO_IRQ_STAT, status); if (status & SOLO_IRQ_PCI_ERR) solo_p2m_error_isr(solo_dev); for (i = 0; i < SOLO_NR_P2M; i++) if (status & SOLO_IRQ_P2M(i)) solo_p2m_isr(solo_dev, i); if (status & SOLO_IRQ_IIC) solo_i2c_isr(solo_dev); if (status & SOLO_IRQ_VIDEO_IN) { solo_video_in_isr(solo_dev); solo_timer_sync(solo_dev); } if (status & SOLO_IRQ_ENCODER) solo_enc_v4l2_isr(solo_dev); if (status & SOLO_IRQ_G723) solo_g723_isr(solo_dev); return IRQ_HANDLED; } static void free_solo_dev(struct solo_dev *solo_dev) { struct pci_dev *pdev = solo_dev->pdev; if (solo_dev->dev.parent) device_unregister(&solo_dev->dev); if (solo_dev->reg_base) { /* Bring down the sub-devices first */ solo_g723_exit(solo_dev); solo_enc_v4l2_exit(solo_dev); solo_enc_exit(solo_dev); solo_v4l2_exit(solo_dev); solo_disp_exit(solo_dev); solo_gpio_exit(solo_dev); solo_p2m_exit(solo_dev); solo_i2c_exit(solo_dev); /* Now cleanup the PCI device */ solo_irq_off(solo_dev, ~0); free_irq(pdev->irq, solo_dev); pci_iounmap(pdev, solo_dev->reg_base); } pci_release_regions(pdev); pci_disable_device(pdev); v4l2_device_unregister(&solo_dev->v4l2_dev); pci_set_drvdata(pdev, NULL); kfree(solo_dev); } static ssize_t eeprom_store(struct device *dev, struct device_attribute *attr, const char *buf, 
size_t count) { struct solo_dev *solo_dev = container_of(dev, struct solo_dev, dev); u16 *p = (u16 *)buf; int i; if (count & 0x1) dev_warn(dev, "EEPROM Write not aligned (truncating)\n"); if (!full_eeprom && count > 64) { dev_warn(dev, "EEPROM Write truncated to 64 bytes\n"); count = 64; } else if (full_eeprom && count > 128) { dev_warn(dev, "EEPROM Write truncated to 128 bytes\n"); count = 128; } solo_eeprom_ewen(solo_dev, 1); for (i = full_eeprom ? 0 : 32; i < min((int)(full_eeprom ? 64 : 32), (int)(count / 2)); i++) solo_eeprom_write(solo_dev, i, cpu_to_be16(p[i])); solo_eeprom_ewen(solo_dev, 0); return count; } static ssize_t eeprom_show(struct device *dev, struct device_attribute *attr, char *buf) { struct solo_dev *solo_dev = container_of(dev, struct solo_dev, dev); u16 *p = (u16 *)buf; int count = (full_eeprom ? 128 : 64); int i; for (i = (full_eeprom ? 0 : 32); i < (count / 2); i++) p[i] = be16_to_cpu(solo_eeprom_read(solo_dev, i)); return count; } static ssize_t p2m_timeouts_show(struct device *dev, struct device_attribute *attr, char *buf) { struct solo_dev *solo_dev = container_of(dev, struct solo_dev, dev); return sprintf(buf, "%d\n", solo_dev->p2m_timeouts); } static ssize_t sdram_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct solo_dev *solo_dev = container_of(dev, struct solo_dev, dev); return sprintf(buf, "%dMegs\n", solo_dev->sdram_size >> 20); } static ssize_t tw28xx_show(struct device *dev, struct device_attribute *attr, char *buf) { struct solo_dev *solo_dev = container_of(dev, struct solo_dev, dev); return sprintf(buf, "tw2815[%d] tw2864[%d] tw2865[%d]\n", hweight32(solo_dev->tw2815), hweight32(solo_dev->tw2864), hweight32(solo_dev->tw2865)); } static ssize_t input_map_show(struct device *dev, struct device_attribute *attr, char *buf) { struct solo_dev *solo_dev = container_of(dev, struct solo_dev, dev); unsigned int val; char *out = buf; val = solo_reg_read(solo_dev, SOLO_VI_CH_SWITCH_0); out += sprintf(out, 
"Channel 0 => Input %d\n", val & 0x1f); out += sprintf(out, "Channel 1 => Input %d\n", (val >> 5) & 0x1f); out += sprintf(out, "Channel 2 => Input %d\n", (val >> 10) & 0x1f); out += sprintf(out, "Channel 3 => Input %d\n", (val >> 15) & 0x1f); out += sprintf(out, "Channel 4 => Input %d\n", (val >> 20) & 0x1f); out += sprintf(out, "Channel 5 => Input %d\n", (val >> 25) & 0x1f); val = solo_reg_read(solo_dev, SOLO_VI_CH_SWITCH_1); out += sprintf(out, "Channel 6 => Input %d\n", val & 0x1f); out += sprintf(out, "Channel 7 => Input %d\n", (val >> 5) & 0x1f); out += sprintf(out, "Channel 8 => Input %d\n", (val >> 10) & 0x1f); out += sprintf(out, "Channel 9 => Input %d\n", (val >> 15) & 0x1f); out += sprintf(out, "Channel 10 => Input %d\n", (val >> 20) & 0x1f); out += sprintf(out, "Channel 11 => Input %d\n", (val >> 25) & 0x1f); val = solo_reg_read(solo_dev, SOLO_VI_CH_SWITCH_2); out += sprintf(out, "Channel 12 => Input %d\n", val & 0x1f); out += sprintf(out, "Channel 13 => Input %d\n", (val >> 5) & 0x1f); out += sprintf(out, "Channel 14 => Input %d\n", (val >> 10) & 0x1f); out += sprintf(out, "Channel 15 => Input %d\n", (val >> 15) & 0x1f); out += sprintf(out, "Spot Output => Input %d\n", (val >> 20) & 0x1f); return out - buf; } static ssize_t p2m_timeout_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct solo_dev *solo_dev = container_of(dev, struct solo_dev, dev); unsigned long ms; int ret = kstrtoul(buf, 10, &ms); if (ret < 0 || ms > 200) return -EINVAL; solo_dev->p2m_jiffies = msecs_to_jiffies(ms); return count; } static ssize_t p2m_timeout_show(struct device *dev, struct device_attribute *attr, char *buf) { struct solo_dev *solo_dev = container_of(dev, struct solo_dev, dev); return sprintf(buf, "%ums\n", jiffies_to_msecs(solo_dev->p2m_jiffies)); } static ssize_t intervals_show(struct device *dev, struct device_attribute *attr, char *buf) { struct solo_dev *solo_dev = container_of(dev, struct solo_dev, dev); char *out = 
buf; int fps = solo_dev->fps; int i; for (i = 0; i < solo_dev->nr_chans; i++) { out += sprintf(out, "Channel %d: %d/%d (0x%08x)\n", i, solo_dev->v4l2_enc[i]->interval, fps, solo_reg_read(solo_dev, SOLO_CAP_CH_INTV(i))); } return out - buf; } static ssize_t sdram_offsets_show(struct device *dev, struct device_attribute *attr, char *buf) { struct solo_dev *solo_dev = container_of(dev, struct solo_dev, dev); char *out = buf; out += sprintf(out, "DISP: 0x%08x @ 0x%08x\n", SOLO_DISP_EXT_ADDR, SOLO_DISP_EXT_SIZE); out += sprintf(out, "EOSD: 0x%08x @ 0x%08x (0x%08x * %d)\n", SOLO_EOSD_EXT_ADDR, SOLO_EOSD_EXT_AREA(solo_dev), SOLO_EOSD_EXT_SIZE(solo_dev), SOLO_EOSD_EXT_AREA(solo_dev) / SOLO_EOSD_EXT_SIZE(solo_dev)); out += sprintf(out, "MOTI: 0x%08x @ 0x%08x\n", SOLO_MOTION_EXT_ADDR(solo_dev), SOLO_MOTION_EXT_SIZE); out += sprintf(out, "G723: 0x%08x @ 0x%08x\n", SOLO_G723_EXT_ADDR(solo_dev), SOLO_G723_EXT_SIZE); out += sprintf(out, "CAPT: 0x%08x @ 0x%08x (0x%08x * %d)\n", SOLO_CAP_EXT_ADDR(solo_dev), SOLO_CAP_EXT_SIZE(solo_dev), SOLO_CAP_PAGE_SIZE, SOLO_CAP_EXT_SIZE(solo_dev) / SOLO_CAP_PAGE_SIZE); out += sprintf(out, "EREF: 0x%08x @ 0x%08x (0x%08x * %d)\n", SOLO_EREF_EXT_ADDR(solo_dev), SOLO_EREF_EXT_AREA(solo_dev), SOLO_EREF_EXT_SIZE, SOLO_EREF_EXT_AREA(solo_dev) / SOLO_EREF_EXT_SIZE); out += sprintf(out, "MPEG: 0x%08x @ 0x%08x\n", SOLO_MP4E_EXT_ADDR(solo_dev), SOLO_MP4E_EXT_SIZE(solo_dev)); out += sprintf(out, "JPEG: 0x%08x @ 0x%08x\n", SOLO_JPEG_EXT_ADDR(solo_dev), SOLO_JPEG_EXT_SIZE(solo_dev)); return out - buf; } static ssize_t sdram_show(struct file *file, struct kobject *kobj, struct bin_attribute *a, char *buf, loff_t off, size_t count) { struct device *dev = container_of(kobj, struct device, kobj); struct solo_dev *solo_dev = container_of(dev, struct solo_dev, dev); const int size = solo_dev->sdram_size; if (off >= size) return 0; if (off + count > size) count = size - off; if (solo_p2m_dma(solo_dev, 0, buf, off, count, 0, 0)) return -EIO; return count; } static 
const struct device_attribute solo_dev_attrs[] = { __ATTR(eeprom, 0640, eeprom_show, eeprom_store), __ATTR(p2m_timeout, 0644, p2m_timeout_show, p2m_timeout_store), __ATTR_RO(p2m_timeouts), __ATTR_RO(sdram_size), __ATTR_RO(tw28xx), __ATTR_RO(input_map), __ATTR_RO(intervals), __ATTR_RO(sdram_offsets), }; static void solo_device_release(struct device *dev) { /* Do nothing */ } static int solo_sysfs_init(struct solo_dev *solo_dev) { struct bin_attribute *sdram_attr = &solo_dev->sdram_attr; struct device *dev = &solo_dev->dev; const char *driver; int i; if (solo_dev->type == SOLO_DEV_6110) driver = "solo6110"; else driver = "solo6010"; dev->release = solo_device_release; dev->parent = &solo_dev->pdev->dev; set_dev_node(dev, dev_to_node(&solo_dev->pdev->dev)); dev_set_name(dev, "%s-%d-%d", driver, solo_dev->vfd->num, solo_dev->nr_chans); if (device_register(dev)) { dev->parent = NULL; return -ENOMEM; } for (i = 0; i < ARRAY_SIZE(solo_dev_attrs); i++) { if (device_create_file(dev, &solo_dev_attrs[i])) { device_unregister(dev); return -ENOMEM; } } sysfs_attr_init(&sdram_attr->attr); sdram_attr->attr.name = "sdram"; sdram_attr->attr.mode = 0440; sdram_attr->read = sdram_show; sdram_attr->size = solo_dev->sdram_size; if (device_create_bin_file(dev, sdram_attr)) { device_unregister(dev); return -ENOMEM; } return 0; } static int solo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct solo_dev *solo_dev; int ret; u8 chip_id; solo_dev = kzalloc(sizeof(*solo_dev), GFP_KERNEL); if (solo_dev == NULL) return -ENOMEM; if (id->driver_data == SOLO_DEV_6010) dev_info(&pdev->dev, "Probing Softlogic 6010\n"); else dev_info(&pdev->dev, "Probing Softlogic 6110\n"); solo_dev->type = id->driver_data; solo_dev->pdev = pdev; ret = v4l2_device_register(&pdev->dev, &solo_dev->v4l2_dev); if (ret) goto fail_probe; /* Only for during init */ solo_dev->p2m_jiffies = msecs_to_jiffies(100); ret = pci_enable_device(pdev); if (ret) goto fail_probe; pci_set_master(pdev); /* 
RETRY/TRDY Timeout disabled */ pci_write_config_byte(pdev, 0x40, 0x00); pci_write_config_byte(pdev, 0x41, 0x00); ret = pci_request_regions(pdev, SOLO6X10_NAME); if (ret) goto fail_probe; solo_dev->reg_base = pci_ioremap_bar(pdev, 0); if (solo_dev->reg_base == NULL) { ret = -ENOMEM; goto fail_probe; } chip_id = solo_reg_read(solo_dev, SOLO_CHIP_OPTION) & SOLO_CHIP_ID_MASK; switch (chip_id) { case 7: solo_dev->nr_chans = 16; solo_dev->nr_ext = 5; break; case 6: solo_dev->nr_chans = 8; solo_dev->nr_ext = 2; break; default: dev_warn(&pdev->dev, "Invalid chip_id 0x%02x, assuming 4 ch\n", chip_id); case 5: solo_dev->nr_chans = 4; solo_dev->nr_ext = 1; } /* Disable all interrupts to start */ solo_irq_off(solo_dev, ~0); /* Initial global settings */ if (solo_dev->type == SOLO_DEV_6010) { solo_dev->clock_mhz = 108; solo_dev->sys_config = SOLO_SYS_CFG_SDRAM64BIT | SOLO_SYS_CFG_INPUTDIV(25) | SOLO_SYS_CFG_FEEDBACKDIV(solo_dev->clock_mhz * 2 - 2) | SOLO_SYS_CFG_OUTDIV(3); solo_reg_write(solo_dev, SOLO_SYS_CFG, solo_dev->sys_config); } else { u32 divq, divf; solo_dev->clock_mhz = 135; if (solo_dev->clock_mhz < 125) { divq = 3; divf = (solo_dev->clock_mhz * 4) / 3 - 1; } else { divq = 2; divf = (solo_dev->clock_mhz * 2) / 3 - 1; } solo_reg_write(solo_dev, SOLO_PLL_CONFIG, (1 << 20) | /* PLL_RANGE */ (8 << 15) | /* PLL_DIVR */ (divq << 12) | (divf << 4) | (1 << 1) /* PLL_FSEN */); solo_dev->sys_config = SOLO_SYS_CFG_SDRAM64BIT; } solo_reg_write(solo_dev, SOLO_SYS_CFG, solo_dev->sys_config); solo_reg_write(solo_dev, SOLO_TIMER_CLOCK_NUM, solo_dev->clock_mhz - 1); /* PLL locking time of 1ms */ mdelay(1); ret = request_irq(pdev->irq, solo_isr, IRQF_SHARED, SOLO6X10_NAME, solo_dev); if (ret) goto fail_probe; /* Handle this from the start */ solo_irq_on(solo_dev, SOLO_IRQ_PCI_ERR); ret = solo_i2c_init(solo_dev); if (ret) goto fail_probe; /* Setup the DMA engine */ solo_reg_write(solo_dev, SOLO_DMA_CTRL, SOLO_DMA_CTRL_REFRESH_CYCLE(1) | SOLO_DMA_CTRL_SDRAM_SIZE(2) | 
SOLO_DMA_CTRL_SDRAM_CLK_INVERT | SOLO_DMA_CTRL_READ_CLK_SELECT | SOLO_DMA_CTRL_LATENCY(1)); /* Undocumented crap */ solo_reg_write(solo_dev, SOLO_DMA_CTRL1, solo_dev->type == SOLO_DEV_6010 ? 0x100 : 0x300); if (solo_dev->type != SOLO_DEV_6010) { solo_dev->usec_lsb = 0x3f; solo_set_time(solo_dev); } /* Disable watchdog */ solo_reg_write(solo_dev, SOLO_WATCHDOG, 0); /* Initialize sub components */ ret = solo_p2m_init(solo_dev); if (ret) goto fail_probe; ret = solo_disp_init(solo_dev); if (ret) goto fail_probe; ret = solo_gpio_init(solo_dev); if (ret) goto fail_probe; ret = solo_tw28_init(solo_dev); if (ret) goto fail_probe; ret = solo_v4l2_init(solo_dev, video_nr); if (ret) goto fail_probe; ret = solo_enc_init(solo_dev); if (ret) goto fail_probe; ret = solo_enc_v4l2_init(solo_dev, video_nr); if (ret) goto fail_probe; ret = solo_g723_init(solo_dev); if (ret) goto fail_probe; ret = solo_sysfs_init(solo_dev); if (ret) goto fail_probe; /* Now that init is over, set this lower */ solo_dev->p2m_jiffies = msecs_to_jiffies(20); return 0; fail_probe: free_solo_dev(solo_dev); return ret; } static void solo_pci_remove(struct pci_dev *pdev) { struct v4l2_device *v4l2_dev = pci_get_drvdata(pdev); struct solo_dev *solo_dev = container_of(v4l2_dev, struct solo_dev, v4l2_dev); free_solo_dev(solo_dev); } static const struct pci_device_id solo_id_table[] = { /* 6010 based cards */ { PCI_DEVICE(PCI_VENDOR_ID_SOFTLOGIC, PCI_DEVICE_ID_SOLO6010), .driver_data = SOLO_DEV_6010 }, { PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_NEUSOLO_4), .driver_data = SOLO_DEV_6010 }, { PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_NEUSOLO_9), .driver_data = SOLO_DEV_6010 }, { PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_NEUSOLO_16), .driver_data = SOLO_DEV_6010 }, { PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_BC_SOLO_4), .driver_data = SOLO_DEV_6010 }, { PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_BC_SOLO_9), .driver_data = SOLO_DEV_6010 }, { 
PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_BC_SOLO_16), .driver_data = SOLO_DEV_6010 }, /* 6110 based cards */ { PCI_DEVICE(PCI_VENDOR_ID_SOFTLOGIC, PCI_DEVICE_ID_SOLO6110), .driver_data = SOLO_DEV_6110 }, { PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_BC_6110_4), .driver_data = SOLO_DEV_6110 }, { PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_BC_6110_8), .driver_data = SOLO_DEV_6110 }, { PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_BC_6110_16), .driver_data = SOLO_DEV_6110 }, {0,} }; MODULE_DEVICE_TABLE(pci, solo_id_table); static struct pci_driver solo_pci_driver = { .name = SOLO6X10_NAME, .id_table = solo_id_table, .probe = solo_pci_probe, .remove = solo_pci_remove, }; module_pci_driver(solo_pci_driver);
gpl-2.0
TeamFahQ/kernel_linux_next
arch/nios2/boot/compressed/misc.c
1462
4615
/* * Copyright (C) 2009 Thomas Chou <thomas@wytron.com.tw> * * This is a collection of several routines from gzip-1.0.3 * adapted for Linux. * * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 * * Adapted for SH by Stuart Menefy, Aug 1999 * * Modified to use standard LinuxSH BIOS by Greg Banks 7Jul2000 * * Based on arch/sh/boot/compressed/misc.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ #include <linux/string.h> /* * gzip declarations */ #define OF(args) args #define STATIC static #undef memset #undef memcpy #define memzero(s, n) memset((s), 0, (n)) typedef unsigned char uch; typedef unsigned short ush; typedef unsigned long ulg; #define WSIZE 0x8000 /* Window size must be at least 32k, */ /* and a power of two */ static uch *inbuf; /* input buffer */ static uch window[WSIZE]; /* Sliding window buffer */ static unsigned insize; /* valid bytes in inbuf */ static unsigned inptr; /* index of next byte to be processed in inbuf */ static unsigned outcnt; /* bytes in output buffer */ /* gzip flag byte */ #define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ #define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ #define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ #define ORIG_NAME 0x08 /* bit 3 set: original file name present */ #define COMMENT 0x10 /* bit 4 set: file comment present */ #define ENCRYPTED 0x20 /* bit 5 set: file is 
encrypted */ #define RESERVED 0xC0 /* bit 6,7: reserved */ #define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf()) #ifdef DEBUG # define Assert(cond, msg) {if (!(cond)) error(msg); } # define Trace(x) fprintf x # define Tracev(x) {if (verbose) fprintf x ; } # define Tracevv(x) {if (verbose > 1) fprintf x ; } # define Tracec(c, x) {if (verbose && (c)) fprintf x ; } # define Tracecv(c, x) {if (verbose > 1 && (c)) fprintf x ; } #else # define Assert(cond, msg) # define Trace(x) # define Tracev(x) # define Tracevv(x) # define Tracec(c, x) # define Tracecv(c, x) #endif static int fill_inbuf(void); static void flush_window(void); static void error(char *m); extern char input_data[]; extern int input_len; static long bytes_out; static uch *output_data; static unsigned long output_ptr; #include "console.c" static void error(char *m); int puts(const char *); extern int _end; static unsigned long free_mem_ptr; static unsigned long free_mem_end_ptr; #define HEAP_SIZE 0x10000 #include "../../../../lib/inflate.c" void *memset(void *s, int c, size_t n) { int i; char *ss = (char *)s; for (i = 0; i < n; i++) ss[i] = c; return s; } void *memcpy(void *__dest, __const void *__src, size_t __n) { int i; char *d = (char *)__dest, *s = (char *)__src; for (i = 0; i < __n; i++) d[i] = s[i]; return __dest; } /* * Fill the input buffer. This is called only when the buffer is empty * and at least one byte is really needed. */ static int fill_inbuf(void) { if (insize != 0) error("ran out of input data"); inbuf = input_data; insize = input_len; inptr = 1; return inbuf[0]; } /* * Write the output window window[0..outcnt-1] and update crc and bytes_out. * (Used for the decompressed data only.) 
*/ static void flush_window(void) { ulg c = crc; /* temporary variable */ unsigned n; uch *in, *out, ch; in = window; out = &output_data[output_ptr]; for (n = 0; n < outcnt; n++) { ch = *out++ = *in++; c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8); } crc = c; bytes_out += (ulg)outcnt; output_ptr += (ulg)outcnt; outcnt = 0; } static void error(char *x) { puts("\nERROR\n"); puts(x); puts("\n\n -- System halted"); while (1) /* Halt */ ; } void decompress_kernel(void) { output_data = (void *) (CONFIG_NIOS2_MEM_BASE | CONFIG_NIOS2_KERNEL_REGION_BASE); output_ptr = 0; free_mem_ptr = (unsigned long)&_end; free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; console_init(); makecrc(); puts("Uncompressing Linux... "); gunzip(); puts("Ok, booting the kernel.\n"); }
gpl-2.0
ronasimi/LGF180-Optimus-G-_Android_KK_v30a_Kernel
drivers/usb/dwc3/host.c
1718
3219
/**
 * host.c - DesignWare USB3 DRD Controller Host Glue
 *
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/platform_device.h>

#include "core.h"
#include "xhci.h"

/*
 * dwc3_host_init - create and register the xHCI child device for host mode
 * @dwc: the DRD controller instance
 *
 * Allocates an "xhci-hcd" platform device, lets it inherit the DWC3
 * parent's DMA masks/parameters, attaches vendor/revision platform data
 * and the controller's MMIO/IRQ resources, then registers the device.
 *
 * Returns 0 on success or a negative errno.  On any failure after
 * allocation the device reference is dropped with platform_device_put();
 * note dwc->xhci is still left pointing at it in that case.
 */
int dwc3_host_init(struct dwc3 *dwc)
{
	struct platform_device	*xhci;
	int			ret;
	struct xhci_plat_data	pdata;

	xhci = platform_device_alloc("xhci-hcd", -1);
	if (!xhci) {
		dev_err(dwc->dev, "couldn't allocate xHCI device\n");
		ret = -ENOMEM;
		goto err0;
	}

	/* the xHCI child must DMA under the same constraints as the core */
	dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask);

	xhci->dev.parent	= dwc->dev;
	xhci->dev.dma_mask	= dwc->dev->dma_mask;
	xhci->dev.dma_parms	= dwc->dev->dma_parms;

	dwc->xhci = xhci;

	/*
	 * NOTE(review): the vendor field is masked with DWC3_GSNPSREV_MASK
	 * after the shift, not with an ID/vendor mask — confirm this is the
	 * intended extraction and not a copy-paste of the revision mask.
	 */
	pdata.vendor = ((dwc->revision & DWC3_GSNPSID_MASK) >>
			__ffs(DWC3_GSNPSID_MASK) & DWC3_GSNPSREV_MASK);
	pdata.revision = dwc->revision & DWC3_GSNPSREV_MASK;

	ret = platform_device_add_data(xhci, (const void *) &pdata,
				       sizeof(struct xhci_plat_data));
	if (ret) {
		dev_err(dwc->dev, "couldn't add pdata to xHCI device\n");
		goto err1;
	}

	ret = platform_device_add_resources(xhci, dwc->xhci_resources,
					    DWC3_XHCI_RESOURCES_NUM);
	if (ret) {
		dev_err(dwc->dev, "couldn't add resources to xHCI device\n");
		goto err1;
	}

	ret = platform_device_add(xhci);
	if (ret) {
		dev_err(dwc->dev, "failed to register xHCI device\n");
		goto err1;
	}

	return 0;

err1:
	platform_device_put(xhci);

err0:
	return ret;
}

/* dwc3_host_exit - unregister (and release) the xHCI child device. */
void dwc3_host_exit(struct dwc3 *dwc)
{
	platform_device_unregister(dwc->xhci);
}
gpl-2.0
damienyong/Kernel-3.0.8
kernel/drivers/net/usb/cx82310_eth.c
3254
9102
/*
 * Driver for USB ethernet port of Conexant CX82310-based ADSL routers
 * Copyright (C) 2010 by Ondrej Zary
 * some parts inspired by the cxacru driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>

enum cx82310_cmd {
	CMD_START		= 0x84,	/* no effect? */
	CMD_STOP		= 0x85,	/* no effect? */
	CMD_GET_STATUS		= 0x90,	/* returns nothing? */
	CMD_GET_MAC_ADDR	= 0x91,	/* read MAC address */
	CMD_GET_LINK_STATUS	= 0x92,	/* not useful, link is always up */
	CMD_ETHERNET_MODE	= 0x99,	/* unknown, needed during init */
};

enum cx82310_status {
	STATUS_UNDEFINED,
	STATUS_SUCCESS,
	STATUS_ERROR,
	STATUS_UNSUPPORTED,
	STATUS_UNIMPLEMENTED,
	STATUS_PARAMETER_ERROR,
	STATUS_DBG_LOOPBACK,
};

#define CMD_PACKET_SIZE	64
/* first command after power on can take around 8 seconds */
#define CMD_TIMEOUT	15000
#define CMD_REPLY_RETRY	5

#define CX82310_MTU	1514
#define CMD_EP		0x01

/*
 * execute control command
 *  - optionally send some data (command parameters)
 *  - optionally wait for the reply
 *  - optionally read some data from the reply
 *
 * Returns 0 on success, -ENOMEM on allocation failure, a negative USB
 * error from usb_bulk_msg(), or -EIO when the device's reply is missing,
 * mismatched, or reports a non-SUCCESS status.
 */
static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
		       u8 *wdata, int wlen, u8 *rdata, int rlen)
{
	int actual_len, retries, ret;
	struct usb_device *udev = dev->udev;
	u8 *buf = kzalloc(CMD_PACKET_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* create command packet: opcode at byte 0, payload from byte 4 on */
	buf[0] = cmd;
	if (wdata)
		memcpy(buf + 4, wdata, min_t(int, wlen, CMD_PACKET_SIZE - 4));

	/* send command packet */
	ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf,
			   CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT);
	if (ret < 0) {
		dev_err(&dev->udev->dev, "send command %#x: error %d\n",
			cmd, ret);
		goto end;
	}

	if (reply) {
		/* wait for reply, retry if it's empty */
		for (retries = 0; retries < CMD_REPLY_RETRY; retries++) {
			ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, CMD_EP),
					   buf, CMD_PACKET_SIZE, &actual_len,
					   CMD_TIMEOUT);
			if (ret < 0) {
				dev_err(&dev->udev->dev,
					"reply receive error %d\n", ret);
				goto end;
			}
			if (actual_len > 0)
				break;
		}
		if (actual_len == 0) {
			dev_err(&dev->udev->dev, "no reply to command %#x\n",
				cmd);
			ret = -EIO;
			goto end;
		}
		/* reply echoes the opcode at byte 0, status at byte 1 */
		if (buf[0] != cmd) {
			dev_err(&dev->udev->dev,
				"got reply to command %#x, expected: %#x\n",
				buf[0], cmd);
			ret = -EIO;
			goto end;
		}
		if (buf[1] != STATUS_SUCCESS) {
			dev_err(&dev->udev->dev, "command %#x failed: %#x\n",
				cmd, buf[1]);
			ret = -EIO;
			goto end;
		}
		if (rdata)
			memcpy(rdata, buf + 4,
			       min_t(int, rlen, CMD_PACKET_SIZE - 4));
	}
end:
	kfree(buf);
	return ret;
}

/* Repurpose usbnet's per-device data[] slots for partial-packet state. */
#define partial_len	data[0]		/* length of partial packet data */
#define partial_rem	data[1]		/* remaining (missing) data length */
#define partial_data	data[2]		/* partial packet data */

/*
 * Probe-time setup: reject ADSL-modem variants, configure usbnet framing
 * limits, allocate the partial-packet buffer, switch the device to
 * ethernet mode and read its MAC address.
 */
static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
{
	int ret;
	char buf[15];
	struct usb_device *udev = dev->udev;

	/* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
	if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
	    && strcmp(buf, "USB NET CARD")) {
		dev_info(&udev->dev, "ignoring: probably an ADSL modem\n");
		return -ENODEV;
	}

	ret = usbnet_get_endpoints(dev, intf);
	if (ret)
		return ret;

	/*
	 * this must not include ethernet header as the device can send partial
	 * packets with no header (and sometimes even empty URBs)
	 */
	dev->net->hard_header_len = 0;
	/* we can send at most 1514 bytes of data (+ 2-byte header) per URB */
	dev->hard_mtu = CX82310_MTU + 2;
	/* we can receive URBs up to 4KB from the device */
	dev->rx_urb_size = 4096;

	/* kmalloc'ed buffer pointer stored in an unsigned long data[] slot */
	dev->partial_data = (unsigned long) kmalloc(dev->hard_mtu, GFP_KERNEL);
	if (!dev->partial_data)
		return -ENOMEM;

	/* enable ethernet mode (?) */
	ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
	if (ret) {
		dev_err(&udev->dev, "unable to enable ethernet mode: %d\n",
			ret);
		goto err;
	}

	/* get the MAC address */
	ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0,
			  dev->net->dev_addr, ETH_ALEN);
	if (ret) {
		dev_err(&udev->dev, "unable to read MAC address: %d\n", ret);
		goto err;
	}

	/* start (does not seem to have any effect?) */
	ret = cx82310_cmd(dev, CMD_START, false, NULL, 0, NULL, 0);
	if (ret)
		goto err;

	return 0;
err:
	kfree((void *)dev->partial_data);
	return ret;
}

/* Release the partial-packet buffer allocated in cx82310_bind(). */
static void cx82310_unbind(struct usbnet *dev, struct usb_interface *intf)
{
	kfree((void *)dev->partial_data);
}

/*
 * RX is NOT easy - we can receive multiple packets per skb, each having 2-byte
 * packet length at the beginning.
 * The last packet might be incomplete (when it crosses the 4KB URB size),
 * continuing in the next skb (without any headers).
 * If a packet has odd length, there is one extra byte at the end (before next
 * packet or at the end of the URB).
 */
static int cx82310_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	int len;
	struct sk_buff *skb2;

	/*
	 * If the last skb ended with an incomplete packet, this skb contains
	 * end of that packet at the beginning.
	 */
	if (dev->partial_rem) {
		/* stitch saved head + this skb's tail into one full packet */
		len = dev->partial_len + dev->partial_rem;
		skb2 = alloc_skb(len, GFP_ATOMIC);
		if (!skb2)
			return 0;
		skb_put(skb2, len);
		memcpy(skb2->data, (void *)dev->partial_data,
		       dev->partial_len);
		memcpy(skb2->data + dev->partial_len, skb->data,
		       dev->partial_rem);
		usbnet_skb_return(dev, skb2);
		/* odd-length packets carry one padding byte */
		skb_pull(skb, (dev->partial_rem + 1) & ~1);
		dev->partial_rem = 0;
		if (skb->len < 2)
			return 1;
	}

	/* a skb can contain multiple packets */
	while (skb->len > 1) {
		/* first two bytes are packet length (little-endian) */
		len = skb->data[0] | (skb->data[1] << 8);
		skb_pull(skb, 2);

		/* if last packet in the skb, let usbnet to process it */
		if (len == skb->len || len + 1 == skb->len) {
			skb_trim(skb, len);
			break;
		}

		if (len > CX82310_MTU) {
			dev_err(&dev->udev->dev, "RX packet too long: %d B\n",
				len);
			return 0;
		}

		/* incomplete packet, save it for the next skb */
		if (len > skb->len) {
			dev->partial_len = skb->len;
			dev->partial_rem = len - skb->len;
			memcpy((void *)dev->partial_data, skb->data,
			       dev->partial_len);
			skb_pull(skb, skb->len);
			break;
		}

		skb2 = alloc_skb(len, GFP_ATOMIC);
		if (!skb2)
			return 0;
		skb_put(skb2, len);
		memcpy(skb2->data, skb->data, len);
		/* process the packet */
		usbnet_skb_return(dev, skb2);

		/* advance past the packet plus its odd-length padding byte */
		skb_pull(skb, (len + 1) & ~1);
	}

	/* let usbnet process the last packet */
	return 1;
}

/* TX is easy, just add 2 bytes of length at the beginning */
static struct sk_buff *cx82310_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
					gfp_t flags)
{
	int len = skb->len;

	/* make room for the 2-byte length header if the skb lacks headroom */
	if (skb_headroom(skb) < 2) {
		struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags);
		dev_kfree_skb_any(skb);
		skb = skb2;
		if (!skb)
			return NULL;
	}
	skb_push(skb, 2);

	/* little-endian length header */
	skb->data[0] = len;
	skb->data[1] = len >> 8;

	return skb;
}

static const struct driver_info	cx82310_info = {
	.description	= "Conexant CX82310 USB ethernet",
	.flags		= FLAG_ETHER,
	.bind		= cx82310_bind,
	.unbind		= cx82310_unbind,
	.rx_fixup	= cx82310_rx_fixup,
	.tx_fixup	= cx82310_tx_fixup,
};

/* match on device class too, to filter out ADSL-modem configurations */
#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		       USB_DEVICE_ID_MATCH_DEV_INFO, \
	.idVendor = (vend), \
	.idProduct = (prod), \
	.bDeviceClass = (cl), \
	.bDeviceSubClass = (sc), \
	.bDeviceProtocol = (pr)

static const struct usb_device_id products[] = {
	{
		USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
		.driver_info = (unsigned long) &cx82310_info
	},
	{ },
};
MODULE_DEVICE_TABLE(usb, products);

static struct usb_driver cx82310_driver = {
	.name		= "cx82310_eth",
	.id_table	= products,
	.probe		= usbnet_probe,
	.disconnect	= usbnet_disconnect,
	.suspend	= usbnet_suspend,
	.resume		= usbnet_resume,
};

static int __init cx82310_init(void)
{
	return usb_register(&cx82310_driver);
}
module_init(cx82310_init);

static void __exit cx82310_exit(void)
{
	usb_deregister(&cx82310_driver);
}
module_exit(cx82310_exit);

MODULE_AUTHOR("Ondrej Zary");
MODULE_DESCRIPTION("Conexant CX82310-based ADSL router USB ethernet driver");
MODULE_LICENSE("GPL");
gpl-2.0
Perferom/android_kernel_samsung_msm
drivers/misc/ad525x_dpot.c
3254
20793
/* * ad525x_dpot: Driver for the Analog Devices digital potentiometers * Copyright (c) 2009-2010 Analog Devices, Inc. * Author: Michael Hennerich <hennerich@blackfin.uclinux.org> * * DEVID #Wipers #Positions Resistor Options (kOhm) * AD5258 1 64 1, 10, 50, 100 * AD5259 1 256 5, 10, 50, 100 * AD5251 2 64 1, 10, 50, 100 * AD5252 2 256 1, 10, 50, 100 * AD5255 3 512 25, 250 * AD5253 4 64 1, 10, 50, 100 * AD5254 4 256 1, 10, 50, 100 * AD5160 1 256 5, 10, 50, 100 * AD5161 1 256 5, 10, 50, 100 * AD5162 2 256 2.5, 10, 50, 100 * AD5165 1 256 100 * AD5200 1 256 10, 50 * AD5201 1 33 10, 50 * AD5203 4 64 10, 100 * AD5204 4 256 10, 50, 100 * AD5206 6 256 10, 50, 100 * AD5207 2 256 10, 50, 100 * AD5231 1 1024 10, 50, 100 * AD5232 2 256 10, 50, 100 * AD5233 4 64 10, 50, 100 * AD5235 2 1024 25, 250 * AD5260 1 256 20, 50, 200 * AD5262 2 256 20, 50, 200 * AD5263 4 256 20, 50, 200 * AD5290 1 256 10, 50, 100 * AD5291 1 256 20, 50, 100 (20-TP) * AD5292 1 1024 20, 50, 100 (20-TP) * AD5293 1 1024 20, 50, 100 * AD7376 1 128 10, 50, 100, 1M * AD8400 1 256 1, 10, 50, 100 * AD8402 2 256 1, 10, 50, 100 * AD8403 4 256 1, 10, 50, 100 * ADN2850 3 512 25, 250 * AD5241 1 256 10, 100, 1M * AD5246 1 128 5, 10, 50, 100 * AD5247 1 128 5, 10, 50, 100 * AD5245 1 256 5, 10, 50, 100 * AD5243 2 256 2.5, 10, 50, 100 * AD5248 2 256 2.5, 10, 50, 100 * AD5242 2 256 20, 50, 200 * AD5280 1 256 20, 50, 200 * AD5282 2 256 20, 50, 200 * ADN2860 3 512 25, 250 * AD5273 1 64 1, 10, 50, 100 (OTP) * AD5171 1 64 5, 10, 50, 100 (OTP) * AD5170 1 256 2.5, 10, 50, 100 (OTP) * AD5172 2 256 2.5, 10, 50, 100 (OTP) * AD5173 2 256 2.5, 10, 50, 100 (OTP) * AD5270 1 1024 20, 50, 100 (50-TP) * AD5271 1 256 20, 50, 100 (50-TP) * AD5272 1 1024 20, 50, 100 (50-TP) * AD5274 1 256 20, 50, 100 (50-TP) * * See Documentation/misc-devices/ad525x_dpot.txt for more info. * * derived from ad5258.c * Copyright (c) 2009 Cyber Switching, Inc. 
* Author: Chris Verges <chrisv@cyberswitching.com> * * derived from ad5252.c * Copyright (c) 2006 Michael Hennerich <hennerich@blackfin.uclinux.org> * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/slab.h> #define DRIVER_VERSION "0.2" #include "ad525x_dpot.h" /* * Client data (each client gets its own) */ struct dpot_data { struct ad_dpot_bus_data bdata; struct mutex update_lock; unsigned rdac_mask; unsigned max_pos; unsigned long devid; unsigned uid; unsigned feat; unsigned wipers; u16 rdac_cache[MAX_RDACS]; DECLARE_BITMAP(otp_en_mask, MAX_RDACS); }; static inline int dpot_read_d8(struct dpot_data *dpot) { return dpot->bdata.bops->read_d8(dpot->bdata.client); } static inline int dpot_read_r8d8(struct dpot_data *dpot, u8 reg) { return dpot->bdata.bops->read_r8d8(dpot->bdata.client, reg); } static inline int dpot_read_r8d16(struct dpot_data *dpot, u8 reg) { return dpot->bdata.bops->read_r8d16(dpot->bdata.client, reg); } static inline int dpot_write_d8(struct dpot_data *dpot, u8 val) { return dpot->bdata.bops->write_d8(dpot->bdata.client, val); } static inline int dpot_write_r8d8(struct dpot_data *dpot, u8 reg, u16 val) { return dpot->bdata.bops->write_r8d8(dpot->bdata.client, reg, val); } static inline int dpot_write_r8d16(struct dpot_data *dpot, u8 reg, u16 val) { return dpot->bdata.bops->write_r8d16(dpot->bdata.client, reg, val); } static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg) { unsigned ctrl = 0; int value; if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) { if (dpot->feat & F_RDACS_WONLY) return dpot->rdac_cache[reg & DPOT_RDAC_MASK]; if (dpot->uid == DPOT_UID(AD5291_ID) || dpot->uid == DPOT_UID(AD5292_ID) || dpot->uid == DPOT_UID(AD5293_ID)) { value = dpot_read_r8d8(dpot, DPOT_AD5291_READ_RDAC << 2); if (dpot->uid == DPOT_UID(AD5291_ID)) value = value >> 2; return value; } else if (dpot->uid == DPOT_UID(AD5270_ID) || 
dpot->uid == DPOT_UID(AD5271_ID)) { value = dpot_read_r8d8(dpot, DPOT_AD5270_1_2_4_READ_RDAC << 2); if (value < 0) return value; if (dpot->uid == DPOT_UID(AD5271_ID)) value = value >> 2; return value; } ctrl = DPOT_SPI_READ_RDAC; } else if (reg & DPOT_ADDR_EEPROM) { ctrl = DPOT_SPI_READ_EEPROM; } if (dpot->feat & F_SPI_16BIT) return dpot_read_r8d8(dpot, ctrl); else if (dpot->feat & F_SPI_24BIT) return dpot_read_r8d16(dpot, ctrl); return -EFAULT; } static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg) { int value; unsigned ctrl = 0; switch (dpot->uid) { case DPOT_UID(AD5246_ID): case DPOT_UID(AD5247_ID): return dpot_read_d8(dpot); case DPOT_UID(AD5245_ID): case DPOT_UID(AD5241_ID): case DPOT_UID(AD5242_ID): case DPOT_UID(AD5243_ID): case DPOT_UID(AD5248_ID): case DPOT_UID(AD5280_ID): case DPOT_UID(AD5282_ID): ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ? 0 : DPOT_AD5282_RDAC_AB; return dpot_read_r8d8(dpot, ctrl); case DPOT_UID(AD5170_ID): case DPOT_UID(AD5171_ID): case DPOT_UID(AD5273_ID): return dpot_read_d8(dpot); case DPOT_UID(AD5172_ID): case DPOT_UID(AD5173_ID): ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ? 0 : DPOT_AD5172_3_A0; return dpot_read_r8d8(dpot, ctrl); case DPOT_UID(AD5272_ID): case DPOT_UID(AD5274_ID): dpot_write_r8d8(dpot, (DPOT_AD5270_1_2_4_READ_RDAC << 2), 0); value = dpot_read_r8d16(dpot, DPOT_AD5270_1_2_4_RDAC << 2); if (value < 0) return value; /* * AD5272/AD5274 returns high byte first, however * underling smbus expects low byte first. 
*/ value = swab16(value); if (dpot->uid == DPOT_UID(AD5271_ID)) value = value >> 2; return value; default: if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256)) return dpot_read_r8d16(dpot, (reg & 0xF8) | ((reg & 0x7) << 1)); else return dpot_read_r8d8(dpot, reg); } } static s32 dpot_read(struct dpot_data *dpot, u8 reg) { if (dpot->feat & F_SPI) return dpot_read_spi(dpot, reg); else return dpot_read_i2c(dpot, reg); } static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value) { unsigned val = 0; if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD | DPOT_ADDR_OTP))) { if (dpot->feat & F_RDACS_WONLY) dpot->rdac_cache[reg & DPOT_RDAC_MASK] = value; if (dpot->feat & F_AD_APPDATA) { if (dpot->feat & F_SPI_8BIT) { val = ((reg & DPOT_RDAC_MASK) << DPOT_MAX_POS(dpot->devid)) | value; return dpot_write_d8(dpot, val); } else if (dpot->feat & F_SPI_16BIT) { val = ((reg & DPOT_RDAC_MASK) << DPOT_MAX_POS(dpot->devid)) | value; return dpot_write_r8d8(dpot, val >> 8, val & 0xFF); } else BUG(); } else { if (dpot->uid == DPOT_UID(AD5291_ID) || dpot->uid == DPOT_UID(AD5292_ID) || dpot->uid == DPOT_UID(AD5293_ID)) { dpot_write_r8d8(dpot, DPOT_AD5291_CTRLREG << 2, DPOT_AD5291_UNLOCK_CMD); if (dpot->uid == DPOT_UID(AD5291_ID)) value = value << 2; return dpot_write_r8d8(dpot, (DPOT_AD5291_RDAC << 2) | (value >> 8), value & 0xFF); } else if (dpot->uid == DPOT_UID(AD5270_ID) || dpot->uid == DPOT_UID(AD5271_ID)) { dpot_write_r8d8(dpot, DPOT_AD5270_1_2_4_CTRLREG << 2, DPOT_AD5270_1_2_4_UNLOCK_CMD); if (dpot->uid == DPOT_UID(AD5271_ID)) value = value << 2; return dpot_write_r8d8(dpot, (DPOT_AD5270_1_2_4_RDAC << 2) | (value >> 8), value & 0xFF); } val = DPOT_SPI_RDAC | (reg & DPOT_RDAC_MASK); } } else if (reg & DPOT_ADDR_EEPROM) { val = DPOT_SPI_EEPROM | (reg & DPOT_RDAC_MASK); } else if (reg & DPOT_ADDR_CMD) { switch (reg) { case DPOT_DEC_ALL_6DB: val = DPOT_SPI_DEC_ALL_6DB; break; case DPOT_INC_ALL_6DB: val = DPOT_SPI_INC_ALL_6DB; break; case DPOT_DEC_ALL: val = DPOT_SPI_DEC_ALL; break; 
		case DPOT_INC_ALL:
			val = DPOT_SPI_INC_ALL;
			break;
		}
	} else if (reg & DPOT_ADDR_OTP) {
		/* one-time-programmable store: part-specific opcode */
		if (dpot->uid == DPOT_UID(AD5291_ID) ||
			dpot->uid == DPOT_UID(AD5292_ID)) {
			return dpot_write_r8d8(dpot,
				DPOT_AD5291_STORE_XTPM << 2, 0);
		} else if (dpot->uid == DPOT_UID(AD5270_ID) ||
			dpot->uid == DPOT_UID(AD5271_ID)) {
			return dpot_write_r8d8(dpot,
				DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0);
		}
	} else
		BUG();

	if (dpot->feat & F_SPI_16BIT)
		return dpot_write_r8d8(dpot, val, value);
	else if (dpot->feat & F_SPI_24BIT)
		return dpot_write_r8d16(dpot, val, value);

	return -EFAULT;
}

/*
 * Write a register over I2C/SMBus.  The command-byte layout is chosen
 * per part family (uid).  For OTP ("fuse") writes the ready bits are
 * polled first and -EFAULT is returned when they are set (presumably
 * the part was already programmed -- confirm against the datasheets).
 */
static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
{
	/* Only write the instruction byte for certain commands */
	unsigned tmp = 0, ctrl = 0;

	switch (dpot->uid) {
	case DPOT_UID(AD5246_ID):
	case DPOT_UID(AD5247_ID):
		return dpot_write_d8(dpot, value);
		break;
	case DPOT_UID(AD5245_ID):
	case DPOT_UID(AD5241_ID):
	case DPOT_UID(AD5242_ID):
	case DPOT_UID(AD5243_ID):
	case DPOT_UID(AD5248_ID):
	case DPOT_UID(AD5280_ID):
	case DPOT_UID(AD5282_ID):
		ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
			0 : DPOT_AD5282_RDAC_AB;
		return dpot_write_r8d8(dpot, ctrl, value);
		break;
	case DPOT_UID(AD5171_ID):
	case DPOT_UID(AD5273_ID):
		if (reg & DPOT_ADDR_OTP) {
			tmp = dpot_read_d8(dpot);
			if (tmp >> 6) /* Ready to Program? */
				return -EFAULT;
			ctrl = DPOT_AD5273_FUSE;
		}
		return dpot_write_r8d8(dpot, ctrl, value);
		break;
	case DPOT_UID(AD5172_ID):
	case DPOT_UID(AD5173_ID):
		ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
			0 : DPOT_AD5172_3_A0;
		if (reg & DPOT_ADDR_OTP) {
			tmp = dpot_read_r8d16(dpot, ctrl);
			if (tmp >> 14) /* Ready to Program? */
				return -EFAULT;
			ctrl |= DPOT_AD5170_2_3_FUSE;
		}
		return dpot_write_r8d8(dpot, ctrl, value);
		break;
	case DPOT_UID(AD5170_ID):
		if (reg & DPOT_ADDR_OTP) {
			tmp = dpot_read_r8d16(dpot, tmp);
			if (tmp >> 14) /* Ready to Program? */
				return -EFAULT;
			ctrl = DPOT_AD5170_2_3_FUSE;
		}
		return dpot_write_r8d8(dpot, ctrl, value);
		break;
	case DPOT_UID(AD5272_ID):
	case DPOT_UID(AD5274_ID):
		/* unlock the RDAC register before writing */
		dpot_write_r8d8(dpot, DPOT_AD5270_1_2_4_CTRLREG << 2,
				DPOT_AD5270_1_2_4_UNLOCK_CMD);

		if (reg & DPOT_ADDR_OTP)
			return dpot_write_r8d8(dpot,
					DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0);

		/* AD5274 keeps its 8-bit data in bits [9:2] */
		if (dpot->uid == DPOT_UID(AD5274_ID))
			value = value << 2;

		return dpot_write_r8d8(dpot, (DPOT_AD5270_1_2_4_RDAC << 2) |
				       (value >> 8), value & 0xFF);
		break;
	default:
		if (reg & DPOT_ADDR_CMD)
			return dpot_write_d8(dpot, reg);

		if (dpot->max_pos > 256)
			return dpot_write_r8d16(dpot, (reg & 0xF8) |
						((reg & 0x7) << 1), value);
		else
			/* All other registers require instruction + data bytes */
			return dpot_write_r8d8(dpot, reg, value);
	}
}

/* Dispatch a register write to the SPI or I2C implementation. */
static s32 dpot_write(struct dpot_data *dpot, u8 reg, u16 value)
{
	if (dpot->feat & F_SPI)
		return dpot_write_spi(dpot, reg, value);
	else
		return dpot_write_i2c(dpot, reg, value);
}

/* sysfs functions */

/*
 * Format a register value for sysfs.  OTP_EN attributes report the
 * per-RDAC enable bit as "enabled"/"disabled"; anything else is read
 * from the device under update_lock.
 */
static ssize_t sysfs_show_reg(struct device *dev,
			      struct device_attribute *attr,
			      char *buf, u32 reg)
{
	struct dpot_data *data = dev_get_drvdata(dev);
	s32 value;

	if (reg & DPOT_ADDR_OTP_EN)
		return sprintf(buf, "%s\n",
			       test_bit(DPOT_RDAC_MASK & reg,
				       data->otp_en_mask) ?
			       "enabled" : "disabled");

	mutex_lock(&data->update_lock);
	value = dpot_read(data, reg);
	mutex_unlock(&data->update_lock);

	if (value < 0)
		return -EINVAL;
	/*
	 * Let someone else deal with converting this ...
	 * the tolerance is a two-byte value where the MSB
	 * is a sign + integer value, and the LSB is a
	 * decimal value. See page 18 of the AD5258
	 * datasheet (Rev. A) for more details.
*/ if (reg & DPOT_REG_TOL) return sprintf(buf, "0x%04x\n", value & 0xFFFF); else return sprintf(buf, "%u\n", value & data->rdac_mask); } static ssize_t sysfs_set_reg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count, u32 reg) { struct dpot_data *data = dev_get_drvdata(dev); unsigned long value; int err; if (reg & DPOT_ADDR_OTP_EN) { if (!strncmp(buf, "enabled", sizeof("enabled"))) set_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask); else clear_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask); return count; } if ((reg & DPOT_ADDR_OTP) && !test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask)) return -EPERM; err = strict_strtoul(buf, 10, &value); if (err) return err; if (value > data->rdac_mask) value = data->rdac_mask; mutex_lock(&data->update_lock); dpot_write(data, reg, value); if (reg & DPOT_ADDR_EEPROM) msleep(26); /* Sleep while the EEPROM updates */ else if (reg & DPOT_ADDR_OTP) msleep(400); /* Sleep while the OTP updates */ mutex_unlock(&data->update_lock); return count; } static ssize_t sysfs_do_cmd(struct device *dev, struct device_attribute *attr, const char *buf, size_t count, u32 reg) { struct dpot_data *data = dev_get_drvdata(dev); mutex_lock(&data->update_lock); dpot_write(data, reg, 0); mutex_unlock(&data->update_lock); return count; } /* ------------------------------------------------------------------------- */ #define DPOT_DEVICE_SHOW(_name, _reg) static ssize_t \ show_##_name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ return sysfs_show_reg(dev, attr, buf, _reg); \ } #define DPOT_DEVICE_SET(_name, _reg) static ssize_t \ set_##_name(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ return sysfs_set_reg(dev, attr, buf, count, _reg); \ } #define DPOT_DEVICE_SHOW_SET(name, reg) \ DPOT_DEVICE_SHOW(name, reg) \ DPOT_DEVICE_SET(name, reg) \ static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, set_##name); #define DPOT_DEVICE_SHOW_ONLY(name, reg) \ 
DPOT_DEVICE_SHOW(name, reg) \
static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, NULL);
/*
 * NOTE(review): SHOW_ONLY declares S_IWUSR with a NULL store callback;
 * the write bit looks unintended -- confirm before changing modes.
 */

/* Per-wiper attribute instances for RDAC0..RDAC5. */
DPOT_DEVICE_SHOW_SET(rdac0, DPOT_ADDR_RDAC | DPOT_RDAC0);
DPOT_DEVICE_SHOW_SET(eeprom0, DPOT_ADDR_EEPROM | DPOT_RDAC0);
DPOT_DEVICE_SHOW_ONLY(tolerance0, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC0);
DPOT_DEVICE_SHOW_SET(otp0, DPOT_ADDR_OTP | DPOT_RDAC0);
DPOT_DEVICE_SHOW_SET(otp0en, DPOT_ADDR_OTP_EN | DPOT_RDAC0);

DPOT_DEVICE_SHOW_SET(rdac1, DPOT_ADDR_RDAC | DPOT_RDAC1);
DPOT_DEVICE_SHOW_SET(eeprom1, DPOT_ADDR_EEPROM | DPOT_RDAC1);
DPOT_DEVICE_SHOW_ONLY(tolerance1, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC1);
DPOT_DEVICE_SHOW_SET(otp1, DPOT_ADDR_OTP | DPOT_RDAC1);
DPOT_DEVICE_SHOW_SET(otp1en, DPOT_ADDR_OTP_EN | DPOT_RDAC1);

DPOT_DEVICE_SHOW_SET(rdac2, DPOT_ADDR_RDAC | DPOT_RDAC2);
DPOT_DEVICE_SHOW_SET(eeprom2, DPOT_ADDR_EEPROM | DPOT_RDAC2);
DPOT_DEVICE_SHOW_ONLY(tolerance2, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC2);
DPOT_DEVICE_SHOW_SET(otp2, DPOT_ADDR_OTP | DPOT_RDAC2);
DPOT_DEVICE_SHOW_SET(otp2en, DPOT_ADDR_OTP_EN | DPOT_RDAC2);

DPOT_DEVICE_SHOW_SET(rdac3, DPOT_ADDR_RDAC | DPOT_RDAC3);
DPOT_DEVICE_SHOW_SET(eeprom3, DPOT_ADDR_EEPROM | DPOT_RDAC3);
DPOT_DEVICE_SHOW_ONLY(tolerance3, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC3);
DPOT_DEVICE_SHOW_SET(otp3, DPOT_ADDR_OTP | DPOT_RDAC3);
DPOT_DEVICE_SHOW_SET(otp3en, DPOT_ADDR_OTP_EN | DPOT_RDAC3);

DPOT_DEVICE_SHOW_SET(rdac4, DPOT_ADDR_RDAC | DPOT_RDAC4);
DPOT_DEVICE_SHOW_SET(eeprom4, DPOT_ADDR_EEPROM | DPOT_RDAC4);
DPOT_DEVICE_SHOW_ONLY(tolerance4, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC4);
DPOT_DEVICE_SHOW_SET(otp4, DPOT_ADDR_OTP | DPOT_RDAC4);
DPOT_DEVICE_SHOW_SET(otp4en, DPOT_ADDR_OTP_EN | DPOT_RDAC4);

DPOT_DEVICE_SHOW_SET(rdac5, DPOT_ADDR_RDAC | DPOT_RDAC5);
DPOT_DEVICE_SHOW_SET(eeprom5, DPOT_ADDR_EEPROM | DPOT_RDAC5);
DPOT_DEVICE_SHOW_ONLY(tolerance5, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC5);
DPOT_DEVICE_SHOW_SET(otp5, DPOT_ADDR_OTP | DPOT_RDAC5);
DPOT_DEVICE_SHOW_SET(otp5en, DPOT_ADDR_OTP_EN | DPOT_RDAC5);

/*
 * The arrays below are indexed by RDAC number and must stay in step
 * with the attribute instances declared above.
 */
static const struct attribute *dpot_attrib_wipers[] = {
	&dev_attr_rdac0.attr,
	&dev_attr_rdac1.attr,
	&dev_attr_rdac2.attr,
	&dev_attr_rdac3.attr,
	&dev_attr_rdac4.attr,
	&dev_attr_rdac5.attr,
	NULL
};

static const struct attribute *dpot_attrib_eeprom[] = {
	&dev_attr_eeprom0.attr,
	&dev_attr_eeprom1.attr,
	&dev_attr_eeprom2.attr,
	&dev_attr_eeprom3.attr,
	&dev_attr_eeprom4.attr,
	&dev_attr_eeprom5.attr,
	NULL
};

static const struct attribute *dpot_attrib_otp[] = {
	&dev_attr_otp0.attr,
	&dev_attr_otp1.attr,
	&dev_attr_otp2.attr,
	&dev_attr_otp3.attr,
	&dev_attr_otp4.attr,
	&dev_attr_otp5.attr,
	NULL
};

static const struct attribute *dpot_attrib_otp_en[] = {
	&dev_attr_otp0en.attr,
	&dev_attr_otp1en.attr,
	&dev_attr_otp2en.attr,
	&dev_attr_otp3en.attr,
	&dev_attr_otp4en.attr,
	&dev_attr_otp5en.attr,
	NULL
};

static const struct attribute *dpot_attrib_tolerance[] = {
	&dev_attr_tolerance0.attr,
	&dev_attr_tolerance1.attr,
	&dev_attr_tolerance2.attr,
	&dev_attr_tolerance3.attr,
	&dev_attr_tolerance4.attr,
	&dev_attr_tolerance5.attr,
	NULL
};

/* ------------------------------------------------------------------------- */

/* Write-only command attributes (the written value is ignored). */

#define DPOT_DEVICE_DO_CMD(_name, _cmd) static ssize_t \
set_##_name(struct device *dev, \
			 struct device_attribute *attr, \
			 const char *buf, size_t count) \
{ \
	return sysfs_do_cmd(dev, attr, buf, count, _cmd); \
} \
static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, NULL, set_##_name);

DPOT_DEVICE_DO_CMD(inc_all, DPOT_INC_ALL);
DPOT_DEVICE_DO_CMD(dec_all, DPOT_DEC_ALL);
DPOT_DEVICE_DO_CMD(inc_all_6db, DPOT_INC_ALL_6DB);
DPOT_DEVICE_DO_CMD(dec_all_6db, DPOT_DEC_ALL_6DB);

static struct attribute *ad525x_attributes_commands[] = {
	&dev_attr_inc_all.attr,
	&dev_attr_dec_all.attr,
	&dev_attr_inc_all_6db.attr,
	&dev_attr_dec_all_6db.attr,
	NULL
};

static const struct attribute_group ad525x_group_commands = {
	.attrs = ad525x_attributes_commands,
};

/*
 * Create the sysfs attribute files for one RDAC channel according to
 * the part's feature flags.  Returns 0 on success or an accumulated
 * (OR-ed) error.
 */
__devinit int ad_dpot_add_files(struct device *dev,
		unsigned features, unsigned rdac)
{
	int err = sysfs_create_file(&dev->kobj,
		dpot_attrib_wipers[rdac]);
	if (features & F_CMD_EEP)
		err |=
sysfs_create_file(&dev->kobj, dpot_attrib_eeprom[rdac]); if (features & F_CMD_TOL) err |= sysfs_create_file(&dev->kobj, dpot_attrib_tolerance[rdac]); if (features & F_CMD_OTP) { err |= sysfs_create_file(&dev->kobj, dpot_attrib_otp_en[rdac]); err |= sysfs_create_file(&dev->kobj, dpot_attrib_otp[rdac]); } if (err) dev_err(dev, "failed to register sysfs hooks for RDAC%d\n", rdac); return err; } inline void ad_dpot_remove_files(struct device *dev, unsigned features, unsigned rdac) { sysfs_remove_file(&dev->kobj, dpot_attrib_wipers[rdac]); if (features & F_CMD_EEP) sysfs_remove_file(&dev->kobj, dpot_attrib_eeprom[rdac]); if (features & F_CMD_TOL) sysfs_remove_file(&dev->kobj, dpot_attrib_tolerance[rdac]); if (features & F_CMD_OTP) { sysfs_remove_file(&dev->kobj, dpot_attrib_otp_en[rdac]); sysfs_remove_file(&dev->kobj, dpot_attrib_otp[rdac]); } } __devinit int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id) { struct dpot_data *data; int i, err = 0; data = kzalloc(sizeof(struct dpot_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } dev_set_drvdata(dev, data); mutex_init(&data->update_lock); data->bdata = *bdata; data->devid = id->devid; data->max_pos = 1 << DPOT_MAX_POS(data->devid); data->rdac_mask = data->max_pos - 1; data->feat = DPOT_FEAT(data->devid); data->uid = DPOT_UID(data->devid); data->wipers = DPOT_WIPERS(data->devid); for (i = DPOT_RDAC0; i < MAX_RDACS; i++) if (data->wipers & (1 << i)) { err = ad_dpot_add_files(dev, data->feat, i); if (err) goto exit_remove_files; /* power-up midscale */ if (data->feat & F_RDACS_WONLY) data->rdac_cache[i] = data->max_pos / 2; } if (data->feat & F_CMD_INC) err = sysfs_create_group(&dev->kobj, &ad525x_group_commands); if (err) { dev_err(dev, "failed to register sysfs hooks\n"); goto exit_free; } dev_info(dev, "%s %d-Position Digital Potentiometer registered\n", id->name, data->max_pos); return 0; exit_remove_files: for (i = DPOT_RDAC0; i < MAX_RDACS; i++) if 
(data->wipers & (1 << i)) ad_dpot_remove_files(dev, data->feat, i); exit_free: kfree(data); dev_set_drvdata(dev, NULL); exit: dev_err(dev, "failed to create client for %s ID 0x%lX\n", id->name, id->devid); return err; } EXPORT_SYMBOL(ad_dpot_probe); __devexit int ad_dpot_remove(struct device *dev) { struct dpot_data *data = dev_get_drvdata(dev); int i; for (i = DPOT_RDAC0; i < MAX_RDACS; i++) if (data->wipers & (1 << i)) ad_dpot_remove_files(dev, data->feat, i); kfree(data); return 0; } EXPORT_SYMBOL(ad_dpot_remove); MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, " "Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("Digital potentiometer driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRIVER_VERSION);
gpl-2.0
MoKee/android_kernel_lge_hammerheadcaf
drivers/net/ethernet/3com/3c59x.c
3766
104937
/* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */ /* Written 1996-1999 by Donald Becker. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. This driver is for the 3Com "Vortex" and "Boomerang" series ethercards. Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597 and the EtherLink XL 3c900 and 3c905 cards. Problem reports and questions should be directed to vortex@scyld.com The author may be reached as becker@scyld.com, or C/O Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 */ /* * FIXME: This driver _could_ support MTU changing, but doesn't. See Don's hamachi.c implementation * as well as other drivers * * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k * due to dead code elimination. There will be some performance benefits from this due to * elimination of all the tests and reduced cache footprint. */ #define DRV_NAME "3c59x" /* A few values that may be tweaked. */ /* Keep the ring sizes a power of two for efficiency. */ #define TX_RING_SIZE 16 #define RX_RING_SIZE 32 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ /* "Knobs" that adjust features and parameters. */ /* Set the copy breakpoint for the copy-only-tiny-frames scheme. Setting to > 1512 effectively disables this feature. */ #ifndef __arm__ static int rx_copybreak = 200; #else /* ARM systems perform better by disregarding the bus-master transfer capability of these cards. -- rmk */ static int rx_copybreak = 1513; #endif /* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */ static const int mtu = 1500; /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ static int max_interrupt_work = 32; /* Tx timeout interval (millisecs) */ static int watchdog = 5000; /* Allow aggregation of Tx interrupts. 
Saves CPU load at the cost * of possible Tx stalls if the system is blocking interrupts * somewhere else. Undefine this to disable. */ #define tx_interrupt_mitigation 1 /* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */ #define vortex_debug debug #ifdef VORTEX_DEBUG static int vortex_debug = VORTEX_DEBUG; #else static int vortex_debug = 1; #endif #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/errno.h> #include <linux/in.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/mii.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/highmem.h> #include <linux/eisa.h> #include <linux/bitops.h> #include <linux/jiffies.h> #include <linux/gfp.h> #include <asm/irq.h> /* For nr_irqs only. */ #include <asm/io.h> #include <asm/uaccess.h> /* Kernel compatibility defines, some common to David Hinds' PCMCIA package. This is only in the support-all-kernels source code. */ #define RUN_AT(x) (jiffies + (x)) #include <linux/delay.h> static const char version[] __devinitconst = DRV_NAME ": Donald Becker and others.\n"; MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver "); MODULE_LICENSE("GPL"); /* Operational parameter that usually are not changed. */ /* The Vortex size is twice that of the original EtherLinkIII series: the runtime register window, window 1, is now always mapped in. The Boomerang size is twice as large as the Vortex -- it has additional bus master control registers. */ #define VORTEX_TOTAL_SIZE 0x20 #define BOOMERANG_TOTAL_SIZE 0x40 /* Set iff a MII transceiver on any interface requires mdio preamble. This only set with the original DP83840 on older 3c905 boards, so the extra code size of a per-interface flag is not worthwhile. 
*/ static char mii_preamble_required; #define PFX DRV_NAME ": " /* Theory of Operation I. Board Compatibility This device driver is designed for the 3Com FastEtherLink and FastEtherLink XL, 3Com's PCI to 10/100baseT adapters. It also works with the 10Mbs versions of the FastEtherLink cards. The supported product IDs are 3c590, 3c592, 3c595, 3c597, 3c900, 3c905 The related ISA 3c515 is supported with a separate driver, 3c515.c, included with the kernel source or available from cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html II. Board-specific settings PCI bus devices are configured by the system at boot time, so no jumpers need to be set on the board. The system BIOS should be set to assign the PCI INTA signal to an otherwise unused system IRQ line. The EEPROM settings for media type and forced-full-duplex are observed. The EEPROM media type should be left at the default "autoselect" unless using 10base2 or AUI connections which cannot be reliably detected. III. Driver operation The 3c59x series use an interface that's very similar to the previous 3c5x9 series. The primary interface is two programmed-I/O FIFOs, with an alternate single-contiguous-region bus-master transfer (see next). The 3c900 "Boomerang" series uses a full-bus-master interface with separate lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet, DEC Tulip and Intel Speedo3. The first chip version retains a compatible programmed-I/O interface that has been removed in 'B' and subsequent board revisions. One extension that is advertised in a very large font is that the adapters are capable of being bus masters. On the Vortex chip this capability was only for a single contiguous region making it far less useful than the full bus master capability. There is a significant performance impact of taking an extra interrupt or polling for the completion of each transfer, as well as difficulty sharing the single transfer engine between the transmit and receive threads. 
Using DMA transfers is a win only with large blocks or with the flawed versions of the Intel Orion motherboard PCI controller. The Boomerang chip's full-bus-master interface is useful, and has the currently-unused advantages over other similar chips that queued transmit packets may be reordered and receive buffer groups are associated with a single frame. With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme. Rather than a fixed intermediate receive buffer, this scheme allocates full-sized skbuffs as receive buffers. The value RX_COPYBREAK is used as the copying breakpoint: it is chosen to trade-off the memory wasted by passing the full-sized skbuff to the queue layer for all frames vs. the copying cost of copying a frame to a correctly-sized skbuff. IIIC. Synchronization The driver runs as two independent, single-threaded flows of control. One is the send-packet routine, which enforces single-threaded use by the dev->tbusy flag. The other thread is the interrupt handler, which is single threaded by the hardware and other software. IV. Notes Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development 3c590, 3c595, and 3c900 boards. The name "Vortex" is the internal 3Com project name for the PCI ASIC, and the EISA version is called "Demon". According to Terry these names come from rides at the local amusement park. The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes! This driver only supports ethernet packets because of the skbuff allocation limit of 4K. */ /* This table drives the PCI probe routines. It's mostly boilerplate in all of the drivers, and will likely be provided by some future kernel. 
*/ enum pci_flags_bit { PCI_USES_MASTER=4, }; enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8, EEPROM_8BIT=0x10, /* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */ HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100, INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800, EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000, WNO_XCVR_PWR=0x4000, EXTRA_PREAMBLE=0x8000, EEPROM_RESET=0x10000, }; enum vortex_chips { CH_3C590 = 0, CH_3C592, CH_3C597, CH_3C595_1, CH_3C595_2, CH_3C595_3, CH_3C900_1, CH_3C900_2, CH_3C900_3, CH_3C900_4, CH_3C900_5, CH_3C900B_FL, CH_3C905_1, CH_3C905_2, CH_3C905B_TX, CH_3C905B_1, CH_3C905B_2, CH_3C905B_FX, CH_3C905C, CH_3C9202, CH_3C980, CH_3C9805, CH_3CSOHO100_TX, CH_3C555, CH_3C556, CH_3C556B, CH_3C575, CH_3C575_1, CH_3CCFE575, CH_3CCFE575CT, CH_3CCFE656, CH_3CCFEM656, CH_3CCFEM656_1, CH_3C450, CH_3C920, CH_3C982A, CH_3C982B, CH_905BT4, CH_920B_EMB_WNM, }; /* note: this array directly indexed by above enums, and MUST * be kept in sync with both the enums above, and the PCI device * table below */ static struct vortex_chip_info { const char *name; int flags; int drv_flags; int io_size; } vortex_info_tbl[] __devinitdata = { {"3c590 Vortex 10Mbps", PCI_USES_MASTER, IS_VORTEX, 32, }, {"3c592 EISA 10Mbps Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */ PCI_USES_MASTER, IS_VORTEX, 32, }, {"3c597 EISA Fast Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */ PCI_USES_MASTER, IS_VORTEX, 32, }, {"3c595 Vortex 100baseTx", PCI_USES_MASTER, IS_VORTEX, 32, }, {"3c595 Vortex 100baseT4", PCI_USES_MASTER, IS_VORTEX, 32, }, {"3c595 Vortex 100base-MII", PCI_USES_MASTER, IS_VORTEX, 32, }, {"3c900 Boomerang 10baseT", PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, }, {"3c900 Boomerang 10Mbps Combo", PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, }, {"3c900 Cyclone 10Mbps TPO", /* AKPM: from Don's 0.99M */ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, {"3c900 Cyclone 10Mbps Combo", PCI_USES_MASTER, 
IS_CYCLONE|HAS_HWCKSM, 128, }, {"3c900 Cyclone 10Mbps TPC", /* AKPM: from Don's 0.99M */ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, {"3c900B-FL Cyclone 10base-FL", PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, {"3c905 Boomerang 100baseTx", PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, {"3c905 Boomerang 100baseT4", PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, {"3C905B-TX Fast Etherlink XL PCI", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c905B Cyclone 100baseTx", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c905B Cyclone 10/100/BNC", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, {"3c905B-FX Cyclone 100baseFx", PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, {"3c905C Tornado", PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c920B-EMB-WNM (ATI Radeon 9100 IGP)", PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, }, {"3c980 Cyclone", PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c980C Python-T", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, {"3cSOHO100-TX Hurricane", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c555 Laptop Hurricane", PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, }, {"3c556 Laptop Tornado", PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR| HAS_HWCKSM, 128, }, {"3c556B Laptop Hurricane", PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR| WNO_XCVR_PWR|HAS_HWCKSM, 128, }, {"3c575 [Megahertz] 10/100 LAN CardBus", PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, {"3c575 Boomerang CardBus", PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, {"3CCFE575BT Cyclone CardBus", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT| INVERT_LED_PWR|HAS_HWCKSM, 128, }, {"3CCFE575CT Tornado CardBus", PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| 
MAX_COLLISION_RESET|HAS_HWCKSM, 128, }, {"3CCFE656 Cyclone CardBus", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| INVERT_LED_PWR|HAS_HWCKSM, 128, }, {"3CCFEM656B Cyclone+Winmodem CardBus", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| INVERT_LED_PWR|HAS_HWCKSM, 128, }, {"3CXFEM656C Tornado+Winmodem CardBus", /* From pcmcia-cs-3.1.5 */ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| MAX_COLLISION_RESET|HAS_HWCKSM, 128, }, {"3c450 HomePNA Tornado", /* AKPM: from Don's 0.99Q */ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, {"3c920 Tornado", PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, {"3c982 Hydra Dual Port A", PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, }, {"3c982 Hydra Dual Port B", PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, }, {"3c905B-T4", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c920B-EMB-WNM Tornado", PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, {NULL,}, /* NULL terminated list. 
*/ }; static DEFINE_PCI_DEVICE_TABLE(vortex_pci_tbl) = { { 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 }, { 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 }, { 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 }, { 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 }, { 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 }, { 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 }, { 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 }, { 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 }, { 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 }, { 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 }, { 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 }, { 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL }, { 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 }, { 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 }, { 0x10B7, 0x9054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_TX }, { 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 }, { 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 }, { 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX }, { 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C }, { 0x10B7, 0x9202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9202 }, { 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 }, { 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 }, { 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX }, { 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 }, { 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 }, { 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B }, { 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 }, { 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 }, { 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 }, { 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT }, { 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 }, { 0x10B7, 0x6562, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 }, { 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 }, { 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 }, { 0x10B7, 0x9201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C920 }, { 0x10B7, 0x1201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982A }, { 0x10B7, 0x1202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982B }, { 0x10B7, 0x9056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_905BT4 }, { 0x10B7, 0x9210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_920B_EMB_WNM }, {0,} /* 0 terminated list. */ }; MODULE_DEVICE_TABLE(pci, vortex_pci_tbl); /* Operational definitions. These are not used by other compilation units and thus are not exported in a ".h" file. First the windows. There are eight register windows, with the command and status registers available in each. */ #define EL3_CMD 0x0e #define EL3_STATUS 0x0e /* The top five bits written to EL3_CMD are a command, the lower 11 bits are the parameter, if applicable. Note that 11 parameters bits was fine for ethernet, but the new chip can handle FDDI length frames (~4500 octets) and now parameters count 32-bit 'Dwords' rather than octets. */ enum vortex_cmd { TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11, RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, UpStall = 6<<11, UpUnstall = (6<<11)+1, DownStall = (6<<11)+2, DownUnstall = (6<<11)+3, RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11, FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11, SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11, SetTxThreshold = 18<<11, SetTxStart = 19<<11, StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11, StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,}; /* The SetRxFilter command accepts the following classes: */ enum RxFilter { RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 }; /* Bits in the general status register. 
*/
enum vortex_status {
	IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004,
	TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
	IntReq = 0x0040, StatsFull = 0x0080,
	DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
	DMAInProgress = 1<<11,			/* DMA controller is still busy.*/
	CmdInProgress = 1<<12,			/* EL3_CMD is still busy.*/
};

/* Register window 1 offsets, the window used in normal operation.
   On the Vortex this window is always mapped at offsets 0x10-0x1f. */
enum Window1 {
	TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
	RxStatus = 0x18, Timer = 0x1A, TxStatus = 0x1B,
	TxFree = 0x1C,			/* Remaining free bytes in Tx buffer. */
};
enum Window0 {
	Wn0EepromCmd = 10,		/* Window 0: EEPROM command register. */
	Wn0EepromData = 12,		/* Window 0: EEPROM results register. */
	IntrStatus = 0x0E,		/* Valid in all windows. */
};
enum Win0_EEPROM_bits {
	EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
	EEPROM_EWENB = 0x30,		/* Enable erasing/writing for 10 msec. */
	EEPROM_EWDIS = 0x00,		/* Disable EWENB before 10 msec timeout. */
};
/* EEPROM locations. */
enum eeprom_offset {
	PhysAddr01 = 0, PhysAddr23 = 1, PhysAddr45 = 2, ModelID = 3,
	EtherLink3ID = 7, IFXcvrIO = 8, IRQLine = 9,
	NodeAddr01 = 10, NodeAddr23 = 11, NodeAddr45 = 12,
	DriverTune = 13, Checksum = 15};

enum Window2 {			/* Window 2. */
	Wn2_ResetOptions = 12,
};
enum Window3 {			/* Window 3: MAC/config bits. */
	Wn3_Config = 0, Wn3_MaxPktSize = 4, Wn3_MAC_Ctrl = 6, Wn3_Options = 8,
};

/* Extract/insert a bitfield of 'bitcount' bits at bit 'offset' of 'value'. */
#define BFEXT(value, offset, bitcount)					\
	((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1))

#define BFINS(lhs, rhs, offset, bitcount)				\
	(((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) |		\
	(((rhs) & ((1 << (bitcount)) - 1)) << (offset)))

/* Accessors for the fields packed into the Wn3_Config register value. */
#define RAM_SIZE(v)		BFEXT(v, 0, 3)
#define RAM_WIDTH(v)		BFEXT(v, 3, 1)
#define RAM_SPEED(v)		BFEXT(v, 4, 2)
#define ROM_SIZE(v)		BFEXT(v, 6, 2)
#define RAM_SPLIT(v)		BFEXT(v, 16, 2)
#define XCVR(v)			BFEXT(v, 20, 4)
#define AUTOSELECT(v)		BFEXT(v, 24, 1)

enum Window4 {			/* Window 4: Xcvr/media bits. */
	Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt = 8, Wn4_Media = 10,
};
enum Win4_Media_bits {
	Media_SQE = 0x0008,	/* Enable SQE error counting for AUI. */
	Media_10TP = 0x00C0,	/* Enable link beat and jabber for 10baseT. */
	Media_Lnk = 0x0080,	/* Enable just link beat for 100TX/100FX. */
	Media_LnkBeat = 0x0800,
};
enum Window7 {			/* Window 7: Bus Master control. */
	Wn7_MasterAddr = 0, Wn7_VlanEtherType = 4, Wn7_MasterLen = 6,
	Wn7_MasterStatus = 12,
};
/* Boomerang bus master control registers. */
enum MasterCtrl {
	PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c,
	TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38,
};

/* The Rx and Tx descriptor lists.
   Caution Alpha hackers: these types are 32 bits!  Note also the 8 byte
   alignment constraint on tx_ring[] and rx_ring[]. */
#define LAST_FRAG	0x80000000	/* Last Addr/Len pair in descriptor. */
#define DN_COMPLETE	0x00010000	/* This packet has been downloaded */
struct boom_rx_desc {
	__le32 next;			/* Last entry points to 0. */
	__le32 status;
	__le32 addr;			/* Up to 63 addr/len pairs possible. */
	__le32 length;			/* Set LAST_FRAG to indicate last pair. */
};

/* Values for the Rx status entry. */
enum rx_desc_status {
	RxDComplete = 0x00008000, RxDError = 0x4000,
	/* See boomerang_rx() for actual error bits */
	IPChksumErr = 1<<25, TCPChksumErr = 1<<26, UDPChksumErr = 1<<27,
	IPChksumValid = 1<<29, TCPChksumValid = 1<<30, UDPChksumValid = 1<<31,
};

/* Scatter/gather Tx descriptors are only built when MAX_SKB_FRAGS exists. */
#ifdef MAX_SKB_FRAGS
#define DO_ZEROCOPY 1
#else
#define DO_ZEROCOPY 0
#endif

struct boom_tx_desc {
	__le32 next;			/* Last entry points to 0. */
	__le32 status;			/* bits 0:12 length, others see below. */
#if DO_ZEROCOPY
	struct {
		__le32 addr;
		__le32 length;
	} frag[1+MAX_SKB_FRAGS];
#else
	__le32 addr;
	__le32 length;
#endif
};

/* Values for the Tx status entry. */
enum tx_desc_status {
	CRCDisable = 0x2000, TxDComplete = 0x8000,
	AddIPChksum = 0x02000000, AddTCPChksum = 0x04000000,
	AddUDPChksum = 0x08000000,
	TxIntrUploaded = 0x80000000,	/* IRQ when in FIFO, but maybe not sent. */
};

/* Chip features we care about in vp->capabilities, read from the EEPROM. */
enum ChipCaps { CapBusMaster = 0x20, CapPwrMgmt = 0x2000 };

/* Extra NIC statistics kept in software; exported via ethtool. */
struct vortex_extra_stats {
	unsigned long tx_deferred;
	unsigned long tx_max_collisions;
	unsigned long tx_multiple_collisions;
	unsigned long tx_single_collisions;
	unsigned long rx_bad_ssd;
};

/* Per-adapter private state, stored as netdev_priv() of the net_device. */
struct vortex_private {
	/* The Rx and Tx rings should be quad-word-aligned. */
	struct boom_rx_desc* rx_ring;
	struct boom_tx_desc* tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;
	/* The addresses of transmit- and receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	unsigned int cur_rx, cur_tx;		/* The next free ring entry */
	unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */
	struct vortex_extra_stats xstats;	/* NIC-specific extra stats */
	struct sk_buff *tx_skb;		/* Packet being eaten by bus master ctrl. */
	dma_addr_t tx_skb_dma;		/* Allocated DMA address for bus master ctrl DMA. */

	/* PCI configuration space information. */
	struct device *gendev;
	void __iomem *ioaddr;		/* IO address space */
	void __iomem *cb_fn_base;	/* CardBus function status addr space. */

	/* Some values here only for performance evaluation and path-coverage */
	int rx_nocopy, rx_copy, queued_packet, rx_csumhits;
	int card_idx;

	/* The remainder are related to chip state, mostly media selection. */
	struct timer_list timer;	/* Media selection timer. */
	struct timer_list rx_oom_timer;	/* Rx skb allocation retry timer */
	int options;			/* User-settable misc. driver options. */
	unsigned int media_override:4,	/* Passed-in media type. */
		default_media:4,	/* Read from the EEPROM/Wn3_Config. */
		full_duplex:1, autoselect:1,
		bus_master:1,		/* Vortex can only do a fragment bus-m. */
		full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang */
		flow_ctrl:1,		/* Use 802.3x flow control (PAUSE only) */
		partner_flow_ctrl:1,	/* Partner supports flow control */
		has_nway:1,
		enable_wol:1,		/* Wake-on-LAN is enabled */
		pm_state_valid:1,	/* pci_dev->saved_config_space has sane contents */
		open:1,
		medialock:1,
		must_free_region:1,	/* Flag: if zero, Cardbus owns the I/O region */
		large_frames:1,		/* accept large frames */
		handling_irq:1;		/* private in_irq indicator */
	/* {get|set}_wol operations are already serialized by rtnl.
	 * no additional locking is required for the enable_wol and acpi_set_WOL()
	 */
	int drv_flags;
	u16 status_enable;
	u16 intr_enable;
	u16 available_media;		/* From Wn3_Options. */
	u16 capabilities, info1, info2;	/* Various, from EEPROM. */
	u16 advertising;		/* NWay media advertisement */
	unsigned char phys[2];		/* MII device addresses. */
	u16 deferred;			/* Resend these interrupts when we
					 * bale from the ISR */
	u16 io_size;			/* Size of PCI region (for release_region) */

	/* Serialises access to hardware other than MII and variables below.
	 * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */
	spinlock_t lock;

	spinlock_t mii_lock;		/* Serialises access to MII */
	struct mii_if_info mii;		/* MII lib hooks/info */
	spinlock_t window_lock;		/* Serialises access to windowed regs */
	int window;			/* Register window */
};

/*
 * Select the given register window via EL3_CMD, caching the current
 * window in vp->window to avoid redundant writes.  Callers must hold
 * vp->window_lock (the DEFINE_WINDOW_IO accessors below do so).
 */
static void window_set(struct vortex_private *vp, int window)
{
	if (window != vp->window) {
		iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD);
		vp->window = window;
	}
}

/*
 * Generate window_read{8,16,32}()/window_write{8,16,32}() helpers that
 * atomically (under window_lock, IRQs off) select a register window and
 * perform one MMIO access at the given offset.
 */
#define DEFINE_WINDOW_IO(size)						\
static u ## size							\
window_read ## size(struct vortex_private *vp, int window, int addr)	\
{									\
	unsigned long flags;						\
	u ## size ret;							\
	spin_lock_irqsave(&vp->window_lock, flags);			\
	window_set(vp, window);						\
	ret = ioread ## size(vp->ioaddr + addr);			\
	spin_unlock_irqrestore(&vp->window_lock, flags);		\
	return ret;							\
}									\
static void								\
window_write ## size(struct vortex_private *vp, u ## size value,	\
		     int window, int addr)				\
{									\
	unsigned long flags;						\
	spin_lock_irqsave(&vp->window_lock, flags);			\
	window_set(vp, window);						\
	iowrite ## size(value, vp->ioaddr + addr);			\
	spin_unlock_irqrestore(&vp->window_lock, flags);		\
}
DEFINE_WINDOW_IO(8)
DEFINE_WINDOW_IO(16)
DEFINE_WINDOW_IO(32)

/* Map the generic device back to its bus-specific form (NULL if wrong bus). */
#ifdef CONFIG_PCI
#define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL)
#else
#define DEVICE_PCI(dev) NULL
#endif

#define VORTEX_PCI(vp)							\
	((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))

#ifdef CONFIG_EISA
#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL)
#else
#define DEVICE_EISA(dev) NULL
#endif

#define VORTEX_EISA(vp)							\
	((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL))

/* The action to take with a media selection timer tick.
   Note that we deviate from the 3Com order by checking 10base2 before AUI.
*/
enum xcvr_types {
	XCVR_10baseT = 0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
	XCVR_100baseFx, XCVR_MII = 6, XCVR_NWAY = 8, XCVR_ExtMII = 9, XCVR_Default = 10,
};

/*
 * Media selection table, indexed by xcvr_types.  'next' chains the
 * transceivers in the order the autoselect logic tries them.
 */
static const struct media_table {
	char *name;
	unsigned int media_bits:16,	/* Bits to set in Wn4_Media register. */
		mask:8,			/* The transceiver-present bit in Wn3_Config.*/
		next:8;			/* The media type to try next. */
	int wait;			/* Time before we check media status. */
} media_tbl[] = {
	{ "10baseT",   Media_10TP, 0x08, XCVR_10base2,   (14*HZ)/10},
	{ "10Mbs AUI", Media_SQE,  0x20, XCVR_Default,   (1*HZ)/10},
	{ "undefined", 0,          0x80, XCVR_10baseT,   10000},
	{ "10base2",   0,          0x10, XCVR_AUI,       (1*HZ)/10},
	{ "100baseTX", Media_Lnk,  0x02, XCVR_100baseFx, (14*HZ)/10},
	{ "100baseFX", Media_Lnk,  0x04, XCVR_MII,       (14*HZ)/10},
	{ "MII",       0,          0x41, XCVR_10baseT,   3*HZ },
	{ "undefined", 0,          0x01, XCVR_10baseT,   10000},
	{ "Autonegotiate", 0,      0x41, XCVR_10baseT,   3*HZ},
	{ "MII-External", 0,       0x41, XCVR_10baseT,   3*HZ },
	{ "Default",   0,          0xFF, XCVR_10baseT,   10000},
};

/* ethtool -S statistic names; order must match vortex_extra_stats. */
static struct {
	const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "tx_deferred" },
	{ "tx_max_collisions" },
	{ "tx_multiple_collisions" },
	{ "tx_single_collisions" },
	{ "rx_bad_ssd" },
};

/* number of ETHTOOL_GSTATS u64's */
#define VORTEX_NUM_STATS 5

static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
			 int chip_idx, int card_idx);
static int vortex_up(struct net_device *dev);
static void vortex_down(struct net_device *dev, int final);
static int vortex_open(struct net_device *dev);
static void mdio_sync(struct vortex_private *vp, int bits);
static int mdio_read(struct net_device *dev, int phy_id, int location);
/* NOTE(review): first parameter is a net_device despite being named 'vp'. */
static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
static void vortex_timer(unsigned long arg);
static void rx_oom_timer(unsigned long arg);
static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
				     struct net_device *dev);
static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
					struct net_device *dev);
static int vortex_rx(struct net_device *dev);
static int boomerang_rx(struct net_device *dev);
static irqreturn_t vortex_interrupt(int irq, void *dev_id);
static irqreturn_t boomerang_interrupt(int irq, void *dev_id);
static int vortex_close(struct net_device *dev);
static void dump_tx_ring(struct net_device *dev);
static void update_stats(void __iomem *ioaddr, struct net_device *dev);
static struct net_device_stats *vortex_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
#ifdef CONFIG_PCI
static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
#endif
static void vortex_tx_timeout(struct net_device *dev);
static void acpi_set_WOL(struct net_device *dev);
static const struct ethtool_ops vortex_ethtool_ops;
static void set_8021q_mode(struct net_device *dev, int enable);

/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
/* Option count limit only -- unlimited interfaces are supported. */
#define MAX_UNITS 8
/* Per-card tunables; -1 means "not set by the user". */
static int options[MAX_UNITS] = { [0 ... MAX_UNITS-1] = -1 };
static int full_duplex[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int hw_checksums[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int flow_ctrl[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int enable_wol[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int use_mmio[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
/* Global fallbacks applied when the per-card entry above is unset. */
static int global_options = -1;
static int global_full_duplex = -1;
static int global_enable_wol = -1;
static int global_use_mmio = -1;

/* Variables to work-around the Compaq PCI BIOS32 problem. */
static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900;
static struct net_device *compaq_net_device;

static int vortex_cards_found;

module_param(debug, int, 0);
module_param(global_options, int, 0);
module_param_array(options, int, NULL, 0);
module_param(global_full_duplex, int, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param_array(hw_checksums, int, NULL, 0);
module_param_array(flow_ctrl, int, NULL, 0);
module_param(global_enable_wol, int, 0);
module_param_array(enable_wol, int, NULL, 0);
module_param(rx_copybreak, int, 0);
module_param(max_interrupt_work, int, 0);
module_param(compaq_ioaddr, int, 0);
module_param(compaq_irq, int, 0);
module_param(compaq_device_id, int, 0);
module_param(watchdog, int, 0);
module_param(global_use_mmio, int, 0);
module_param_array(use_mmio, int, NULL, 0);
MODULE_PARM_DESC(debug, "3c59x debug level (0-6)");
MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex");
MODULE_PARM_DESC(global_options, "3c59x: same as options, but applies to all NICs if options is unset");
MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)");
MODULE_PARM_DESC(global_full_duplex, "3c59x: same as full_duplex, but applies to all NICs if full_duplex is unset");
MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)");
MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)");
MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)");
MODULE_PARM_DESC(global_enable_wol, "3c59x: same as enable_wol, but applies to all NICs if enable_wol is unset");
MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt");
MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)");
MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)");
MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)");
MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds");
MODULE_PARM_DESC(global_use_mmio, "3c59x: same as use_mmio, but applies to all NICs if options is unset");
MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)");

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * netpoll hook: run the appropriate interrupt handler with local IRQs
 * disabled so netconsole et al. can receive/transmit without interrupts.
 */
static void poll_vortex(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	unsigned long flags;
	local_irq_save(flags);
	(vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
	local_irq_restore(flags);
}
#endif

#ifdef CONFIG_PM

/* Detach and quiesce a running interface before the system sleeps. */
static int vortex_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *ndev = pci_get_drvdata(pdev);

	if (!ndev || !netif_running(ndev))
		return 0;

	netif_device_detach(ndev);
	vortex_down(ndev, 1);

	return 0;
}

/* Re-initialise the hardware and reattach the interface after resume. */
static int vortex_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *ndev = pci_get_drvdata(pdev);
	int err;

	if (!ndev || !netif_running(ndev))
		return 0;

	err = vortex_up(ndev);
	if (err)
		return err;

	netif_device_attach(ndev);

	return 0;
}

static const struct dev_pm_ops vortex_pm_ops = {
	.suspend = vortex_suspend,
	.resume = vortex_resume,
	.freeze = vortex_suspend,
	.thaw = vortex_resume,
	.poweroff = vortex_suspend,
	.restore = vortex_resume,
};

#define VORTEX_PM_OPS (&vortex_pm_ops)

#else /* !CONFIG_PM */

#define VORTEX_PM_OPS NULL

#endif /* !CONFIG_PM */

#ifdef CONFIG_EISA
static struct eisa_device_id vortex_eisa_ids[] = {
	{ "TCM5920", CH_3C592 },
	{ "TCM5970", CH_3C597 },
	{ "" }
};
MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);

/*
 * EISA probe: claim the slot's I/O region, map it, and hand off to
 * vortex_probe1().  The device ID read from offset 0xC88 selects the
 * reported revision.  Returns 0 on success, -EBUSY/-ENODEV on failure.
 */
static int __init vortex_eisa_probe(struct device *device)
{
	void __iomem *ioaddr;
	struct eisa_device *edev;

	edev = to_eisa_device(device);

	if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME))
		return -EBUSY;

	ioaddr = ioport_map(edev->base_addr, VORTEX_TOTAL_SIZE);

	if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12,
			  edev->id.driver_data, vortex_cards_found)) {
		release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
		return -ENODEV;
	}

	vortex_cards_found++;

	return 0;
}

/* EISA teardown: unregister, issue a full reset, and release resources. */
static int __devexit vortex_eisa_remove(struct device *device)
{
	struct eisa_device *edev;
	struct net_device *dev;
	struct vortex_private *vp;
	void __iomem *ioaddr;

	edev = to_eisa_device(device);
	dev = eisa_get_drvdata(edev);

	if (!dev) {
		pr_err("vortex_eisa_remove called for Compaq device!\n");
		BUG();
	}

	vp = netdev_priv(dev);
	ioaddr = vp->ioaddr;

	unregister_netdev(dev);
	iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
	release_region(dev->base_addr, VORTEX_TOTAL_SIZE);

	free_netdev(dev);
	return 0;
}

static struct eisa_driver vortex_eisa_driver = {
	.id_table = vortex_eisa_ids,
	.driver   = {
		.name    = "3c59x",
		.probe   = vortex_eisa_probe,
		.remove  = __devexit_p(vortex_eisa_remove)
	}
};

#endif /* CONFIG_EISA */

/* returns count found (>= 0), or negative on error */
static int __init vortex_eisa_init(void)
{
	int eisa_found = 0;
	int orig_cards_found = vortex_cards_found;

#ifdef CONFIG_EISA
	int err;

	err = eisa_driver_register (&vortex_eisa_driver);
	if (!err) {
		/*
		 * Because of the way EISA bus is probed, we cannot assume
		 * any device have been found when we exit from
		 * eisa_driver_register (the bus root driver may not be
		 * initialized yet). So we blindly assume something was
		 * found, and let the sysfs magic happened...
		 */
		eisa_found = 1;
	}
#endif

	/* Special code to work-around the Compaq PCI BIOS32 problem. */
	if (compaq_ioaddr) {
		vortex_probe1(NULL, ioport_map(compaq_ioaddr, VORTEX_TOTAL_SIZE),
			      compaq_irq, compaq_device_id, vortex_cards_found++);
	}

	return vortex_cards_found - orig_cards_found + eisa_found;
}

/* returns count (>= 0), or negative on error */
static int __devinit vortex_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	int rc, unit, pci_bar;
	struct vortex_chip_info *vci;
	void __iomem *ioaddr;

	/* wake up and enable device */
	rc = pci_enable_device(pdev);
	if (rc < 0)
		goto out;

	unit = vortex_cards_found;

	if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
		/* Determine the default if the user didn't override us */
		vci = &vortex_info_tbl[ent->driver_data];
		/* BAR 1 is the MMIO resource on Cyclone/Tornado chips. */
		pci_bar = vci->drv_flags & (IS_CYCLONE | IS_TORNADO) ? 1 : 0;
	} else if (unit < MAX_UNITS && use_mmio[unit] >= 0)
		pci_bar = use_mmio[unit] ? 1 : 0;
	else
		pci_bar = global_use_mmio ? 1 : 0;

	ioaddr = pci_iomap(pdev, pci_bar, 0);
	if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
		ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr) {
		pci_disable_device(pdev);
		rc = -ENOMEM;
		goto out;
	}

	rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq,
			   ent->driver_data, unit);
	if (rc < 0) {
		pci_iounmap(pdev, ioaddr);
		pci_disable_device(pdev);
		goto out;
	}

	vortex_cards_found++;

out:
	return rc;
}

/* net_device_ops for Boomerang-class (bus-master Tx) adapters. */
static const struct net_device_ops boomrang_netdev_ops = {
	.ndo_open		= vortex_open,
	.ndo_stop		= vortex_close,
	.ndo_start_xmit		= boomerang_start_xmit,
	.ndo_tx_timeout		= vortex_tx_timeout,
	.ndo_get_stats		= vortex_get_stats,
#ifdef CONFIG_PCI
	.ndo_do_ioctl		= vortex_ioctl,
#endif
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_vortex,
#endif
};

/* net_device_ops for original Vortex (PIO/fragment bus-master) adapters. */
static const struct net_device_ops vortex_netdev_ops = {
	.ndo_open		= vortex_open,
	.ndo_stop		= vortex_close,
	.ndo_start_xmit		= vortex_start_xmit,
	.ndo_tx_timeout		= vortex_tx_timeout,
	.ndo_get_stats		=
				  vortex_get_stats,
#ifdef CONFIG_PCI
	.ndo_do_ioctl		= vortex_ioctl,
#endif
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_vortex,
#endif
};

/*
 * Start up the PCI/EISA device which is described by *gendev.
 * Return 0 on success.
 *
 * NOTE: pdev can be NULL, for the case of a Compaq device
 */
static int __devinit vortex_probe1(struct device *gendev, void __iomem *ioaddr,
				   int irq, int chip_idx, int card_idx)
{
	struct vortex_private *vp;
	int option;
	unsigned int eeprom[0x40], checksum = 0;	/* EEPROM contents */
	int i, step;
	struct net_device *dev;
	static int printed_version;
	int retval, print_info;
	struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx];
	const char *print_name = "3c59x";
	struct pci_dev *pdev = NULL;
	struct eisa_device *edev = NULL;

	if (!printed_version) {
		pr_info("%s", version);
		printed_version = 1;
	}

	/* Prefer the bus-specific device name in log messages. */
	if (gendev) {
		if ((pdev = DEVICE_PCI(gendev))) {
			print_name = pci_name(pdev);
		}

		if ((edev = DEVICE_EISA(gendev))) {
			print_name = dev_name(&edev->dev);
		}
	}

	dev = alloc_etherdev(sizeof(*vp));
	retval = -ENOMEM;
	if (!dev)
		goto out;

	SET_NETDEV_DEV(dev, gendev);
	vp = netdev_priv(dev);

	option = global_options;

	/* The lower four bits are the media type. */
	if (dev->mem_start) {
		/*
		 * The 'options' param is passed in as the third arg to the
		 * LILO 'ether=' argument for non-modular use
		 */
		option = dev->mem_start;
	}
	else if (card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
	}

	if (option > 0) {
		if (option & 0x8000)
			vortex_debug = 7;
		if (option & 0x4000)
			vortex_debug = 2;
		if (option & 0x0400)
			vp->enable_wol = 1;
	}

	print_info = (vortex_debug > 1);
	if (print_info)
		pr_info("See Documentation/networking/vortex.txt\n");

	pr_info("%s: 3Com %s %s at %p.\n",
		print_name,
		pdev ? "PCI" : "EISA",
		vci->name,
		ioaddr);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;
	dev->mtu = mtu;
	vp->ioaddr = ioaddr;
	vp->large_frames = mtu > 1500;
	vp->drv_flags = vci->drv_flags;
	vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
	vp->io_size = vci->io_size;
	vp->card_idx = card_idx;
	vp->window = -1;	/* force the first window_set() to select */

	/* module list only for Compaq device */
	if (gendev == NULL) {
		compaq_net_device = dev;
	}

	/* PCI-only startup logic */
	if (pdev) {
		/* EISA resources already marked, so only PCI needs to do this here */
		/* Ignore return value, because Cardbus drivers already allocate for us */
		if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
			vp->must_free_region = 1;

		/* enable bus-mastering if necessary */
		if (vci->flags & PCI_USES_MASTER)
			pci_set_master(pdev);

		if (vci->drv_flags & IS_VORTEX) {
			u8 pci_latency;
			u8 new_latency = 248;

			/* Check the PCI latency value.  On the 3c590 series
			   the latency timer must be set to the maximum value
			   to avoid data corruption that occurs when the timer
			   expires during a transfer.  This bug exists the
			   Vortex chip only. */
			pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
			if (pci_latency < new_latency) {
				pr_info("%s: Overriding PCI latency timer (CFLT) setting of %d, new value is %d.\n",
					print_name, pci_latency, new_latency);
				pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
			}
		}
	}

	spin_lock_init(&vp->lock);
	spin_lock_init(&vp->mii_lock);
	spin_lock_init(&vp->window_lock);
	vp->gendev = gendev;
	vp->mii.dev = dev;
	vp->mii.mdio_read = mdio_read;
	vp->mii.mdio_write = mdio_write;
	vp->mii.phy_id_mask = 0x1f;
	vp->mii.reg_num_mask = 0x1f;

	/* Makes sure rings are at least 16 byte aligned. */
	vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
					   + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
					   &vp->rx_ring_dma);
	retval = -ENOMEM;
	if (!vp->rx_ring)
		goto free_region;

	/* Tx ring shares the single DMA allocation, right after the Rx ring. */
	vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
	vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;

	/* if we are a PCI driver, we store info in pdev->driver_data
	 * instead of a module list */
	if (pdev)
		pci_set_drvdata(pdev, dev);
	if (edev)
		eisa_set_drvdata(edev, dev);

	vp->media_override = 7;
	if (option >= 0) {
		vp->media_override = ((option & 7) == 2) ? 0 : option & 15;
		if (vp->media_override != 7)
			vp->medialock = 1;
		vp->full_duplex = (option & 0x200) ? 1 : 0;
		vp->bus_master = (option & 16) ? 1 : 0;
	}

	if (global_full_duplex > 0)
		vp->full_duplex = 1;
	if (global_enable_wol > 0)
		vp->enable_wol = 1;

	if (card_idx < MAX_UNITS) {
		if (full_duplex[card_idx] > 0)
			vp->full_duplex = 1;
		if (flow_ctrl[card_idx] > 0)
			vp->flow_ctrl = 1;
		if (enable_wol[card_idx] > 0)
			vp->enable_wol = 1;
	}

	vp->mii.force_media = vp->full_duplex;
	vp->options = option;
	/* Read the station address from the EEPROM. */
	{
		int base;

		/* Different chip generations use different EEPROM command bases. */
		if (vci->drv_flags & EEPROM_8BIT)
			base = 0x230;
		else if (vci->drv_flags & EEPROM_OFFSET)
			base = EEPROM_Read + 0x30;
		else
			base = EEPROM_Read;

		for (i = 0; i < 0x40; i++) {
			int timer;
			window_write16(vp, base + i, 0, Wn0EepromCmd);
			/* Pause for at least 162 us. for the read to take place. */
			for (timer = 10; timer >= 0; timer--) {
				udelay(162);
				if ((window_read16(vp, 0, Wn0EepromCmd) &
				     0x8000) == 0)
					break;
			}
			eeprom[i] = window_read16(vp, 0, Wn0EepromData);
		}
	}
	for (i = 0; i < 0x18; i++)
		checksum ^= eeprom[i];
	checksum = (checksum ^ (checksum >> 8)) & 0xff;
	if (checksum != 0x00) {		/* Grrr, needless incompatible change 3Com. */
		while (i < 0x21)
			checksum ^= eeprom[i++];
		checksum = (checksum ^ (checksum >> 8)) & 0xff;
	}
	if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO))
		pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
	for (i = 0; i < 3; i++)
		((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	if (print_info)
		pr_cont(" %pM", dev->dev_addr);
	/* Unfortunately an all zero eeprom passes the checksum and this
	   gets found in the wild in failure cases. Crypto is hard 8) */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		retval = -EINVAL;
		pr_err("*** EEPROM MAC address is invalid.\n");
		goto free_ring;	/* With every pack */
	}
	/* Program the station address into window 2. */
	for (i = 0; i < 6; i++)
		window_write8(vp, dev->dev_addr[i], 2, i);

	if (print_info)
		pr_cont(", IRQ %d\n", dev->irq);
	/* Tell them about an invalid IRQ. */
	if (dev->irq <= 0 || dev->irq >= nr_irqs)
		pr_warning(" *** Warning: IRQ %d is unlikely to work! ***\n",
			   dev->irq);

	step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1;
	if (print_info) {
		pr_info(" product code %02x%02x rev %02x.%d date %02d-%02d-%02d\n",
			eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
			step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9);
	}

	if (pdev && vci->drv_flags & HAS_CB_FNS) {
		unsigned short n;

		/* CardBus: BAR 2 holds the function status registers. */
		vp->cb_fn_base = pci_iomap(pdev, 2, 0);
		if (!vp->cb_fn_base) {
			retval = -ENOMEM;
			goto free_ring;
		}

		if (print_info) {
			pr_info("%s: CardBus functions mapped %16.16llx->%p\n",
				print_name,
				(unsigned long long)pci_resource_start(pdev, 2),
				vp->cb_fn_base);
		}

		n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
		if (vp->drv_flags & INVERT_LED_PWR)
			n |= 0x10;
		if (vp->drv_flags & INVERT_MII_PWR)
			n |= 0x4000;
		window_write16(vp, n, 2, Wn2_ResetOptions);
		if (vp->drv_flags & WNO_XCVR_PWR) {
			window_write16(vp, 0x0800, 0, 0);
		}
	}

	/* Extract our information from the EEPROM data. */
	vp->info1 = eeprom[13];
	vp->info2 = eeprom[15];
	vp->capabilities = eeprom[16];

	if (vp->info1 & 0x8000) {
		vp->full_duplex = 1;
		if (print_info)
			pr_info("Full duplex capable\n");
	}

	{
		static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
		unsigned int config;
		vp->available_media = window_read16(vp, 3, Wn3_Options);
		if ((vp->available_media & 0xff) == 0)		/* Broken 3c916 */
			vp->available_media = 0x40;
		config = window_read32(vp, 3, Wn3_Config);
		if (print_info) {
			pr_debug(" Internal config register is %4.4x, transceivers %#x.\n",
				config, window_read16(vp, 3, Wn3_Options));
			pr_info(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
				8 << RAM_SIZE(config),
				RAM_WIDTH(config) ? "word" : "byte",
				ram_split[RAM_SPLIT(config)],
				AUTOSELECT(config) ? "autoselect/" : "",
				XCVR(config) > XCVR_ExtMII ? "<invalid transceiver>" :
				media_tbl[XCVR(config)].name);
		}
		vp->default_media = XCVR(config);
		if (vp->default_media == XCVR_NWAY)
			vp->has_nway = 1;
		vp->autoselect = AUTOSELECT(config);
	}

	if (vp->media_override != 7) {
		pr_info("%s: Media override to transceiver type %d (%s).\n",
			print_name, vp->media_override,
			media_tbl[vp->media_override].name);
		dev->if_port = vp->media_override;
	} else
		dev->if_port = vp->default_media;

	if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) ||
	    dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
		int phy, phy_idx = 0;
		mii_preamble_required++;
		if (vp->drv_flags & EXTRA_PREAMBLE)
			mii_preamble_required++;
		mdio_sync(vp, 32);
		mdio_read(dev, 24, MII_BMSR);
		for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
			int mii_status, phyx;

			/*
			 * For the 3c905CX we look at index 24 first, because it bogusly
			 * reports an external PHY at all indices
			 */
			if (phy == 0)
				phyx = 24;
			else if (phy <= 24)
				phyx = phy - 1;
			else
				phyx = phy;
			mii_status = mdio_read(dev, phyx, MII_BMSR);
			if (mii_status && mii_status != 0xffff) {
				vp->phys[phy_idx++] = phyx;
				if (print_info) {
					pr_info(" MII transceiver found at address %d, status %4x.\n",
						phyx, mii_status);
				}
				if ((mii_status & 0x0040) == 0)
					mii_preamble_required++;
			}
		}
		mii_preamble_required--;
		if (phy_idx == 0) {
			pr_warning(" ***WARNING*** No MII transceivers found!\n");
			vp->phys[0] = 24;
		} else {
			vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE);
			if (vp->full_duplex) {
				/* Only advertise the FD media types. */
				vp->advertising &= ~0x02A0;
				mdio_write(dev, vp->phys[0], 4, vp->advertising);
			}
		}
		vp->mii.phy_id = vp->phys[0];
	}

	if (vp->capabilities & CapBusMaster) {
		vp->full_bus_master_tx = 1;
		if (print_info) {
			pr_info(" Enabling bus-master transmits and %s receives.\n",
				(vp->info2 & 1) ? "early" : "whole-frame" );
		}
		vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
		vp->bus_master = 0;	/* AKPM: vortex only */
	}

	/* The 3c59x-specific entries in the device structure. */
	if (vp->full_bus_master_tx) {
		dev->netdev_ops = &boomrang_netdev_ops;
		/* Actually, it still should work with iommu. */
		if (card_idx < MAX_UNITS &&
		    ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) ||
				hw_checksums[card_idx] == 1)) {
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		}
	} else
		dev->netdev_ops = &vortex_netdev_ops;

	if (print_info) {
		pr_info("%s: scatter/gather %sabled. h/w checksums %sabled\n",
			print_name,
			(dev->features & NETIF_F_SG) ? "en":"dis",
			(dev->features & NETIF_F_IP_CSUM) ? "en":"dis");
	}

	dev->ethtool_ops = &vortex_ethtool_ops;
	dev->watchdog_timeo = (watchdog * HZ) / 1000;

	if (pdev) {
		vp->pm_state_valid = 1;
		pci_save_state(VORTEX_PCI(vp));
		acpi_set_WOL(dev);
	}
	retval = register_netdev(dev);
	if (retval == 0)
		return 0;

/* Error unwind: free in reverse order of acquisition. */
free_ring:
	pci_free_consistent(pdev,
			    sizeof(struct boom_rx_desc) * RX_RING_SIZE
				+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
			    vp->rx_ring,
			    vp->rx_ring_dma);
free_region:
	if (vp->must_free_region)
		release_region(dev->base_addr, vci->io_size);
	free_netdev(dev);
	pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
out:
	return retval;
}

/*
 * Write 'cmd' to EL3_CMD and busy-wait until the chip clears
 * CmdInProgress: first a fast spin, then up to one second in 10 us
 * steps.  Logs an error if the command never completes.
 */
static void
issue_and_wait(struct net_device *dev, int cmd)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int i;

	iowrite16(cmd, ioaddr + EL3_CMD);
	for (i = 0; i < 2000; i++) {
		if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
			return;
	}

	/* OK, that didn't work.  Do it the slow way.  One second */
	for (i = 0; i < 100000; i++) {
		if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) {
			if (vortex_debug > 1)
				pr_info("%s: command 0x%04x took %d usecs\n",
					dev->name, cmd, i * 10);
			return;
		}
		udelay(10);
	}
	pr_err("%s: command 0x%04x did not complete! Status=0x%x\n",
	       dev->name, cmd, ioread16(ioaddr + EL3_STATUS));
}

/* Program the Wn3_MAC_Ctrl duplex/large-frame/flow-control bits. */
static void
vortex_set_duplex(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);

	pr_info("%s: setting %s-duplex.\n",
		dev->name, (vp->full_duplex) ? "full" : "half");

	/* Set the full-duplex bit. */
	window_write16(vp,
		       ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
		       (vp->large_frames ? 0x40 : 0) |
		       ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
			0x100 : 0),
		       3, Wn3_MAC_Ctrl);
}

/*
 * Re-check the MII link via mii_check_media() and reprogram duplex if
 * the negotiated duplex changed (or unconditionally on 'init').
 */
static void vortex_check_media(struct net_device *dev, unsigned int init)
{
	struct vortex_private *vp = netdev_priv(dev);
	unsigned int ok_to_print = 0;

	if (vortex_debug > 3)
		ok_to_print = 1;

	if (mii_check_media(&vp->mii, ok_to_print, init)) {
		vp->full_duplex = vp->mii.full_duplex;
		vortex_set_duplex(dev);
	} else if (init) {
		vortex_set_duplex(dev);
	}
}

/*
 * Bring the interface fully up: restore PCI power/config state, select
 * the media port, program the chip, and start the queue.  Called from
 * vortex_open() and vortex_resume().  Returns 0 or a negative errno.
 */
static int
vortex_up(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	unsigned int config;
	int i, mii_reg1, mii_reg5, err = 0;

	if (VORTEX_PCI(vp)) {
		pci_set_power_state(VORTEX_PCI(vp), PCI_D0);	/* Go active */
		if (vp->pm_state_valid)
			pci_restore_state(VORTEX_PCI(vp));
		err = pci_enable_device(VORTEX_PCI(vp));
		if (err) {
			pr_warning("%s: Could not enable device\n",
				dev->name);
			goto err_out;
		}
	}

	/* Before initializing select the active media port.
	 */
	config = window_read32(vp, 3, Wn3_Config);

	/* Decide the media port: explicit override > autoselect > EEPROM default. */
	if (vp->media_override != 7) {
		pr_info("%s: Media override to transceiver %d (%s).\n",
			dev->name, vp->media_override,
			media_tbl[vp->media_override].name);
		dev->if_port = vp->media_override;
	} else if (vp->autoselect) {
		if (vp->has_nway) {
			if (vortex_debug > 1)
				pr_info("%s: using NWAY device table, not %d\n",
					dev->name, dev->if_port);
			dev->if_port = XCVR_NWAY;
		} else {
			/* Find first available media type, starting with 100baseTx. */
			dev->if_port = XCVR_100baseTx;
			while (!(vp->available_media & media_tbl[dev->if_port].mask))
				dev->if_port = media_tbl[dev->if_port].next;
			if (vortex_debug > 1)
				pr_info("%s: first available media type: %s\n",
					dev->name, media_tbl[dev->if_port].name);
		}
	} else {
		dev->if_port = vp->default_media;
		if (vortex_debug > 1)
			pr_info("%s: using default media %s\n",
				dev->name, media_tbl[dev->if_port].name);
	}

	init_timer(&vp->timer);
	vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait);
	vp->timer.data = (unsigned long)dev;
	vp->timer.function = vortex_timer;		/* timer handler */
	add_timer(&vp->timer);

	/* rx_oom_timer is armed later, only on skb allocation failure. */
	init_timer(&vp->rx_oom_timer);
	vp->rx_oom_timer.data = (unsigned long)dev;
	vp->rx_oom_timer.function = rx_oom_timer;

	if (vortex_debug > 1)
		pr_debug("%s: Initial media type %s.\n",
			dev->name, media_tbl[dev->if_port].name);

	vp->full_duplex = vp->mii.force_media;

	/* Write the chosen transceiver into bits 20-23 of InternalConfig. */
	config = BFINS(config, dev->if_port, 20, 4);
	if (vortex_debug > 6)
		pr_debug("vortex_up(): writing 0x%x to InternalConfig\n", config);
	window_write32(vp, config, 3, Wn3_Config);

	if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
		mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
		mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
		vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
		vp->mii.full_duplex = vp->full_duplex;

		vortex_check_media(dev, 1);
	}
	else
		vortex_set_duplex(dev);

	issue_and_wait(dev, TxReset);
	/*
	 * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
	 */
	issue_and_wait(dev, RxReset|0x04);

	iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD);

	if (vortex_debug > 1) {
		pr_debug("%s: vortex_up() irq %d media status %4.4x.\n",
			dev->name, dev->irq, window_read16(vp, 4, Wn4_Media));
	}

	/* Set the station address and mask in window 2 each time opened. */
	for (i = 0; i < 6; i++)
		window_write8(vp, dev->dev_addr[i], 2, i);
	for (; i < 12; i+=2)
		window_write16(vp, 0, 2, i);

	if (vp->cb_fn_base) {
		unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
		if (vp->drv_flags & INVERT_LED_PWR)
			n |= 0x10;
		if (vp->drv_flags & INVERT_MII_PWR)
			n |= 0x4000;
		window_write16(vp, n, 2, Wn2_ResetOptions);
	}

	if (dev->if_port == XCVR_10base2)
		/* Start the thinnet transceiver. We should really wait 50ms...*/
		iowrite16(StartCoax, ioaddr + EL3_CMD);
	if (dev->if_port != XCVR_NWAY) {
		window_write16(vp,
			       (window_read16(vp, 4, Wn4_Media) &
				~(Media_10TP|Media_SQE)) |
			       media_tbl[dev->if_port].media_bits,
			       4, Wn4_Media);
	}

	/* Switch to the stats window, and clear all stats by reading. */
	iowrite16(StatsDisable, ioaddr + EL3_CMD);
	for (i = 0; i < 10; i++)
		window_read8(vp, 6, i);
	window_read16(vp, 6, 10);
	window_read16(vp, 6, 12);
	/* New: On the Vortex we must also clear the BadSSD counter. */
	window_read8(vp, 4, 12);
	/* ..and on the Boomerang we enable the extra statistics bits. */
	window_write16(vp, 0x0040, 4, Wn4_NetDiag);

	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
		vp->cur_rx = vp->dirty_rx = 0;
		/* Initialize the RxEarly register as recommended. */
		iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
		iowrite32(0x0020, ioaddr + PktStatus);
		iowrite32(vp->rx_ring_dma, ioaddr + UpListPtr);
	}
	if (vp->full_bus_master_tx) {		/* Boomerang bus master Tx. */
		vp->cur_tx = vp->dirty_tx = 0;
		if (vp->drv_flags & IS_BOOMERANG)
			iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
		/* Clear the Rx, Tx rings. */
		for (i = 0; i < RX_RING_SIZE; i++)	/* AKPM: this is done in vortex_open, too */
			vp->rx_ring[i].status = 0;
		for (i = 0; i < TX_RING_SIZE; i++)
			vp->tx_skbuff[i] = NULL;
		iowrite32(0, ioaddr + DownListPtr);
	}
	/* Set receiver mode: presumably accept b-case and phys addr only. */
	set_rx_mode(dev);
	/* enable 802.1q tagged frames */
	set_8021q_mode(dev, 1);
	iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */

	iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
	iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
	/* Allow status bits to be seen. */
	vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
		(vp->full_bus_master_tx ? DownComplete : TxAvailable) |
		(vp->full_bus_master_rx ? UpComplete : RxComplete) |
		(vp->bus_master ? DMADone : 0);
	vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable |
		(vp->full_bus_master_rx ? 0 : RxComplete) |
		StatsFull | HostError | TxComplete | IntReq
		| (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
	iowrite16(vp->status_enable, ioaddr + EL3_CMD);
	/* Ack all pending events, and set active indicator mask. */
	iowrite16(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
		ioaddr + EL3_CMD);
	iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
	if (vp->cb_fn_base)	/* The PCMCIA people are idiots. */
		iowrite32(0x8000, vp->cb_fn_base + 4);
	netif_start_queue (dev);
err_out:
	return err;
}

/* ndo_open: grab the (shared) IRQ, then hand off to vortex_up(). */
static int
vortex_open(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	int i;
	int retval;

	/* Use the now-standard shared IRQ implementation. */
	if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
				boomerang_interrupt : vortex_interrupt,
				IRQF_SHARED, dev->name, dev))) {
		pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
		goto err;
	}

	if (vp->full_bus_master_rx) { /* Boomerang bus master.
*/
		if (vortex_debug > 2)
			pr_debug("%s: Filling in the Rx ring.\n", dev->name);
		for (i = 0; i < RX_RING_SIZE; i++) {
			struct sk_buff *skb;
			vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
			vp->rx_ring[i].status = 0;	/* Clear complete bit. */
			vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);

			skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN,
						 GFP_KERNEL);
			vp->rx_skbuff[i] = skb;
			if (skb == NULL)
				break;	/* Bad news! */

			skb_reserve(skb, NET_IP_ALIGN);	/* Align IP on 16 byte boundaries */
			vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
		}
		if (i != RX_RING_SIZE) {
			/* Partial allocation: free what we got and bail out. */
			int j;
			pr_emerg("%s: no memory for rx ring\n", dev->name);
			for (j = 0; j < i; j++) {
				if (vp->rx_skbuff[j]) {
					dev_kfree_skb(vp->rx_skbuff[j]);
					vp->rx_skbuff[j] = NULL;
				}
			}
			retval = -ENOMEM;
			goto err_free_irq;
		}
		/* Wrap the ring. */
		vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
	}

	retval = vortex_up(dev);
	if (!retval)
		goto out;

err_free_irq:
	free_irq(dev->irq, dev);
err:
	if (vortex_debug > 1)
		pr_err("%s: vortex_open() fails: returning %d\n", dev->name, retval);
out:
	return retval;
}

/*
 * Media-selection watchdog timer.  Checks link beat (or the MII/NWAY
 * state), and when the current media has no link and the media is not
 * locked, steps through the available media table, reprogramming
 * window 3/4 registers for the next candidate.  Re-arms itself with a
 * media-dependent interval.
 */
static void
vortex_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int next_tick = 60*HZ;
	int ok = 0;
	int media_status;

	if (vortex_debug > 2) {
		pr_debug("%s: Media selection timer tick happened, %s.\n",
			dev->name, media_tbl[dev->if_port].name);
		pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
	}

	media_status = window_read16(vp, 4, Wn4_Media);
	switch (dev->if_port) {
	case XCVR_10baseT:  case XCVR_100baseTx:  case XCVR_100baseFx:
		if (media_status & Media_LnkBeat) {
			netif_carrier_on(dev);
			ok = 1;
			if (vortex_debug > 1)
				pr_debug("%s: Media %s has link beat, %x.\n",
					dev->name, media_tbl[dev->if_port].name, media_status);
		} else {
			netif_carrier_off(dev);
			if (vortex_debug > 1) {
				pr_debug("%s: Media %s has no link beat, %x.\n",
					dev->name, media_tbl[dev->if_port].name, media_status);
			}
		}
		break;
	case XCVR_MII: case XCVR_NWAY:
		{
			ok = 1;
			vortex_check_media(dev, 0);
		}
		break;
	default:	/* Other media types handled by Tx timeouts. */
		if (vortex_debug > 1)
			pr_debug("%s: Media %s has no indication, %x.\n",
				dev->name, media_tbl[dev->if_port].name, media_status);
		ok = 1;
	}

	/* Poll faster while bonded or while the carrier is down. */
	if (dev->flags & IFF_SLAVE || !netif_carrier_ok(dev))
		next_tick = 5*HZ;

	if (vp->medialock)
		goto leave_media_alone;

	if (!ok) {
		unsigned int config;

		spin_lock_irq(&vp->lock);

		/* Advance to the next media type that this card supports. */
		do {
			dev->if_port = media_tbl[dev->if_port].next;
		} while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
		if (dev->if_port == XCVR_Default) { /* Go back to default. */
			dev->if_port = vp->default_media;
			if (vortex_debug > 1)
				pr_debug("%s: Media selection failing, using default %s port.\n",
					dev->name, media_tbl[dev->if_port].name);
		} else {
			if (vortex_debug > 1)
				pr_debug("%s: Media selection failed, now trying %s port.\n",
					dev->name, media_tbl[dev->if_port].name);
			next_tick = media_tbl[dev->if_port].wait;
		}
		window_write16(vp,
			       (media_status & ~(Media_10TP|Media_SQE)) |
			       media_tbl[dev->if_port].media_bits,
			       4, Wn4_Media);

		config = window_read32(vp, 3, Wn3_Config);
		config = BFINS(config, dev->if_port, 20, 4);
		window_write32(vp, config, 3, Wn3_Config);

		iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
			ioaddr + EL3_CMD);
		if (vortex_debug > 1)
			pr_debug("wrote 0x%08x to Wn3_Config\n", config);
		/* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */

		spin_unlock_irq(&vp->lock);
	}

leave_media_alone:
	if (vortex_debug > 2)
		pr_debug("%s: Media selection timer finished, %s.\n",
			dev->name, media_tbl[dev->if_port].name);

	mod_timer(&vp->timer, RUN_AT(next_tick));
	if (vp->deferred)
		iowrite16(FakeIntr, ioaddr + EL3_CMD);
}

/*
 * net_device tx_timeout hook: dump diagnostic registers, run the ISR
 * by hand in case an interrupt was lost, reset the transmitter and
 * restart the Tx ring / queue.
 */
static void vortex_tx_timeout(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;

	pr_err("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
		dev->name, ioread8(ioaddr + TxStatus),
		ioread16(ioaddr + EL3_STATUS));
	pr_err(" diagnostics: net %04x media %04x dma %08x fifo %04x\n",
			window_read16(vp, 4, Wn4_NetDiag),
			window_read16(vp, 4, Wn4_Media),
			ioread32(ioaddr + PktStatus),
			window_read16(vp, 4, Wn4_FIFODiag));
	/* Slight code bloat to be user friendly. */
	if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88)
		pr_err("%s: Transmitter encountered 16 collisions --"
			" network cable problem?\n", dev->name);
	if (ioread16(ioaddr + EL3_STATUS) & IntLatch) {
		pr_err("%s: Interrupt posted but not delivered --"
			" IRQ blocked by another device?\n", dev->name);
		/* Bad idea here.. but we might as well handle a few events. 
*/
		{
			/*
			 * Block interrupts because vortex_interrupt does a bare spin_lock()
			 */
			unsigned long flags;
			local_irq_save(flags);
			if (vp->full_bus_master_tx)
				boomerang_interrupt(dev->irq, dev);
			else
				vortex_interrupt(dev->irq, dev);
			local_irq_restore(flags);
		}
	}

	if (vortex_debug > 0)
		dump_tx_ring(dev);

	issue_and_wait(dev, TxReset);

	dev->stats.tx_errors++;
	if (vp->full_bus_master_tx) {
		pr_debug("%s: Resetting the Tx ring pointer.\n", dev->name);
		/* Re-seed the download pointer if the NIC lost it. */
		if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0)
			iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
				ioaddr + DownListPtr);
		if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
			netif_wake_queue (dev);
		if (vp->drv_flags & IS_BOOMERANG)
			iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
		iowrite16(DownUnstall, ioaddr + EL3_CMD);
	} else {
		dev->stats.tx_dropped++;
		netif_wake_queue(dev);
	}

	/* Issue Tx Enable */
	iowrite16(TxEnable, ioaddr + EL3_CMD);
	dev->trans_start = jiffies; /* prevent tx timeout */
}

/*
 * Handle uncommon interrupt sources. This is a separate routine to minimize
 * the cache impact.  Called from both ISRs with vp->lock held; decodes
 * the TxComplete/RxEarly/StatsFull/IntReq/HostError status bits and
 * performs the required recovery (re-enable, statistics flush, or a
 * full Tx/Rx reset and reinit on adapter failure).
 */
static void
vortex_error(struct net_device *dev, int status)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int do_tx_reset = 0, reset_mask = 0;
	unsigned char tx_status = 0;

	if (vortex_debug > 2) {
		pr_err("%s: vortex_error(), status=0x%x\n", dev->name, status);
	}

	if (status & TxComplete) {	/* Really "TxError" for us. */
		tx_status = ioread8(ioaddr + TxStatus);
		/* Presumably a tx-timeout. We must merely re-enable. */
		if (vortex_debug > 2 ||
		    (tx_status != 0x88 && vortex_debug > 0)) {
			pr_err("%s: Transmit error, Tx status register %2.2x.\n",
				dev->name, tx_status);
			if (tx_status == 0x82) {
				pr_err("Probably a duplex mismatch. See "
						"Documentation/networking/vortex.txt\n");
			}
			dump_tx_ring(dev);
		}
		if (tx_status & 0x14)  dev->stats.tx_fifo_errors++;
		if (tx_status & 0x38)  dev->stats.tx_aborted_errors++;
		if (tx_status & 0x08)  vp->xstats.tx_max_collisions++;
		iowrite8(0, ioaddr + TxStatus);
		if (tx_status & 0x30) {	/* txJabber or txUnderrun */
			do_tx_reset = 1;
		} else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET))  {	/* maxCollisions */
			do_tx_reset = 1;
			reset_mask = 0x0108;	/* Reset interface logic, but not download logic */
		} else {	/* Merely re-enable the transmitter. */
			iowrite16(TxEnable, ioaddr + EL3_CMD);
		}
	}

	if (status & RxEarly)	/* Rx early is unused. */
		iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);

	if (status & StatsFull) {	/* Empty statistics. */
		static int DoneDidThat;
		if (vortex_debug > 4)
			pr_debug("%s: Updating stats.\n", dev->name);
		update_stats(ioaddr, dev);
		/* HACK: Disable statistics as an interrupt source. */
		/* This occurs when we have the wrong media type! */
		if (DoneDidThat == 0 &&
			ioread16(ioaddr + EL3_STATUS) & StatsFull) {
			pr_warning("%s: Updating statistics failed, disabling "
				"stats as an interrupt source.\n", dev->name);
			iowrite16(SetIntrEnb |
				  (window_read16(vp, 5, 10) & ~StatsFull),
				  ioaddr + EL3_CMD);
			vp->intr_enable &= ~StatsFull;
			DoneDidThat++;
		}
	}
	if (status & IntReq) {	/* Restore all interrupt sources. */
		iowrite16(vp->status_enable, ioaddr + EL3_CMD);
		iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
	}
	if (status & HostError) {
		u16 fifo_diag;
		fifo_diag = window_read16(vp, 4, Wn4_FIFODiag);
		pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n",
			dev->name, fifo_diag);
		/* Adapter failure requires Tx/Rx reset and reinit. */
		if (vp->full_bus_master_tx) {
			int bus_status = ioread32(ioaddr + PktStatus);
			/* 0x80000000 PCI master abort. */
			/* 0x40000000 PCI target abort. */
			if (vortex_debug)
				pr_err("%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status);

			/* In this case, blow the card away */
			/* Must not enter D3 or we can't legally issue the reset! */
			vortex_down(dev, 0);
			issue_and_wait(dev, TotalReset | 0xff);
			vortex_up(dev);	/* AKPM: bug. vortex_up() assumes that the rx ring is full. It may not be. */
		} else if (fifo_diag & 0x0400)
			do_tx_reset = 1;
		if (fifo_diag & 0x3000) {
			/* Reset Rx fifo and upload logic */
			issue_and_wait(dev, RxReset|0x07);
			/* Set the Rx filter to the current state. */
			set_rx_mode(dev);
			/* enable 802.1q VLAN tagged frames */
			set_8021q_mode(dev, 1);
			iowrite16(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
			iowrite16(AckIntr | HostError, ioaddr + EL3_CMD);
		}
	}

	if (do_tx_reset) {
		issue_and_wait(dev, TxReset|reset_mask);
		iowrite16(TxEnable, ioaddr + EL3_CMD);
		if (!vp->full_bus_master_tx)
			netif_wake_queue(dev);
	}
}

/*
 * ndo_start_xmit for non-bus-master (PIO / single-packet DMA) chips:
 * write the length header and the packet into the Tx FIFO (or start a
 * single bus-master transfer), then stop the queue if the FIFO has no
 * room for another max-sized packet.
 */
static netdev_tx_t
vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;

	/* Put out the doubleword header... */
	iowrite32(skb->len, ioaddr + TX_FIFO);
	if (vp->bus_master) {
		/* Set the bus-master controller to transfer the packet. */
		int len = (skb->len + 3) & ~3;
		vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
						PCI_DMA_TODEVICE);
		spin_lock_irq(&vp->window_lock);
		window_set(vp, 7);
		iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
		iowrite16(len, ioaddr + Wn7_MasterLen);
		spin_unlock_irq(&vp->window_lock);
		vp->tx_skb = skb;
		iowrite16(StartDMADown, ioaddr + EL3_CMD);
		/* netif_wake_queue() will be called at the DMADone interrupt. */
	} else {
		/* ... and the packet rounded to a doubleword. */
		iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
		dev_kfree_skb (skb);
		if (ioread16(ioaddr + TxFree) > 1536) {
			netif_start_queue (dev);	/* AKPM: redundant? */
		} else {
			/* Interrupt us when the FIFO has room for max-sized packet. 
*/
			netif_stop_queue(dev);
			iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
		}
	}

	/* Clear the Tx status stack. */
	{
		int tx_status;
		int i = 32;

		while (--i > 0 && (tx_status = ioread8(ioaddr + TxStatus)) > 0) {
			if (tx_status & 0x3C) {	/* A Tx-disabling error occurred. */
				if (vortex_debug > 2)
					pr_debug("%s: Tx error, status %2.2x.\n",
						dev->name, tx_status);
				if (tx_status & 0x04) dev->stats.tx_fifo_errors++;
				if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
				if (tx_status & 0x30) {
					issue_and_wait(dev, TxReset);
				}
				iowrite16(TxEnable, ioaddr + EL3_CMD);
			}
			iowrite8(0x00, ioaddr + TxStatus); /* Pop the status stack. */
		}
	}
	return NETDEV_TX_OK;
}

/*
 * ndo_start_xmit for full bus-master (Boomerang/Cyclone) chips: build
 * the next Tx descriptor (with scatter-gather fragments and optional
 * checksum offload when DO_ZEROCOPY), link it into the download list
 * under vp->lock with the downloader stalled, and restart the
 * downloader.  Returns NETDEV_TX_BUSY when called recursively from the
 * ISR path or when the ring is full.
 */
static netdev_tx_t
boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	/* Calculate the next Tx descriptor entry. */
	int entry = vp->cur_tx % TX_RING_SIZE;
	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
	unsigned long flags;

	if (vortex_debug > 6) {
		pr_debug("boomerang_start_xmit()\n");
		pr_debug("%s: Trying to send a packet, Tx index %d.\n",
			dev->name, vp->cur_tx);
	}

	/*
	 * We can't allow a recursion from our interrupt handler back into the
	 * tx routine, as they take the same spin lock, and that causes
	 * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in
	 * a bit
	 */
	if (vp->handling_irq)
		return NETDEV_TX_BUSY;

	if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
		if (vortex_debug > 0)
			pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
				dev->name);
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	vp->tx_skbuff[entry] = skb;

	vp->tx_ring[entry].next = 0;
#if DO_ZEROCOPY
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
	else
		vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);

	if (!skb_shinfo(skb)->nr_frags) {
		/* Linear skb: a single DMA fragment covers the whole packet. */
		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
									skb->len, PCI_DMA_TODEVICE));
		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
	} else {
		/* Paged skb: map the header, then one fragment per page. */
		int i;

		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
									skb_headlen(skb), PCI_DMA_TODEVICE));
		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			vp->tx_ring[entry].frag[i+1].addr =
					cpu_to_le32(pci_map_single(
						VORTEX_PCI(vp),
						(void *)skb_frag_address(frag),
						skb_frag_size(frag), PCI_DMA_TODEVICE));

			if (i == skb_shinfo(skb)->nr_frags-1)
					vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
			else
					vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag));
		}
	}
#else
	vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
#endif

	spin_lock_irqsave(&vp->lock, flags);
	/* Wait for the stall to complete. 
*/
	issue_and_wait(dev, DownStall);

	/* Link the new descriptor after its predecessor. */
	prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
	if (ioread32(ioaddr + DownListPtr) == 0) {
		/* Downloader idle: hand it the new descriptor directly. */
		iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
		vp->queued_packet++;
	}

	vp->cur_tx++;
	if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
		netif_stop_queue (dev);
	} else {	/* Clear previous interrupt enable. */
#if defined(tx_interrupt_mitigation)
		/* Dubious. If in boomeang_interrupt "faster" cyclone ifdef
		 * were selected, this would corrupt DN_COMPLETE. No?
		 */
		prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
#endif
	}
	iowrite16(DownUnstall, ioaddr + EL3_CMD);
	spin_unlock_irqrestore(&vp->lock, flags);
	return NETDEV_TX_OK;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */

/*
 * This is the ISR for the vortex series chips.
 * full_bus_master_tx == 0 && full_bus_master_rx == 0
 */

static irqreturn_t
vortex_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr;
	int status;
	int work_done = max_interrupt_work;
	int handled = 0;

	ioaddr = vp->ioaddr;
	spin_lock(&vp->lock);

	status = ioread16(ioaddr + EL3_STATUS);

	if (vortex_debug > 6)
		pr_debug("vortex_interrupt(). status=0x%4x\n", status);

	if ((status & IntLatch) == 0)
		goto handler_exit;	/* No interrupt: shared IRQs cause this */
	handled = 1;

	if (status & IntReq) {
		/* Fold in events deferred by a previous overload. */
		status |= vp->deferred;
		vp->deferred = 0;
	}

	if (status == 0xffff)	/* h/w no longer present (hotplug)? */
		goto handler_exit;

	if (vortex_debug > 4)
		pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
			dev->name, status, ioread8(ioaddr + Timer));

	spin_lock(&vp->window_lock);
	window_set(vp, 7);

	do {
		if (vortex_debug > 5)
				pr_debug("%s: In interrupt loop, status %4.4x.\n",
					dev->name, status);
		if (status & RxComplete)
			vortex_rx(dev);

		if (status & TxAvailable) {
			if (vortex_debug > 5)
				pr_debug(" TX room bit was handled.\n");
			/* There's room in the FIFO for a full-sized packet. */
			iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD);
			netif_wake_queue (dev);
		}

		if (status & DMADone) {
			if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
				iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
				pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
				if (ioread16(ioaddr + TxFree) > 1536) {
					/*
					 * AKPM: FIXME: I don't think we need this. If the queue was stopped due to
					 * insufficient FIFO room, the TxAvailable test will succeed and call
					 * netif_wake_queue()
					 */
					netif_wake_queue(dev);
				} else { /* Interrupt when FIFO has room for max-sized packet. */
					iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
					netif_stop_queue(dev);
				}
			}
		}
		/* Check for all uncommon interrupts at once. */
		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
			if (status == 0xffff)
				break;
			if (status & RxEarly)
				vortex_rx(dev);
			/* vortex_error() takes window registers itself. */
			spin_unlock(&vp->window_lock);
			vortex_error(dev, status);
			spin_lock(&vp->window_lock);
			window_set(vp, 7);
		}

		if (--work_done < 0) {
			pr_warning("%s: Too much work in interrupt, status %4.4x.\n",
				dev->name, status);
			/* Disable all pending interrupts. */
			do {
				vp->deferred |= status;
				iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
					ioaddr + EL3_CMD);
				iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
			} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
			/* The timer will reenable interrupts. */
			mod_timer(&vp->timer, jiffies + 1*HZ);
			break;
		}
		/* Acknowledge the IRQ. */
		iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
	} while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));

	spin_unlock(&vp->window_lock);

	if (vortex_debug > 4)
		pr_debug("%s: exiting interrupt, status %4.4x.\n",
			dev->name, status);
handler_exit:
	spin_unlock(&vp->lock);
	return IRQ_RETVAL(handled);
}

/*
 * This is the ISR for the boomerang series chips.
 * full_bus_master_tx == 1 && full_bus_master_rx == 1
 */

static irqreturn_t
boomerang_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr;
	int status;
	int work_done = max_interrupt_work;

	ioaddr = vp->ioaddr;

	/*
	 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
	 * and boomerang_start_xmit
	 */
	spin_lock(&vp->lock);
	vp->handling_irq = 1;	/* makes boomerang_start_xmit back off */

	status = ioread16(ioaddr + EL3_STATUS);

	if (vortex_debug > 6)
		pr_debug("boomerang_interrupt. status=0x%4x\n", status);

	if ((status & IntLatch) == 0)
		goto handler_exit;	/* No interrupt: shared IRQs can cause this */

	if (status == 0xffff) { /* h/w no longer present (hotplug)? 
*/
		if (vortex_debug > 1)
			pr_debug("boomerang_interrupt(1): status = 0xffff\n");
		goto handler_exit;
	}

	if (status & IntReq) {
		/* Fold in events deferred by a previous overload. */
		status |= vp->deferred;
		vp->deferred = 0;
	}

	if (vortex_debug > 4)
		pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
			dev->name, status, ioread8(ioaddr + Timer));
	do {
		if (vortex_debug > 5)
				pr_debug("%s: In interrupt loop, status %4.4x.\n",
					dev->name, status);
		if (status & UpComplete) {
			iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD);
			if (vortex_debug > 5)
				pr_debug("boomerang_interrupt->boomerang_rx\n");
			boomerang_rx(dev);
		}

		if (status & DownComplete) {
			/* Reap completed Tx descriptors and free their skbs. */
			unsigned int dirty_tx = vp->dirty_tx;

			iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD);
			while (vp->cur_tx - dirty_tx > 0) {
				int entry = dirty_tx % TX_RING_SIZE;
#if 1	/* AKPM: the latter is faster, but cyclone-only */
				if (ioread32(ioaddr + DownListPtr) ==
					vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
					break;	/* It still hasn't been processed. */
#else
				if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
					break;	/* It still hasn't been processed. */
#endif

				if (vp->tx_skbuff[entry]) {
					struct sk_buff *skb = vp->tx_skbuff[entry];
#if DO_ZEROCOPY
					int i;
					for (i=0; i<=skb_shinfo(skb)->nr_frags; i++)
							pci_unmap_single(VORTEX_PCI(vp),
									le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
									le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
									PCI_DMA_TODEVICE);
#else
					pci_unmap_single(VORTEX_PCI(vp),
						le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
#endif
					dev_kfree_skb_irq(skb);
					vp->tx_skbuff[entry] = NULL;
				} else {
					pr_debug("boomerang_interrupt: no skb!\n");
				}
				/* dev->stats.tx_packets++;  Counted below. */
				dirty_tx++;
			}
			vp->dirty_tx = dirty_tx;
			if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
				if (vortex_debug > 6)
					pr_debug("boomerang_interrupt: wake queue\n");
				netif_wake_queue (dev);
			}
		}

		/* Check for all uncommon interrupts at once. */
		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq))
			vortex_error(dev, status);

		if (--work_done < 0) {
			pr_warning("%s: Too much work in interrupt, status %4.4x.\n",
				dev->name, status);
			/* Disable all pending interrupts. */
			do {
				vp->deferred |= status;
				iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
					ioaddr + EL3_CMD);
				iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
			} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
			/* The timer will reenable interrupts. */
			mod_timer(&vp->timer, jiffies + 1*HZ);
			break;
		}
		/* Acknowledge the IRQ. */
		iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
		if (vp->cb_fn_base)	/* The PCMCIA people are idiots.  */
			iowrite32(0x8000, vp->cb_fn_base + 4);

	} while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);

	if (vortex_debug > 4)
		pr_debug("%s: exiting interrupt, status %4.4x.\n",
			dev->name, status);
handler_exit:
	vp->handling_irq = 0;
	spin_unlock(&vp->lock);
	return IRQ_HANDLED;
}

/*
 * Receive path for non-bus-master chips: drain the Rx FIFO, allocating
 * an skb per packet and copying (or single-shot DMAing) the data out of
 * the FIFO.  Errors are decoded into dev->stats; each packet is popped
 * with an RxDiscard command.  Always returns 0.
 */
static int vortex_rx(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int i;
	short rx_status;

	if (vortex_debug > 5)
		pr_debug("vortex_rx(): status %4.4x, rx_status %4.4x.\n",
			ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus));
	while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) {
		if (rx_status & 0x4000) { /* Error, update stats. */
			unsigned char rx_error = ioread8(ioaddr + RxErrors);
			if (vortex_debug > 2)
				pr_debug(" Rx error: status %2.2x.\n", rx_error);
			dev->stats.rx_errors++;
			if (rx_error & 0x01)  dev->stats.rx_over_errors++;
			if (rx_error & 0x02)  dev->stats.rx_length_errors++;
			if (rx_error & 0x04)  dev->stats.rx_frame_errors++;
			if (rx_error & 0x08)  dev->stats.rx_crc_errors++;
			if (rx_error & 0x10)  dev->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K!. 
*/
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb;

			skb = netdev_alloc_skb(dev, pkt_len + 5);
			if (vortex_debug > 4)
				pr_debug("Receiving packet size %d status %4.4x.\n",
					pkt_len, rx_status);
			if (skb != NULL) {
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				if (vp->bus_master &&
					! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
					/* Single-shot bus-master copy out of the FIFO;
					 * busy-wait for completion. */
					dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
									pkt_len, PCI_DMA_FROMDEVICE);
					iowrite32(dma, ioaddr + Wn7_MasterAddr);
					iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
					iowrite16(StartDMAUp, ioaddr + EL3_CMD);
					while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
						;
					pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
				} else {
					/* Programmed I/O copy out of the FIFO. */
					ioread32_rep(ioaddr + RX_FIFO,
						     skb_put(skb, pkt_len),
						     (pkt_len + 3) >> 2);
				}
				iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				/* Wait a limited time to go to next packet. */
				for (i = 200; i >= 0; i--)
					if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
						break;
				continue;
			} else if (vortex_debug > 0)
				pr_notice("%s: No memory to allocate a sk_buff of size %d.\n",
					dev->name, pkt_len);
			dev->stats.rx_dropped++;
		}
		issue_and_wait(dev, RxDiscard);
	}

	return 0;
}

/*
 * Receive path for full bus-master chips: walk the Rx descriptor ring,
 * copying small packets (< rx_copybreak) into a fresh skb and passing
 * large ones up directly, applying hardware checksum results when the
 * descriptor reports them valid.  The ring is refilled afterwards; on
 * allocation failure with a completely empty ring the rx_oom_timer is
 * armed to retry.  Always returns 0.
 */
static int
boomerang_rx(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	int entry = vp->cur_rx % RX_RING_SIZE;
	void __iomem *ioaddr = vp->ioaddr;
	int rx_status;
	int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;

	if (vortex_debug > 5)
		pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));

	while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
		if (--rx_work_limit < 0)
			break;
		if (rx_status & RxDError) { /* Error, update stats. */
			unsigned char rx_error = rx_status >> 16;
			if (vortex_debug > 2)
				pr_debug(" Rx error: status %2.2x.\n", rx_error);
			dev->stats.rx_errors++;
			if (rx_error & 0x01)  dev->stats.rx_over_errors++;
			if (rx_error & 0x02)  dev->stats.rx_length_errors++;
			if (rx_error & 0x04)  dev->stats.rx_frame_errors++;
			if (rx_error & 0x08)  dev->stats.rx_crc_errors++;
			if (rx_error & 0x10)  dev->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K!. */
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb;
			dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);

			if (vortex_debug > 4)
				pr_debug("Receiving packet size %d status %4.4x.\n",
					pkt_len, rx_status);

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				/* 'skb_put()' points to the start of sk_buff data area. */
				memcpy(skb_put(skb, pkt_len),
					vp->rx_skbuff[entry]->data,
					pkt_len);
				pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				vp->rx_copy++;
			} else {
				/* Pass up the skbuff already on the Rx ring. */
				skb = vp->rx_skbuff[entry];
				vp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				vp->rx_nocopy++;
			}
			skb->protocol = eth_type_trans(skb, dev);
			{	/* Use hardware checksum info. */
				int csum_bits = rx_status & 0xee000000;
				if (csum_bits &&
				    (csum_bits == (IPChksumValid | TCPChksumValid) ||
				     csum_bits == (IPChksumValid | UDPChksumValid))) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
					vp->rx_csumhits++;
				}
			}
			netif_rx(skb);
			dev->stats.rx_packets++;
		}
		entry = (++vp->cur_rx) % RX_RING_SIZE;
	}
	/* Refill the Rx ring buffers. 
*/ for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) { struct sk_buff *skb; entry = vp->dirty_rx % RX_RING_SIZE; if (vp->rx_skbuff[entry] == NULL) { skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ); if (skb == NULL) { static unsigned long last_jif; if (time_after(jiffies, last_jif + 10 * HZ)) { pr_warning("%s: memory shortage\n", dev->name); last_jif = jiffies; } if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1)); break; /* Bad news! */ } vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); vp->rx_skbuff[entry] = skb; } vp->rx_ring[entry].status = 0; /* Clear complete bit. */ iowrite16(UpUnstall, ioaddr + EL3_CMD); } return 0; } /* * If we've hit a total OOM refilling the Rx ring we poll once a second * for some memory. Otherwise there is no way to restart the rx process. */ static void rx_oom_timer(unsigned long arg) { struct net_device *dev = (struct net_device *)arg; struct vortex_private *vp = netdev_priv(dev); spin_lock_irq(&vp->lock); if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */ boomerang_rx(dev); if (vortex_debug > 1) { pr_debug("%s: rx_oom_timer %s\n", dev->name, ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying"); } spin_unlock_irq(&vp->lock); } static void vortex_down(struct net_device *dev, int final_down) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; netif_stop_queue (dev); del_timer_sync(&vp->rx_oom_timer); del_timer_sync(&vp->timer); /* Turn off statistics ASAP. We update dev->stats below. */ iowrite16(StatsDisable, ioaddr + EL3_CMD); /* Disable the receiver and transmitter. */ iowrite16(RxDisable, ioaddr + EL3_CMD); iowrite16(TxDisable, ioaddr + EL3_CMD); /* Disable receiving 802.1q tagged frames */ set_8021q_mode(dev, 0); if (dev->if_port == XCVR_10base2) /* Turn off thinnet power. Green! 
*/ iowrite16(StopCoax, ioaddr + EL3_CMD); iowrite16(SetIntrEnb | 0x0000, ioaddr + EL3_CMD); update_stats(ioaddr, dev); if (vp->full_bus_master_rx) iowrite32(0, ioaddr + UpListPtr); if (vp->full_bus_master_tx) iowrite32(0, ioaddr + DownListPtr); if (final_down && VORTEX_PCI(vp)) { vp->pm_state_valid = 1; pci_save_state(VORTEX_PCI(vp)); acpi_set_WOL(dev); } } static int vortex_close(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; int i; if (netif_device_present(dev)) vortex_down(dev, 1); if (vortex_debug > 1) { pr_debug("%s: vortex_close() status %4.4x, Tx status %2.2x.\n", dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus)); pr_debug("%s: vortex close stats: rx_nocopy %d rx_copy %d" " tx_queued %d Rx pre-checksummed %d.\n", dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits); } #if DO_ZEROCOPY if (vp->rx_csumhits && (vp->drv_flags & HAS_HWCKSM) == 0 && (vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) { pr_warning("%s supports hardware checksums, and we're not using them!\n", dev->name); } #endif free_irq(dev->irq, dev); if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */ for (i = 0; i < RX_RING_SIZE; i++) if (vp->rx_skbuff[i]) { pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr), PKT_BUF_SZ, PCI_DMA_FROMDEVICE); dev_kfree_skb(vp->rx_skbuff[i]); vp->rx_skbuff[i] = NULL; } } if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. 
*/
	/* Free Boomerang bus-master Tx buffers (loop body of vortex_close):
	 * unmap every DMA mapping and release the skb for each ring slot. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (vp->tx_skbuff[i]) {
			struct sk_buff *skb = vp->tx_skbuff[i];
#if DO_ZEROCOPY
			int k;

			/* Zerocopy build: one mapping per fragment; entry 0 is
			 * the linear header, hence nr_frags + 1 iterations. */
			for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
				pci_unmap_single(VORTEX_PCI(vp),
						 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
						 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
						 PCI_DMA_TODEVICE);
#else
			/* Non-zerocopy build: a single mapping covers the packet. */
			pci_unmap_single(VORTEX_PCI(vp),
					 le32_to_cpu(vp->tx_ring[i].addr),
					 skb->len, PCI_DMA_TODEVICE);
#endif
			dev_kfree_skb(skb);
			vp->tx_skbuff[i] = NULL;	/* mark the slot empty */
		}
	}
	}
	return 0;
}

/*
 * Dump the bus-master Tx ring state to the kernel log.
 * Debug-only helper; it stalls the download engine while walking the
 * ring so the descriptors are stable, then unstalls it again if it was
 * running when we arrived.
 */
static void dump_tx_ring(struct net_device *dev)
{
	if (vortex_debug > 0) {
		struct vortex_private *vp = netdev_priv(dev);
		void __iomem *ioaddr = vp->ioaddr;

		if (vp->full_bus_master_tx) {
			int i;
			/* Sample the stall bit before we force a stall. */
			int stalled = ioread32(ioaddr + PktStatus) & 0x04;	/* Possible racy. But it's only debug stuff */

			pr_err(" Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
			       vp->full_bus_master_tx,
			       vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
			       vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
			pr_err(" Transmit list %8.8x vs. %p.\n",
			       ioread32(ioaddr + DownListPtr),
			       &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
			/* Freeze the download engine so the ring is stable. */
			issue_and_wait(dev, DownStall);
			for (i = 0; i < TX_RING_SIZE; i++) {
				unsigned int length;
#if DO_ZEROCOPY
				length = le32_to_cpu(vp->tx_ring[i].frag[0].length);
#else
				length = le32_to_cpu(vp->tx_ring[i].length);
#endif
				pr_err(" %d: @%p length %8.8x status %8.8x\n",
				       i, &vp->tx_ring[i], length,
				       le32_to_cpu(vp->tx_ring[i].status));
			}
			/* Only restart the engine if it was running before. */
			if (!stalled)
				iowrite16(DownUnstall, ioaddr + EL3_CMD);
		}
	}
}

/*
 * Return the interface statistics, first folding the chip's hardware
 * counters into dev->stats (under vp->lock) when the device is present.
 */
static struct net_device_stats *vortex_get_stats(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	unsigned long flags;

	if (netif_device_present(dev)) {	/* AKPM: Used to be netif_running */
		spin_lock_irqsave (&vp->lock, flags);
		update_stats(ioaddr, dev);
		spin_unlock_irqrestore (&vp->lock, flags);
	}
	return &dev->stats;
}

/* Update statistics.
Unlike with the EL3 we need not worry about interrupts changing the window setting from underneath us, but we must still guard against a race condition with a StatsUpdate interrupt updating the table. This is done by checking that the ASM (!) code generated uses atomic updates with '+='. */ static void update_stats(void __iomem *ioaddr, struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ /* Switch to the stats window, and read everything. */ dev->stats.tx_carrier_errors += window_read8(vp, 6, 0); dev->stats.tx_heartbeat_errors += window_read8(vp, 6, 1); dev->stats.tx_window_errors += window_read8(vp, 6, 4); dev->stats.rx_fifo_errors += window_read8(vp, 6, 5); dev->stats.tx_packets += window_read8(vp, 6, 6); dev->stats.tx_packets += (window_read8(vp, 6, 9) & 0x30) << 4; /* Rx packets */ window_read8(vp, 6, 7); /* Must read to clear */ /* Don't bother with register 9, an extension of registers 6&7. If we do use the 6&7 values the atomic update assumption above is invalid. 
*/
	dev->stats.rx_bytes += window_read16(vp, 6, 10);
	dev->stats.tx_bytes += window_read16(vp, 6, 12);
	/* Extra stats for get_ethtool_stats() */
	vp->xstats.tx_multiple_collisions += window_read8(vp, 6, 2);
	vp->xstats.tx_single_collisions += window_read8(vp, 6, 3);
	vp->xstats.tx_deferred += window_read8(vp, 6, 8);
	vp->xstats.rx_bad_ssd += window_read8(vp, 4, 12);
	/* tx_max_collisions is accumulated elsewhere in the driver; fold all
	 * three collision flavors into the generic collisions counter. */
	dev->stats.collisions = vp->xstats.tx_multiple_collisions
		+ vp->xstats.tx_single_collisions
		+ vp->xstats.tx_max_collisions;
	{
		/* Window 4, register 13 holds the upper nibbles of the Rx and
		 * Tx byte counters (4 extra bits each). */
		u8 up = window_read8(vp, 4, 13);
		dev->stats.rx_bytes += (up & 0x0f) << 16;
		dev->stats.tx_bytes += (up & 0xf0) << 12;
	}
}

/* ethtool op: restart MII autonegotiation. */
static int vortex_nway_reset(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	return mii_nway_restart(&vp->mii);
}

/* ethtool op: report link settings from the cached MII state. */
static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct vortex_private *vp = netdev_priv(dev);
	return mii_ethtool_gset(&vp->mii, cmd);
}

/* ethtool op: apply new link settings through the MII layer. */
static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct vortex_private *vp = netdev_priv(dev);
	return mii_ethtool_sset(&vp->mii, cmd);
}

/* The driver-wide debug level doubles as the ethtool message level. */
static u32 vortex_get_msglevel(struct net_device *dev)
{
	return vortex_debug;
}

static void vortex_set_msglevel(struct net_device *dev, u32 dbg)
{
	vortex_debug = dbg;
}

/* ethtool op: number of u64 entries get_ethtool_stats() will fill. */
static int vortex_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return VORTEX_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

/* ethtool op: snapshot the driver's extra statistics.  Hardware counters
 * are folded in first, under vp->lock to serialize with the interrupt
 * handler's StatsUpdate processing. */
static void vortex_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	update_stats(ioaddr, dev);
	spin_unlock_irqrestore(&vp->lock, flags);

	data[0] = vp->xstats.tx_deferred;
	data[1] = vp->xstats.tx_max_collisions;
	data[2] = vp->xstats.tx_multiple_collisions;
	data[3] = vp->xstats.tx_single_collisions;
	data[4] = vp->xstats.rx_bad_ssd;
}

static void vortex_get_strings(struct net_device *dev, u32 stringset, u8
*data) { switch (stringset) { case ETH_SS_STATS: memcpy(data, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); break; default: WARN_ON(1); break; } } static void vortex_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct vortex_private *vp = netdev_priv(dev); strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); if (VORTEX_PCI(vp)) { strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)), sizeof(info->bus_info)); } else { if (VORTEX_EISA(vp)) strlcpy(info->bus_info, dev_name(vp->gendev), sizeof(info->bus_info)); else snprintf(info->bus_info, sizeof(info->bus_info), "EISA 0x%lx %d", dev->base_addr, dev->irq); } } static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct vortex_private *vp = netdev_priv(dev); if (!VORTEX_PCI(vp)) return; wol->supported = WAKE_MAGIC; wol->wolopts = 0; if (vp->enable_wol) wol->wolopts |= WAKE_MAGIC; } static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct vortex_private *vp = netdev_priv(dev); if (!VORTEX_PCI(vp)) return -EOPNOTSUPP; if (wol->wolopts & ~WAKE_MAGIC) return -EINVAL; if (wol->wolopts & WAKE_MAGIC) vp->enable_wol = 1; else vp->enable_wol = 0; acpi_set_WOL(dev); return 0; } static const struct ethtool_ops vortex_ethtool_ops = { .get_drvinfo = vortex_get_drvinfo, .get_strings = vortex_get_strings, .get_msglevel = vortex_get_msglevel, .set_msglevel = vortex_set_msglevel, .get_ethtool_stats = vortex_get_ethtool_stats, .get_sset_count = vortex_get_sset_count, .get_settings = vortex_get_settings, .set_settings = vortex_set_settings, .get_link = ethtool_op_get_link, .nway_reset = vortex_nway_reset, .get_wol = vortex_get_wol, .set_wol = vortex_set_wol, }; #ifdef CONFIG_PCI /* * Must power the device up to do MDIO operations */ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { int err; struct vortex_private *vp = netdev_priv(dev); pci_power_t state = 0; if(VORTEX_PCI(vp)) state = VORTEX_PCI(vp)->current_state; /* The kernel 
core really should have pci_get_power_state() */ if(state != 0) pci_set_power_state(VORTEX_PCI(vp), PCI_D0); err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL); if(state != 0) pci_set_power_state(VORTEX_PCI(vp), state); return err; } #endif /* Pre-Cyclone chips have no documented multicast filter, so the only multicast setting is to receive all multicast frames. At least the chip has a very clean way to set the mode, unlike many others. */ static void set_rx_mode(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; int new_mode; if (dev->flags & IFF_PROMISC) { if (vortex_debug > 3) pr_notice("%s: Setting promiscuous mode.\n", dev->name); new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm; } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) { new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast; } else new_mode = SetRxFilter | RxStation | RxBroadcast; iowrite16(new_mode, ioaddr + EL3_CMD); } #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) /* Setup the card so that it can receive frames with an 802.1q VLAN tag. 
Note that this must be done after each RxReset due to some backwards compatibility logic in the Cyclone and Tornado ASICs */ /* The Ethernet Type used for 802.1q tagged frames */ #define VLAN_ETHER_TYPE 0x8100 static void set_8021q_mode(struct net_device *dev, int enable) { struct vortex_private *vp = netdev_priv(dev); int mac_ctrl; if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) { /* cyclone and tornado chipsets can recognize 802.1q * tagged frames and treat them correctly */ int max_pkt_size = dev->mtu+14; /* MTU+Ethernet header */ if (enable) max_pkt_size += 4; /* 802.1Q VLAN tag */ window_write16(vp, max_pkt_size, 3, Wn3_MaxPktSize); /* set VlanEtherType to let the hardware checksumming treat tagged frames correctly */ window_write16(vp, VLAN_ETHER_TYPE, 7, Wn7_VlanEtherType); } else { /* on older cards we have to enable large frames */ vp->large_frames = dev->mtu > 1500 || enable; mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl); if (vp->large_frames) mac_ctrl |= 0x40; else mac_ctrl &= ~0x40; window_write16(vp, mac_ctrl, 3, Wn3_MAC_Ctrl); } } #else static void set_8021q_mode(struct net_device *dev, int enable) { } #endif /* MII transceiver control section. Read and write the MII registers using software-generated serial MDIO protocol. See the MII specifications or DP83840A data sheet for details. */ /* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually met by back-to-back PCI I/O cycles, but we insert a delay to avoid "overclocking" issues. */ static void mdio_delay(struct vortex_private *vp) { window_read32(vp, 4, Wn4_PhysicalMgmt); } #define MDIO_SHIFT_CLK 0x01 #define MDIO_DIR_WRITE 0x04 #define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE) #define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE) #define MDIO_DATA_READ 0x02 #define MDIO_ENB_IN 0x00 /* Generate the preamble required for initial synchronization and a few older transceivers. 
*/ static void mdio_sync(struct vortex_private *vp, int bits) { /* Establish sync by sending at least 32 logic ones. */ while (-- bits >= 0) { window_write16(vp, MDIO_DATA_WRITE1, 4, Wn4_PhysicalMgmt); mdio_delay(vp); window_write16(vp, MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, 4, Wn4_PhysicalMgmt); mdio_delay(vp); } } static int mdio_read(struct net_device *dev, int phy_id, int location) { int i; struct vortex_private *vp = netdev_priv(dev); int read_cmd = (0xf6 << 10) | (phy_id << 5) | location; unsigned int retval = 0; spin_lock_bh(&vp->mii_lock); if (mii_preamble_required) mdio_sync(vp, 32); /* Shift the read command bits out. */ for (i = 14; i >= 0; i--) { int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; window_write16(vp, dataval, 4, Wn4_PhysicalMgmt); mdio_delay(vp); window_write16(vp, dataval | MDIO_SHIFT_CLK, 4, Wn4_PhysicalMgmt); mdio_delay(vp); } /* Read the two transition, 16 data, and wire-idle bits. */ for (i = 19; i > 0; i--) { window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt); mdio_delay(vp); retval = (retval << 1) | ((window_read16(vp, 4, Wn4_PhysicalMgmt) & MDIO_DATA_READ) ? 1 : 0); window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK, 4, Wn4_PhysicalMgmt); mdio_delay(vp); } spin_unlock_bh(&vp->mii_lock); return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff; } static void mdio_write(struct net_device *dev, int phy_id, int location, int value) { struct vortex_private *vp = netdev_priv(dev); int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value; int i; spin_lock_bh(&vp->mii_lock); if (mii_preamble_required) mdio_sync(vp, 32); /* Shift the command bits out. */ for (i = 31; i >= 0; i--) { int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; window_write16(vp, dataval, 4, Wn4_PhysicalMgmt); mdio_delay(vp); window_write16(vp, dataval | MDIO_SHIFT_CLK, 4, Wn4_PhysicalMgmt); mdio_delay(vp); } /* Leave the interface idle. 
*/ for (i = 1; i >= 0; i--) { window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt); mdio_delay(vp); window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK, 4, Wn4_PhysicalMgmt); mdio_delay(vp); } spin_unlock_bh(&vp->mii_lock); } /* ACPI: Advanced Configuration and Power Interface. */ /* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */ static void acpi_set_WOL(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; device_set_wakeup_enable(vp->gendev, vp->enable_wol); if (vp->enable_wol) { /* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */ window_write16(vp, 2, 7, 0x0c); /* The RxFilter must accept the WOL frames. */ iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD); iowrite16(RxEnable, ioaddr + EL3_CMD); if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) { pr_info("%s: WOL not supported.\n", pci_name(VORTEX_PCI(vp))); vp->enable_wol = 0; return; } if (VORTEX_PCI(vp)->current_state < PCI_D3hot) return; /* Change the power state to D3; RxEnable doesn't take effect. */ pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot); } } static void __devexit vortex_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct vortex_private *vp; if (!dev) { pr_err("vortex_remove_one called for Compaq device!\n"); BUG(); } vp = netdev_priv(dev); if (vp->cb_fn_base) pci_iounmap(VORTEX_PCI(vp), vp->cb_fn_base); unregister_netdev(dev); if (VORTEX_PCI(vp)) { pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */ if (vp->pm_state_valid) pci_restore_state(VORTEX_PCI(vp)); pci_disable_device(VORTEX_PCI(vp)); } /* Should really use issue_and_wait() here */ iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 
0x04 : 0x14), vp->ioaddr + EL3_CMD); pci_iounmap(VORTEX_PCI(vp), vp->ioaddr); pci_free_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE + sizeof(struct boom_tx_desc) * TX_RING_SIZE, vp->rx_ring, vp->rx_ring_dma); if (vp->must_free_region) release_region(dev->base_addr, vp->io_size); free_netdev(dev); } static struct pci_driver vortex_driver = { .name = "3c59x", .probe = vortex_init_one, .remove = __devexit_p(vortex_remove_one), .id_table = vortex_pci_tbl, .driver.pm = VORTEX_PM_OPS, }; static int vortex_have_pci; static int vortex_have_eisa; static int __init vortex_init(void) { int pci_rc, eisa_rc; pci_rc = pci_register_driver(&vortex_driver); eisa_rc = vortex_eisa_init(); if (pci_rc == 0) vortex_have_pci = 1; if (eisa_rc > 0) vortex_have_eisa = 1; return (vortex_have_pci + vortex_have_eisa) ? 0 : -ENODEV; } static void __exit vortex_eisa_cleanup(void) { struct vortex_private *vp; void __iomem *ioaddr; #ifdef CONFIG_EISA /* Take care of the EISA devices */ eisa_driver_unregister(&vortex_eisa_driver); #endif if (compaq_net_device) { vp = netdev_priv(compaq_net_device); ioaddr = ioport_map(compaq_net_device->base_addr, VORTEX_TOTAL_SIZE); unregister_netdev(compaq_net_device); iowrite16(TotalReset, ioaddr + EL3_CMD); release_region(compaq_net_device->base_addr, VORTEX_TOTAL_SIZE); free_netdev(compaq_net_device); } } static void __exit vortex_cleanup(void) { if (vortex_have_pci) pci_unregister_driver(&vortex_driver); if (vortex_have_eisa) vortex_eisa_cleanup(); } module_init(vortex_init); module_exit(vortex_cleanup);
gpl-2.0
javelinanddart/kernel_samsung_msm8660
drivers/media/video/cpia2/cpia2_usb.c
4790
25426
/**************************************************************************** * * Filename: cpia2_usb.c * * Copyright 2001, STMicrolectronics, Inc. * Contact: steve.miller@st.com * * Description: * This is a USB driver for CPia2 based video cameras. * The infrastructure of this driver is based on the cpia usb driver by * Jochen Scharrlach and Johannes Erdfeldt. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* * Stripped of 2.4 stuff ready for main kernel submit by * Alan Cox <alan@lxorguk.ukuu.org.uk> ****************************************************************************/ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/usb.h> #include "cpia2.h" static int frame_sizes[] = { 0, // USBIF_CMDONLY 0, // USBIF_BULK 128, // USBIF_ISO_1 384, // USBIF_ISO_2 640, // USBIF_ISO_3 768, // USBIF_ISO_4 896, // USBIF_ISO_5 1023, // USBIF_ISO_6 }; #define FRAMES_PER_DESC 10 #define FRAME_SIZE_PER_DESC frame_sizes[cam->cur_alt] static void process_frame(struct camera_data *cam); static void cpia2_usb_complete(struct urb *urb); static int cpia2_usb_probe(struct usb_interface *intf, const struct usb_device_id *id); static void cpia2_usb_disconnect(struct usb_interface *intf); static void free_sbufs(struct camera_data *cam); static void add_APPn(struct camera_data *cam); static void add_COM(struct camera_data *cam); static int submit_urbs(struct camera_data *cam); static int set_alternate(struct camera_data *cam, unsigned int alt); static int configure_transfer_mode(struct camera_data *cam, unsigned int alt); static struct usb_device_id cpia2_id_table[] = { {USB_DEVICE(0x0553, 0x0100)}, {USB_DEVICE(0x0553, 0x0140)}, {USB_DEVICE(0x0553, 0x0151)}, /* STV0676 */ {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, cpia2_id_table); static struct usb_driver cpia2_driver = { .name = "cpia2", .probe = cpia2_usb_probe, .disconnect = cpia2_usb_disconnect, .id_table = cpia2_id_table }; /****************************************************************************** * * process_frame * *****************************************************************************/ static void process_frame(struct camera_data *cam) { static int frame_count; unsigned char *inbuff = cam->workbuff->data; DBG("Processing frame #%d, current:%d\n", cam->workbuff->num, cam->curbuff->num); if(cam->workbuff->length > cam->workbuff->max_length) cam->workbuff->max_length = cam->workbuff->length; if 
((inbuff[0] == 0xFF) && (inbuff[1] == 0xD8)) { frame_count++; } else { cam->workbuff->status = FRAME_ERROR; DBG("Start of frame not found\n"); return; } /*** * Now the output buffer should have a JPEG image in it. ***/ if(!cam->first_image_seen) { /* Always skip the first image after streaming * starts. It is almost certainly corrupt. */ cam->first_image_seen = 1; cam->workbuff->status = FRAME_EMPTY; return; } if (cam->workbuff->length > 3) { if(cam->mmapped && cam->workbuff->length < cam->workbuff->max_length) { /* No junk in the buffers */ memset(cam->workbuff->data+cam->workbuff->length, 0, cam->workbuff->max_length- cam->workbuff->length); } cam->workbuff->max_length = cam->workbuff->length; cam->workbuff->status = FRAME_READY; if(!cam->mmapped && cam->num_frames > 2) { /* During normal reading, the most recent * frame will be read. If the current frame * hasn't started reading yet, it will never * be read, so mark it empty. If the buffer is * mmapped, or we have few buffers, we need to * wait for the user to free the buffer. * * NOTE: This is not entirely foolproof with 3 * buffers, but it would take an EXTREMELY * overloaded system to cause problems (possible * image data corruption). Basically, it would * need to take more time to execute cpia2_read * than it would for the camera to send * cam->num_frames-2 frames before problems * could occur. 
*/ cam->curbuff->status = FRAME_EMPTY; } cam->curbuff = cam->workbuff; cam->workbuff = cam->workbuff->next; DBG("Changed buffers, work:%d, current:%d\n", cam->workbuff->num, cam->curbuff->num); return; } else { DBG("Not enough data for an image.\n"); } cam->workbuff->status = FRAME_ERROR; return; } /****************************************************************************** * * add_APPn * * Adds a user specified APPn record *****************************************************************************/ static void add_APPn(struct camera_data *cam) { if(cam->APP_len > 0) { cam->workbuff->data[cam->workbuff->length++] = 0xFF; cam->workbuff->data[cam->workbuff->length++] = 0xE0+cam->APPn; cam->workbuff->data[cam->workbuff->length++] = 0; cam->workbuff->data[cam->workbuff->length++] = cam->APP_len+2; memcpy(cam->workbuff->data+cam->workbuff->length, cam->APP_data, cam->APP_len); cam->workbuff->length += cam->APP_len; } } /****************************************************************************** * * add_COM * * Adds a user specified COM record *****************************************************************************/ static void add_COM(struct camera_data *cam) { if(cam->COM_len > 0) { cam->workbuff->data[cam->workbuff->length++] = 0xFF; cam->workbuff->data[cam->workbuff->length++] = 0xFE; cam->workbuff->data[cam->workbuff->length++] = 0; cam->workbuff->data[cam->workbuff->length++] = cam->COM_len+2; memcpy(cam->workbuff->data+cam->workbuff->length, cam->COM_data, cam->COM_len); cam->workbuff->length += cam->COM_len; } } /****************************************************************************** * * cpia2_usb_complete * * callback when incoming packet is received *****************************************************************************/ static void cpia2_usb_complete(struct urb *urb) { int i; unsigned char *cdata; static int frame_ready = false; struct camera_data *cam = (struct camera_data *) urb->context; if (urb->status!=0) { if (!(urb->status == 
-ENOENT || urb->status == -ECONNRESET || urb->status == -ESHUTDOWN)) { DBG("urb->status = %d!\n", urb->status); } DBG("Stopping streaming\n"); return; } if (!cam->streaming || !cam->present || cam->open_count == 0) { LOG("Will now stop the streaming: streaming = %d, " "present=%d, open_count=%d\n", cam->streaming, cam->present, cam->open_count); return; } /*** * Packet collater ***/ //DBG("Collating %d packets\n", urb->number_of_packets); for (i = 0; i < urb->number_of_packets; i++) { u16 checksum, iso_checksum; int j; int n = urb->iso_frame_desc[i].actual_length; int st = urb->iso_frame_desc[i].status; if(cam->workbuff->status == FRAME_READY) { struct framebuf *ptr; /* Try to find an available buffer */ DBG("workbuff full, searching\n"); for (ptr = cam->workbuff->next; ptr != cam->workbuff; ptr = ptr->next) { if (ptr->status == FRAME_EMPTY) { ptr->status = FRAME_READING; ptr->length = 0; break; } } if (ptr == cam->workbuff) break; /* No READING or EMPTY buffers left */ cam->workbuff = ptr; } if (cam->workbuff->status == FRAME_EMPTY || cam->workbuff->status == FRAME_ERROR) { cam->workbuff->status = FRAME_READING; cam->workbuff->length = 0; } //DBG(" Packet %d length = %d, status = %d\n", i, n, st); cdata = urb->transfer_buffer + urb->iso_frame_desc[i].offset; if (st) { LOG("cpia2 data error: [%d] len=%d, status = %d\n", i, n, st); if(!ALLOW_CORRUPT) cam->workbuff->status = FRAME_ERROR; continue; } if(n<=2) continue; checksum = 0; for(j=0; j<n-2; ++j) checksum += cdata[j]; iso_checksum = cdata[j] + cdata[j+1]*256; if(checksum != iso_checksum) { LOG("checksum mismatch: [%d] len=%d, calculated = %x, checksum = %x\n", i, n, (int)checksum, (int)iso_checksum); if(!ALLOW_CORRUPT) { cam->workbuff->status = FRAME_ERROR; continue; } } n -= 2; if(cam->workbuff->status != FRAME_READING) { if((0xFF == cdata[0] && 0xD8 == cdata[1]) || (0xD8 == cdata[0] && 0xFF == cdata[1] && 0 != cdata[2])) { /* frame is skipped, but increment total * frame count anyway */ cam->frame_count++; } 
DBG("workbuff not reading, status=%d\n", cam->workbuff->status); continue; } if (cam->frame_size < cam->workbuff->length + n) { ERR("buffer overflow! length: %d, n: %d\n", cam->workbuff->length, n); cam->workbuff->status = FRAME_ERROR; if(cam->workbuff->length > cam->workbuff->max_length) cam->workbuff->max_length = cam->workbuff->length; continue; } if (cam->workbuff->length == 0) { int data_offset; if ((0xD8 == cdata[0]) && (0xFF == cdata[1])) { data_offset = 1; } else if((0xFF == cdata[0]) && (0xD8 == cdata[1]) && (0xFF == cdata[2])) { data_offset = 2; } else { DBG("Ignoring packet, not beginning!\n"); continue; } DBG("Start of frame pattern found\n"); do_gettimeofday(&cam->workbuff->timestamp); cam->workbuff->seq = cam->frame_count++; cam->workbuff->data[0] = 0xFF; cam->workbuff->data[1] = 0xD8; cam->workbuff->length = 2; add_APPn(cam); add_COM(cam); memcpy(cam->workbuff->data+cam->workbuff->length, cdata+data_offset, n-data_offset); cam->workbuff->length += n-data_offset; } else if (cam->workbuff->length > 0) { memcpy(cam->workbuff->data + cam->workbuff->length, cdata, n); cam->workbuff->length += n; } if ((cam->workbuff->length >= 3) && (cam->workbuff->data[cam->workbuff->length - 3] == 0xFF) && (cam->workbuff->data[cam->workbuff->length - 2] == 0xD9) && (cam->workbuff->data[cam->workbuff->length - 1] == 0xFF)) { frame_ready = true; cam->workbuff->data[cam->workbuff->length - 1] = 0; cam->workbuff->length -= 1; } else if ((cam->workbuff->length >= 2) && (cam->workbuff->data[cam->workbuff->length - 2] == 0xFF) && (cam->workbuff->data[cam->workbuff->length - 1] == 0xD9)) { frame_ready = true; } if (frame_ready) { DBG("Workbuff image size = %d\n",cam->workbuff->length); process_frame(cam); frame_ready = false; if (waitqueue_active(&cam->wq_stream)) wake_up_interruptible(&cam->wq_stream); } } if(cam->streaming) { /* resubmit */ urb->dev = cam->dev; if ((i = usb_submit_urb(urb, GFP_ATOMIC)) != 0) ERR("%s: usb_submit_urb ret %d!\n", __func__, i); } } 
/****************************************************************************** * * configure_transfer_mode * *****************************************************************************/ static int configure_transfer_mode(struct camera_data *cam, unsigned int alt) { static unsigned char iso_regs[8][4] = { {0x00, 0x00, 0x00, 0x00}, {0x00, 0x00, 0x00, 0x00}, {0xB9, 0x00, 0x00, 0x7E}, {0xB9, 0x00, 0x01, 0x7E}, {0xB9, 0x00, 0x02, 0x7E}, {0xB9, 0x00, 0x02, 0xFE}, {0xB9, 0x00, 0x03, 0x7E}, {0xB9, 0x00, 0x03, 0xFD} }; struct cpia2_command cmd; unsigned char reg; if(!cam->present) return -ENODEV; /*** * Write the isoc registers according to the alternate selected ***/ cmd.direction = TRANSFER_WRITE; cmd.buffer.block_data[0] = iso_regs[alt][0]; cmd.buffer.block_data[1] = iso_regs[alt][1]; cmd.buffer.block_data[2] = iso_regs[alt][2]; cmd.buffer.block_data[3] = iso_regs[alt][3]; cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.start = CPIA2_VC_USB_ISOLIM; cmd.reg_count = 4; cpia2_send_command(cam, &cmd); /*** * Enable relevant streams before starting polling. * First read USB Stream Config Register. 
***/ cmd.direction = TRANSFER_READ; cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.start = CPIA2_VC_USB_STRM; cmd.reg_count = 1; cpia2_send_command(cam, &cmd); reg = cmd.buffer.block_data[0]; /* Clear iso, bulk, and int */ reg &= ~(CPIA2_VC_USB_STRM_BLK_ENABLE | CPIA2_VC_USB_STRM_ISO_ENABLE | CPIA2_VC_USB_STRM_INT_ENABLE); if (alt == USBIF_BULK) { DBG("Enabling bulk xfer\n"); reg |= CPIA2_VC_USB_STRM_BLK_ENABLE; /* Enable Bulk */ cam->xfer_mode = XFER_BULK; } else if (alt >= USBIF_ISO_1) { DBG("Enabling ISOC xfer\n"); reg |= CPIA2_VC_USB_STRM_ISO_ENABLE; cam->xfer_mode = XFER_ISOC; } cmd.buffer.block_data[0] = reg; cmd.direction = TRANSFER_WRITE; cmd.start = CPIA2_VC_USB_STRM; cmd.reg_count = 1; cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cpia2_send_command(cam, &cmd); return 0; } /****************************************************************************** * * cpia2_usb_change_streaming_alternate * *****************************************************************************/ int cpia2_usb_change_streaming_alternate(struct camera_data *cam, unsigned int alt) { int ret = 0; if(alt < USBIF_ISO_1 || alt > USBIF_ISO_6) return -EINVAL; if(alt == cam->params.camera_state.stream_mode) return 0; cpia2_usb_stream_pause(cam); configure_transfer_mode(cam, alt); cam->params.camera_state.stream_mode = alt; /* Reset the camera to prevent image quality degradation */ cpia2_reset_camera(cam); cpia2_usb_stream_resume(cam); return ret; } /****************************************************************************** * * set_alternate * *****************************************************************************/ static int set_alternate(struct camera_data *cam, unsigned int alt) { int ret = 0; if(alt == cam->cur_alt) return 0; if (cam->cur_alt != USBIF_CMDONLY) { DBG("Changing from alt %d to %d\n", cam->cur_alt, USBIF_CMDONLY); ret = usb_set_interface(cam->dev, cam->iface, USBIF_CMDONLY); if (ret != 0) return ret; } if (alt != USBIF_CMDONLY) { 
DBG("Changing from alt %d to %d\n", USBIF_CMDONLY, alt); ret = usb_set_interface(cam->dev, cam->iface, alt); if (ret != 0) return ret; } cam->old_alt = cam->cur_alt; cam->cur_alt = alt; return ret; } /****************************************************************************** * * free_sbufs * * Free all cam->sbuf[]. All non-NULL .data and .urb members that are non-NULL * are assumed to be allocated. Non-NULL .urb members are also assumed to be * submitted (and must therefore be killed before they are freed). *****************************************************************************/ static void free_sbufs(struct camera_data *cam) { int i; for (i = 0; i < NUM_SBUF; i++) { if(cam->sbuf[i].urb) { usb_kill_urb(cam->sbuf[i].urb); usb_free_urb(cam->sbuf[i].urb); cam->sbuf[i].urb = NULL; } if(cam->sbuf[i].data) { kfree(cam->sbuf[i].data); cam->sbuf[i].data = NULL; } } } /******* * Convenience functions *******/ /**************************************************************************** * * write_packet * ***************************************************************************/ static int write_packet(struct usb_device *udev, u8 request, u8 * registers, u16 start, size_t size) { if (!registers || size <= 0) return -EINVAL; return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request, USB_TYPE_VENDOR | USB_RECIP_DEVICE, start, /* value */ 0, /* index */ registers, /* buffer */ size, HZ); } /**************************************************************************** * * read_packet * ***************************************************************************/ static int read_packet(struct usb_device *udev, u8 request, u8 * registers, u16 start, size_t size) { if (!registers || size <= 0) return -EINVAL; return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request, USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_DEVICE, start, /* value */ 0, /* index */ registers, /* buffer */ size, HZ); } /****************************************************************************** * 
* cpia2_usb_transfer_cmd * *****************************************************************************/ int cpia2_usb_transfer_cmd(struct camera_data *cam, void *registers, u8 request, u8 start, u8 count, u8 direction) { int err = 0; struct usb_device *udev = cam->dev; if (!udev) { ERR("%s: Internal driver error: udev is NULL\n", __func__); return -EINVAL; } if (!registers) { ERR("%s: Internal driver error: register array is NULL\n", __func__); return -EINVAL; } if (direction == TRANSFER_READ) { err = read_packet(udev, request, (u8 *)registers, start, count); if (err > 0) err = 0; } else if (direction == TRANSFER_WRITE) { err =write_packet(udev, request, (u8 *)registers, start, count); if (err < 0) { LOG("Control message failed, err val = %d\n", err); LOG("Message: request = 0x%0X, start = 0x%0X\n", request, start); LOG("Message: count = %d, register[0] = 0x%0X\n", count, ((unsigned char *) registers)[0]); } else err=0; } else { LOG("Unexpected first byte of direction: %d\n", direction); return -EINVAL; } if(err != 0) LOG("Unexpected error: %d\n", err); return err; } /****************************************************************************** * * submit_urbs * *****************************************************************************/ static int submit_urbs(struct camera_data *cam) { struct urb *urb; int fx, err, i, j; for(i=0; i<NUM_SBUF; ++i) { if (cam->sbuf[i].data) continue; cam->sbuf[i].data = kmalloc(FRAMES_PER_DESC * FRAME_SIZE_PER_DESC, GFP_KERNEL); if (!cam->sbuf[i].data) { while (--i >= 0) { kfree(cam->sbuf[i].data); cam->sbuf[i].data = NULL; } return -ENOMEM; } } /* We double buffer the Isoc lists, and also know the polling * interval is every frame (1 == (1 << (bInterval -1))). 
*/ for(i=0; i<NUM_SBUF; ++i) { if(cam->sbuf[i].urb) { continue; } urb = usb_alloc_urb(FRAMES_PER_DESC, GFP_KERNEL); if (!urb) { ERR("%s: usb_alloc_urb error!\n", __func__); for (j = 0; j < i; j++) usb_free_urb(cam->sbuf[j].urb); return -ENOMEM; } cam->sbuf[i].urb = urb; urb->dev = cam->dev; urb->context = cam; urb->pipe = usb_rcvisocpipe(cam->dev, 1 /*ISOC endpoint*/); urb->transfer_flags = URB_ISO_ASAP; urb->transfer_buffer = cam->sbuf[i].data; urb->complete = cpia2_usb_complete; urb->number_of_packets = FRAMES_PER_DESC; urb->interval = 1; urb->transfer_buffer_length = FRAME_SIZE_PER_DESC * FRAMES_PER_DESC; for (fx = 0; fx < FRAMES_PER_DESC; fx++) { urb->iso_frame_desc[fx].offset = FRAME_SIZE_PER_DESC * fx; urb->iso_frame_desc[fx].length = FRAME_SIZE_PER_DESC; } } /* Queue the ISO urbs, and resubmit in the completion handler */ for(i=0; i<NUM_SBUF; ++i) { err = usb_submit_urb(cam->sbuf[i].urb, GFP_KERNEL); if (err) { ERR("usb_submit_urb[%d]() = %d\n", i, err); return err; } } return 0; } /****************************************************************************** * * cpia2_usb_stream_start * *****************************************************************************/ int cpia2_usb_stream_start(struct camera_data *cam, unsigned int alternate) { int ret; int old_alt; if(cam->streaming) return 0; if (cam->flush) { int i; DBG("Flushing buffers\n"); for(i=0; i<cam->num_frames; ++i) { cam->buffers[i].status = FRAME_EMPTY; cam->buffers[i].length = 0; } cam->curbuff = &cam->buffers[0]; cam->workbuff = cam->curbuff->next; cam->flush = false; } old_alt = cam->params.camera_state.stream_mode; cam->params.camera_state.stream_mode = 0; ret = cpia2_usb_change_streaming_alternate(cam, alternate); if (ret < 0) { int ret2; ERR("cpia2_usb_change_streaming_alternate() = %d!\n", ret); cam->params.camera_state.stream_mode = old_alt; ret2 = set_alternate(cam, USBIF_CMDONLY); if (ret2 < 0) { ERR("cpia2_usb_change_streaming_alternate(%d) =%d has already " "failed. 
Then tried to call " "set_alternate(USBIF_CMDONLY) = %d.\n", alternate, ret, ret2); } } else { cam->frame_count = 0; cam->streaming = 1; ret = cpia2_usb_stream_resume(cam); } return ret; } /****************************************************************************** * * cpia2_usb_stream_pause * *****************************************************************************/ int cpia2_usb_stream_pause(struct camera_data *cam) { int ret = 0; if(cam->streaming) { ret = set_alternate(cam, USBIF_CMDONLY); free_sbufs(cam); } return ret; } /****************************************************************************** * * cpia2_usb_stream_resume * *****************************************************************************/ int cpia2_usb_stream_resume(struct camera_data *cam) { int ret = 0; if(cam->streaming) { cam->first_image_seen = 0; ret = set_alternate(cam, cam->params.camera_state.stream_mode); if(ret == 0) { ret = submit_urbs(cam); } } return ret; } /****************************************************************************** * * cpia2_usb_stream_stop * *****************************************************************************/ int cpia2_usb_stream_stop(struct camera_data *cam) { int ret; ret = cpia2_usb_stream_pause(cam); cam->streaming = 0; configure_transfer_mode(cam, 0); return ret; } /****************************************************************************** * * cpia2_usb_probe * * Probe and initialize. *****************************************************************************/ static int cpia2_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_interface_descriptor *interface; struct camera_data *cam; int ret; /* A multi-config CPiA2 camera? 
*/ if (udev->descriptor.bNumConfigurations != 1) return -ENODEV; interface = &intf->cur_altsetting->desc; /* If we get to this point, we found a CPiA2 camera */ LOG("CPiA2 USB camera found\n"); if((cam = cpia2_init_camera_struct()) == NULL) return -ENOMEM; cam->dev = udev; cam->iface = interface->bInterfaceNumber; ret = set_alternate(cam, USBIF_CMDONLY); if (ret < 0) { ERR("%s: usb_set_interface error (ret = %d)\n", __func__, ret); kfree(cam); return ret; } if ((ret = cpia2_register_camera(cam)) < 0) { ERR("%s: Failed to register cpia2 camera (ret = %d)\n", __func__, ret); kfree(cam); return ret; } if((ret = cpia2_init_camera(cam)) < 0) { ERR("%s: failed to initialize cpia2 camera (ret = %d)\n", __func__, ret); cpia2_unregister_camera(cam); kfree(cam); return ret; } LOG(" CPiA Version: %d.%02d (%d.%d)\n", cam->params.version.firmware_revision_hi, cam->params.version.firmware_revision_lo, cam->params.version.asic_id, cam->params.version.asic_rev); LOG(" CPiA PnP-ID: %04x:%04x:%04x\n", cam->params.pnp_id.vendor, cam->params.pnp_id.product, cam->params.pnp_id.device_revision); LOG(" SensorID: %d.(version %d)\n", cam->params.version.sensor_flags, cam->params.version.sensor_rev); usb_set_intfdata(intf, cam); return 0; } /****************************************************************************** * * cpia2_disconnect * *****************************************************************************/ static void cpia2_usb_disconnect(struct usb_interface *intf) { struct camera_data *cam = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); cam->present = 0; DBG("Stopping stream\n"); cpia2_usb_stream_stop(cam); DBG("Unregistering camera\n"); cpia2_unregister_camera(cam); if(cam->buffers) { DBG("Wakeup waiting processes\n"); cam->curbuff->status = FRAME_READY; cam->curbuff->length = 0; if (waitqueue_active(&cam->wq_stream)) wake_up_interruptible(&cam->wq_stream); } DBG("Releasing interface\n"); usb_driver_release_interface(&cpia2_driver, intf); if (cam->open_count == 0) { 
DBG("Freeing camera structure\n"); kfree(cam); } LOG("CPiA2 camera disconnected.\n"); } /****************************************************************************** * * usb_cpia2_init * *****************************************************************************/ int cpia2_usb_init(void) { return usb_register(&cpia2_driver); } /****************************************************************************** * * usb_cpia_cleanup * *****************************************************************************/ void cpia2_usb_cleanup(void) { schedule_timeout(2 * HZ); usb_deregister(&cpia2_driver); }
gpl-2.0
fat-tire/nook_tablet_kernel
drivers/staging/octeon/cvmx-helper-loop.c
4790
2679
/***********************license start*************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2008 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. * Contact Cavium Networks for more information ***********************license end**************************************/ /* * Functions for LOOP initialization, configuration, * and monitoring. */ #include <asm/octeon/octeon.h> #include "cvmx-config.h" #include "cvmx-helper.h" #include "cvmx-pip-defs.h" /** * Probe a LOOP interface and determine the number of ports * connected to it. The LOOP interface should still be down * after this call. * * @interface: Interface to probe * * Returns Number of ports on the interface. Zero to disable. 
*/
int __cvmx_helper_loop_probe(int interface)
{
	union cvmx_ipd_sub_port_fcs fcs_ctl;
	const int loop_ports = 4;
	int idx;

	/*
	 * Turn off min/max length checking on every loopback port so
	 * packets shorter than 64 bytes and jumbo frames pass without
	 * being flagged as errors.
	 */
	for (idx = 0; idx < loop_ports; idx++) {
		union cvmx_pip_prt_cfgx cfg;
		int pip_port = cvmx_helper_get_ipd_port(interface, idx);

		cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(pip_port));
		cfg.s.maxerr_en = 0;
		cfg.s.minerr_en = 0;
		cvmx_write_csr(CVMX_PIP_PRT_CFGX(pip_port), cfg.u64);
	}

	/* Disable FCS stripping for the loopback ports. */
	fcs_ctl.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
	fcs_ctl.s.port_bit2 = 0;
	cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, fcs_ctl.u64);

	return loop_ports;
}

/**
 * Bringup and enable a LOOP interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * @interface: Interface to bring up
 *
 * Returns Zero on success, negative on failure
 */
int __cvmx_helper_loop_enable(int interface)
{
	/* Nothing to do: loopback needs no link bringup. */
	return 0;
}
gpl-2.0
ganjafuzz/PureKernel-v2-CAF
drivers/tty/serial/8250/8250_mca.c
7862
1336
/* * Copyright (C) 2005 Russell King. * Data taken from include/asm-i386/serial.h * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/mca.h> #include <linux/serial_8250.h> /* * FIXME: Should we be doing AUTO_IRQ here? */ #ifdef CONFIG_SERIAL_8250_DETECT_IRQ #define MCA_FLAGS UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ #else #define MCA_FLAGS UPF_BOOT_AUTOCONF | UPF_SKIP_TEST #endif #define PORT(_base,_irq) \ { \ .iobase = _base, \ .irq = _irq, \ .uartclk = 1843200, \ .iotype = UPIO_PORT, \ .flags = MCA_FLAGS, \ } static struct plat_serial8250_port mca_data[] = { PORT(0x3220, 3), PORT(0x3228, 3), PORT(0x4220, 3), PORT(0x4228, 3), PORT(0x5220, 3), PORT(0x5228, 3), { }, }; static struct platform_device mca_device = { .name = "serial8250", .id = PLAT8250_DEV_MCA, .dev = { .platform_data = mca_data, }, }; static int __init mca_init(void) { if (!MCA_bus) return -ENODEV; return platform_device_register(&mca_device); } module_init(mca_init); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("8250 serial probe module for MCA ports"); MODULE_LICENSE("GPL");
gpl-2.0
omnirom/android_kernel_oppo_r819
drivers/staging/winbond/mds.c
8118
17881
#include "mds_f.h"
#include "mto.h"
#include "wbhal.h"
#include "wb35tx_f.h"

/* Reset the MDS TX state and fetch the HAL transmit buffer. */
unsigned char Mds_initial(struct wbsoft_priv *adapter)
{
	struct wb35_mds *pMds = &adapter->Mds;

	pMds->TxPause = false;
	pMds->TxRTSThreshold = DEFAULT_RTSThreshold;
	pMds->TxFragmentThreshold = DEFAULT_FRAGMENT_THRESHOLD;

	return hal_get_tx_buffer(&adapter->sHwData, &pMds->pTxBuffer);
}

/*
 * Fill the RTS/CTS and per-fragment duration fields of every MPDU in the
 * buffer, then byte-swap the T00/T01 descriptor words to little endian.
 */
static void Mds_DurationSet(struct wbsoft_priv *adapter,
			    struct wb35_descriptor *pDes, u8 *buffer)
{
	struct T00_descriptor *pT00;
	struct T01_descriptor *pT01;
	u16 Duration, NextBodyLen, OffsetSize;
	u8 Rate, i;
	unsigned char CTS_on = false, RTS_on = false;
	struct T00_descriptor *pNextT00;
	u16 BodyLen = 0;
	unsigned char boGroupAddr = false;

	/* Per-MPDU stride: fragment payload + 32B header area, 4B aligned. */
	OffsetSize = pDes->FragmentThreshold + 32 + 3;
	OffsetSize &= ~0x03;
	Rate = pDes->TxRate >> 1;
	if (!Rate)
		Rate = 1;

	pT00 = (struct T00_descriptor *)buffer;
	pT01 = (struct T01_descriptor *)(buffer+4);
	pNextT00 = (struct T00_descriptor *)(buffer+OffsetSize);

	if (buffer[DOT_11_DA_OFFSET+8] & 0x1)	/* +8 for USB hdr */
		boGroupAddr = true;

	/******************************************
	 * Set RTS/CTS mechanism
	 ******************************************/
	if (!boGroupAddr) {
		/* NOTE: If the protection mode is enabled and the MSDU will
		 * be fragmented, the tx rates of MPDUs will all be DSSS
		 * rates, so CTS-to-self is only used without fragmentation.
		 * -- 20050112 */
		BodyLen = (u16)pT00->T00_frame_length;	/* incl. 802.11 hdr */
		BodyLen += 4;	/* CRC */

		if (BodyLen >= CURRENT_RTS_THRESHOLD)
			RTS_on = true;	/* Using RTS */
		else {
			if (pT01->T01_modulation_type) {	/* OFDM */
				if (CURRENT_PROTECT_MECHANISM)
					CTS_on = true;	/* Using CTS */
			}
		}
	}

	if (RTS_on || CTS_on) {
		if (pT01->T01_modulation_type) {	/* OFDM */
			/* CTS duration: 2 SIFS + DATA tx time + 1 ACK
			 * (ACK at 24 Mbps, 14 bytes). */
			Duration = 2*DEFAULT_SIFSTIME +
				   2*PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION +
				   ((BodyLen*8 + 22 + Rate*4 - 1)/(Rate*4))*Tsym +
				   ((112 + 22 + 95)/96)*Tsym;
		} else {	/* DSSS */
			if (pT01->T01_plcp_header_length)	/* long preamble */
				Duration = LONG_PREAMBLE_PLUS_PLCPHEADER_TIME*2;
			else
				Duration = SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME*2;
			Duration += (((BodyLen + 14)*8 + Rate-1) / Rate +
				     DEFAULT_SIFSTIME*2);
		}

		if (RTS_on) {
			/* Add CTS + 1 SIFS on top for the RTS exchange. */
			if (pT01->T01_modulation_type) {	/* OFDM */
				Duration += (DEFAULT_SIFSTIME +
					     PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION +
					     ((112 + 22 + 95)/96)*Tsym);
			} else {
				if (pT01->T01_plcp_header_length)
					Duration += LONG_PREAMBLE_PLUS_PLCPHEADER_TIME;
				else
					Duration += SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME;
				Duration += (((112 + Rate-1) / Rate) +
					     DEFAULT_SIFSTIME);
			}
		}

		/* Set the value into USB descriptor. */
		pT01->T01_add_rts = RTS_on ? 1 : 0;
		pT01->T01_add_cts = CTS_on ? 1 : 0;
		pT01->T01_rts_cts_duration = Duration;
	}

	/******************************************
	 * Fill the more-fragment descriptors
	 ******************************************/
	if (boGroupAddr)
		Duration = 0;
	else {
		for (i = pDes->FragmentCount-1; i > 0; i--) {
			NextBodyLen = (u16)pNextT00->T00_frame_length;
			NextBodyLen += 4;	/* CRC */

			if (pT01->T01_modulation_type) {
				/* OFDM: data tx time + 3 SIFS + 2 ACK. */
				Duration = PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION * 3;
				Duration += (((NextBodyLen*8 + 22 + Rate*4 - 1)/(Rate*4)) * Tsym +
					     (((2*14)*8 + 22 + 95)/96)*Tsym +
					     DEFAULT_SIFSTIME*3);
			} else {
				/* DSSS: data tx time + 2 ACK + 3 SIFS. */
				if (pT01->T01_plcp_header_length)
					Duration = LONG_PREAMBLE_PLUS_PLCPHEADER_TIME*3;
				else
					Duration = SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME*3;
				Duration += (((NextBodyLen + (2*14))*8 + Rate-1) / Rate +
					     DEFAULT_SIFSTIME*3);
			}

			/* [5]: skip 8B USB hdr + FC => 802.11 Duration field. */
			((u16 *)buffer)[5] = cpu_to_le16(Duration);
			/* 20061009: fix endianness of this MPDU's words. */
			pNextT00->value = cpu_to_le32(pNextT00->value);
			pT01->value = cpu_to_le32(pT01->value);
			buffer += OffsetSize;
			pT01 = (struct T01_descriptor *)(buffer+4);
			if (i != 1)	/* last fragment has no successor */
				pNextT00 = (struct T00_descriptor *)(buffer+OffsetSize);
		}

		/*******************************************
		 * Fill the last fragment descriptor
		 *******************************************/
		if (pT01->T01_modulation_type) {
			/* OFDM: 1 SIFS + 1 ACK (ACK at 24 Mbps). */
			Duration = PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION;
			Duration += (((112 + 22 + 95)/96)*Tsym + DEFAULT_SIFSTIME);
		} else {
			/* DSSS: 1 ACK + 1 SIFS. */
			if (pT01->T01_plcp_header_length)
				Duration = LONG_PREAMBLE_PLUS_PLCPHEADER_TIME;
			else
				Duration = SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME;
			Duration += ((112 + Rate-1)/Rate + DEFAULT_SIFSTIME);
		}
	}

	((u16 *)buffer)[5] = cpu_to_le16(Duration);
	pT00->value = cpu_to_le32(pT00->value);
	pT01->value = cpu_to_le32(pT01->value);
}

/*
 * Copy the MSDU body into the USB buffer, fragmenting at FragmentThreshold.
 * Returns the 4n-aligned total size written.
 */
static u16 Mds_BodyCopy(struct wbsoft_priv *adapter,
			struct wb35_descriptor *pDes, u8 *TargetBuffer)
{
	struct T00_descriptor *pT00;
	struct wb35_mds *pMds = &adapter->Mds;
	u8 *buffer;
	u8 *src_buffer;
	u8 *pctmp;
	u16 Size = 0;
	u16 SizeLeft, CopySize, CopyLeft, stmp;
	u8 buf_index, FragmentCount = 0;

	/* Copy fragment body; buffer points past 8B USB + 24B 802.11 hdr. */
	buffer = TargetBuffer;
	SizeLeft = pDes->buffer_total_size;
	buf_index = pDes->buffer_start_index;

	pT00 = (struct T00_descriptor *)buffer;
	while (SizeLeft) {
		pT00 = (struct T00_descriptor *)buffer;
		CopySize = SizeLeft;
		if (SizeLeft > pDes->FragmentThreshold) {
			CopySize = pDes->FragmentThreshold;
			pT00->T00_frame_length = 24 + CopySize;	/* USB len */
		} else
			pT00->T00_frame_length = 24 + SizeLeft;	/* USB len */

		SizeLeft -= CopySize;

		/* Stamp the fragment number into the sequence-control byte. */
		pctmp = (u8 *)(buffer + 8 + DOT_11_SEQUENCE_OFFSET);
		*pctmp &= 0xf0;
		*pctmp |= FragmentCount;	/* 931130.5.m */

		if (!FragmentCount)
			pT00->T00_first_mpdu = 1;

		buffer += 32;	/* 8B usb + 24B 802.11 header */
		Size += 32;

		stmp = CopySize + 3;
		stmp &= ~0x03;	/* 4n alignment */
		Size += stmp;	/* Current 4n offset of mpdu */

		while (CopySize) {
			/* Drain the scatter buffers into this fragment. */
			src_buffer = pDes->buffer_address[buf_index];
			CopyLeft = CopySize;
			if (CopySize >= pDes->buffer_size[buf_index]) {
				CopyLeft = pDes->buffer_size[buf_index];
				/* Advance to the next descriptor buffer. */
				buf_index++;
				buf_index %= MAX_DESCRIPTOR_BUFFER_INDEX;
			} else {
				u8 *pctmp = pDes->buffer_address[buf_index];
				pctmp += CopySize;
				pDes->buffer_address[buf_index] = pctmp;
				pDes->buffer_size[buf_index] -= CopySize;
			}

			memcpy(buffer, src_buffer, CopyLeft);
			buffer += CopyLeft;
			CopySize -= CopyLeft;
		}

		/* 931130.5.n: record where the trailing MIC bytes land. */
		if (pMds->MicAdd) {
			if (!SizeLeft) {
				pMds->MicWriteAddress[pMds->MicWriteIndex] =
					buffer - pMds->MicAdd;
				pMds->MicWriteSize[pMds->MicWriteIndex] =
					pMds->MicAdd;
				pMds->MicAdd = 0;
			} else if (SizeLeft < 8) {	/* 931130.5.p */
				pMds->MicAdd = SizeLeft;
				pMds->MicWriteAddress[pMds->MicWriteIndex] =
					buffer - (8 - SizeLeft);
				pMds->MicWriteSize[pMds->MicWriteIndex] =
					8 - SizeLeft;
				pMds->MicWriteIndex++;
			}
		}

		/* Generate the header of the next MPDU, if any. */
		if (SizeLeft) {
			buffer = TargetBuffer + Size;	/* next 4n address */
			memcpy(buffer, TargetBuffer, 32);	/* 8B USB + 24B 802.11 */
			pT00 = (struct T00_descriptor *)buffer;
			pT00->T00_first_mpdu = 0;
		}

		FragmentCount++;
	}

	pT00->T00_last_mpdu = 1;
	pT00->T00_IsLastMpdu = 1;
	buffer = (u8 *)pT00 + 8;	/* +8 for USB hdr */
	buffer[1] &= ~0x04;	/* clear More Frag bit of frame control */
	pDes->FragmentCount = FragmentCount;	/* final fragment count */
	return Size;
}

/*
 * Build the 8-byte USB descriptor (T00/T01) and copy the 802.11 MAC header
 * into the USB buffer; also selects TX rate, fallback rate and preamble.
 */
static void Mds_HeaderCopy(struct wbsoft_priv *adapter,
			   struct wb35_descriptor *pDes, u8 *TargetBuffer)
{
	struct wb35_mds *pMds = &adapter->Mds;
	u8 *src_buffer = pDes->buffer_address[0];	/* 931130.5.g */
	struct T00_descriptor *pT00;
	struct T01_descriptor *pT01;
	u16 stmp;
	u8 i, ctmp1, ctmp2, ctmpf;
	u16 FragmentThreshold = CURRENT_FRAGMENT_THRESHOLD;

	stmp = pDes->buffer_total_size;

	/* Set USB header, 8 bytes (two 32-bit descriptor words). */
	pT00 = (struct T00_descriptor *)TargetBuffer;
	TargetBuffer += 4;
	pT01 = (struct T01_descriptor *)TargetBuffer;
	TargetBuffer += 4;

	pT00->value = 0;	/* clear */
	pT01->value = 0;	/* clear */

	pT00->T00_tx_packet_id = pDes->Descriptor_ID;	/* packet ID */
	pT00->T00_header_length = 24;	/* header length */
	pT01->T01_retry_abort_ebable = 1;	/* 921013 931130.5.h */

	/* Key ID setup */
	pT01->T01_wep_id = 0;

	FragmentThreshold = DEFAULT_FRAGMENT_THRESHOLD;	/* do not fragment */

	/* Copy the MAC header; the 1st buffer holds all data (931130.5.j). */
	memcpy(TargetBuffer, src_buffer, DOT_11_MAC_HEADER_SIZE);
	pDes->buffer_address[0] = src_buffer + DOT_11_MAC_HEADER_SIZE;
	pDes->buffer_total_size -= DOT_11_MAC_HEADER_SIZE;
	pDes->buffer_size[0] = pDes->buffer_total_size;

	/* Set fragment threshold (minus header and CRC). */
	FragmentThreshold -= (DOT_11_MAC_HEADER_SIZE + 4);
	pDes->FragmentThreshold = FragmentThreshold;

	/* Set More Frag bit; cleared again on the last MPDU in BodyCopy. */
	TargetBuffer[1] |= 0x04;

	stmp = *(u16 *)(TargetBuffer+30);	/* 2n alignment address */

	/* Use basic rate */
	ctmp1 = ctmpf = CURRENT_TX_RATE_FOR_MNG;

	pDes->TxRate = ctmp1;
	pr_debug("Tx rate =%x\n", ctmp1);

	pT01->T01_modulation_type = (ctmp1%3) ? 0 : 1;

	for (i = 0; i < 2; i++) {
		if (i == 1)
			ctmp1 = ctmpf;
		/* Back up the tx rate and fall-back rate. */
		pMds->TxRate[pDes->Descriptor_ID][i] = ctmp1;

		/* Rate conversion for USB (units of 500 kbps -> code). */
		if (ctmp1 == 108) ctmp2 = 7;
		else if (ctmp1 == 96) ctmp2 = 6;
		else if (ctmp1 == 72) ctmp2 = 5;
		else if (ctmp1 == 48) ctmp2 = 4;
		else if (ctmp1 == 36) ctmp2 = 3;
		else if (ctmp1 == 24) ctmp2 = 2;
		else if (ctmp1 == 18) ctmp2 = 1;
		else if (ctmp1 == 12) ctmp2 = 0;
		else if (ctmp1 == 22) ctmp2 = 3;
		else if (ctmp1 == 11) ctmp2 = 2;
		else if (ctmp1 == 4) ctmp2 = 1;
		else ctmp2 = 0;	/* ctmp1 == 2 or default */

		if (i == 0)
			pT01->T01_transmit_rate = ctmp2;
		else
			pT01->T01_fall_back_rate = ctmp2;
	}

	/* Set preamble type: 1 Mbps DSSS always uses long preamble. */
	if ((pT01->T01_modulation_type == 0) &&
	    (pT01->T01_transmit_rate == 0))	/* RATE_1M */
		pDes->PreambleMode = WLAN_PREAMBLE_TYPE_LONG;
	else
		pDes->PreambleMode = CURRENT_PREAMBLE_MODE;
	pT01->T01_plcp_header_length = pDes->PreambleMode;	/* preamble */
}

/* Append the pending MLME frame to the descriptor's buffer list. */
static void MLME_GetNextPacket(struct wbsoft_priv *adapter,
			       struct wb35_descriptor *desc)
{
	desc->InternalUsed = desc->buffer_start_index + desc->buffer_number;
	desc->InternalUsed %= MAX_DESCRIPTOR_BUFFER_INDEX;
	desc->buffer_address[desc->InternalUsed] = adapter->sMlmeFrame.pMMPDU;
	desc->buffer_size[desc->InternalUsed] = adapter->sMlmeFrame.len;
	desc->buffer_total_size += adapter->sMlmeFrame.len;
	desc->buffer_number++;
	desc->Type = adapter->sMlmeFrame.DataType;
}

/* Mark the TxMMPDU slot owning @pData as free. */
static void MLMEfreeMMPDUBuffer(struct wbsoft_priv *adapter, s8 *pData)
{
	int i;

	/* Find the slot whose buffer matches @pData. */
	for (i = 0; i < MAX_NUM_TX_MMPDU; i++) {
		if (pData == (s8 *)&(adapter->sMlmeFrame.TxMMPDU[i]))
			break;
	}

	/*
	 * Fix: when @pData matched no slot the original fell through with
	 * i == MAX_NUM_TX_MMPDU and indexed TxMMPDUInUse[] out of bounds.
	 */
	if (i == MAX_NUM_TX_MMPDU)
		return;

	if (adapter->sMlmeFrame.TxMMPDUInUse[i])
		adapter->sMlmeFrame.TxMMPDUInUse[i] = false;
	else {
		/* Something wrong. PD43: add debug code here??? */
	}
}

/* Release the MLME frame buffer and mark the frame slot reusable. */
static void MLME_SendComplete(struct wbsoft_priv *adapter, u8 PacketID,
			      unsigned char SendOK)
{
	/* Reclaim the data buffer. */
	adapter->sMlmeFrame.len = 0;
	MLMEfreeMMPDUBuffer(adapter, adapter->sMlmeFrame.pMMPDU);

	/* Return resource. */
	adapter->sMlmeFrame.IsInUsed = PACKET_FREE_TO_USE;
}

/*
 * Main transmit path: pack pending MLME frames into the next free USB TX
 * buffer, set headers/durations, hand ownership to hardware and kick
 * Wb35Tx_start(). Re-entrancy is prevented via TxThreadCount.
 */
void Mds_Tx(struct wbsoft_priv *adapter)
{
	struct hw_data *pHwData = &adapter->sHwData;
	struct wb35_mds *pMds = &adapter->Mds;
	struct wb35_descriptor TxDes;
	struct wb35_descriptor *pTxDes = &TxDes;
	u8 *XmitBufAddress;
	u16 XmitBufSize, PacketSize, stmp, CurrentSize, FragmentThreshold;
	u8 FillIndex, TxDesIndex, FragmentCount, FillCount;
	unsigned char BufferFilled = false;

	if (pMds->TxPause)
		return;
	if (!hal_driver_init_OK(pHwData))
		return;

	/* Only one thread can run here. */
	if (atomic_inc_return(&pMds->TxThreadCount) != 1)
		goto cleanup;

	/* Start to fill the data. */
	do {
		FillIndex = pMds->TxFillIndex;
		if (pMds->TxOwner[FillIndex]) {	/* owner 0:SW 1:HW */
			pr_debug("[Mds_Tx] Tx Owner is H/W.\n");
			break;
		}

		XmitBufAddress = pMds->pTxBuffer +
				 (MAX_USB_TX_BUFFER * FillIndex);
		XmitBufSize = 0;
		FillCount = 0;
		do {
			PacketSize = adapter->sMlmeFrame.len;
			if (!PacketSize)
				break;

			/* Check the buffer resource. */
			FragmentThreshold = CURRENT_FRAGMENT_THRESHOLD;	/* 931130.5.b */
			FragmentCount = PacketSize/FragmentThreshold + 1;
			stmp = PacketSize + FragmentCount*32 + 8;	/* 931130.5.c 8:MIC */
			if ((XmitBufSize + stmp) >= MAX_USB_TX_BUFFER) {
				printk("[Mds_Tx] Excess max tx buffer.\n");
				break;	/* buffer is not enough */
			}

			/* Start transmitting. */
			BufferFilled = true;

			/* Leaves first u8 intact. */
			memset((u8 *)pTxDes + 1, 0,
			       sizeof(struct wb35_descriptor) - 1);

			TxDesIndex = pMds->TxDesIndex;	/* current ID */
			pTxDes->Descriptor_ID = TxDesIndex;
			pMds->TxDesFrom[TxDesIndex] = 2;	/* source: MLME */
			pMds->TxDesIndex++;
			pMds->TxDesIndex %= MAX_USB_TX_DESCRIPTOR;

			MLME_GetNextPacket(adapter, pTxDes);

			/* Copy 8B USB + 24B 802.11 header; set rate/preamble. */
			Mds_HeaderCopy(adapter, pTxDes, XmitBufAddress);

			/* Speed up key setting for the EAPOL 4th frame. */
			if (pTxDes->EapFix) {
				pr_debug("35: EPA 4th frame detected. Size = %d\n",
					 PacketSize);
				pHwData->IsKeyPreSet = 1;
			}

			/* Copy (fragment) frame body; set USB/802.11 flags. */
			CurrentSize = Mds_BodyCopy(adapter, pTxDes,
						   XmitBufAddress);

			/* Set RTS/CTS and duration fields into the buffer. */
			Mds_DurationSet(adapter, pTxDes, XmitBufAddress);

			/* Shift to the next address. */
			XmitBufSize += CurrentSize;
			XmitBufAddress += CurrentSize;

			MLME_SendComplete(adapter, 0, true);

			/* Software TSC count. 20060214 */
			pMds->TxTsc++;
			if (pMds->TxTsc == 0)
				pMds->TxTsc_2++;

			FillCount++;	/* 20060928 */
		} while (HAL_USB_MODE_BURST(pHwData));
		/* false = single MSDU, true = multiple per USB buffer. */

		/* Move to the next buffer, if necessary. */
		if (BufferFilled) {
			pMds->TxBufferSize[FillIndex] = XmitBufSize;
			pMds->TxCountInBuffer[FillIndex] = FillCount;	/* 20060928 */
			pMds->TxOwner[FillIndex] = 1;	/* hand to H/W */
			pMds->TxFillIndex++;
			pMds->TxFillIndex %= MAX_USB_TX_BUFFER_NUMBER;
			BufferFilled = false;
		} else
			break;

		if (!PacketSize)	/* no more packets to transmit */
			break;
	} while (true);

	/* Start to send by lower module. */
	if (!pHwData->IsKeyPreSet)
		Wb35Tx_start(adapter);

cleanup:
	atomic_dec(&pMds->TxThreadCount);
}

/*
 * TX completion: fold the T02 status word into retry/error statistics and
 * feed the rate-control module (MTO), then clear the per-packet result.
 */
void Mds_SendComplete(struct wbsoft_priv *adapter, struct T02_descriptor *pT02)
{
	struct wb35_mds *pMds = &adapter->Mds;
	struct hw_data *pHwData = &adapter->sHwData;
	u8 PacketId = (u8)pT02->T02_Tx_PktID;
	unsigned char SendOK = true;
	u8 RetryCount, TxRate;

	if (pT02->T02_IgnoreResult)	/* don't care about the result */
		return;
	if (pT02->T02_IsLastMpdu) {
		/* TODO: DTO -- get the retry count and fragment count. */
		TxRate = pMds->TxRate[PacketId][0];
		RetryCount = (u8)pT02->T02_MPDU_Cnt;
		if (pT02->value & FLAG_ERROR_TX_MASK) {
			SendOK = false;
			if (pT02->T02_transmit_abort ||
			    pT02->T02_out_of_MaxTxMSDULiftTime) {
				/* Retry error. */
				pHwData->dto_tx_retry_count += (RetryCount+1);
				/* [for tx debug] */
				if (RetryCount < 7)
					pHwData->tx_retry_count[RetryCount] += RetryCount;
				else
					pHwData->tx_retry_count[7] += RetryCount;
				pr_debug("dto_tx_retry_count =%d\n",
					 pHwData->dto_tx_retry_count);
				MTO_SetTxCount(adapter, TxRate, RetryCount);
			}
			pHwData->dto_tx_frag_count += (RetryCount+1);
			/* [for tx debug] */
			if (pT02->T02_transmit_abort_due_to_TBTT)
				pHwData->tx_TBTT_start_count++;
			if (pT02->T02_transmit_without_encryption_due_to_wep_on_false)
				pHwData->tx_WepOn_false_count++;
			if (pT02->T02_discard_due_to_null_wep_key)
				pHwData->tx_Null_key_count++;
		} else {
			if (pT02->T02_effective_transmission_rate)
				pHwData->tx_ETR_count++;
			MTO_SetTxCount(adapter, TxRate, RetryCount);
		}

		/* Clear the send result buffer. */
		pMds->TxResult[PacketId] = 0;
	} else
		pMds->TxResult[PacketId] |= ((u16)(pT02->value & 0x0ffff));
}
gpl-2.0
TeamBliss-Devices/android_kernel_samsung_jflte
drivers/char/hw_random/amd-rng.c
10934
3702
/* * RNG driver for AMD RNGs * * Copyright 2005 (c) MontaVista Software, Inc. * * with the majority of the code coming from: * * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG) * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com> * * derived from * * Hardware driver for the AMD 768 Random Number Generator (RNG) * (c) Copyright 2001 Red Hat Inc * * derived from * * Hardware driver for Intel i810 Random Number Generator (RNG) * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com> * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/hw_random.h> #include <linux/delay.h> #include <asm/io.h> #define PFX KBUILD_MODNAME ": " /* * Data for PCI driver interface * * This data only exists for exporting the supported * PCI ids via MODULE_DEVICE_TABLE. We do not actually * register a pci_driver, because someone else might one day * want to register another driver on the same PCI id. 
*/ static const struct pci_device_id pci_tbl[] = { { PCI_VDEVICE(AMD, 0x7443), 0, }, { PCI_VDEVICE(AMD, 0x746b), 0, }, { 0, }, /* terminate list */ }; MODULE_DEVICE_TABLE(pci, pci_tbl); static struct pci_dev *amd_pdev; static int amd_rng_data_present(struct hwrng *rng, int wait) { u32 pmbase = (u32)rng->priv; int data, i; for (i = 0; i < 20; i++) { data = !!(inl(pmbase + 0xF4) & 1); if (data || !wait) break; udelay(10); } return data; } static int amd_rng_data_read(struct hwrng *rng, u32 *data) { u32 pmbase = (u32)rng->priv; *data = inl(pmbase + 0xF0); return 4; } static int amd_rng_init(struct hwrng *rng) { u8 rnen; pci_read_config_byte(amd_pdev, 0x40, &rnen); rnen |= (1 << 7); /* RNG on */ pci_write_config_byte(amd_pdev, 0x40, rnen); pci_read_config_byte(amd_pdev, 0x41, &rnen); rnen |= (1 << 7); /* PMIO enable */ pci_write_config_byte(amd_pdev, 0x41, rnen); return 0; } static void amd_rng_cleanup(struct hwrng *rng) { u8 rnen; pci_read_config_byte(amd_pdev, 0x40, &rnen); rnen &= ~(1 << 7); /* RNG off */ pci_write_config_byte(amd_pdev, 0x40, rnen); } static struct hwrng amd_rng = { .name = "amd", .init = amd_rng_init, .cleanup = amd_rng_cleanup, .data_present = amd_rng_data_present, .data_read = amd_rng_data_read, }; static int __init mod_init(void) { int err = -ENODEV; struct pci_dev *pdev = NULL; const struct pci_device_id *ent; u32 pmbase; for_each_pci_dev(pdev) { ent = pci_match_id(pci_tbl, pdev); if (ent) goto found; } /* Device not found. 
*/ goto out; found: err = pci_read_config_dword(pdev, 0x58, &pmbase); if (err) goto out; err = -EIO; pmbase &= 0x0000FF00; if (pmbase == 0) goto out; if (!request_region(pmbase + 0xF0, 8, "AMD HWRNG")) { dev_err(&pdev->dev, "AMD HWRNG region 0x%x already in use!\n", pmbase + 0xF0); err = -EBUSY; goto out; } amd_rng.priv = (unsigned long)pmbase; amd_pdev = pdev; printk(KERN_INFO "AMD768 RNG detected\n"); err = hwrng_register(&amd_rng); if (err) { printk(KERN_ERR PFX "RNG registering failed (%d)\n", err); release_region(pmbase + 0xF0, 8); goto out; } out: return err; } static void __exit mod_exit(void) { u32 pmbase = (unsigned long)amd_rng.priv; release_region(pmbase + 0xF0, 8); hwrng_unregister(&amd_rng); } module_init(mod_init); module_exit(mod_exit); MODULE_AUTHOR("The Linux Kernel team"); MODULE_DESCRIPTION("H/W RNG driver for AMD chipsets"); MODULE_LICENSE("GPL");
gpl-2.0
CryToCry96/android_kernel_huawei_msm7x27a
arch/x86/kernel/cpu/transmeta.c
11190
2945
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include "cpu.h"

/*
 * Early setup: read the Transmeta-defined capability flags, if present.
 * Leaf 0x80860000 returns the highest supported Transmeta CPUID leaf;
 * leaf 0x80860001 EDX carries the vendor-specific feature bits, stored
 * in capability word 2.
 */
static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c)
{
	u32 xlvl;

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}
}

/*
 * Full Transmeta CPU setup: print processor and Code Morphing Software
 * revisions from the vendor CPUID leaves, unhide capability flags that
 * CMS masks by default, and mark the TSC as constant.
 */
static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
{
	unsigned int cap_mask, uk, max, dummy;
	unsigned int cms_rev1, cms_rev2;
	unsigned int cpu_rev, cpu_freq = 0, cpu_flags, new_cpu_rev;
	char cpu_info[65];

	early_init_transmeta(c);

	cpu_detect_cache_sizes(c);

	/* Print CMS and CPU revision */
	max = cpuid_eax(0x80860000);
	cpu_rev = 0;
	if (max >= 0x80860001) {
		cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
		/* revision 0x02000000 means "look in leaf 0x80860002 instead" */
		if (cpu_rev != 0x02000000) {
			printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
			       (cpu_rev >> 24) & 0xff,
			       (cpu_rev >> 16) & 0xff,
			       (cpu_rev >> 8) & 0xff,
			       cpu_rev & 0xff,
			       cpu_freq);
		}
	}
	if (max >= 0x80860002) {
		cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy);
		if (cpu_rev == 0x02000000) {
			printk(KERN_INFO "CPU: Processor revision %08X, %u MHz\n",
			       new_cpu_rev, cpu_freq);
		}
		printk(KERN_INFO "CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
		       (cms_rev1 >> 24) & 0xff,
		       (cms_rev1 >> 16) & 0xff,
		       (cms_rev1 >> 8) & 0xff,
		       cms_rev1 & 0xff,
		       cms_rev2);
	}
	if (max >= 0x80860006) {
		/*
		 * Leaves 0x80860003..0x80860006 return the 64-byte CPU
		 * information string, 16 bytes per leaf, written directly
		 * into cpu_info[] via the register output pointers.
		 */
		cpuid(0x80860003,
		      (void *)&cpu_info[0],
		      (void *)&cpu_info[4],
		      (void *)&cpu_info[8],
		      (void *)&cpu_info[12]);
		cpuid(0x80860004,
		      (void *)&cpu_info[16],
		      (void *)&cpu_info[20],
		      (void *)&cpu_info[24],
		      (void *)&cpu_info[28]);
		cpuid(0x80860005,
		      (void *)&cpu_info[32],
		      (void *)&cpu_info[36],
		      (void *)&cpu_info[40],
		      (void *)&cpu_info[44]);
		cpuid(0x80860006,
		      (void *)&cpu_info[48],
		      (void *)&cpu_info[52],
		      (void *)&cpu_info[56],
		      (void *)&cpu_info[60]);
		cpu_info[64] = '\0';
		printk(KERN_INFO "CPU: %s\n", cpu_info);
	}

	/*
	 * Unhide possibly hidden capability flags: MSR 0x80860004 masks
	 * the standard CPUID feature bits.  Temporarily lift the mask,
	 * re-read leaf 1 EDX, then restore the original mask.
	 */
	rdmsr(0x80860004, cap_mask, uk);
	wrmsr(0x80860004, ~0, uk);
	c->x86_capability[0] = cpuid_edx(0x00000001);
	wrmsr(0x80860004, cap_mask, uk);

	/* All Transmeta CPUs have a constant TSC */
	set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

#ifdef CONFIG_SYSCTL
	/*
	 * randomize_va_space slows us down enormously;
	 * it probably triggers retranslation of x86->native bytecode
	 */
	randomize_va_space = 0;
#endif
}

static const struct cpu_dev __cpuinitconst transmeta_cpu_dev = {
	.c_vendor	= "Transmeta",
	.c_ident	= { "GenuineTMx86", "TransmetaCPU" },
	.c_early_init	= early_init_transmeta,
	.c_init		= init_transmeta,
	.c_x86_vendor	= X86_VENDOR_TRANSMETA,
};

cpu_dev_register(transmeta_cpu_dev);
gpl-2.0
issi5862/linux-3.17.3-nvdimm-journal
arch/sh/boards/mach-highlander/irq-r7780rp.c
13238
1586
/*
 * Renesas Solutions Highlander R7780RP-1 Support.
 *
 * Copyright (C) 2002  Atom Create Engineering Co., Ltd.
 * Copyright (C) 2006  Paul Mundt
 * Copyright (C) 2008  Magnus Damm
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <mach/highlander.h>

/* Board-specific interrupt sources on the R7780RP-1. */
enum {
	UNUSED = 0,

	/* board specific interrupt sources */
	AX88796,          /* Ethernet controller */
	PSW,              /* Push Switch */
	CF,               /* Compact Flash */

	PCI_A,
	PCI_B,
	PCI_C,
	PCI_D,
};

static struct intc_vect vectors[] __initdata = {
	INTC_IRQ(PCI_A, 65), /* dirty: overwrite cpu vectors for pci */
	INTC_IRQ(PCI_B, 66),
	INTC_IRQ(PCI_C, 67),
	INTC_IRQ(PCI_D, 68),
	INTC_IRQ(CF, IRQ_CF),
	INTC_IRQ(PSW, IRQ_PSW),
	INTC_IRQ(AX88796, IRQ_AX88796),
};

/* Single mask register (IRLMSK at 0xa5000000), 16 bits wide. */
static struct intc_mask_reg mask_registers[] __initdata = {
	{ 0xa5000000, 0, 16, /* IRLMSK */
	  { PCI_A, PCI_B, PCI_C, PCI_D, CF, 0, 0, 0,
	    0, 0, 0, 0, 0, 0, PSW, AX88796 } },
};

/* Map IRL encodings to the IRQ numbers above; 0 means "no source". */
static unsigned char irl2irq[HL_NR_IRL] __initdata = {
	65, 66, 67, 68,
	IRQ_CF, 0, 0, 0,
	0, 0, 0, 0,
	IRQ_AX88796, IRQ_PSW,
};

static DECLARE_INTC_DESC(intc_desc, "r7780rp", vectors,
			 NULL, mask_registers, NULL, NULL);

/*
 * Register the board interrupt controller if one is present.
 *
 * A nonzero read of register 0xa5000600 indicates the r7780rp
 * controller is available; on success the IRL-to-IRQ translation
 * table is returned for the platform code, otherwise NULL.
 */
unsigned char * __init highlander_plat_irq_setup(void)
{
	if (__raw_readw(0xa5000600)) {
		printk(KERN_INFO "Using r7780rp interrupt controller.\n");
		register_intc_controller(&intc_desc);
		return irl2irq;
	}

	return NULL;
}
gpl-2.0
Lloir/htc_kernel_oxp
drivers/video/tegra/dc/ext/cursor.c
183
4583
/*
 * drivers/video/tegra/dc/ext/cursor.c
 *
 * Copyright (c) 2011-2012, NVIDIA CORPORATION, All rights reserved.
 *
 * Author: Robert Morell <rmorell@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <video/tegra_dc_ext.h>

#include "tegra_dc_ext_priv.h"

/* ugh */
#include "../dc_priv.h"
#include "../dc_reg.h"

/*
 * Claim exclusive ownership of the hardware cursor for @user.
 * Returns 0 if @user acquires (or already holds) the cursor,
 * -EBUSY if another user owns it.
 */
int tegra_dc_ext_get_cursor(struct tegra_dc_ext_user *user)
{
	struct tegra_dc_ext *ext = user->ext;
	int ret = 0;

	mutex_lock(&ext->cursor.lock);

	if (!ext->cursor.user)
		ext->cursor.user = user;
	else if (ext->cursor.user != user)
		ret = -EBUSY;

	mutex_unlock(&ext->cursor.lock);

	return ret;
}

/*
 * Release cursor ownership previously taken with
 * tegra_dc_ext_get_cursor().  Returns -EACCES if @user is not the
 * current owner.
 */
int tegra_dc_ext_put_cursor(struct tegra_dc_ext_user *user)
{
	struct tegra_dc_ext *ext = user->ext;
	int ret = 0;

	mutex_lock(&ext->cursor.lock);

	if (ext->cursor.user == user)
		ext->cursor.user = NULL;	/* was "= 0": use NULL for pointers */
	else
		ret = -EACCES;

	mutex_unlock(&ext->cursor.lock);

	return ret;
}

/*
 * Program the cursor colors, image address and size into the display
 * controller registers.  Caller must hold dc->lock.
 */
static void set_cursor_image_hw(struct tegra_dc *dc,
				struct tegra_dc_ext_cursor_image *args,
				dma_addr_t phys_addr)
{
	tegra_dc_writel(dc,
		CURSOR_COLOR(args->foreground.r,
			     args->foreground.g,
			     args->foreground.b),
		DC_DISP_CURSOR_FOREGROUND);
	tegra_dc_writel(dc,
		CURSOR_COLOR(args->background.r,
			     args->background.g,
			     args->background.b),
		DC_DISP_CURSOR_BACKGROUND);

	/* The image address must fit the register's address field. */
	BUG_ON(phys_addr & ~CURSOR_START_ADDR_MASK);

	tegra_dc_writel(dc,
		CURSOR_START_ADDR(((unsigned long) phys_addr)) |
		((args->flags & TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64) ?
			CURSOR_SIZE_64 : 0),
		DC_DISP_CURSOR_START_ADDR);
}

/*
 * Pin the user-supplied nvmap buffer, program it as the cursor image,
 * and release the previously pinned image (if any).  The caller must
 * own the cursor (see tegra_dc_ext_get_cursor()).
 *
 * Returns 0 on success; -EFAULT if the user has no nvmap client,
 * -EINVAL on a bad size flag, -EACCES if not the owner, -ENXIO if the
 * head is disabled, or the error from pinning the buffer.
 */
int tegra_dc_ext_set_cursor_image(struct tegra_dc_ext_user *user,
				  struct tegra_dc_ext_cursor_image *args)
{
	struct tegra_dc_ext *ext = user->ext;
	struct tegra_dc *dc = ext->dc;
	struct nvmap_handle_ref *handle, *old_handle;
	dma_addr_t phys_addr;
	u32 size;
	int ret;

	if (!user->nvmap)
		return -EFAULT;

	/* Exactly one of the two size flags must be set. */
	size = args->flags & (TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 |
			      TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64);
	if (size != TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32 &&
	    size !=  TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64)
		return -EINVAL;

	mutex_lock(&ext->cursor.lock);

	if (ext->cursor.user != user) {
		ret = -EACCES;
		goto unlock;
	}

	if (!ext->enabled) {
		ret = -ENXIO;
		goto unlock;
	}

	old_handle = ext->cursor.cur_handle;

	ret = tegra_dc_ext_pin_window(user, args->buff_id, &handle, &phys_addr);
	if (ret)
		goto unlock;

	ext->cursor.cur_handle = handle;

	mutex_lock(&dc->lock);

	set_cursor_image_hw(dc, args, phys_addr);

	/* Latch the new register values into the active set. */
	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);

	/* XXX sync here? */

	mutex_unlock(&dc->lock);

	mutex_unlock(&ext->cursor.lock);

	/* Drop the old image only after the hardware points at the new one. */
	if (old_handle) {
		nvmap_unpin(ext->nvmap, old_handle);
		nvmap_free(ext->nvmap, old_handle);
	}

	return 0;

unlock:
	mutex_unlock(&ext->cursor.lock);

	return ret;
}

/*
 * Set cursor visibility and position.  The caller must own the cursor.
 * Returns 0 on success, -EACCES if not the owner, -ENXIO if the head
 * is disabled.
 */
int tegra_dc_ext_set_cursor(struct tegra_dc_ext_user *user,
			    struct tegra_dc_ext_cursor *args)
{
	struct tegra_dc_ext *ext = user->ext;
	struct tegra_dc *dc = ext->dc;
	u32 win_options;
	bool enable;
	int ret;

	mutex_lock(&ext->cursor.lock);

	if (ext->cursor.user != user) {
		ret = -EACCES;
		goto unlock;
	}

	if (!ext->enabled) {
		ret = -ENXIO;
		goto unlock;
	}

	enable = !!(args->flags & TEGRA_DC_EXT_CURSOR_FLAGS_VISIBLE);

	mutex_lock(&dc->lock);

	/* Only touch the register when the enable bit actually changes. */
	win_options = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
	if (!!(win_options & CURSOR_ENABLE) != enable) {
		win_options &= ~CURSOR_ENABLE;
		if (enable)
			win_options |= CURSOR_ENABLE;

		tegra_dc_writel(dc, win_options, DC_DISP_DISP_WIN_OPTIONS);
	}

	tegra_dc_writel(dc, CURSOR_POSITION(args->x, args->y),
			DC_DISP_CURSOR_POSITION);

	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);

	/* TODO: need to sync here?  hopefully can avoid this, but need to
	 * figure out interaction w/ rest of GENERAL_ACT_REQ */

	mutex_unlock(&dc->lock);

	mutex_unlock(&ext->cursor.lock);

	return 0;

unlock:
	mutex_unlock(&ext->cursor.lock);

	return ret;
}
gpl-2.0
jamison904/android_kernel_samsung_trlte
drivers/tty/ehv_bytechan.c
2231
23538
/* ePAPR hypervisor byte channel device driver
 *
 * Copyright 2009-2011 Freescale Semiconductor, Inc.
 *
 * Author: Timur Tabi <timur@freescale.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 * This driver supports three distinct interfaces, all of which are related
 * to ePAPR hypervisor byte channels:
 *
 * 1) An early-console (udbg) driver.  This provides early console output
 * through a byte channel.  The byte channel handle must be specified in a
 * Kconfig option.
 *
 * 2) A normal console driver.  Output is sent to the byte channel designated
 * for stdout in the device tree.  The console driver is for handling kernel
 * printk calls.
 *
 * 3) A tty driver, which is used to handle user-space input and output.  The
 * byte channel used for the console is designated as the default tty.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <asm/epapr_hcalls.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/cdev.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/circ_buf.h>
#include <asm/udbg.h>

/* The size of the transmit circular buffer.  This must be a power of two. */
#define BUF_SIZE	2048

/* Per-byte channel private data */
struct ehv_bc_data {
	struct device *dev;
	struct tty_port port;
	uint32_t handle;		/* hypervisor byte channel handle */
	unsigned int rx_irq;
	unsigned int tx_irq;

	spinlock_t lock;		/* lock for transmit buffer */
	unsigned char buf[BUF_SIZE];	/* transmit circular buffer */
	unsigned int head;		/* circular buffer head */
	unsigned int tail;		/* circular buffer tail */

	int tx_irq_enabled;		/* true == TX interrupt is enabled */
};

/* Array of byte channel objects */
static struct ehv_bc_data *bcs;

/* Byte channel handle for stdout (and stdin), taken from device tree */
static unsigned int stdout_bc;

/* Virtual IRQ for the byte channel handle for stdin, taken from device tree */
static unsigned int stdout_irq;

/**************************** SUPPORT FUNCTIONS ****************************/

/*
 * Enable the transmit interrupt
 *
 * Unlike a serial device, byte channels have no mechanism for disabling their
 * own receive or transmit interrupts.  To emulate that feature, we toggle
 * the IRQ in the kernel.
 *
 * We cannot just blindly call enable_irq() or disable_irq(), because these
 * calls are reference counted.  This means that we cannot call enable_irq()
 * if interrupts are already enabled.  This can happen in two situations:
 *
 * 1. The tty layer makes two back-to-back calls to ehv_bc_tty_write()
 * 2. A transmit interrupt occurs while executing ehv_bc_tx_dequeue()
 *
 * To work around this, we keep a flag to tell us if the IRQ is enabled or
 * not.
 */
static void enable_tx_interrupt(struct ehv_bc_data *bc)
{
	if (!bc->tx_irq_enabled) {
		enable_irq(bc->tx_irq);
		bc->tx_irq_enabled = 1;
	}
}

static void disable_tx_interrupt(struct ehv_bc_data *bc)
{
	if (bc->tx_irq_enabled) {
		disable_irq_nosync(bc->tx_irq);
		bc->tx_irq_enabled = 0;
	}
}

/*
 * find the byte channel handle to use for the console
 *
 * The byte channel to be used for the console is specified via a "stdout"
 * property in the /chosen node.
 *
 * For compatibility with legacy device trees, we also look for a "stdout"
 * alias.
 *
 * On success, stdout_bc and stdout_irq are set and 1 is returned;
 * 0 is returned when no usable byte channel console exists.
 */
static int find_console_handle(void)
{
	struct device_node *np, *np2;
	const char *sprop = NULL;
	const uint32_t *iprop;

	np = of_find_node_by_path("/chosen");
	if (np)
		sprop = of_get_property(np, "stdout-path", NULL);

	if (!np || !sprop) {
		of_node_put(np);
		np = of_find_node_by_name(NULL, "aliases");
		if (np)
			sprop = of_get_property(np, "stdout", NULL);
	}

	if (!sprop) {
		of_node_put(np);
		return 0;
	}

	/* We don't care what the aliased node is actually called.  We only
	 * care if it's compatible with "epapr,hv-byte-channel", because that
	 * indicates that it's a byte channel node.  We use a temporary
	 * variable, 'np2', because we can't release 'np' until we're done
	 * with 'sprop'.
	 */
	np2 = of_find_node_by_path(sprop);
	of_node_put(np);
	np = np2;
	if (!np) {
		pr_warning("ehv-bc: stdout node '%s' does not exist\n", sprop);
		return 0;
	}

	/* Is it a byte channel? */
	if (!of_device_is_compatible(np, "epapr,hv-byte-channel")) {
		of_node_put(np);
		return 0;
	}

	stdout_irq = irq_of_parse_and_map(np, 0);
	if (stdout_irq == NO_IRQ) {
		pr_err("ehv-bc: no 'interrupts' property in %s node\n", sprop);
		of_node_put(np);
		return 0;
	}

	/*
	 * The 'hv-handle' property contains the handle for this byte channel.
	 */
	iprop = of_get_property(np, "hv-handle", NULL);
	if (!iprop) {
		pr_err("ehv-bc: no 'hv-handle' property in %s node\n",
		       np->name);
		of_node_put(np);
		return 0;
	}
	stdout_bc = be32_to_cpu(*iprop);

	of_node_put(np);
	return 1;
}

/*************************** EARLY CONSOLE DRIVER ***************************/

#ifdef CONFIG_PPC_EARLY_DEBUG_EHV_BC

/*
 * send a byte to a byte channel, wait if necessary
 *
 * This function sends a byte to a byte channel, and it waits and
 * retries if the byte channel is full.  It returns if the character
 * has been sent, or if some error has occurred.
 */
static void byte_channel_spin_send(const char data)
{
	int ret, count;

	do {
		count = 1;
		ret = ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE,
					   &count, &data);
	} while (ret == EV_EAGAIN);
}

/*
 * The udbg subsystem calls this function to display a single character.
 * We convert CR to a CR/LF.
 */
static void ehv_bc_udbg_putc(char c)
{
	if (c == '\n')
		byte_channel_spin_send('\r');

	byte_channel_spin_send(c);
}

/*
 * early console initialization
 *
 * PowerPC kernels support an early printk console, also known as udbg.
 * This function must be called via the ppc_md.init_early function pointer.
 * At this point, the device tree has been unflattened, so we can obtain the
 * byte channel handle for stdout.
 *
 * We only support displaying of characters (putc).  We do not support
 * keyboard input.
 */
void __init udbg_init_ehv_bc(void)
{
	unsigned int rx_count, tx_count;
	unsigned int ret;

	/* Verify the byte channel handle */
	ret = ev_byte_channel_poll(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE,
				   &rx_count, &tx_count);
	if (ret)
		return;

	udbg_putc = ehv_bc_udbg_putc;
	register_early_udbg_console();
	udbg_printf("ehv-bc: early console using byte channel handle %u\n",
		    CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE);
}

#endif

/****************************** CONSOLE DRIVER ******************************/

static struct tty_driver *ehv_bc_driver;

/*
 * Byte channel console sending worker function.
 *
 * For consoles, if the output buffer is full, we should just spin until it
 * clears.
 */
static int ehv_bc_console_byte_channel_send(unsigned int handle, const char *s,
			     unsigned int count)
{
	unsigned int len;
	int ret = 0;

	while (count) {
		len = min_t(unsigned int, count, EV_BYTE_CHANNEL_MAX_BYTES);
		do {
			ret = ev_byte_channel_send(handle, &len, s);
		} while (ret == EV_EAGAIN);
		count -= len;
		s += len;
	}

	return ret;
}

/*
 * write a string to the console
 *
 * This function gets called to write a string from the kernel, typically from
 * a printk().  This function spins until all data is written.
 *
 * We copy the data to a temporary buffer because we need to insert a \r in
 * front of every \n.  It's more efficient to copy the data to the buffer than
 * it is to make multiple hcalls for each character or each newline.
 */
static void ehv_bc_console_write(struct console *co, const char *s,
				 unsigned int count)
{
	char s2[EV_BYTE_CHANNEL_MAX_BYTES];
	unsigned int i, j = 0;
	char c;

	for (i = 0; i < count; i++) {
		c = *s++;

		if (c == '\n')
			s2[j++] = '\r';

		s2[j++] = c;
		/* Flush whenever one more character might not fit (the next
		 * '\n' expands to two bytes). */
		if (j >= (EV_BYTE_CHANNEL_MAX_BYTES - 1)) {
			if (ehv_bc_console_byte_channel_send(stdout_bc, s2, j))
				return;
			j = 0;
		}
	}

	if (j)
		ehv_bc_console_byte_channel_send(stdout_bc, s2, j);
}

/*
 * When /dev/console is opened, the kernel iterates the console list looking
 * for one with ->device and then calls that method.  On success, it expects
 * the passed-in int* to contain the minor number to use.
 */
static struct tty_driver *ehv_bc_console_device(struct console *co, int *index)
{
	*index = co->index;

	return ehv_bc_driver;
}

static struct console ehv_bc_console = {
	.name		= "ttyEHV",
	.write		= ehv_bc_console_write,
	.device		= ehv_bc_console_device,
	.flags		= CON_PRINTBUFFER | CON_ENABLED,
};

/*
 * Console initialization
 *
 * This is the first function that is called after the device tree is
 * available, so here is where we determine the byte channel handle and IRQ
 * for stdout/stdin, even though that information is used by the tty and
 * character drivers.
 */
static int __init ehv_bc_console_init(void)
{
	if (!find_console_handle()) {
		pr_debug("ehv-bc: stdout is not a byte channel\n");
		return -ENODEV;
	}

#ifdef CONFIG_PPC_EARLY_DEBUG_EHV_BC
	/* Print a friendly warning if the user chose the wrong byte channel
	 * handle for udbg.
	 */
	if (stdout_bc != CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE)
		pr_warning("ehv-bc: udbg handle %u is not the stdout handle\n",
			   CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE);
#endif

	/* add_preferred_console() must be called before register_console(),
	   otherwise it won't work.  However, we don't want to enumerate all
	   the byte channels here, either, since we only care about one. */

	add_preferred_console(ehv_bc_console.name, ehv_bc_console.index, NULL);
	register_console(&ehv_bc_console);

	pr_info("ehv-bc: registered console driver for byte channel %u\n",
		stdout_bc);

	return 0;
}
console_initcall(ehv_bc_console_init);

/******************************** TTY DRIVER ********************************/

/*
 * byte channel receive interrupt handler
 *
 * This ISR is called whenever data is available on a byte channel.
 */
static irqreturn_t ehv_bc_tty_rx_isr(int irq, void *data)
{
	struct ehv_bc_data *bc = data;
	unsigned int rx_count, tx_count, len;
	int count;
	char buffer[EV_BYTE_CHANNEL_MAX_BYTES];
	int ret;

	/* Find out how much data needs to be read, and then ask the TTY layer
	 * if it can handle that much.  We want to ensure that every byte we
	 * read from the byte channel will be accepted by the TTY layer.
	 */
	ev_byte_channel_poll(bc->handle, &rx_count, &tx_count);
	count = tty_buffer_request_room(&bc->port, rx_count);

	/* 'count' is the maximum amount of data the TTY layer can accept at
	 * this time.  However, during testing, I was never able to get 'count'
	 * to be less than 'rx_count'.  I'm not sure whether I'm calling it
	 * correctly.
	 */

	while (count > 0) {
		len = min_t(unsigned int, count, sizeof(buffer));

		/* Read some data from the byte channel.  This function will
		 * never return more than EV_BYTE_CHANNEL_MAX_BYTES bytes.
		 */
		ev_byte_channel_receive(bc->handle, &len, buffer);

		/* 'len' is now the amount of data that's been received. 'len'
		 * can't be zero, and most likely it's equal to one.
		 */

		/* Pass the received data to the tty layer. */
		ret = tty_insert_flip_string(&bc->port, buffer, len);

		/* 'ret' is the number of bytes that the TTY layer accepted.
		 * If it's not equal to 'len', then it means the buffer is
		 * full, which should never happen.  If it does happen, we can
		 * exit gracefully, but we drop the last 'len - ret' characters
		 * that we read from the byte channel.
		 */
		if (ret != len)
			break;

		count -= len;
	}

	/* Tell the tty layer that we're done. */
	tty_flip_buffer_push(&bc->port);

	return IRQ_HANDLED;
}

/*
 * dequeue the transmit buffer to the hypervisor
 *
 * This function, which can be called in interrupt context, dequeues as much
 * data as possible from the transmit buffer to the byte channel.
 */
static void ehv_bc_tx_dequeue(struct ehv_bc_data *bc)
{
	unsigned int count;
	unsigned int len, ret;
	unsigned long flags;

	do {
		spin_lock_irqsave(&bc->lock, flags);
		len = min_t(unsigned int,
			    CIRC_CNT_TO_END(bc->head, bc->tail, BUF_SIZE),
			    EV_BYTE_CHANNEL_MAX_BYTES);

		ret = ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail);

		/* 'len' is valid only if the return code is 0 or EV_EAGAIN */
		if (!ret || (ret == EV_EAGAIN))
			bc->tail = (bc->tail + len) & (BUF_SIZE - 1);

		count = CIRC_CNT(bc->head, bc->tail, BUF_SIZE);
		spin_unlock_irqrestore(&bc->lock, flags);
	} while (count && !ret);

	spin_lock_irqsave(&bc->lock, flags);
	if (CIRC_CNT(bc->head, bc->tail, BUF_SIZE))
		/*
		 * If we haven't emptied the buffer, then enable the TX IRQ.
		 * We'll get an interrupt when there's more room in the
		 * hypervisor's output buffer.
		 */
		enable_tx_interrupt(bc);
	else
		disable_tx_interrupt(bc);
	spin_unlock_irqrestore(&bc->lock, flags);
}

/*
 * byte channel transmit interrupt handler
 *
 * This ISR is called whenever space becomes available for transmitting
 * characters on a byte channel.
 */
static irqreturn_t ehv_bc_tty_tx_isr(int irq, void *data)
{
	struct ehv_bc_data *bc = data;

	ehv_bc_tx_dequeue(bc);
	tty_port_tty_wakeup(&bc->port);

	return IRQ_HANDLED;
}

/*
 * This function is called when the tty layer has data for us to send.  We
 * store the data first in a circular buffer, and then dequeue as much of
 * that data as possible.
 *
 * We don't need to worry about whether there is enough room in the buffer
 * for all the data.  The purpose of ehv_bc_tty_write_room() is to tell the
 * tty layer how much data it can safely send to us.  We guarantee that
 * ehv_bc_tty_write_room() will never lie, so the tty layer will never send
 * us too much data.
 */
static int ehv_bc_tty_write(struct tty_struct *ttys, const unsigned char *s,
			    int count)
{
	struct ehv_bc_data *bc = ttys->driver_data;
	unsigned long flags;
	unsigned int len;
	unsigned int written = 0;

	while (1) {
		spin_lock_irqsave(&bc->lock, flags);
		len = CIRC_SPACE_TO_END(bc->head, bc->tail, BUF_SIZE);
		if (count < len)
			len = count;
		if (len) {
			memcpy(bc->buf + bc->head, s, len);
			bc->head = (bc->head + len) & (BUF_SIZE - 1);
		}
		spin_unlock_irqrestore(&bc->lock, flags);
		if (!len)
			break;

		s += len;
		count -= len;
		written += len;
	}

	ehv_bc_tx_dequeue(bc);

	return written;
}

/*
 * This function can be called multiple times for a given tty_struct, which is
 * why we initialize bc->ttys in ehv_bc_tty_port_activate() instead.
 *
 * The tty layer will still call this function even if the device was not
 * registered (i.e. tty_register_device() was not called).  This happens
 * because tty_register_device() is optional and some legacy drivers don't
 * use it.  So we need to check for that.
 */
static int ehv_bc_tty_open(struct tty_struct *ttys, struct file *filp)
{
	struct ehv_bc_data *bc = &bcs[ttys->index];

	if (!bc->dev)
		return -ENODEV;

	return tty_port_open(&bc->port, ttys, filp);
}

/*
 * Amazingly, if ehv_bc_tty_open() returns an error code, the tty layer will
 * still call this function to close the tty device.  So we can't assume that
 * the tty port has been initialized.
 */
static void ehv_bc_tty_close(struct tty_struct *ttys, struct file *filp)
{
	struct ehv_bc_data *bc = &bcs[ttys->index];

	if (bc->dev)
		tty_port_close(&bc->port, ttys, filp);
}

/*
 * Return the amount of space in the output buffer
 *
 * This is actually a contract between the driver and the tty layer outlining
 * how much write room the driver can guarantee will be sent OR BUFFERED.
 * This driver MUST honor the return value.
 */
static int ehv_bc_tty_write_room(struct tty_struct *ttys)
{
	struct ehv_bc_data *bc = ttys->driver_data;
	unsigned long flags;
	int count;

	spin_lock_irqsave(&bc->lock, flags);
	count = CIRC_SPACE(bc->head, bc->tail, BUF_SIZE);
	spin_unlock_irqrestore(&bc->lock, flags);

	return count;
}

/*
 * Stop sending data to the tty layer
 *
 * This function is called when the tty layer's input buffers are getting
 * full, so the driver should stop sending it data.  The easiest way to do
 * this is to disable the RX IRQ, which will prevent ehv_bc_tty_rx_isr()
 * from being called.
 *
 * The hypervisor will continue to queue up any incoming data.  If there is
 * any data in the queue when the RX interrupt is enabled, we'll immediately
 * get an RX interrupt.
 */
static void ehv_bc_tty_throttle(struct tty_struct *ttys)
{
	struct ehv_bc_data *bc = ttys->driver_data;

	disable_irq(bc->rx_irq);
}

/*
 * Resume sending data to the tty layer
 *
 * This function is called after previously calling ehv_bc_tty_throttle().
 * The tty layer's input buffers now have more room, so the driver can resume
 * sending it data.
 */
static void ehv_bc_tty_unthrottle(struct tty_struct *ttys)
{
	struct ehv_bc_data *bc = ttys->driver_data;

	/* If there is any data in the queue when the RX interrupt is enabled,
	 * we'll immediately get an RX interrupt.
	 */
	enable_irq(bc->rx_irq);
}

static void ehv_bc_tty_hangup(struct tty_struct *ttys)
{
	struct ehv_bc_data *bc = ttys->driver_data;

	ehv_bc_tx_dequeue(bc);
	tty_port_hangup(&bc->port);
}

/*
 * TTY driver operations
 *
 * If we could ask the hypervisor how much data is still in the TX buffer, or
 * at least how big the TX buffers are, then we could implement the
 * .wait_until_sent and .chars_in_buffer functions.
 */
static const struct tty_operations ehv_bc_ops = {
	.open		= ehv_bc_tty_open,
	.close		= ehv_bc_tty_close,
	.write		= ehv_bc_tty_write,
	.write_room	= ehv_bc_tty_write_room,
	.throttle	= ehv_bc_tty_throttle,
	.unthrottle	= ehv_bc_tty_unthrottle,
	.hangup		= ehv_bc_tty_hangup,
};

/*
 * initialize the TTY port
 *
 * This function will only be called once, no matter how many times
 * ehv_bc_tty_open() is called.  That's why we register the ISR here, and also
 * why we initialize tty_struct-related variables here.
 */
static int ehv_bc_tty_port_activate(struct tty_port *port,
				    struct tty_struct *ttys)
{
	struct ehv_bc_data *bc = container_of(port, struct ehv_bc_data, port);
	int ret;

	ttys->driver_data = bc;

	ret = request_irq(bc->rx_irq, ehv_bc_tty_rx_isr, 0, "ehv-bc", bc);
	if (ret < 0) {
		dev_err(bc->dev, "could not request rx irq %u (ret=%i)\n",
		       bc->rx_irq, ret);
		return ret;
	}

	/* request_irq also enables the IRQ */
	bc->tx_irq_enabled = 1;

	ret = request_irq(bc->tx_irq, ehv_bc_tty_tx_isr, 0, "ehv-bc", bc);
	if (ret < 0) {
		dev_err(bc->dev, "could not request tx irq %u (ret=%i)\n",
		       bc->tx_irq, ret);
		free_irq(bc->rx_irq, bc);
		return ret;
	}

	/* The TX IRQ is enabled only when we can't write all the data to the
	 * byte channel at once, so by default it's disabled.
	 */
	disable_tx_interrupt(bc);

	return 0;
}

static void ehv_bc_tty_port_shutdown(struct tty_port *port)
{
	struct ehv_bc_data *bc = container_of(port, struct ehv_bc_data, port);

	free_irq(bc->tx_irq, bc);
	free_irq(bc->rx_irq, bc);
}

static const struct tty_port_operations ehv_bc_tty_port_ops = {
	.activate = ehv_bc_tty_port_activate,
	.shutdown = ehv_bc_tty_port_shutdown,
};

/*
 * Bind one byte channel device-tree node to a tty device.  The stdout
 * byte channel always gets tty index 0 (matching what the console layer
 * was told); all others get the next free index.
 */
static int ehv_bc_tty_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ehv_bc_data *bc;
	const uint32_t *iprop;
	unsigned int handle;
	int ret;
	static unsigned int index = 1;
	unsigned int i;

	iprop = of_get_property(np, "hv-handle", NULL);
	if (!iprop) {
		dev_err(&pdev->dev, "no 'hv-handle' property in %s node\n",
			np->name);
		return -ENODEV;
	}

	/* We already told the console layer that the index for the console
	 * device is zero, so we need to make sure that we use that index when
	 * we probe the console byte channel node.
	 */
	handle = be32_to_cpu(*iprop);
	i = (handle == stdout_bc) ? 0 : index++;
	bc = &bcs[i];

	bc->handle = handle;
	bc->head = 0;
	bc->tail = 0;
	spin_lock_init(&bc->lock);

	bc->rx_irq = irq_of_parse_and_map(np, 0);
	bc->tx_irq = irq_of_parse_and_map(np, 1);
	if ((bc->rx_irq == NO_IRQ) || (bc->tx_irq == NO_IRQ)) {
		dev_err(&pdev->dev, "no 'interrupts' property in %s node\n",
			np->name);
		ret = -ENODEV;
		goto error;
	}

	tty_port_init(&bc->port);
	bc->port.ops = &ehv_bc_tty_port_ops;

	bc->dev = tty_port_register_device(&bc->port, ehv_bc_driver, i,
			&pdev->dev);
	if (IS_ERR(bc->dev)) {
		ret = PTR_ERR(bc->dev);
		dev_err(&pdev->dev, "could not register tty (ret=%i)\n", ret);
		goto error;
	}

	dev_set_drvdata(&pdev->dev, bc);

	dev_info(&pdev->dev, "registered /dev/%s%u for byte channel %u\n",
		ehv_bc_driver->name, i, bc->handle);

	return 0;

error:
	tty_port_destroy(&bc->port);
	irq_dispose_mapping(bc->tx_irq);
	irq_dispose_mapping(bc->rx_irq);

	memset(bc, 0, sizeof(struct ehv_bc_data));
	return ret;
}

static int ehv_bc_tty_remove(struct platform_device *pdev)
{
	struct ehv_bc_data *bc = dev_get_drvdata(&pdev->dev);

	/* 'bc - bcs' recovers the tty index from the element's address. */
	tty_unregister_device(ehv_bc_driver, bc - bcs);

	tty_port_destroy(&bc->port);
	irq_dispose_mapping(bc->tx_irq);
	irq_dispose_mapping(bc->rx_irq);

	return 0;
}

static const struct of_device_id ehv_bc_tty_of_ids[] = {
	{ .compatible = "epapr,hv-byte-channel" },
	{}
};

static struct platform_driver ehv_bc_tty_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "ehv-bc",
		.of_match_table = ehv_bc_tty_of_ids,
	},
	.probe		= ehv_bc_tty_probe,
	.remove		= ehv_bc_tty_remove,
};

/**
 * ehv_bc_init - ePAPR hypervisor byte channel driver initialization
 *
 * This function is called when this module is loaded.
 */
static int __init ehv_bc_init(void)
{
	struct device_node *np;
	unsigned int count = 0; /* Number of elements in bcs[] */
	int ret;

	pr_info("ePAPR hypervisor byte channel driver\n");

	/* Count the number of byte channels */
	for_each_compatible_node(np, NULL, "epapr,hv-byte-channel")
		count++;

	if (!count)
		return -ENODEV;

	/* The array index of an element in bcs[] is the same as the tty index
	 * for that element.  If you know the address of an element in the
	 * array, then you can use pointer math (e.g. "bc - bcs") to get its
	 * tty index.
	 */
	bcs = kzalloc(count * sizeof(struct ehv_bc_data), GFP_KERNEL);
	if (!bcs)
		return -ENOMEM;

	ehv_bc_driver = alloc_tty_driver(count);
	if (!ehv_bc_driver) {
		ret = -ENOMEM;
		goto error;
	}

	ehv_bc_driver->driver_name = "ehv-bc";
	ehv_bc_driver->name = ehv_bc_console.name;
	ehv_bc_driver->type = TTY_DRIVER_TYPE_CONSOLE;
	ehv_bc_driver->subtype = SYSTEM_TYPE_CONSOLE;
	ehv_bc_driver->init_termios = tty_std_termios;
	ehv_bc_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
	tty_set_operations(ehv_bc_driver, &ehv_bc_ops);

	ret = tty_register_driver(ehv_bc_driver);
	if (ret) {
		pr_err("ehv-bc: could not register tty driver (ret=%i)\n", ret);
		goto error;
	}

	ret = platform_driver_register(&ehv_bc_tty_driver);
	if (ret) {
		pr_err("ehv-bc: could not register platform driver (ret=%i)\n",
		       ret);
		goto error;
	}

	return 0;

error:
	if (ehv_bc_driver) {
		tty_unregister_driver(ehv_bc_driver);
		put_tty_driver(ehv_bc_driver);
	}

	kfree(bcs);

	return ret;
}

/**
 * ehv_bc_exit - ePAPR hypervisor byte channel driver termination
 *
 * This function is called when this driver is unloaded.
 */
static void __exit ehv_bc_exit(void)
{
	platform_driver_unregister(&ehv_bc_tty_driver);
	tty_unregister_driver(ehv_bc_driver);
	put_tty_driver(ehv_bc_driver);
	kfree(bcs);
}

module_init(ehv_bc_init);
module_exit(ehv_bc_exit);

MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
MODULE_DESCRIPTION("ePAPR hypervisor byte channel driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
intel-linux-graphics/drm-intel
arch/mips/pci/ops-bcm63xx.c
2231
12348
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/io.h>

#include "pci-bcm63xx.h"

/*
 * swizzle 32bits data to return only the needed part
 *
 * Config cycles always transfer a full dword; extract the byte/word
 * selected by the low two bits of 'where' for 1- and 2-byte reads.
 */
static int postprocess_read(u32 data, int where, unsigned int size)
{
	u32 ret;

	ret = 0;
	switch (size) {
	case 1:
		ret = (data >> ((where & 3) << 3)) & 0xff;
		break;
	case 2:
		ret = (data >> ((where & 3) << 3)) & 0xffff;
		break;
	case 4:
		ret = data;
		break;
	}
	return ret;
}

/*
 * Merge a 1-, 2- or 4-byte value into the original dword at the byte
 * offset given by the low two bits of 'where' (read-modify-write helper
 * for sub-dword config writes).
 */
static int preprocess_write(u32 orig_data, u32 val, int where,
			    unsigned int size)
{
	u32 ret;

	ret = 0;
	switch (size) {
	case 1:
		ret = (orig_data & ~(0xff << ((where & 3) << 3))) |
			(val << ((where & 3) << 3));
		break;
	case 2:
		ret = (orig_data & ~(0xffff << ((where & 3) << 3))) |
			(val << ((where & 3) << 3));
		break;
	case 4:
		ret = val;
		break;
	}
	return ret;
}

/*
 * setup hardware for a configuration cycle with given parameters
 *
 * Programs the MPI L2PCFG register so the next access through the PCI
 * I/O space window performs a config cycle.  Returns nonzero when the
 * slot/function/register is out of range for the hardware fields.
 */
static int bcm63xx_setup_cfg_access(int type, unsigned int busn,
				    unsigned int devfn, int where)
{
	unsigned int slot, func, reg;
	u32 val;

	slot = PCI_SLOT(devfn);
	func = PCI_FUNC(devfn);
	reg = where >> 2;

	/* sanity check */
	if (slot > (MPI_L2PCFG_DEVNUM_MASK >> MPI_L2PCFG_DEVNUM_SHIFT))
		return 1;

	if (func > (MPI_L2PCFG_FUNC_MASK >> MPI_L2PCFG_FUNC_SHIFT))
		return 1;

	if (reg > (MPI_L2PCFG_REG_MASK >> MPI_L2PCFG_REG_SHIFT))
		return 1;

	/* ok, setup config access */
	val = (reg << MPI_L2PCFG_REG_SHIFT);
	val |= (func << MPI_L2PCFG_FUNC_SHIFT);
	val |= (slot << MPI_L2PCFG_DEVNUM_SHIFT);
	val |= MPI_L2PCFG_CFG_USEREG_MASK;
	val |= MPI_L2PCFG_CFG_SEL_MASK;
	/* type 0 cycle for local bus, type 1 cycle for anything else */
	if (type != 0) {
		/* FIXME: how to specify bus ??? */
		val |= (1 << MPI_L2PCFG_CFG_TYPE_SHIFT);
	}
	bcm_mpi_writel(val, MPI_L2PCFG_REG);

	return 0;
}

static int bcm63xx_do_cfg_read(int type, unsigned int busn,
			       unsigned int devfn, int where, int size,
			       u32 *val)
{
	u32 data;

	/* two phase cycle, first we write address, then read data at
	 * another location, caller already has a spinlock so no need
	 * to add one here */
	if (bcm63xx_setup_cfg_access(type, busn, devfn, where))
		return PCIBIOS_DEVICE_NOT_FOUND;
	iob();

	data = le32_to_cpu(__raw_readl(pci_iospace_start));
	/* restore IO space normal behaviour */
	bcm_mpi_writel(0, MPI_L2PCFG_REG);

	*val = postprocess_read(data, where, size);

	return PCIBIOS_SUCCESSFUL;
}

static int bcm63xx_do_cfg_write(int type, unsigned int busn,
				unsigned int devfn, int where, int size,
				u32 val)
{
	u32 data;

	/* two phase cycle, first we write address, then write data to
	 * another location, caller already has a spinlock so no need
	 * to add one here */
	if (bcm63xx_setup_cfg_access(type, busn, devfn, where))
		return PCIBIOS_DEVICE_NOT_FOUND;
	iob();

	data = le32_to_cpu(__raw_readl(pci_iospace_start));
	data = preprocess_write(data, val, where, size);

	__raw_writel(cpu_to_le32(data), pci_iospace_start);
	wmb();

	/* no way to know the access is done, we have to wait */
	udelay(500);
	/* restore IO space normal behaviour */
	bcm_mpi_writel(0, MPI_L2PCFG_REG);

	return PCIBIOS_SUCCESSFUL;
}

static int bcm63xx_pci_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val)
{
	int type;

	/* root bus (no parent) uses type 0 cycles, children use type 1 */
	type = bus->parent ? 1 : 0;

	if (type == 0 && PCI_SLOT(devfn) == CARDBUS_PCI_IDSEL)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return bcm63xx_do_cfg_read(type, bus->number, devfn,
				   where, size, val);
}

static int bcm63xx_pci_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val)
{
	int type;

	type = bus->parent ? 1 : 0;

	if (type == 0 && PCI_SLOT(devfn) == CARDBUS_PCI_IDSEL)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return bcm63xx_do_cfg_write(type, bus->number, devfn,
				    where, size, val);
}

struct pci_ops bcm63xx_pci_ops = {
	.read   = bcm63xx_pci_read,
	.write  = bcm63xx_pci_write
};

#ifdef CONFIG_CARDBUS
/*
 * emulate configuration read access on a cardbus bridge
 */
#define FAKE_CB_BRIDGE_SLOT	0x1e

static int fake_cb_bridge_bus_number = -1;

/* Software model of the emulated cardbus bridge's config registers. */
static struct {
	u16 pci_command;
	u8 cb_latency;
	u8 subordinate_busn;
	u8 cardbus_busn;
	u8 pci_busn;
	int bus_assigned;
	u16 bridge_control;

	u32 mem_base0;
	u32 mem_limit0;
	u32 mem_base1;
	u32 mem_limit1;

	u32 io_base0;
	u32 io_limit0;
	u32 io_base1;
	u32 io_limit1;
} fake_cb_bridge_regs;

/* NOTE(review): function continues beyond the visible source span;
 * the remainder of this switch (and the function tail) is unchanged. */
static int fake_cb_bridge_read(int where, int size, u32 *val)
{
	unsigned int reg;
	u32 data;

	data = 0;
	reg = where >> 2;
	switch (reg) {
	case (PCI_VENDOR_ID >> 2):
	case (PCI_CB_SUBSYSTEM_VENDOR_ID >> 2):
		/* create dummy vendor/device id from our cpu id */
		data = (bcm63xx_get_cpu_id() << 16) | PCI_VENDOR_ID_BROADCOM;
		break;

	case (PCI_COMMAND >> 2):
		data = (PCI_STATUS_DEVSEL_SLOW << 16);
		data |= fake_cb_bridge_regs.pci_command;
		break;

	case (PCI_CLASS_REVISION >> 2):
		data = (PCI_CLASS_BRIDGE_CARDBUS << 16);
		break;

	case (PCI_CACHE_LINE_SIZE >> 2):
		data = (PCI_HEADER_TYPE_CARDBUS << 16);
		break;

	case (PCI_INTERRUPT_LINE >> 2):
		/* bridge control */
		data = (fake_cb_bridge_regs.bridge_control << 16);
		/* pin:intA line:0xff */
		data |= (0x1 << 8) | 0xff;
		break;

	case (PCI_CB_PRIMARY_BUS >> 2):
		data = (fake_cb_bridge_regs.cb_latency << 24);
		data |= (fake_cb_bridge_regs.subordinate_busn << 16);
		data |= (fake_cb_bridge_regs.cardbus_busn << 8);
		data |= fake_cb_bridge_regs.pci_busn;
		break;

	case (PCI_CB_MEMORY_BASE_0 >> 2):
		data = fake_cb_bridge_regs.mem_base0;
		break;

	case (PCI_CB_MEMORY_LIMIT_0 >> 2):
		data = fake_cb_bridge_regs.mem_limit0;
		break;

	case (PCI_CB_MEMORY_BASE_1 >> 2):
		data = fake_cb_bridge_regs.mem_base1;
		break;

	case (PCI_CB_MEMORY_LIMIT_1 >> 2):
		data = fake_cb_bridge_regs.mem_limit1;
break; case (PCI_CB_IO_BASE_0 >> 2): /* | 1 for 32bits io support */ data = fake_cb_bridge_regs.io_base0 | 0x1; break; case (PCI_CB_IO_LIMIT_0 >> 2): data = fake_cb_bridge_regs.io_limit0; break; case (PCI_CB_IO_BASE_1 >> 2): /* | 1 for 32bits io support */ data = fake_cb_bridge_regs.io_base1 | 0x1; break; case (PCI_CB_IO_LIMIT_1 >> 2): data = fake_cb_bridge_regs.io_limit1; break; } *val = postprocess_read(data, where, size); return PCIBIOS_SUCCESSFUL; } /* * emulate configuration write access on a cardbus bridge */ static int fake_cb_bridge_write(int where, int size, u32 val) { unsigned int reg; u32 data, tmp; int ret; ret = fake_cb_bridge_read((where & ~0x3), 4, &data); if (ret != PCIBIOS_SUCCESSFUL) return ret; data = preprocess_write(data, val, where, size); reg = where >> 2; switch (reg) { case (PCI_COMMAND >> 2): fake_cb_bridge_regs.pci_command = (data & 0xffff); break; case (PCI_CB_PRIMARY_BUS >> 2): fake_cb_bridge_regs.cb_latency = (data >> 24) & 0xff; fake_cb_bridge_regs.subordinate_busn = (data >> 16) & 0xff; fake_cb_bridge_regs.cardbus_busn = (data >> 8) & 0xff; fake_cb_bridge_regs.pci_busn = data & 0xff; if (fake_cb_bridge_regs.cardbus_busn) fake_cb_bridge_regs.bus_assigned = 1; break; case (PCI_INTERRUPT_LINE >> 2): tmp = (data >> 16) & 0xffff; /* disable memory prefetch support */ tmp &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM0; tmp &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM1; fake_cb_bridge_regs.bridge_control = tmp; break; case (PCI_CB_MEMORY_BASE_0 >> 2): fake_cb_bridge_regs.mem_base0 = data; break; case (PCI_CB_MEMORY_LIMIT_0 >> 2): fake_cb_bridge_regs.mem_limit0 = data; break; case (PCI_CB_MEMORY_BASE_1 >> 2): fake_cb_bridge_regs.mem_base1 = data; break; case (PCI_CB_MEMORY_LIMIT_1 >> 2): fake_cb_bridge_regs.mem_limit1 = data; break; case (PCI_CB_IO_BASE_0 >> 2): fake_cb_bridge_regs.io_base0 = data; break; case (PCI_CB_IO_LIMIT_0 >> 2): fake_cb_bridge_regs.io_limit0 = data; break; case (PCI_CB_IO_BASE_1 >> 2): fake_cb_bridge_regs.io_base1 = data; break; case 
(PCI_CB_IO_LIMIT_1 >> 2): fake_cb_bridge_regs.io_limit1 = data; break; } return PCIBIOS_SUCCESSFUL; } static int bcm63xx_cb_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { /* snoop access to slot 0x1e on root bus, we fake a cardbus * bridge at this location */ if (!bus->parent && PCI_SLOT(devfn) == FAKE_CB_BRIDGE_SLOT) { fake_cb_bridge_bus_number = bus->number; return fake_cb_bridge_read(where, size, val); } /* a configuration cycle for the device behind the cardbus * bridge is actually done as a type 0 cycle on the primary * bus. This means that only one device can be on the cardbus * bus */ if (fake_cb_bridge_regs.bus_assigned && bus->number == fake_cb_bridge_regs.cardbus_busn && PCI_SLOT(devfn) == 0) return bcm63xx_do_cfg_read(0, 0, PCI_DEVFN(CARDBUS_PCI_IDSEL, 0), where, size, val); return PCIBIOS_DEVICE_NOT_FOUND; } static int bcm63xx_cb_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { if (!bus->parent && PCI_SLOT(devfn) == FAKE_CB_BRIDGE_SLOT) { fake_cb_bridge_bus_number = bus->number; return fake_cb_bridge_write(where, size, val); } if (fake_cb_bridge_regs.bus_assigned && bus->number == fake_cb_bridge_regs.cardbus_busn && PCI_SLOT(devfn) == 0) return bcm63xx_do_cfg_write(0, 0, PCI_DEVFN(CARDBUS_PCI_IDSEL, 0), where, size, val); return PCIBIOS_DEVICE_NOT_FOUND; } struct pci_ops bcm63xx_cb_ops = { .read = bcm63xx_cb_read, .write = bcm63xx_cb_write, }; /* * only one IO window, so it cannot be shared by PCI and cardbus, use * fixup to choose and detect unhandled configuration */ static void bcm63xx_fixup(struct pci_dev *dev) { static int io_window = -1; int i, found, new_io_window; u32 val; /* look for any io resource */ found = 0; for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { if (pci_resource_flags(dev, i) & IORESOURCE_IO) { found = 1; break; } } if (!found) return; /* skip our fake bus with only cardbus bridge on it */ if (dev->bus->number == fake_cb_bridge_bus_number) return; /* find on which bus 
the device is */ if (fake_cb_bridge_regs.bus_assigned && dev->bus->number == fake_cb_bridge_regs.cardbus_busn && PCI_SLOT(dev->devfn) == 0) new_io_window = 1; else new_io_window = 0; if (new_io_window == io_window) return; if (io_window != -1) { printk(KERN_ERR "bcm63xx: both PCI and cardbus devices " "need IO, which hardware cannot do\n"); return; } printk(KERN_INFO "bcm63xx: PCI IO window assigned to %s\n", (new_io_window == 0) ? "PCI" : "cardbus"); val = bcm_mpi_readl(MPI_L2PIOREMAP_REG); if (io_window) val |= MPI_L2PREMAP_IS_CARDBUS_MASK; else val &= ~MPI_L2PREMAP_IS_CARDBUS_MASK; bcm_mpi_writel(val, MPI_L2PIOREMAP_REG); io_window = new_io_window; } DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, bcm63xx_fixup); #endif static int bcm63xx_pcie_can_access(struct pci_bus *bus, int devfn) { switch (bus->number) { case PCIE_BUS_BRIDGE: return (PCI_SLOT(devfn) == 0); case PCIE_BUS_DEVICE: if (PCI_SLOT(devfn) == 0) return bcm_pcie_readl(PCIE_DLSTATUS_REG) & DLSTATUS_PHYLINKUP; default: return false; } } static int bcm63xx_pcie_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { u32 data; u32 reg = where & ~3; if (!bcm63xx_pcie_can_access(bus, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; if (bus->number == PCIE_BUS_DEVICE) reg += PCIE_DEVICE_OFFSET; data = bcm_pcie_readl(reg); *val = postprocess_read(data, where, size); return PCIBIOS_SUCCESSFUL; } static int bcm63xx_pcie_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { u32 data; u32 reg = where & ~3; if (!bcm63xx_pcie_can_access(bus, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; if (bus->number == PCIE_BUS_DEVICE) reg += PCIE_DEVICE_OFFSET; data = bcm_pcie_readl(reg); data = preprocess_write(data, val, where, size); bcm_pcie_writel(data, reg); return PCIBIOS_SUCCESSFUL; } struct pci_ops bcm63xx_pcie_ops = { .read = bcm63xx_pcie_read, .write = bcm63xx_pcie_write };
gpl-2.0
sandymanu/sandy_oneplus2_msm8994
drivers/gpio/gpio-rc5t583.c
2487
5357
/* * GPIO driver for RICOH583 power management chip. * * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. * Author: Laxman dewangan <ldewangan@nvidia.com> * * Based on code * Copyright (C) 2011 RICOH COMPANY,LTD * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/device.h> #include <linux/gpio.h> #include <linux/mfd/rc5t583.h> struct rc5t583_gpio { struct gpio_chip gpio_chip; struct rc5t583 *rc5t583; }; static inline struct rc5t583_gpio *to_rc5t583_gpio(struct gpio_chip *chip) { return container_of(chip, struct rc5t583_gpio, gpio_chip); } static int rc5t583_gpio_get(struct gpio_chip *gc, unsigned int offset) { struct rc5t583_gpio *rc5t583_gpio = to_rc5t583_gpio(gc); struct device *parent = rc5t583_gpio->rc5t583->dev; uint8_t val = 0; int ret; ret = rc5t583_read(parent, RC5T583_GPIO_MON_IOIN, &val); if (ret < 0) return ret; return !!(val & BIT(offset)); } static void rc5t583_gpio_set(struct gpio_chip *gc, unsigned int offset, int val) { struct rc5t583_gpio *rc5t583_gpio = to_rc5t583_gpio(gc); struct device *parent = rc5t583_gpio->rc5t583->dev; if (val) rc5t583_set_bits(parent, RC5T583_GPIO_IOOUT, BIT(offset)); else rc5t583_clear_bits(parent, RC5T583_GPIO_IOOUT, BIT(offset)); } static int rc5t583_gpio_dir_input(struct gpio_chip *gc, unsigned int offset) { struct rc5t583_gpio 
*rc5t583_gpio = to_rc5t583_gpio(gc); struct device *parent = rc5t583_gpio->rc5t583->dev; int ret; ret = rc5t583_clear_bits(parent, RC5T583_GPIO_IOSEL, BIT(offset)); if (ret < 0) return ret; /* Set pin to gpio mode */ return rc5t583_clear_bits(parent, RC5T583_GPIO_PGSEL, BIT(offset)); } static int rc5t583_gpio_dir_output(struct gpio_chip *gc, unsigned offset, int value) { struct rc5t583_gpio *rc5t583_gpio = to_rc5t583_gpio(gc); struct device *parent = rc5t583_gpio->rc5t583->dev; int ret; rc5t583_gpio_set(gc, offset, value); ret = rc5t583_set_bits(parent, RC5T583_GPIO_IOSEL, BIT(offset)); if (ret < 0) return ret; /* Set pin to gpio mode */ return rc5t583_clear_bits(parent, RC5T583_GPIO_PGSEL, BIT(offset)); } static int rc5t583_gpio_to_irq(struct gpio_chip *gc, unsigned offset) { struct rc5t583_gpio *rc5t583_gpio = to_rc5t583_gpio(gc); if ((offset >= 0) && (offset < 8)) return rc5t583_gpio->rc5t583->irq_base + RC5T583_IRQ_GPIO0 + offset; return -EINVAL; } static void rc5t583_gpio_free(struct gpio_chip *gc, unsigned offset) { struct rc5t583_gpio *rc5t583_gpio = to_rc5t583_gpio(gc); struct device *parent = rc5t583_gpio->rc5t583->dev; rc5t583_set_bits(parent, RC5T583_GPIO_PGSEL, BIT(offset)); } static int rc5t583_gpio_probe(struct platform_device *pdev) { struct rc5t583 *rc5t583 = dev_get_drvdata(pdev->dev.parent); struct rc5t583_platform_data *pdata = dev_get_platdata(rc5t583->dev); struct rc5t583_gpio *rc5t583_gpio; rc5t583_gpio = devm_kzalloc(&pdev->dev, sizeof(*rc5t583_gpio), GFP_KERNEL); if (!rc5t583_gpio) { dev_warn(&pdev->dev, "Mem allocation for rc5t583_gpio failed"); return -ENOMEM; } rc5t583_gpio->gpio_chip.label = "gpio-rc5t583", rc5t583_gpio->gpio_chip.owner = THIS_MODULE, rc5t583_gpio->gpio_chip.free = rc5t583_gpio_free, rc5t583_gpio->gpio_chip.direction_input = rc5t583_gpio_dir_input, rc5t583_gpio->gpio_chip.direction_output = rc5t583_gpio_dir_output, rc5t583_gpio->gpio_chip.set = rc5t583_gpio_set, rc5t583_gpio->gpio_chip.get = rc5t583_gpio_get, 
rc5t583_gpio->gpio_chip.to_irq = rc5t583_gpio_to_irq, rc5t583_gpio->gpio_chip.ngpio = RC5T583_MAX_GPIO, rc5t583_gpio->gpio_chip.can_sleep = 1, rc5t583_gpio->gpio_chip.dev = &pdev->dev; rc5t583_gpio->gpio_chip.base = -1; rc5t583_gpio->rc5t583 = rc5t583; if (pdata && pdata->gpio_base) rc5t583_gpio->gpio_chip.base = pdata->gpio_base; platform_set_drvdata(pdev, rc5t583_gpio); return gpiochip_add(&rc5t583_gpio->gpio_chip); } static int rc5t583_gpio_remove(struct platform_device *pdev) { struct rc5t583_gpio *rc5t583_gpio = platform_get_drvdata(pdev); return gpiochip_remove(&rc5t583_gpio->gpio_chip); } static struct platform_driver rc5t583_gpio_driver = { .driver = { .name = "rc5t583-gpio", .owner = THIS_MODULE, }, .probe = rc5t583_gpio_probe, .remove = rc5t583_gpio_remove, }; static int __init rc5t583_gpio_init(void) { return platform_driver_register(&rc5t583_gpio_driver); } subsys_initcall(rc5t583_gpio_init); static void __exit rc5t583_gpio_exit(void) { platform_driver_unregister(&rc5t583_gpio_driver); } module_exit(rc5t583_gpio_exit); MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); MODULE_DESCRIPTION("GPIO interface for RC5T583"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:rc5t583-gpio");
gpl-2.0
jyunyen/Nexus7_Kernal
fs/nfs/callback_xdr.c
2743
25269
/* * linux/fs/nfs/callback_xdr.c * * Copyright (C) 2004 Trond Myklebust * * NFSv4 callback encode/decode procedures */ #include <linux/kernel.h> #include <linux/sunrpc/svc.h> #include <linux/nfs4.h> #include <linux/nfs_fs.h> #include <linux/slab.h> #include <linux/sunrpc/bc_xprt.h> #include "nfs4_fs.h" #include "callback.h" #include "internal.h" #define CB_OP_TAGLEN_MAXSZ (512) #define CB_OP_HDR_RES_MAXSZ (2 + CB_OP_TAGLEN_MAXSZ) #define CB_OP_GETATTR_BITMAP_MAXSZ (4) #define CB_OP_GETATTR_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \ CB_OP_GETATTR_BITMAP_MAXSZ + \ 2 + 2 + 3 + 3) #define CB_OP_RECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) #if defined(CONFIG_NFS_V4_1) #define CB_OP_LAYOUTRECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) #define CB_OP_DEVICENOTIFY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) #define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \ 4 + 1 + 3) #define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) #define CB_OP_RECALLSLOT_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) #endif /* CONFIG_NFS_V4_1 */ #define NFSDBG_FACILITY NFSDBG_CALLBACK /* Internal error code */ #define NFS4ERR_RESOURCE_HDR 11050 typedef __be32 (*callback_process_op_t)(void *, void *, struct cb_process_state *); typedef __be32 (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *); typedef __be32 (*callback_encode_res_t)(struct svc_rqst *, struct xdr_stream *, void *); struct callback_op { callback_process_op_t process_op; callback_decode_arg_t decode_args; callback_encode_res_t encode_res; long res_maxsize; }; static struct callback_op callback_ops[]; static __be32 nfs4_callback_null(struct svc_rqst *rqstp, void *argp, void *resp) { return htonl(NFS4_OK); } static int nfs4_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy) { return xdr_argsize_check(rqstp, p); } static int nfs4_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy) { return xdr_ressize_check(rqstp, p); } static __be32 *read_buf(struct xdr_stream *xdr, int nbytes) { __be32 *p; p = xdr_inline_decode(xdr, nbytes); if 
(unlikely(p == NULL)) printk(KERN_WARNING "NFSv4 callback reply buffer overflowed!\n"); return p; } static __be32 decode_string(struct xdr_stream *xdr, unsigned int *len, const char **str) { __be32 *p; p = read_buf(xdr, 4); if (unlikely(p == NULL)) return htonl(NFS4ERR_RESOURCE); *len = ntohl(*p); if (*len != 0) { p = read_buf(xdr, *len); if (unlikely(p == NULL)) return htonl(NFS4ERR_RESOURCE); *str = (const char *)p; } else *str = NULL; return 0; } static __be32 decode_fh(struct xdr_stream *xdr, struct nfs_fh *fh) { __be32 *p; p = read_buf(xdr, 4); if (unlikely(p == NULL)) return htonl(NFS4ERR_RESOURCE); fh->size = ntohl(*p); if (fh->size > NFS4_FHSIZE) return htonl(NFS4ERR_BADHANDLE); p = read_buf(xdr, fh->size); if (unlikely(p == NULL)) return htonl(NFS4ERR_RESOURCE); memcpy(&fh->data[0], p, fh->size); memset(&fh->data[fh->size], 0, sizeof(fh->data) - fh->size); return 0; } static __be32 decode_bitmap(struct xdr_stream *xdr, uint32_t *bitmap) { __be32 *p; unsigned int attrlen; p = read_buf(xdr, 4); if (unlikely(p == NULL)) return htonl(NFS4ERR_RESOURCE); attrlen = ntohl(*p); p = read_buf(xdr, attrlen << 2); if (unlikely(p == NULL)) return htonl(NFS4ERR_RESOURCE); if (likely(attrlen > 0)) bitmap[0] = ntohl(*p++); if (attrlen > 1) bitmap[1] = ntohl(*p); return 0; } static __be32 decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid) { __be32 *p; p = read_buf(xdr, 16); if (unlikely(p == NULL)) return htonl(NFS4ERR_RESOURCE); memcpy(stateid->data, p, 16); return 0; } static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound_hdr_arg *hdr) { __be32 *p; __be32 status; status = decode_string(xdr, &hdr->taglen, &hdr->tag); if (unlikely(status != 0)) return status; /* We do not like overly long tags! 
*/ if (hdr->taglen > CB_OP_TAGLEN_MAXSZ - 12) { printk("NFSv4 CALLBACK %s: client sent tag of length %u\n", __func__, hdr->taglen); return htonl(NFS4ERR_RESOURCE); } p = read_buf(xdr, 12); if (unlikely(p == NULL)) return htonl(NFS4ERR_RESOURCE); hdr->minorversion = ntohl(*p++); /* Check minor version is zero or one. */ if (hdr->minorversion <= 1) { hdr->cb_ident = ntohl(*p++); /* ignored by v4.1 */ } else { printk(KERN_WARNING "%s: NFSv4 server callback with " "illegal minor version %u!\n", __func__, hdr->minorversion); return htonl(NFS4ERR_MINOR_VERS_MISMATCH); } hdr->nops = ntohl(*p); dprintk("%s: minorversion %d nops %d\n", __func__, hdr->minorversion, hdr->nops); return 0; } static __be32 decode_op_hdr(struct xdr_stream *xdr, unsigned int *op) { __be32 *p; p = read_buf(xdr, 4); if (unlikely(p == NULL)) return htonl(NFS4ERR_RESOURCE_HDR); *op = ntohl(*p); return 0; } static __be32 decode_getattr_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_getattrargs *args) { __be32 status; status = decode_fh(xdr, &args->fh); if (unlikely(status != 0)) goto out; args->addr = svc_addr(rqstp); status = decode_bitmap(xdr, args->bitmap); out: dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); return status; } static __be32 decode_recall_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_recallargs *args) { __be32 *p; __be32 status; args->addr = svc_addr(rqstp); status = decode_stateid(xdr, &args->stateid); if (unlikely(status != 0)) goto out; p = read_buf(xdr, 4); if (unlikely(p == NULL)) { status = htonl(NFS4ERR_RESOURCE); goto out; } args->truncate = ntohl(*p); status = decode_fh(xdr, &args->fh); out: dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); return status; } #if defined(CONFIG_NFS_V4_1) static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_layoutrecallargs *args) { __be32 *p; __be32 status = 0; uint32_t iomode; args->cbl_addr = svc_addr(rqstp); p = read_buf(xdr, 4 * 
sizeof(uint32_t)); if (unlikely(p == NULL)) { status = htonl(NFS4ERR_BADXDR); goto out; } args->cbl_layout_type = ntohl(*p++); /* Depite the spec's xdr, iomode really belongs in the FILE switch, * as it is unusable and ignored with the other types. */ iomode = ntohl(*p++); args->cbl_layoutchanged = ntohl(*p++); args->cbl_recall_type = ntohl(*p++); if (args->cbl_recall_type == RETURN_FILE) { args->cbl_range.iomode = iomode; status = decode_fh(xdr, &args->cbl_fh); if (unlikely(status != 0)) goto out; p = read_buf(xdr, 2 * sizeof(uint64_t)); if (unlikely(p == NULL)) { status = htonl(NFS4ERR_BADXDR); goto out; } p = xdr_decode_hyper(p, &args->cbl_range.offset); p = xdr_decode_hyper(p, &args->cbl_range.length); status = decode_stateid(xdr, &args->cbl_stateid); if (unlikely(status != 0)) goto out; } else if (args->cbl_recall_type == RETURN_FSID) { p = read_buf(xdr, 2 * sizeof(uint64_t)); if (unlikely(p == NULL)) { status = htonl(NFS4ERR_BADXDR); goto out; } p = xdr_decode_hyper(p, &args->cbl_fsid.major); p = xdr_decode_hyper(p, &args->cbl_fsid.minor); } else if (args->cbl_recall_type != RETURN_ALL) { status = htonl(NFS4ERR_BADXDR); goto out; } dprintk("%s: ltype 0x%x iomode %d changed %d recall_type %d\n", __func__, args->cbl_layout_type, iomode, args->cbl_layoutchanged, args->cbl_recall_type); out: dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); return status; } static __be32 decode_devicenotify_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_devicenotifyargs *args) { __be32 *p; __be32 status = 0; u32 tmp; int n, i; args->ndevs = 0; /* Num of device notifications */ p = read_buf(xdr, sizeof(uint32_t)); if (unlikely(p == NULL)) { status = htonl(NFS4ERR_BADXDR); goto out; } n = ntohl(*p++); if (n <= 0) goto out; args->devs = kmalloc(n * sizeof(*args->devs), GFP_KERNEL); if (!args->devs) { status = htonl(NFS4ERR_DELAY); goto out; } /* Decode each dev notification */ for (i = 0; i < n; i++) { struct cb_devicenotifyitem *dev = &args->devs[i]; 
p = read_buf(xdr, (4 * sizeof(uint32_t)) + NFS4_DEVICEID4_SIZE); if (unlikely(p == NULL)) { status = htonl(NFS4ERR_BADXDR); goto err; } tmp = ntohl(*p++); /* bitmap size */ if (tmp != 1) { status = htonl(NFS4ERR_INVAL); goto err; } dev->cbd_notify_type = ntohl(*p++); if (dev->cbd_notify_type != NOTIFY_DEVICEID4_CHANGE && dev->cbd_notify_type != NOTIFY_DEVICEID4_DELETE) { status = htonl(NFS4ERR_INVAL); goto err; } tmp = ntohl(*p++); /* opaque size */ if (((dev->cbd_notify_type == NOTIFY_DEVICEID4_CHANGE) && (tmp != NFS4_DEVICEID4_SIZE + 8)) || ((dev->cbd_notify_type == NOTIFY_DEVICEID4_DELETE) && (tmp != NFS4_DEVICEID4_SIZE + 4))) { status = htonl(NFS4ERR_INVAL); goto err; } dev->cbd_layout_type = ntohl(*p++); memcpy(dev->cbd_dev_id.data, p, NFS4_DEVICEID4_SIZE); p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE); if (dev->cbd_layout_type == NOTIFY_DEVICEID4_CHANGE) { p = read_buf(xdr, sizeof(uint32_t)); if (unlikely(p == NULL)) { status = htonl(NFS4ERR_BADXDR); goto err; } dev->cbd_immediate = ntohl(*p++); } else { dev->cbd_immediate = 0; } args->ndevs++; dprintk("%s: type %d layout 0x%x immediate %d\n", __func__, dev->cbd_notify_type, dev->cbd_layout_type, dev->cbd_immediate); } out: dprintk("%s: status %d ndevs %d\n", __func__, ntohl(status), args->ndevs); return status; err: kfree(args->devs); goto out; } static __be32 decode_sessionid(struct xdr_stream *xdr, struct nfs4_sessionid *sid) { __be32 *p; int len = NFS4_MAX_SESSIONID_LEN; p = read_buf(xdr, len); if (unlikely(p == NULL)) return htonl(NFS4ERR_RESOURCE); memcpy(sid->data, p, len); return 0; } static __be32 decode_rc_list(struct xdr_stream *xdr, struct referring_call_list *rc_list) { __be32 *p; int i; __be32 status; status = decode_sessionid(xdr, &rc_list->rcl_sessionid); if (status) goto out; status = htonl(NFS4ERR_RESOURCE); p = read_buf(xdr, sizeof(uint32_t)); if (unlikely(p == NULL)) goto out; rc_list->rcl_nrefcalls = ntohl(*p++); if (rc_list->rcl_nrefcalls) { p = read_buf(xdr, rc_list->rcl_nrefcalls * 2 * 
sizeof(uint32_t)); if (unlikely(p == NULL)) goto out; rc_list->rcl_refcalls = kmalloc(rc_list->rcl_nrefcalls * sizeof(*rc_list->rcl_refcalls), GFP_KERNEL); if (unlikely(rc_list->rcl_refcalls == NULL)) goto out; for (i = 0; i < rc_list->rcl_nrefcalls; i++) { rc_list->rcl_refcalls[i].rc_sequenceid = ntohl(*p++); rc_list->rcl_refcalls[i].rc_slotid = ntohl(*p++); } } status = 0; out: return status; } static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_sequenceargs *args) { __be32 *p; int i; __be32 status; status = decode_sessionid(xdr, &args->csa_sessionid); if (status) goto out; status = htonl(NFS4ERR_RESOURCE); p = read_buf(xdr, 5 * sizeof(uint32_t)); if (unlikely(p == NULL)) goto out; args->csa_addr = svc_addr(rqstp); args->csa_sequenceid = ntohl(*p++); args->csa_slotid = ntohl(*p++); args->csa_highestslotid = ntohl(*p++); args->csa_cachethis = ntohl(*p++); args->csa_nrclists = ntohl(*p++); args->csa_rclists = NULL; if (args->csa_nrclists) { args->csa_rclists = kmalloc(args->csa_nrclists * sizeof(*args->csa_rclists), GFP_KERNEL); if (unlikely(args->csa_rclists == NULL)) goto out; for (i = 0; i < args->csa_nrclists; i++) { status = decode_rc_list(xdr, &args->csa_rclists[i]); if (status) goto out_free; } } status = 0; dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u slotid %u " "highestslotid %u cachethis %d nrclists %u\n", __func__, ((u32 *)&args->csa_sessionid)[0], ((u32 *)&args->csa_sessionid)[1], ((u32 *)&args->csa_sessionid)[2], ((u32 *)&args->csa_sessionid)[3], args->csa_sequenceid, args->csa_slotid, args->csa_highestslotid, args->csa_cachethis, args->csa_nrclists); out: dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); return status; out_free: for (i = 0; i < args->csa_nrclists; i++) kfree(args->csa_rclists[i].rcl_refcalls); kfree(args->csa_rclists); goto out; } static __be32 decode_recallany_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_recallanyargs *args) { __be32 *p; args->craa_addr 
= svc_addr(rqstp); p = read_buf(xdr, 4); if (unlikely(p == NULL)) return htonl(NFS4ERR_BADXDR); args->craa_objs_to_keep = ntohl(*p++); p = read_buf(xdr, 4); if (unlikely(p == NULL)) return htonl(NFS4ERR_BADXDR); args->craa_type_mask = ntohl(*p); return 0; } static __be32 decode_recallslot_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_recallslotargs *args) { __be32 *p; args->crsa_addr = svc_addr(rqstp); p = read_buf(xdr, 4); if (unlikely(p == NULL)) return htonl(NFS4ERR_BADXDR); args->crsa_target_max_slots = ntohl(*p++); return 0; } #endif /* CONFIG_NFS_V4_1 */ static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str) { __be32 *p; p = xdr_reserve_space(xdr, 4 + len); if (unlikely(p == NULL)) return htonl(NFS4ERR_RESOURCE); xdr_encode_opaque(p, str, len); return 0; } #define CB_SUPPORTED_ATTR0 (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) #define CB_SUPPORTED_ATTR1 (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) static __be32 encode_attr_bitmap(struct xdr_stream *xdr, const uint32_t *bitmap, __be32 **savep) { __be32 bm[2]; __be32 *p; bm[0] = htonl(bitmap[0] & CB_SUPPORTED_ATTR0); bm[1] = htonl(bitmap[1] & CB_SUPPORTED_ATTR1); if (bm[1] != 0) { p = xdr_reserve_space(xdr, 16); if (unlikely(p == NULL)) return htonl(NFS4ERR_RESOURCE); *p++ = htonl(2); *p++ = bm[0]; *p++ = bm[1]; } else if (bm[0] != 0) { p = xdr_reserve_space(xdr, 12); if (unlikely(p == NULL)) return htonl(NFS4ERR_RESOURCE); *p++ = htonl(1); *p++ = bm[0]; } else { p = xdr_reserve_space(xdr, 8); if (unlikely(p == NULL)) return htonl(NFS4ERR_RESOURCE); *p++ = htonl(0); } *savep = p; return 0; } static __be32 encode_attr_change(struct xdr_stream *xdr, const uint32_t *bitmap, uint64_t change) { __be32 *p; if (!(bitmap[0] & FATTR4_WORD0_CHANGE)) return 0; p = xdr_reserve_space(xdr, 8); if (unlikely(!p)) return htonl(NFS4ERR_RESOURCE); p = xdr_encode_hyper(p, change); return 0; } static __be32 encode_attr_size(struct xdr_stream *xdr, const uint32_t *bitmap, uint64_t 
size) { __be32 *p; if (!(bitmap[0] & FATTR4_WORD0_SIZE)) return 0; p = xdr_reserve_space(xdr, 8); if (unlikely(!p)) return htonl(NFS4ERR_RESOURCE); p = xdr_encode_hyper(p, size); return 0; } static __be32 encode_attr_time(struct xdr_stream *xdr, const struct timespec *time) { __be32 *p; p = xdr_reserve_space(xdr, 12); if (unlikely(!p)) return htonl(NFS4ERR_RESOURCE); p = xdr_encode_hyper(p, time->tv_sec); *p = htonl(time->tv_nsec); return 0; } static __be32 encode_attr_ctime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec *time) { if (!(bitmap[1] & FATTR4_WORD1_TIME_METADATA)) return 0; return encode_attr_time(xdr,time); } static __be32 encode_attr_mtime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec *time) { if (!(bitmap[1] & FATTR4_WORD1_TIME_MODIFY)) return 0; return encode_attr_time(xdr,time); } static __be32 encode_compound_hdr_res(struct xdr_stream *xdr, struct cb_compound_hdr_res *hdr) { __be32 status; hdr->status = xdr_reserve_space(xdr, 4); if (unlikely(hdr->status == NULL)) return htonl(NFS4ERR_RESOURCE); status = encode_string(xdr, hdr->taglen, hdr->tag); if (unlikely(status != 0)) return status; hdr->nops = xdr_reserve_space(xdr, 4); if (unlikely(hdr->nops == NULL)) return htonl(NFS4ERR_RESOURCE); return 0; } static __be32 encode_op_hdr(struct xdr_stream *xdr, uint32_t op, __be32 res) { __be32 *p; p = xdr_reserve_space(xdr, 8); if (unlikely(p == NULL)) return htonl(NFS4ERR_RESOURCE_HDR); *p++ = htonl(op); *p = res; return 0; } static __be32 encode_getattr_res(struct svc_rqst *rqstp, struct xdr_stream *xdr, const struct cb_getattrres *res) { __be32 *savep = NULL; __be32 status = res->status; if (unlikely(status != 0)) goto out; status = encode_attr_bitmap(xdr, res->bitmap, &savep); if (unlikely(status != 0)) goto out; status = encode_attr_change(xdr, res->bitmap, res->change_attr); if (unlikely(status != 0)) goto out; status = encode_attr_size(xdr, res->bitmap, res->size); if (unlikely(status != 0)) goto out; 
status = encode_attr_ctime(xdr, res->bitmap, &res->ctime); if (unlikely(status != 0)) goto out; status = encode_attr_mtime(xdr, res->bitmap, &res->mtime); *savep = htonl((unsigned int)((char *)xdr->p - (char *)(savep+1))); out: dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); return status; } #if defined(CONFIG_NFS_V4_1) static __be32 encode_sessionid(struct xdr_stream *xdr, const struct nfs4_sessionid *sid) { __be32 *p; int len = NFS4_MAX_SESSIONID_LEN; p = xdr_reserve_space(xdr, len); if (unlikely(p == NULL)) return htonl(NFS4ERR_RESOURCE); memcpy(p, sid, len); return 0; } static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp, struct xdr_stream *xdr, const struct cb_sequenceres *res) { __be32 *p; unsigned status = res->csr_status; if (unlikely(status != 0)) goto out; encode_sessionid(xdr, &res->csr_sessionid); p = xdr_reserve_space(xdr, 4 * sizeof(uint32_t)); if (unlikely(p == NULL)) return htonl(NFS4ERR_RESOURCE); *p++ = htonl(res->csr_sequenceid); *p++ = htonl(res->csr_slotid); *p++ = htonl(res->csr_highestslotid); *p++ = htonl(res->csr_target_highestslotid); out: dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); return status; } static __be32 preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op) { if (op_nr == OP_CB_SEQUENCE) { if (nop != 0) return htonl(NFS4ERR_SEQUENCE_POS); } else { if (nop == 0) return htonl(NFS4ERR_OP_NOT_IN_SESSION); } switch (op_nr) { case OP_CB_GETATTR: case OP_CB_RECALL: case OP_CB_SEQUENCE: case OP_CB_RECALL_ANY: case OP_CB_RECALL_SLOT: case OP_CB_LAYOUTRECALL: case OP_CB_NOTIFY_DEVICEID: *op = &callback_ops[op_nr]; break; case OP_CB_NOTIFY: case OP_CB_PUSH_DELEG: case OP_CB_RECALLABLE_OBJ_AVAIL: case OP_CB_WANTS_CANCELLED: case OP_CB_NOTIFY_LOCK: return htonl(NFS4ERR_NOTSUPP); default: return htonl(NFS4ERR_OP_ILLEGAL); } return htonl(NFS_OK); } static void nfs4_callback_free_slot(struct nfs4_session *session) { struct nfs4_slot_table *tbl = &session->bc_slot_table; 
spin_lock(&tbl->slot_tbl_lock); /* * Let the state manager know callback processing done. * A single slot, so highest used slotid is either 0 or -1 */ tbl->highest_used_slotid = -1; nfs4_check_drain_bc_complete(session); spin_unlock(&tbl->slot_tbl_lock); } static void nfs4_cb_free_slot(struct cb_process_state *cps) { if (cps->slotid != -1) nfs4_callback_free_slot(cps->clp->cl_session); } #else /* CONFIG_NFS_V4_1 */ static __be32 preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op) { return htonl(NFS4ERR_MINOR_VERS_MISMATCH); } static void nfs4_cb_free_slot(struct cb_process_state *cps) { } #endif /* CONFIG_NFS_V4_1 */ static __be32 preprocess_nfs4_op(unsigned int op_nr, struct callback_op **op) { switch (op_nr) { case OP_CB_GETATTR: case OP_CB_RECALL: *op = &callback_ops[op_nr]; break; default: return htonl(NFS4ERR_OP_ILLEGAL); } return htonl(NFS_OK); } static __be32 process_op(uint32_t minorversion, int nop, struct svc_rqst *rqstp, struct xdr_stream *xdr_in, void *argp, struct xdr_stream *xdr_out, void *resp, struct cb_process_state *cps) { struct callback_op *op = &callback_ops[0]; unsigned int op_nr; __be32 status; long maxlen; __be32 res; dprintk("%s: start\n", __func__); status = decode_op_hdr(xdr_in, &op_nr); if (unlikely(status)) return status; dprintk("%s: minorversion=%d nop=%d op_nr=%u\n", __func__, minorversion, nop, op_nr); status = minorversion ? 
preprocess_nfs41_op(nop, op_nr, &op) : preprocess_nfs4_op(op_nr, &op); if (status == htonl(NFS4ERR_OP_ILLEGAL)) op_nr = OP_CB_ILLEGAL; if (status) goto encode_hdr; if (cps->drc_status) { status = cps->drc_status; goto encode_hdr; } maxlen = xdr_out->end - xdr_out->p; if (maxlen > 0 && maxlen < PAGE_SIZE) { status = op->decode_args(rqstp, xdr_in, argp); if (likely(status == 0)) status = op->process_op(argp, resp, cps); } else status = htonl(NFS4ERR_RESOURCE); encode_hdr: res = encode_op_hdr(xdr_out, op_nr, status); if (unlikely(res)) return res; if (op->encode_res != NULL && status == 0) status = op->encode_res(rqstp, xdr_out, resp); dprintk("%s: done, status = %d\n", __func__, ntohl(status)); return status; } /* * Decode, process and encode a COMPOUND */ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *resp) { struct cb_compound_hdr_arg hdr_arg = { 0 }; struct cb_compound_hdr_res hdr_res = { NULL }; struct xdr_stream xdr_in, xdr_out; __be32 *p, status; struct cb_process_state cps = { .drc_status = 0, .clp = NULL, .slotid = -1, }; unsigned int nops = 0; dprintk("%s: start\n", __func__); xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base); p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len); xdr_init_encode(&xdr_out, &rqstp->rq_res, p); status = decode_compound_hdr_arg(&xdr_in, &hdr_arg); if (status == __constant_htonl(NFS4ERR_RESOURCE)) return rpc_garbage_args; if (hdr_arg.minorversion == 0) { cps.clp = nfs4_find_client_ident(hdr_arg.cb_ident); if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) return rpc_drop_reply; } hdr_res.taglen = hdr_arg.taglen; hdr_res.tag = hdr_arg.tag; if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) return rpc_system_err; while (status == 0 && nops != hdr_arg.nops) { status = process_op(hdr_arg.minorversion, nops, rqstp, &xdr_in, argp, &xdr_out, resp, &cps); nops++; } /* Buffer overflow in decode_ops_hdr or encode_ops_hdr. 
Return * resource error in cb_compound status without returning op */ if (unlikely(status == htonl(NFS4ERR_RESOURCE_HDR))) { status = htonl(NFS4ERR_RESOURCE); nops--; } *hdr_res.status = status; *hdr_res.nops = htonl(nops); nfs4_cb_free_slot(&cps); nfs_put_client(cps.clp); dprintk("%s: done, status = %u\n", __func__, ntohl(status)); return rpc_success; } /* * Define NFS4 callback COMPOUND ops. */ static struct callback_op callback_ops[] = { [0] = { .res_maxsize = CB_OP_HDR_RES_MAXSZ, }, [OP_CB_GETATTR] = { .process_op = (callback_process_op_t)nfs4_callback_getattr, .decode_args = (callback_decode_arg_t)decode_getattr_args, .encode_res = (callback_encode_res_t)encode_getattr_res, .res_maxsize = CB_OP_GETATTR_RES_MAXSZ, }, [OP_CB_RECALL] = { .process_op = (callback_process_op_t)nfs4_callback_recall, .decode_args = (callback_decode_arg_t)decode_recall_args, .res_maxsize = CB_OP_RECALL_RES_MAXSZ, }, #if defined(CONFIG_NFS_V4_1) [OP_CB_LAYOUTRECALL] = { .process_op = (callback_process_op_t)nfs4_callback_layoutrecall, .decode_args = (callback_decode_arg_t)decode_layoutrecall_args, .res_maxsize = CB_OP_LAYOUTRECALL_RES_MAXSZ, }, [OP_CB_NOTIFY_DEVICEID] = { .process_op = (callback_process_op_t)nfs4_callback_devicenotify, .decode_args = (callback_decode_arg_t)decode_devicenotify_args, .res_maxsize = CB_OP_DEVICENOTIFY_RES_MAXSZ, }, [OP_CB_SEQUENCE] = { .process_op = (callback_process_op_t)nfs4_callback_sequence, .decode_args = (callback_decode_arg_t)decode_cb_sequence_args, .encode_res = (callback_encode_res_t)encode_cb_sequence_res, .res_maxsize = CB_OP_SEQUENCE_RES_MAXSZ, }, [OP_CB_RECALL_ANY] = { .process_op = (callback_process_op_t)nfs4_callback_recallany, .decode_args = (callback_decode_arg_t)decode_recallany_args, .res_maxsize = CB_OP_RECALLANY_RES_MAXSZ, }, [OP_CB_RECALL_SLOT] = { .process_op = (callback_process_op_t)nfs4_callback_recallslot, .decode_args = (callback_decode_arg_t)decode_recallslot_args, .res_maxsize = CB_OP_RECALLSLOT_RES_MAXSZ, }, #endif /* 
CONFIG_NFS_V4_1 */ }; /* * Define NFS4 callback procedures */ static struct svc_procedure nfs4_callback_procedures1[] = { [CB_NULL] = { .pc_func = nfs4_callback_null, .pc_decode = (kxdrproc_t)nfs4_decode_void, .pc_encode = (kxdrproc_t)nfs4_encode_void, .pc_xdrressize = 1, }, [CB_COMPOUND] = { .pc_func = nfs4_callback_compound, .pc_encode = (kxdrproc_t)nfs4_encode_void, .pc_argsize = 256, .pc_ressize = 256, .pc_xdrressize = NFS4_CALLBACK_BUFSIZE, } }; struct svc_version nfs4_callback_version1 = { .vs_vers = 1, .vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1), .vs_proc = nfs4_callback_procedures1, .vs_xdrsize = NFS4_CALLBACK_XDRSIZE, .vs_dispatch = NULL, .vs_hidden = 1, }; struct svc_version nfs4_callback_version4 = { .vs_vers = 4, .vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1), .vs_proc = nfs4_callback_procedures1, .vs_xdrsize = NFS4_CALLBACK_XDRSIZE, .vs_dispatch = NULL, };
gpl-2.0
parheliamm/i939u2
drivers/staging/comedi/drivers/daqboard2000.c
3255
27245
/* comedi/drivers/daqboard2000.c hardware driver for IOtech DAQboard/2000 COMEDI - Linux Control and Measurement Device Interface Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: daqboard2000 Description: IOTech DAQBoard/2000 Author: Anders Blomdell <anders.blomdell@control.lth.se> Status: works Updated: Mon, 14 Apr 2008 15:28:52 +0100 Devices: [IOTech] DAQBoard/2000 (daqboard2000) Much of the functionality of this driver was determined from reading the source code for the Windows driver. The FPGA on the board requires initialization code, which can be loaded by comedi_config using the -i option. The initialization code is available from http://www.comedi.org in the comedi_nonfree_firmware tarball. Configuration options: [0] - PCI bus of device (optional) [1] - PCI slot of device (optional) If bus/slot is not specified, the first supported PCI device found will be used. */ /* This card was obviously never intended to leave the Windows world, since it lacked all kind of hardware documentation (except for cable pinouts, plug and pray has something to catch up with yet). With some help from our swedish distributor, we got the Windows sourcecode for the card, and here are the findings so far. 1. 
A good document that describes the PCI interface chip is 9080db-106.pdf available from http://www.plxtech.com/products/io/pci9080 2. The initialization done so far is: a. program the FPGA (windows code sans a lot of error messages) b. 3. Analog out seems to work OK with DAC's disabled, if DAC's are enabled, you have to output values to all enabled DAC's until result appears, I guess that it has something to do with pacer clocks, but the source gives me no clues. I'll keep it simple so far. 4. Analog in. Each channel in the scanlist seems to be controlled by four control words: Word0: +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ! | | | ! | | | ! | | | ! | | | ! +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Word1: +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ! | | | ! | | | ! | | | ! | | | ! +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | | | | | +------+------+ | | | | +-- Digital input (??) | | | | +---- 10 us settling time | | | +------ Suspend acquisition (last to scan) | | +-------- Simultaneous sample and hold | +---------- Signed data format +------------------------- Correction offset low Word2: +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ! | | | ! | | | ! | | | ! | | | ! +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | | | | | | | | +-----+ +--+--+ +++ +++ +--+--+ | | | | +----- Expansion channel | | | +----------- Expansion gain | | +--------------- Channel (low) | +--------------------- Correction offset high +----------------------------- Correction gain low Word3: +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ! | | | ! | | | ! | | | ! | | | ! +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | | | | | | | +------+------+ | | +-+-+ | | +-- Low bank enable | | | | | +---- High bank enable | | | | +------ Hi/low select | | | +---------- Gain (1,?,2,4,8,16,32,64) | | +-------------- differential/single ended | +---------------- Unipolar +------------------------- Correction gain high 999. The card seems to have an incredible amount of capabilities, but trying to reverse engineer them from the Windows source is beyond my patience. 
*/ #include "../comedidev.h" #include <linux/delay.h> #include <linux/interrupt.h> #include "comedi_pci.h" #include "8255.h" #define DAQBOARD2000_SUBSYSTEM_IDS2 0x00021616 /* Daqboard/2000 - 2 Dacs */ #define DAQBOARD2000_SUBSYSTEM_IDS4 0x00041616 /* Daqboard/2000 - 4 Dacs */ #define DAQBOARD2000_DAQ_SIZE 0x1002 #define DAQBOARD2000_PLX_SIZE 0x100 /* Initialization bits for the Serial EEPROM Control Register */ #define DAQBOARD2000_SECRProgPinHi 0x8001767e #define DAQBOARD2000_SECRProgPinLo 0x8000767e #define DAQBOARD2000_SECRLocalBusHi 0xc000767e #define DAQBOARD2000_SECRLocalBusLo 0x8000767e #define DAQBOARD2000_SECRReloadHi 0xa000767e #define DAQBOARD2000_SECRReloadLo 0x8000767e /* SECR status bits */ #define DAQBOARD2000_EEPROM_PRESENT 0x10000000 /* CPLD status bits */ #define DAQBOARD2000_CPLD_INIT 0x0002 #define DAQBOARD2000_CPLD_DONE 0x0004 /* Available ranges */ static const struct comedi_lrange range_daqboard2000_ai = { 13, { RANGE(-10, 10), RANGE(-5, 5), RANGE(-2.5, 2.5), RANGE(-1.25, 1.25), RANGE(-0.625, 0.625), RANGE(-0.3125, 0.3125), RANGE(-0.156, 0.156), RANGE(0, 10), RANGE(0, 5), RANGE(0, 2.5), RANGE(0, 1.25), RANGE(0, 0.625), RANGE(0, 0.3125) } }; static const struct comedi_lrange range_daqboard2000_ao = { 1, { RANGE(-10, 10) } }; struct daqboard2000_hw { volatile u16 acqControl; /* 0x00 */ volatile u16 acqScanListFIFO; /* 0x02 */ volatile u32 acqPacerClockDivLow; /* 0x04 */ volatile u16 acqScanCounter; /* 0x08 */ volatile u16 acqPacerClockDivHigh; /* 0x0a */ volatile u16 acqTriggerCount; /* 0x0c */ volatile u16 fill2; /* 0x0e */ volatile u16 acqResultsFIFO; /* 0x10 */ volatile u16 fill3; /* 0x12 */ volatile u16 acqResultsShadow; /* 0x14 */ volatile u16 fill4; /* 0x16 */ volatile u16 acqAdcResult; /* 0x18 */ volatile u16 fill5; /* 0x1a */ volatile u16 dacScanCounter; /* 0x1c */ volatile u16 fill6; /* 0x1e */ volatile u16 dacControl; /* 0x20 */ volatile u16 fill7; /* 0x22 */ volatile s16 dacFIFO; /* 0x24 */ volatile u16 fill8[2]; /* 0x26 */ volatile 
u16 dacPacerClockDiv; /* 0x2a */ volatile u16 refDacs; /* 0x2c */ volatile u16 fill9; /* 0x2e */ volatile u16 dioControl; /* 0x30 */ volatile s16 dioP3hsioData; /* 0x32 */ volatile u16 dioP3Control; /* 0x34 */ volatile u16 calEepromControl; /* 0x36 */ volatile s16 dacSetting[4]; /* 0x38 */ volatile s16 dioP2ExpansionIO8Bit[32]; /* 0x40 */ volatile u16 ctrTmrControl; /* 0x80 */ volatile u16 fill10[3]; /* 0x82 */ volatile s16 ctrInput[4]; /* 0x88 */ volatile u16 fill11[8]; /* 0x90 */ volatile u16 timerDivisor[2]; /* 0xa0 */ volatile u16 fill12[6]; /* 0xa4 */ volatile u16 dmaControl; /* 0xb0 */ volatile u16 trigControl; /* 0xb2 */ volatile u16 fill13[2]; /* 0xb4 */ volatile u16 calEeprom; /* 0xb8 */ volatile u16 acqDigitalMark; /* 0xba */ volatile u16 trigDacs; /* 0xbc */ volatile u16 fill14; /* 0xbe */ volatile s16 dioP2ExpansionIO16Bit[32]; /* 0xc0 */ }; /* Scan Sequencer programming */ #define DAQBOARD2000_SeqStartScanList 0x0011 #define DAQBOARD2000_SeqStopScanList 0x0010 /* Prepare for acquisition */ #define DAQBOARD2000_AcqResetScanListFifo 0x0004 #define DAQBOARD2000_AcqResetResultsFifo 0x0002 #define DAQBOARD2000_AcqResetConfigPipe 0x0001 /* Acqusition status bits */ #define DAQBOARD2000_AcqResultsFIFOMore1Sample 0x0001 #define DAQBOARD2000_AcqResultsFIFOHasValidData 0x0002 #define DAQBOARD2000_AcqResultsFIFOOverrun 0x0004 #define DAQBOARD2000_AcqLogicScanning 0x0008 #define DAQBOARD2000_AcqConfigPipeFull 0x0010 #define DAQBOARD2000_AcqScanListFIFOEmpty 0x0020 #define DAQBOARD2000_AcqAdcNotReady 0x0040 #define DAQBOARD2000_ArbitrationFailure 0x0080 #define DAQBOARD2000_AcqPacerOverrun 0x0100 #define DAQBOARD2000_DacPacerOverrun 0x0200 #define DAQBOARD2000_AcqHardwareError 0x01c0 /* Scan Sequencer programming */ #define DAQBOARD2000_SeqStartScanList 0x0011 #define DAQBOARD2000_SeqStopScanList 0x0010 /* Pacer Clock Control */ #define DAQBOARD2000_AdcPacerInternal 0x0030 #define DAQBOARD2000_AdcPacerExternal 0x0032 #define DAQBOARD2000_AdcPacerEnable 0x0031 
#define DAQBOARD2000_AdcPacerEnableDacPacer 0x0034 #define DAQBOARD2000_AdcPacerDisable 0x0030 #define DAQBOARD2000_AdcPacerNormalMode 0x0060 #define DAQBOARD2000_AdcPacerCompatibilityMode 0x0061 #define DAQBOARD2000_AdcPacerInternalOutEnable 0x0008 #define DAQBOARD2000_AdcPacerExternalRising 0x0100 /* DAC status */ #define DAQBOARD2000_DacFull 0x0001 #define DAQBOARD2000_RefBusy 0x0002 #define DAQBOARD2000_TrgBusy 0x0004 #define DAQBOARD2000_CalBusy 0x0008 #define DAQBOARD2000_Dac0Busy 0x0010 #define DAQBOARD2000_Dac1Busy 0x0020 #define DAQBOARD2000_Dac2Busy 0x0040 #define DAQBOARD2000_Dac3Busy 0x0080 /* DAC control */ #define DAQBOARD2000_Dac0Enable 0x0021 #define DAQBOARD2000_Dac1Enable 0x0031 #define DAQBOARD2000_Dac2Enable 0x0041 #define DAQBOARD2000_Dac3Enable 0x0051 #define DAQBOARD2000_DacEnableBit 0x0001 #define DAQBOARD2000_Dac0Disable 0x0020 #define DAQBOARD2000_Dac1Disable 0x0030 #define DAQBOARD2000_Dac2Disable 0x0040 #define DAQBOARD2000_Dac3Disable 0x0050 #define DAQBOARD2000_DacResetFifo 0x0004 #define DAQBOARD2000_DacPatternDisable 0x0060 #define DAQBOARD2000_DacPatternEnable 0x0061 #define DAQBOARD2000_DacSelectSignedData 0x0002 #define DAQBOARD2000_DacSelectUnsignedData 0x0000 /* Trigger Control */ #define DAQBOARD2000_TrigAnalog 0x0000 #define DAQBOARD2000_TrigTTL 0x0010 #define DAQBOARD2000_TrigTransHiLo 0x0004 #define DAQBOARD2000_TrigTransLoHi 0x0000 #define DAQBOARD2000_TrigAbove 0x0000 #define DAQBOARD2000_TrigBelow 0x0004 #define DAQBOARD2000_TrigLevelSense 0x0002 #define DAQBOARD2000_TrigEdgeSense 0x0000 #define DAQBOARD2000_TrigEnable 0x0001 #define DAQBOARD2000_TrigDisable 0x0000 /* Reference Dac Selection */ #define DAQBOARD2000_PosRefDacSelect 0x0100 #define DAQBOARD2000_NegRefDacSelect 0x0000 static int daqboard2000_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int daqboard2000_detach(struct comedi_device *dev); static struct comedi_driver driver_daqboard2000 = { .driver_name = "daqboard2000", .module = 
THIS_MODULE, .attach = daqboard2000_attach, .detach = daqboard2000_detach, }; struct daq200_boardtype { const char *name; int id; }; static const struct daq200_boardtype boardtypes[] = { {"ids2", DAQBOARD2000_SUBSYSTEM_IDS2}, {"ids4", DAQBOARD2000_SUBSYSTEM_IDS4}, }; #define n_boardtypes (sizeof(boardtypes)/sizeof(struct daq200_boardtype)) #define this_board ((const struct daq200_boardtype *)dev->board_ptr) static DEFINE_PCI_DEVICE_TABLE(daqboard2000_pci_table) = { { 0x1616, 0x0409, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { 0} }; MODULE_DEVICE_TABLE(pci, daqboard2000_pci_table); struct daqboard2000_private { enum { card_daqboard_2000 } card; struct pci_dev *pci_dev; void *daq; void *plx; int got_regions; unsigned int ao_readback[2]; }; #define devpriv ((struct daqboard2000_private *)dev->private) static void writeAcqScanListEntry(struct comedi_device *dev, u16 entry) { struct daqboard2000_hw *fpga = devpriv->daq; /* udelay(4); */ fpga->acqScanListFIFO = entry & 0x00ff; /* udelay(4); */ fpga->acqScanListFIFO = (entry >> 8) & 0x00ff; } static void setup_sampling(struct comedi_device *dev, int chan, int gain) { u16 word0, word1, word2, word3; /* Channel 0-7 diff, channel 8-23 single ended */ word0 = 0; word1 = 0x0004; /* Last scan */ word2 = (chan << 6) & 0x00c0; switch (chan / 4) { case 0: word3 = 0x0001; break; case 1: word3 = 0x0002; break; case 2: word3 = 0x0005; break; case 3: word3 = 0x0006; break; case 4: word3 = 0x0041; break; case 5: word3 = 0x0042; break; default: word3 = 0; break; } /* dev->eeprom.correctionDACSE[i][j][k].offset = 0x800; dev->eeprom.correctionDACSE[i][j][k].gain = 0xc00; */ /* These should be read from EEPROM */ word2 |= 0x0800; word3 |= 0xc000; /* printk("%d %4.4x %4.4x %4.4x %4.4x\n", chan, word0, word1, word2, word3);*/ writeAcqScanListEntry(dev, word0); writeAcqScanListEntry(dev, word1); writeAcqScanListEntry(dev, word2); writeAcqScanListEntry(dev, word3); } static int daqboard2000_ai_insn_read(struct comedi_device *dev, struct 
comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; struct daqboard2000_hw *fpga = devpriv->daq; int gain, chan, timeout; fpga->acqControl = DAQBOARD2000_AcqResetScanListFifo | DAQBOARD2000_AcqResetResultsFifo | DAQBOARD2000_AcqResetConfigPipe; /* If pacer clock is not set to some high value (> 10 us), we risk multiple samples to be put into the result FIFO. */ fpga->acqPacerClockDivLow = 1000000; /* 1 second, should be long enough */ fpga->acqPacerClockDivHigh = 0; gain = CR_RANGE(insn->chanspec); chan = CR_CHAN(insn->chanspec); /* This doesn't look efficient. I decided to take the conservative * approach when I did the insn conversion. Perhaps it would be * better to have broken it completely, then someone would have been * forced to fix it. --ds */ for (i = 0; i < insn->n; i++) { setup_sampling(dev, chan, gain); /* Enable reading from the scanlist FIFO */ fpga->acqControl = DAQBOARD2000_SeqStartScanList; for (timeout = 0; timeout < 20; timeout++) { if (fpga->acqControl & DAQBOARD2000_AcqConfigPipeFull) { break; } /* udelay(2); */ } fpga->acqControl = DAQBOARD2000_AdcPacerEnable; for (timeout = 0; timeout < 20; timeout++) { if (fpga->acqControl & DAQBOARD2000_AcqLogicScanning) { break; } /* udelay(2); */ } for (timeout = 0; timeout < 20; timeout++) { if (fpga->acqControl & DAQBOARD2000_AcqResultsFIFOHasValidData) { break; } /* udelay(2); */ } data[i] = fpga->acqResultsFIFO; fpga->acqControl = DAQBOARD2000_AdcPacerDisable; fpga->acqControl = DAQBOARD2000_SeqStopScanList; } return i; } static int daqboard2000_ao_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; int chan = CR_CHAN(insn->chanspec); for (i = 0; i < insn->n; i++) { data[i] = devpriv->ao_readback[chan]; } return i; } static int daqboard2000_ao_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; int chan = CR_CHAN(insn->chanspec); struct 
daqboard2000_hw *fpga = devpriv->daq; int timeout; for (i = 0; i < insn->n; i++) { /* * OK, since it works OK without enabling the DAC's, let's keep * it as simple as possible... */ /* fpga->dacControl = (chan + 2) * 0x0010 | 0x0001; udelay(1000); */ fpga->dacSetting[chan] = data[i]; for (timeout = 0; timeout < 20; timeout++) { if ((fpga->dacControl & ((chan + 1) * 0x0010)) == 0) { break; } /* udelay(2); */ } devpriv->ao_readback[chan] = data[i]; /* * Since we never enabled the DAC's, we don't need to disable it... * fpga->dacControl = (chan + 2) * 0x0010 | 0x0000; udelay(1000); */ } return i; } static void daqboard2000_resetLocalBus(struct comedi_device *dev) { printk("daqboard2000_resetLocalBus\n"); writel(DAQBOARD2000_SECRLocalBusHi, devpriv->plx + 0x6c); udelay(10000); writel(DAQBOARD2000_SECRLocalBusLo, devpriv->plx + 0x6c); udelay(10000); } static void daqboard2000_reloadPLX(struct comedi_device *dev) { printk("daqboard2000_reloadPLX\n"); writel(DAQBOARD2000_SECRReloadLo, devpriv->plx + 0x6c); udelay(10000); writel(DAQBOARD2000_SECRReloadHi, devpriv->plx + 0x6c); udelay(10000); writel(DAQBOARD2000_SECRReloadLo, devpriv->plx + 0x6c); udelay(10000); } static void daqboard2000_pulseProgPin(struct comedi_device *dev) { printk("daqboard2000_pulseProgPin 1\n"); writel(DAQBOARD2000_SECRProgPinHi, devpriv->plx + 0x6c); udelay(10000); writel(DAQBOARD2000_SECRProgPinLo, devpriv->plx + 0x6c); udelay(10000); /* Not in the original code, but I like symmetry... 
*/ } static int daqboard2000_pollCPLD(struct comedi_device *dev, int mask) { int result = 0; int i; int cpld; /* timeout after 50 tries -> 5ms */ for (i = 0; i < 50; i++) { cpld = readw(devpriv->daq + 0x1000); if ((cpld & mask) == mask) { result = 1; break; } udelay(100); } udelay(5); return result; } static int daqboard2000_writeCPLD(struct comedi_device *dev, int data) { int result = 0; udelay(10); writew(data, devpriv->daq + 0x1000); if ((readw(devpriv->daq + 0x1000) & DAQBOARD2000_CPLD_INIT) == DAQBOARD2000_CPLD_INIT) { result = 1; } return result; } static int initialize_daqboard2000(struct comedi_device *dev, unsigned char *cpld_array, int len) { int result = -EIO; /* Read the serial EEPROM control register */ int secr; int retry; int i; /* Check to make sure the serial eeprom is present on the board */ secr = readl(devpriv->plx + 0x6c); if (!(secr & DAQBOARD2000_EEPROM_PRESENT)) { #ifdef DEBUG_EEPROM printk("no serial eeprom\n"); #endif return -EIO; } for (retry = 0; retry < 3; retry++) { #ifdef DEBUG_EEPROM printk("Programming EEPROM try %x\n", retry); #endif daqboard2000_resetLocalBus(dev); daqboard2000_reloadPLX(dev); daqboard2000_pulseProgPin(dev); if (daqboard2000_pollCPLD(dev, DAQBOARD2000_CPLD_INIT)) { for (i = 0; i < len; i++) { if (cpld_array[i] == 0xff && cpld_array[i + 1] == 0x20) { #ifdef DEBUG_EEPROM printk("Preamble found at %d\n", i); #endif break; } } for (; i < len; i += 2) { int data = (cpld_array[i] << 8) + cpld_array[i + 1]; if (!daqboard2000_writeCPLD(dev, data)) { break; } } if (i >= len) { #ifdef DEBUG_EEPROM printk("Programmed\n"); #endif daqboard2000_resetLocalBus(dev); daqboard2000_reloadPLX(dev); result = 0; break; } } } return result; } static void daqboard2000_adcStopDmaTransfer(struct comedi_device *dev) { /* printk("Implement: daqboard2000_adcStopDmaTransfer\n");*/ } static void daqboard2000_adcDisarm(struct comedi_device *dev) { struct daqboard2000_hw *fpga = devpriv->daq; /* Disable hardware triggers */ udelay(2); 
fpga->trigControl = DAQBOARD2000_TrigAnalog | DAQBOARD2000_TrigDisable; udelay(2); fpga->trigControl = DAQBOARD2000_TrigTTL | DAQBOARD2000_TrigDisable; /* Stop the scan list FIFO from loading the configuration pipe */ udelay(2); fpga->acqControl = DAQBOARD2000_SeqStopScanList; /* Stop the pacer clock */ udelay(2); fpga->acqControl = DAQBOARD2000_AdcPacerDisable; /* Stop the input dma (abort channel 1) */ daqboard2000_adcStopDmaTransfer(dev); } static void daqboard2000_activateReferenceDacs(struct comedi_device *dev) { struct daqboard2000_hw *fpga = devpriv->daq; int timeout; /* Set the + reference dac value in the FPGA */ fpga->refDacs = 0x80 | DAQBOARD2000_PosRefDacSelect; for (timeout = 0; timeout < 20; timeout++) { if ((fpga->dacControl & DAQBOARD2000_RefBusy) == 0) { break; } udelay(2); } /* printk("DAQBOARD2000_PosRefDacSelect %d\n", timeout);*/ /* Set the - reference dac value in the FPGA */ fpga->refDacs = 0x80 | DAQBOARD2000_NegRefDacSelect; for (timeout = 0; timeout < 20; timeout++) { if ((fpga->dacControl & DAQBOARD2000_RefBusy) == 0) { break; } udelay(2); } /* printk("DAQBOARD2000_NegRefDacSelect %d\n", timeout);*/ } static void daqboard2000_initializeCtrs(struct comedi_device *dev) { /* printk("Implement: daqboard2000_initializeCtrs\n");*/ } static void daqboard2000_initializeTmrs(struct comedi_device *dev) { /* printk("Implement: daqboard2000_initializeTmrs\n");*/ } static void daqboard2000_dacDisarm(struct comedi_device *dev) { /* printk("Implement: daqboard2000_dacDisarm\n");*/ } static void daqboard2000_initializeAdc(struct comedi_device *dev) { daqboard2000_adcDisarm(dev); daqboard2000_activateReferenceDacs(dev); daqboard2000_initializeCtrs(dev); daqboard2000_initializeTmrs(dev); } static void daqboard2000_initializeDac(struct comedi_device *dev) { daqboard2000_dacDisarm(dev); } /* The test command, REMOVE!!: rmmod daqboard2000 ; rmmod comedi; make install ; modprobe daqboard2000; /usr/sbin/comedi_config /dev/comedi0 daqboard/2000 ; tail -40 
/var/log/messages */ static int daqboard2000_8255_cb(int dir, int port, int data, unsigned long ioaddr) { int result = 0; if (dir) { writew(data, ((void *)ioaddr) + port * 2); result = 0; } else { result = readw(((void *)ioaddr) + port * 2); } /* printk("daqboard2000_8255_cb %x %d %d %2.2x -> %2.2x\n", arg, dir, port, data, result); */ return result; } static int daqboard2000_attach(struct comedi_device *dev, struct comedi_devconfig *it) { int result = 0; struct comedi_subdevice *s; struct pci_dev *card = NULL; void *aux_data; unsigned int aux_len; int bus, slot; printk("comedi%d: daqboard2000:", dev->minor); bus = it->options[0]; slot = it->options[1]; result = alloc_private(dev, sizeof(struct daqboard2000_private)); if (result < 0) { return -ENOMEM; } for (card = pci_get_device(0x1616, 0x0409, NULL); card != NULL; card = pci_get_device(0x1616, 0x0409, card)) { if (bus || slot) { /* requested particular bus/slot */ if (card->bus->number != bus || PCI_SLOT(card->devfn) != slot) { continue; } } break; /* found one */ } if (!card) { if (bus || slot) printk(" no daqboard2000 found at bus/slot: %d/%d\n", bus, slot); else printk(" no daqboard2000 found\n"); return -EIO; } else { u32 id; int i; devpriv->pci_dev = card; id = ((u32) card-> subsystem_device << 16) | card->subsystem_vendor; for (i = 0; i < n_boardtypes; i++) { if (boardtypes[i].id == id) { printk(" %s", boardtypes[i].name); dev->board_ptr = boardtypes + i; } } if (!dev->board_ptr) { printk (" unknown subsystem id %08x (pretend it is an ids2)", id); dev->board_ptr = boardtypes; } } result = comedi_pci_enable(card, "daqboard2000"); if (result < 0) { printk(" failed to enable PCI device and request regions\n"); return -EIO; } devpriv->got_regions = 1; devpriv->plx = ioremap(pci_resource_start(card, 0), DAQBOARD2000_PLX_SIZE); devpriv->daq = ioremap(pci_resource_start(card, 2), DAQBOARD2000_DAQ_SIZE); if (!devpriv->plx || !devpriv->daq) { return -ENOMEM; } result = alloc_subdevices(dev, 3); if (result < 0) goto 
out; readl(devpriv->plx + 0x6c); /* u8 interrupt; Windows code does restore interrupts, but since we don't use them... pci_read_config_byte(card, PCI_INTERRUPT_LINE, &interrupt); printk("Interrupt before is: %x\n", interrupt); */ aux_data = comedi_aux_data(it->options, 0); aux_len = it->options[COMEDI_DEVCONF_AUX_DATA_LENGTH]; if (aux_data && aux_len) { result = initialize_daqboard2000(dev, aux_data, aux_len); } else { printk("no FPGA initialization code, aborting\n"); result = -EIO; } if (result < 0) goto out; daqboard2000_initializeAdc(dev); daqboard2000_initializeDac(dev); /* Windows code does restore interrupts, but since we don't use them... pci_read_config_byte(card, PCI_INTERRUPT_LINE, &interrupt); printk("Interrupt after is: %x\n", interrupt); */ dev->iobase = (unsigned long)devpriv->daq; dev->board_name = this_board->name; s = dev->subdevices + 0; /* ai subdevice */ s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_GROUND; s->n_chan = 24; s->maxdata = 0xffff; s->insn_read = daqboard2000_ai_insn_read; s->range_table = &range_daqboard2000_ai; s = dev->subdevices + 1; /* ao subdevice */ s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE; s->n_chan = 2; s->maxdata = 0xffff; s->insn_read = daqboard2000_ao_insn_read; s->insn_write = daqboard2000_ao_insn_write; s->range_table = &range_daqboard2000_ao; s = dev->subdevices + 2; result = subdev_8255_init(dev, s, daqboard2000_8255_cb, (unsigned long)(dev->iobase + 0x40)); printk("\n"); out: return result; } static int daqboard2000_detach(struct comedi_device *dev) { printk("comedi%d: daqboard2000: remove\n", dev->minor); if (dev->subdevices) subdev_8255_cleanup(dev, dev->subdevices + 2); if (dev->irq) { free_irq(dev->irq, dev); } if (devpriv) { if (devpriv->daq) iounmap(devpriv->daq); if (devpriv->plx) iounmap(devpriv->plx); if (devpriv->pci_dev) { if (devpriv->got_regions) { comedi_pci_disable(devpriv->pci_dev); } pci_dev_put(devpriv->pci_dev); } } return 0; } static int __devinit 
driver_daqboard2000_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { return comedi_pci_auto_config(dev, driver_daqboard2000.driver_name); } static void __devexit driver_daqboard2000_pci_remove(struct pci_dev *dev) { comedi_pci_auto_unconfig(dev); } static struct pci_driver driver_daqboard2000_pci_driver = { .id_table = daqboard2000_pci_table, .probe = &driver_daqboard2000_pci_probe, .remove = __devexit_p(&driver_daqboard2000_pci_remove) }; static int __init driver_daqboard2000_init_module(void) { int retval; retval = comedi_driver_register(&driver_daqboard2000); if (retval < 0) return retval; driver_daqboard2000_pci_driver.name = (char *)driver_daqboard2000.driver_name; return pci_register_driver(&driver_daqboard2000_pci_driver); } static void __exit driver_daqboard2000_cleanup_module(void) { pci_unregister_driver(&driver_daqboard2000_pci_driver); comedi_driver_unregister(&driver_daqboard2000); } module_init(driver_daqboard2000_init_module); module_exit(driver_daqboard2000_cleanup_module); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
ayushrox/Pandora_kernel
sound/mips/sgio2audio.c
4023
27692
/* * Sound driver for Silicon Graphics O2 Workstations A/V board audio. * * Copyright 2003 Vivien Chappelier <vivien.chappelier@linux-mips.org> * Copyright 2008 Thomas Bogendoerfer <tsbogend@alpha.franken.de> * Mxier part taken from mace_audio.c: * Copyright 2007 Thorben Jändling <tj.trevelyan@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/slab.h> #include <asm/ip32/ip32_ints.h> #include <asm/ip32/mace.h> #include <sound/core.h> #include <sound/control.h> #include <sound/pcm.h> #define SNDRV_GET_ID #include <sound/initval.h> #include <sound/ad1843.h> MODULE_AUTHOR("Vivien Chappelier <vivien.chappelier@linux-mips.org>"); MODULE_DESCRIPTION("SGI O2 Audio"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Silicon Graphics, O2 Audio}}"); static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */ static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */ module_param(index, int, 0444); MODULE_PARM_DESC(index, "Index value for SGI O2 soundcard."); module_param(id, charp, 0444); MODULE_PARM_DESC(id, "ID string for SGI O2 soundcard."); #define AUDIO_CONTROL_RESET BIT(0) /* 1: reset audio interface */ #define 
AUDIO_CONTROL_CODEC_PRESENT BIT(1) /* 1: codec detected */ #define CODEC_CONTROL_WORD_SHIFT 0 #define CODEC_CONTROL_READ BIT(16) #define CODEC_CONTROL_ADDRESS_SHIFT 17 #define CHANNEL_CONTROL_RESET BIT(10) /* 1: reset channel */ #define CHANNEL_DMA_ENABLE BIT(9) /* 1: enable DMA transfer */ #define CHANNEL_INT_THRESHOLD_DISABLED (0 << 5) /* interrupt disabled */ #define CHANNEL_INT_THRESHOLD_25 (1 << 5) /* int on buffer >25% full */ #define CHANNEL_INT_THRESHOLD_50 (2 << 5) /* int on buffer >50% full */ #define CHANNEL_INT_THRESHOLD_75 (3 << 5) /* int on buffer >75% full */ #define CHANNEL_INT_THRESHOLD_EMPTY (4 << 5) /* int on buffer empty */ #define CHANNEL_INT_THRESHOLD_NOT_EMPTY (5 << 5) /* int on buffer !empty */ #define CHANNEL_INT_THRESHOLD_FULL (6 << 5) /* int on buffer empty */ #define CHANNEL_INT_THRESHOLD_NOT_FULL (7 << 5) /* int on buffer !empty */ #define CHANNEL_RING_SHIFT 12 #define CHANNEL_RING_SIZE (1 << CHANNEL_RING_SHIFT) #define CHANNEL_RING_MASK (CHANNEL_RING_SIZE - 1) #define CHANNEL_LEFT_SHIFT 40 #define CHANNEL_RIGHT_SHIFT 8 struct snd_sgio2audio_chan { int idx; struct snd_pcm_substream *substream; int pos; snd_pcm_uframes_t size; spinlock_t lock; }; /* definition of the chip-specific record */ struct snd_sgio2audio { struct snd_card *card; /* codec */ struct snd_ad1843 ad1843; spinlock_t ad1843_lock; /* channels */ struct snd_sgio2audio_chan channel[3]; /* resources */ void *ring_base; dma_addr_t ring_base_dma; }; /* AD1843 access */ /* * read_ad1843_reg returns the current contents of a 16 bit AD1843 register. * * Returns unsigned register value on success, -errno on failure. 
*/ static int read_ad1843_reg(void *priv, int reg) { struct snd_sgio2audio *chip = priv; int val; unsigned long flags; spin_lock_irqsave(&chip->ad1843_lock, flags); writeq((reg << CODEC_CONTROL_ADDRESS_SHIFT) | CODEC_CONTROL_READ, &mace->perif.audio.codec_control); wmb(); val = readq(&mace->perif.audio.codec_control); /* flush bus */ udelay(200); val = readq(&mace->perif.audio.codec_read); spin_unlock_irqrestore(&chip->ad1843_lock, flags); return val; } /* * write_ad1843_reg writes the specified value to a 16 bit AD1843 register. */ static int write_ad1843_reg(void *priv, int reg, int word) { struct snd_sgio2audio *chip = priv; int val; unsigned long flags; spin_lock_irqsave(&chip->ad1843_lock, flags); writeq((reg << CODEC_CONTROL_ADDRESS_SHIFT) | (word << CODEC_CONTROL_WORD_SHIFT), &mace->perif.audio.codec_control); wmb(); val = readq(&mace->perif.audio.codec_control); /* flush bus */ udelay(200); spin_unlock_irqrestore(&chip->ad1843_lock, flags); return 0; } static int sgio2audio_gain_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct snd_sgio2audio *chip = snd_kcontrol_chip(kcontrol); uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = ad1843_get_gain_max(&chip->ad1843, (int)kcontrol->private_value); return 0; } static int sgio2audio_gain_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_sgio2audio *chip = snd_kcontrol_chip(kcontrol); int vol; vol = ad1843_get_gain(&chip->ad1843, (int)kcontrol->private_value); ucontrol->value.integer.value[0] = (vol >> 8) & 0xFF; ucontrol->value.integer.value[1] = vol & 0xFF; return 0; } static int sgio2audio_gain_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_sgio2audio *chip = snd_kcontrol_chip(kcontrol); int newvol, oldvol; oldvol = ad1843_get_gain(&chip->ad1843, kcontrol->private_value); newvol = (ucontrol->value.integer.value[0] << 8) | 
ucontrol->value.integer.value[1]; newvol = ad1843_set_gain(&chip->ad1843, kcontrol->private_value, newvol); return newvol != oldvol; } static int sgio2audio_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static const char *texts[3] = { "Cam Mic", "Mic", "Line" }; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = 3; if (uinfo->value.enumerated.item >= 3) uinfo->value.enumerated.item = 1; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int sgio2audio_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_sgio2audio *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.enumerated.item[0] = ad1843_get_recsrc(&chip->ad1843); return 0; } static int sgio2audio_source_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_sgio2audio *chip = snd_kcontrol_chip(kcontrol); int newsrc, oldsrc; oldsrc = ad1843_get_recsrc(&chip->ad1843); newsrc = ad1843_set_recsrc(&chip->ad1843, ucontrol->value.enumerated.item[0]); return newsrc != oldsrc; } /* dac1/pcm0 mixer control */ static struct snd_kcontrol_new sgio2audio_ctrl_pcm0 __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "PCM Playback Volume", .index = 0, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = AD1843_GAIN_PCM_0, .info = sgio2audio_gain_info, .get = sgio2audio_gain_get, .put = sgio2audio_gain_put, }; /* dac2/pcm1 mixer control */ static struct snd_kcontrol_new sgio2audio_ctrl_pcm1 __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "PCM Playback Volume", .index = 1, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = AD1843_GAIN_PCM_1, .info = sgio2audio_gain_info, .get = sgio2audio_gain_get, .put = sgio2audio_gain_put, }; /* record level mixer control */ static struct snd_kcontrol_new sgio2audio_ctrl_reclevel __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Capture Volume", 
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = AD1843_GAIN_RECLEV, .info = sgio2audio_gain_info, .get = sgio2audio_gain_get, .put = sgio2audio_gain_put, }; /* record level source control */ static struct snd_kcontrol_new sgio2audio_ctrl_recsource __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Capture Source", .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .info = sgio2audio_source_info, .get = sgio2audio_source_get, .put = sgio2audio_source_put, }; /* line mixer control */ static struct snd_kcontrol_new sgio2audio_ctrl_line __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Line Playback Volume", .index = 0, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = AD1843_GAIN_LINE, .info = sgio2audio_gain_info, .get = sgio2audio_gain_get, .put = sgio2audio_gain_put, }; /* cd mixer control */ static struct snd_kcontrol_new sgio2audio_ctrl_cd __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Line Playback Volume", .index = 1, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = AD1843_GAIN_LINE_2, .info = sgio2audio_gain_info, .get = sgio2audio_gain_get, .put = sgio2audio_gain_put, }; /* mic mixer control */ static struct snd_kcontrol_new sgio2audio_ctrl_mic __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Mic Playback Volume", .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = AD1843_GAIN_MIC, .info = sgio2audio_gain_info, .get = sgio2audio_gain_get, .put = sgio2audio_gain_put, }; static int __devinit snd_sgio2audio_new_mixer(struct snd_sgio2audio *chip) { int err; err = snd_ctl_add(chip->card, snd_ctl_new1(&sgio2audio_ctrl_pcm0, chip)); if (err < 0) return err; err = snd_ctl_add(chip->card, snd_ctl_new1(&sgio2audio_ctrl_pcm1, chip)); if (err < 0) return err; err = snd_ctl_add(chip->card, snd_ctl_new1(&sgio2audio_ctrl_reclevel, chip)); if (err < 0) return err; err = snd_ctl_add(chip->card, snd_ctl_new1(&sgio2audio_ctrl_recsource, chip)); if (err < 0) return err; err = 
snd_ctl_add(chip->card, snd_ctl_new1(&sgio2audio_ctrl_line, chip)); if (err < 0) return err; err = snd_ctl_add(chip->card, snd_ctl_new1(&sgio2audio_ctrl_cd, chip)); if (err < 0) return err; err = snd_ctl_add(chip->card, snd_ctl_new1(&sgio2audio_ctrl_mic, chip)); if (err < 0) return err; return 0; } /* low-level audio interface DMA */ /* get data out of bounce buffer, count must be a multiple of 32 */ /* returns 1 if a period has elapsed */ static int snd_sgio2audio_dma_pull_frag(struct snd_sgio2audio *chip, unsigned int ch, unsigned int count) { int ret; unsigned long src_base, src_pos, dst_mask; unsigned char *dst_base; int dst_pos; u64 *src; s16 *dst; u64 x; unsigned long flags; struct snd_pcm_runtime *runtime = chip->channel[ch].substream->runtime; spin_lock_irqsave(&chip->channel[ch].lock, flags); src_base = (unsigned long) chip->ring_base | (ch << CHANNEL_RING_SHIFT); src_pos = readq(&mace->perif.audio.chan[ch].read_ptr); dst_base = runtime->dma_area; dst_pos = chip->channel[ch].pos; dst_mask = frames_to_bytes(runtime, runtime->buffer_size) - 1; /* check if a period has elapsed */ chip->channel[ch].size += (count >> 3); /* in frames */ ret = chip->channel[ch].size >= runtime->period_size; chip->channel[ch].size %= runtime->period_size; while (count) { src = (u64 *)(src_base + src_pos); dst = (s16 *)(dst_base + dst_pos); x = *src; dst[0] = (x >> CHANNEL_LEFT_SHIFT) & 0xffff; dst[1] = (x >> CHANNEL_RIGHT_SHIFT) & 0xffff; src_pos = (src_pos + sizeof(u64)) & CHANNEL_RING_MASK; dst_pos = (dst_pos + 2 * sizeof(s16)) & dst_mask; count -= sizeof(u64); } writeq(src_pos, &mace->perif.audio.chan[ch].read_ptr); /* in bytes */ chip->channel[ch].pos = dst_pos; spin_unlock_irqrestore(&chip->channel[ch].lock, flags); return ret; } /* put some DMA data in bounce buffer, count must be a multiple of 32 */ /* returns 1 if a period has elapsed */ static int snd_sgio2audio_dma_push_frag(struct snd_sgio2audio *chip, unsigned int ch, unsigned int count) { int ret; s64 l, r; unsigned 
long dst_base, dst_pos, src_mask; unsigned char *src_base; int src_pos; u64 *dst; s16 *src; unsigned long flags; struct snd_pcm_runtime *runtime = chip->channel[ch].substream->runtime; spin_lock_irqsave(&chip->channel[ch].lock, flags); dst_base = (unsigned long)chip->ring_base | (ch << CHANNEL_RING_SHIFT); dst_pos = readq(&mace->perif.audio.chan[ch].write_ptr); src_base = runtime->dma_area; src_pos = chip->channel[ch].pos; src_mask = frames_to_bytes(runtime, runtime->buffer_size) - 1; /* check if a period has elapsed */ chip->channel[ch].size += (count >> 3); /* in frames */ ret = chip->channel[ch].size >= runtime->period_size; chip->channel[ch].size %= runtime->period_size; while (count) { src = (s16 *)(src_base + src_pos); dst = (u64 *)(dst_base + dst_pos); l = src[0]; /* sign extend */ r = src[1]; /* sign extend */ *dst = ((l & 0x00ffffff) << CHANNEL_LEFT_SHIFT) | ((r & 0x00ffffff) << CHANNEL_RIGHT_SHIFT); dst_pos = (dst_pos + sizeof(u64)) & CHANNEL_RING_MASK; src_pos = (src_pos + 2 * sizeof(s16)) & src_mask; count -= sizeof(u64); } writeq(dst_pos, &mace->perif.audio.chan[ch].write_ptr); /* in bytes */ chip->channel[ch].pos = src_pos; spin_unlock_irqrestore(&chip->channel[ch].lock, flags); return ret; } static int snd_sgio2audio_dma_start(struct snd_pcm_substream *substream) { struct snd_sgio2audio *chip = snd_pcm_substream_chip(substream); struct snd_sgio2audio_chan *chan = substream->runtime->private_data; int ch = chan->idx; /* reset DMA channel */ writeq(CHANNEL_CONTROL_RESET, &mace->perif.audio.chan[ch].control); udelay(10); writeq(0, &mace->perif.audio.chan[ch].control); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { /* push a full buffer */ snd_sgio2audio_dma_push_frag(chip, ch, CHANNEL_RING_SIZE - 32); } /* set DMA to wake on 50% empty and enable interrupt */ writeq(CHANNEL_DMA_ENABLE | CHANNEL_INT_THRESHOLD_50, &mace->perif.audio.chan[ch].control); return 0; } static int snd_sgio2audio_dma_stop(struct snd_pcm_substream *substream) { struct 
snd_sgio2audio_chan *chan = substream->runtime->private_data; writeq(0, &mace->perif.audio.chan[chan->idx].control); return 0; } static irqreturn_t snd_sgio2audio_dma_in_isr(int irq, void *dev_id) { struct snd_sgio2audio_chan *chan = dev_id; struct snd_pcm_substream *substream; struct snd_sgio2audio *chip; int count, ch; substream = chan->substream; chip = snd_pcm_substream_chip(substream); ch = chan->idx; /* empty the ring */ count = CHANNEL_RING_SIZE - readq(&mace->perif.audio.chan[ch].depth) - 32; if (snd_sgio2audio_dma_pull_frag(chip, ch, count)) snd_pcm_period_elapsed(substream); return IRQ_HANDLED; } static irqreturn_t snd_sgio2audio_dma_out_isr(int irq, void *dev_id) { struct snd_sgio2audio_chan *chan = dev_id; struct snd_pcm_substream *substream; struct snd_sgio2audio *chip; int count, ch; substream = chan->substream; chip = snd_pcm_substream_chip(substream); ch = chan->idx; /* fill the ring */ count = CHANNEL_RING_SIZE - readq(&mace->perif.audio.chan[ch].depth) - 32; if (snd_sgio2audio_dma_push_frag(chip, ch, count)) snd_pcm_period_elapsed(substream); return IRQ_HANDLED; } static irqreturn_t snd_sgio2audio_error_isr(int irq, void *dev_id) { struct snd_sgio2audio_chan *chan = dev_id; struct snd_pcm_substream *substream; substream = chan->substream; snd_sgio2audio_dma_stop(substream); snd_sgio2audio_dma_start(substream); return IRQ_HANDLED; } /* PCM part */ /* PCM hardware definition */ static struct snd_pcm_hardware snd_sgio2audio_pcm_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER), .formats = SNDRV_PCM_FMTBIT_S16_BE, .rates = SNDRV_PCM_RATE_8000_48000, .rate_min = 8000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 65536, .period_bytes_min = 32768, .period_bytes_max = 65536, .periods_min = 1, .periods_max = 1024, }; /* PCM playback open callback */ static int snd_sgio2audio_playback1_open(struct snd_pcm_substream *substream) { struct snd_sgio2audio 
*chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; runtime->hw = snd_sgio2audio_pcm_hw; runtime->private_data = &chip->channel[1]; return 0; } static int snd_sgio2audio_playback2_open(struct snd_pcm_substream *substream) { struct snd_sgio2audio *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; runtime->hw = snd_sgio2audio_pcm_hw; runtime->private_data = &chip->channel[2]; return 0; } /* PCM capture open callback */ static int snd_sgio2audio_capture_open(struct snd_pcm_substream *substream) { struct snd_sgio2audio *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; runtime->hw = snd_sgio2audio_pcm_hw; runtime->private_data = &chip->channel[0]; return 0; } /* PCM close callback */ static int snd_sgio2audio_pcm_close(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; runtime->private_data = NULL; return 0; } /* hw_params callback */ static int snd_sgio2audio_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_alloc_vmalloc_buffer(substream, params_buffer_bytes(hw_params)); } /* hw_free callback */ static int snd_sgio2audio_pcm_hw_free(struct snd_pcm_substream *substream) { return snd_pcm_lib_free_vmalloc_buffer(substream); } /* prepare callback */ static int snd_sgio2audio_pcm_prepare(struct snd_pcm_substream *substream) { struct snd_sgio2audio *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_sgio2audio_chan *chan = substream->runtime->private_data; int ch = chan->idx; unsigned long flags; spin_lock_irqsave(&chip->channel[ch].lock, flags); /* Setup the pseudo-dma transfer pointers. 
*/ chip->channel[ch].pos = 0; chip->channel[ch].size = 0; chip->channel[ch].substream = substream; /* set AD1843 format */ /* hardware format is always S16_LE */ switch (substream->stream) { case SNDRV_PCM_STREAM_PLAYBACK: ad1843_setup_dac(&chip->ad1843, ch - 1, runtime->rate, SNDRV_PCM_FORMAT_S16_LE, runtime->channels); break; case SNDRV_PCM_STREAM_CAPTURE: ad1843_setup_adc(&chip->ad1843, runtime->rate, SNDRV_PCM_FORMAT_S16_LE, runtime->channels); break; } spin_unlock_irqrestore(&chip->channel[ch].lock, flags); return 0; } /* trigger callback */ static int snd_sgio2audio_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { switch (cmd) { case SNDRV_PCM_TRIGGER_START: /* start the PCM engine */ snd_sgio2audio_dma_start(substream); break; case SNDRV_PCM_TRIGGER_STOP: /* stop the PCM engine */ snd_sgio2audio_dma_stop(substream); break; default: return -EINVAL; } return 0; } /* pointer callback */ static snd_pcm_uframes_t snd_sgio2audio_pcm_pointer(struct snd_pcm_substream *substream) { struct snd_sgio2audio *chip = snd_pcm_substream_chip(substream); struct snd_sgio2audio_chan *chan = substream->runtime->private_data; /* get the current hardware pointer */ return bytes_to_frames(substream->runtime, chip->channel[chan->idx].pos); } /* operators */ static struct snd_pcm_ops snd_sgio2audio_playback1_ops = { .open = snd_sgio2audio_playback1_open, .close = snd_sgio2audio_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_sgio2audio_pcm_hw_params, .hw_free = snd_sgio2audio_pcm_hw_free, .prepare = snd_sgio2audio_pcm_prepare, .trigger = snd_sgio2audio_pcm_trigger, .pointer = snd_sgio2audio_pcm_pointer, .page = snd_pcm_lib_get_vmalloc_page, .mmap = snd_pcm_lib_mmap_vmalloc, }; static struct snd_pcm_ops snd_sgio2audio_playback2_ops = { .open = snd_sgio2audio_playback2_open, .close = snd_sgio2audio_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_sgio2audio_pcm_hw_params, .hw_free = snd_sgio2audio_pcm_hw_free, .prepare = snd_sgio2audio_pcm_prepare, .trigger = 
snd_sgio2audio_pcm_trigger, .pointer = snd_sgio2audio_pcm_pointer, .page = snd_pcm_lib_get_vmalloc_page, .mmap = snd_pcm_lib_mmap_vmalloc, }; static struct snd_pcm_ops snd_sgio2audio_capture_ops = { .open = snd_sgio2audio_capture_open, .close = snd_sgio2audio_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_sgio2audio_pcm_hw_params, .hw_free = snd_sgio2audio_pcm_hw_free, .prepare = snd_sgio2audio_pcm_prepare, .trigger = snd_sgio2audio_pcm_trigger, .pointer = snd_sgio2audio_pcm_pointer, .page = snd_pcm_lib_get_vmalloc_page, .mmap = snd_pcm_lib_mmap_vmalloc, }; /* * definitions of capture are omitted here... */ /* create a pcm device */ static int __devinit snd_sgio2audio_new_pcm(struct snd_sgio2audio *chip) { struct snd_pcm *pcm; int err; /* create first pcm device with one outputs and one input */ err = snd_pcm_new(chip->card, "SGI O2 Audio", 0, 1, 1, &pcm); if (err < 0) return err; pcm->private_data = chip; strcpy(pcm->name, "SGI O2 DAC1"); /* set operators */ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_sgio2audio_playback1_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_sgio2audio_capture_ops); /* create second pcm device with one outputs and no input */ err = snd_pcm_new(chip->card, "SGI O2 Audio", 1, 1, 0, &pcm); if (err < 0) return err; pcm->private_data = chip; strcpy(pcm->name, "SGI O2 DAC2"); /* set operators */ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_sgio2audio_playback2_ops); return 0; } static struct { int idx; int irq; irqreturn_t (*isr)(int, void *); const char *desc; } snd_sgio2_isr_table[] = { { .idx = 0, .irq = MACEISA_AUDIO1_DMAT_IRQ, .isr = snd_sgio2audio_dma_in_isr, .desc = "Capture DMA Channel 0" }, { .idx = 0, .irq = MACEISA_AUDIO1_OF_IRQ, .isr = snd_sgio2audio_error_isr, .desc = "Capture Overflow" }, { .idx = 1, .irq = MACEISA_AUDIO2_DMAT_IRQ, .isr = snd_sgio2audio_dma_out_isr, .desc = "Playback DMA Channel 1" }, { .idx = 1, .irq = MACEISA_AUDIO2_MERR_IRQ, .isr = snd_sgio2audio_error_isr, .desc = "Memory 
Error Channel 1" }, { .idx = 2, .irq = MACEISA_AUDIO3_DMAT_IRQ, .isr = snd_sgio2audio_dma_out_isr, .desc = "Playback DMA Channel 2" }, { .idx = 2, .irq = MACEISA_AUDIO3_MERR_IRQ, .isr = snd_sgio2audio_error_isr, .desc = "Memory Error Channel 2" } }; /* ALSA driver */ static int snd_sgio2audio_free(struct snd_sgio2audio *chip) { int i; /* reset interface */ writeq(AUDIO_CONTROL_RESET, &mace->perif.audio.control); udelay(1); writeq(0, &mace->perif.audio.control); /* release IRQ's */ for (i = 0; i < ARRAY_SIZE(snd_sgio2_isr_table); i++) free_irq(snd_sgio2_isr_table[i].irq, &chip->channel[snd_sgio2_isr_table[i].idx]); dma_free_coherent(NULL, MACEISA_RINGBUFFERS_SIZE, chip->ring_base, chip->ring_base_dma); /* release card data */ kfree(chip); return 0; } static int snd_sgio2audio_dev_free(struct snd_device *device) { struct snd_sgio2audio *chip = device->device_data; return snd_sgio2audio_free(chip); } static struct snd_device_ops ops = { .dev_free = snd_sgio2audio_dev_free, }; static int __devinit snd_sgio2audio_create(struct snd_card *card, struct snd_sgio2audio **rchip) { struct snd_sgio2audio *chip; int i, err; *rchip = NULL; /* check if a codec is attached to the interface */ /* (Audio or Audio/Video board present) */ if (!(readq(&mace->perif.audio.control) & AUDIO_CONTROL_CODEC_PRESENT)) return -ENOENT; chip = kzalloc(sizeof(struct snd_sgio2audio), GFP_KERNEL); if (chip == NULL) return -ENOMEM; chip->card = card; chip->ring_base = dma_alloc_coherent(NULL, MACEISA_RINGBUFFERS_SIZE, &chip->ring_base_dma, GFP_USER); if (chip->ring_base == NULL) { printk(KERN_ERR "sgio2audio: could not allocate ring buffers\n"); kfree(chip); return -ENOMEM; } spin_lock_init(&chip->ad1843_lock); /* initialize channels */ for (i = 0; i < 3; i++) { spin_lock_init(&chip->channel[i].lock); chip->channel[i].idx = i; } /* allocate IRQs */ for (i = 0; i < ARRAY_SIZE(snd_sgio2_isr_table); i++) { if (request_irq(snd_sgio2_isr_table[i].irq, snd_sgio2_isr_table[i].isr, 0, 
snd_sgio2_isr_table[i].desc, &chip->channel[snd_sgio2_isr_table[i].idx])) { snd_sgio2audio_free(chip); printk(KERN_ERR "sgio2audio: cannot allocate irq %d\n", snd_sgio2_isr_table[i].irq); return -EBUSY; } } /* reset the interface */ writeq(AUDIO_CONTROL_RESET, &mace->perif.audio.control); udelay(1); writeq(0, &mace->perif.audio.control); msleep_interruptible(1); /* give time to recover */ /* set ring base */ writeq(chip->ring_base_dma, &mace->perif.ctrl.ringbase); /* attach the AD1843 codec */ chip->ad1843.read = read_ad1843_reg; chip->ad1843.write = write_ad1843_reg; chip->ad1843.chip = chip; /* initialize the AD1843 codec */ err = ad1843_init(&chip->ad1843); if (err < 0) { snd_sgio2audio_free(chip); return err; } err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); if (err < 0) { snd_sgio2audio_free(chip); return err; } *rchip = chip; return 0; } static int __devinit snd_sgio2audio_probe(struct platform_device *pdev) { struct snd_card *card; struct snd_sgio2audio *chip; int err; err = snd_card_create(index, id, THIS_MODULE, 0, &card); if (err < 0) return err; err = snd_sgio2audio_create(card, &chip); if (err < 0) { snd_card_free(card); return err; } snd_card_set_dev(card, &pdev->dev); err = snd_sgio2audio_new_pcm(chip); if (err < 0) { snd_card_free(card); return err; } err = snd_sgio2audio_new_mixer(chip); if (err < 0) { snd_card_free(card); return err; } strcpy(card->driver, "SGI O2 Audio"); strcpy(card->shortname, "SGI O2 Audio"); sprintf(card->longname, "%s irq %i-%i", card->shortname, MACEISA_AUDIO1_DMAT_IRQ, MACEISA_AUDIO3_MERR_IRQ); err = snd_card_register(card); if (err < 0) { snd_card_free(card); return err; } platform_set_drvdata(pdev, card); return 0; } static int __devexit snd_sgio2audio_remove(struct platform_device *pdev) { struct snd_card *card = platform_get_drvdata(pdev); snd_card_free(card); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver sgio2audio_driver = { .probe = snd_sgio2audio_probe, .remove = 
__devexit_p(snd_sgio2audio_remove), .driver = { .name = "sgio2audio", .owner = THIS_MODULE, } }; static int __init alsa_card_sgio2audio_init(void) { return platform_driver_register(&sgio2audio_driver); } static void __exit alsa_card_sgio2audio_exit(void) { platform_driver_unregister(&sgio2audio_driver); } module_init(alsa_card_sgio2audio_init) module_exit(alsa_card_sgio2audio_exit)
gpl-2.0
madhwang/linuxKernel
drivers/scsi/libsas/sas_discover.c
4023
10302
/* * Serial Attached SCSI (SAS) Discover process * * Copyright (C) 2005 Adaptec, Inc. All rights reserved. * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> * * This file is licensed under GPLv2. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/scatterlist.h> #include <linux/slab.h> #include <scsi/scsi_host.h> #include <scsi/scsi_eh.h> #include "sas_internal.h" #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_sas.h> #include "../scsi_sas_internal.h" /* ---------- Basic task processing for discovery purposes ---------- */ void sas_init_dev(struct domain_device *dev) { INIT_LIST_HEAD(&dev->siblings); INIT_LIST_HEAD(&dev->dev_list_node); switch (dev->dev_type) { case SAS_END_DEV: break; case EDGE_DEV: case FANOUT_DEV: INIT_LIST_HEAD(&dev->ex_dev.children); break; case SATA_DEV: case SATA_PM: case SATA_PM_PORT: INIT_LIST_HEAD(&dev->sata_dev.children); break; default: break; } } /* ---------- Domain device discovery ---------- */ /** * sas_get_port_device -- Discover devices which caused port creation * @port: pointer to struct sas_port of interest * * Devices directly attached to a HA port, have no parent. This is * how we know they are (domain) "root" devices. All other devices * do, and should have their "parent" pointer set appropriately as * soon as a child device is discovered. 
*/ static int sas_get_port_device(struct asd_sas_port *port) { unsigned long flags; struct asd_sas_phy *phy; struct sas_rphy *rphy; struct domain_device *dev; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; spin_lock_irqsave(&port->phy_list_lock, flags); if (list_empty(&port->phy_list)) { spin_unlock_irqrestore(&port->phy_list_lock, flags); kfree(dev); return -ENODEV; } phy = container_of(port->phy_list.next, struct asd_sas_phy, port_phy_el); spin_lock(&phy->frame_rcvd_lock); memcpy(dev->frame_rcvd, phy->frame_rcvd, min(sizeof(dev->frame_rcvd), (size_t)phy->frame_rcvd_size)); spin_unlock(&phy->frame_rcvd_lock); spin_unlock_irqrestore(&port->phy_list_lock, flags); if (dev->frame_rcvd[0] == 0x34 && port->oob_mode == SATA_OOB_MODE) { struct dev_to_host_fis *fis = (struct dev_to_host_fis *) dev->frame_rcvd; if (fis->interrupt_reason == 1 && fis->lbal == 1 && fis->byte_count_low==0x69 && fis->byte_count_high == 0x96 && (fis->device & ~0x10) == 0) dev->dev_type = SATA_PM; else dev->dev_type = SATA_DEV; dev->tproto = SAS_PROTOCOL_SATA; } else { struct sas_identify_frame *id = (struct sas_identify_frame *) dev->frame_rcvd; dev->dev_type = id->dev_type; dev->iproto = id->initiator_bits; dev->tproto = id->target_bits; } sas_init_dev(dev); switch (dev->dev_type) { case SAS_END_DEV: case SATA_DEV: rphy = sas_end_device_alloc(port->port); break; case EDGE_DEV: rphy = sas_expander_alloc(port->port, SAS_EDGE_EXPANDER_DEVICE); break; case FANOUT_DEV: rphy = sas_expander_alloc(port->port, SAS_FANOUT_EXPANDER_DEVICE); break; default: printk("ERROR: Unidentified device type %d\n", dev->dev_type); rphy = NULL; break; } if (!rphy) { kfree(dev); return -ENODEV; } rphy->identify.phy_identifier = phy->phy->identify.phy_identifier; memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE); sas_fill_in_rphy(dev, rphy); sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr); port->port_dev = dev; dev->port = port; dev->linkrate = port->linkrate; dev->min_linkrate = 
port->linkrate; dev->max_linkrate = port->linkrate; dev->pathways = port->num_phys; memset(port->disc.fanout_sas_addr, 0, SAS_ADDR_SIZE); memset(port->disc.eeds_a, 0, SAS_ADDR_SIZE); memset(port->disc.eeds_b, 0, SAS_ADDR_SIZE); port->disc.max_level = 0; dev->rphy = rphy; spin_lock_irq(&port->dev_list_lock); list_add_tail(&dev->dev_list_node, &port->dev_list); spin_unlock_irq(&port->dev_list_lock); return 0; } /* ---------- Discover and Revalidate ---------- */ int sas_notify_lldd_dev_found(struct domain_device *dev) { int res = 0; struct sas_ha_struct *sas_ha = dev->port->ha; struct Scsi_Host *shost = sas_ha->core.shost; struct sas_internal *i = to_sas_internal(shost->transportt); if (i->dft->lldd_dev_found) { res = i->dft->lldd_dev_found(dev); if (res) { printk("sas: driver on pcidev %s cannot handle " "device %llx, error:%d\n", dev_name(sas_ha->dev), SAS_ADDR(dev->sas_addr), res); } } return res; } void sas_notify_lldd_dev_gone(struct domain_device *dev) { struct sas_ha_struct *sas_ha = dev->port->ha; struct Scsi_Host *shost = sas_ha->core.shost; struct sas_internal *i = to_sas_internal(shost->transportt); if (i->dft->lldd_dev_gone) i->dft->lldd_dev_gone(dev); } /* ---------- Common/dispatchers ---------- */ /** * sas_discover_end_dev -- discover an end device (SSP, etc) * @end: pointer to domain device of interest * * See comment in sas_discover_sata(). 
*/ int sas_discover_end_dev(struct domain_device *dev) { int res; res = sas_notify_lldd_dev_found(dev); if (res) goto out_err2; res = sas_rphy_add(dev->rphy); if (res) goto out_err; return 0; out_err: sas_notify_lldd_dev_gone(dev); out_err2: return res; } /* ---------- Device registration and unregistration ---------- */ static inline void sas_unregister_common_dev(struct domain_device *dev) { sas_notify_lldd_dev_gone(dev); if (!dev->parent) dev->port->port_dev = NULL; else list_del_init(&dev->siblings); list_del_init(&dev->dev_list_node); } void sas_unregister_dev(struct domain_device *dev) { if (dev->rphy) { sas_remove_children(&dev->rphy->dev); sas_rphy_delete(dev->rphy); dev->rphy = NULL; } if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) { /* remove the phys and ports, everything else should be gone */ kfree(dev->ex_dev.ex_phy); dev->ex_dev.ex_phy = NULL; } sas_unregister_common_dev(dev); } void sas_unregister_domain_devices(struct asd_sas_port *port) { struct domain_device *dev, *n; list_for_each_entry_safe_reverse(dev,n,&port->dev_list,dev_list_node) sas_unregister_dev(dev); port->port->rphy = NULL; } /* ---------- Discovery and Revalidation ---------- */ /** * sas_discover_domain -- discover the domain * @port: port to the domain of interest * * NOTE: this process _must_ quit (return) as soon as any connection * errors are encountered. Connection recovery is done elsewhere. * Discover process only interrogates devices in order to discover the * domain. 
*/ static void sas_discover_domain(struct work_struct *work) { struct domain_device *dev; int error = 0; struct sas_discovery_event *ev = container_of(work, struct sas_discovery_event, work); struct asd_sas_port *port = ev->port; sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock, &port->disc.pending); if (port->port_dev) return; error = sas_get_port_device(port); if (error) return; dev = port->port_dev; SAS_DPRINTK("DOING DISCOVERY on port %d, pid:%d\n", port->id, task_pid_nr(current)); switch (dev->dev_type) { case SAS_END_DEV: error = sas_discover_end_dev(dev); break; case EDGE_DEV: case FANOUT_DEV: error = sas_discover_root_expander(dev); break; case SATA_DEV: case SATA_PM: #ifdef CONFIG_SCSI_SAS_ATA error = sas_discover_sata(dev); break; #else SAS_DPRINTK("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n"); /* Fall through */ #endif default: error = -ENXIO; SAS_DPRINTK("unhandled device %d\n", dev->dev_type); break; } if (error) { sas_rphy_free(dev->rphy); dev->rphy = NULL; spin_lock_irq(&port->dev_list_lock); list_del_init(&dev->dev_list_node); spin_unlock_irq(&port->dev_list_lock); kfree(dev); /* not kobject_register-ed yet */ port->port_dev = NULL; } SAS_DPRINTK("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id, task_pid_nr(current), error); } static void sas_revalidate_domain(struct work_struct *work) { int res = 0; struct sas_discovery_event *ev = container_of(work, struct sas_discovery_event, work); struct asd_sas_port *port = ev->port; sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock, &port->disc.pending); SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, task_pid_nr(current)); if (port->port_dev) res = sas_ex_revalidate_domain(port->port_dev); SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", port->id, task_pid_nr(current), res); } /* ---------- Events ---------- */ int sas_discover_event(struct asd_sas_port *port, enum discover_event ev) { struct 
sas_discovery *disc; if (!port) return 0; disc = &port->disc; BUG_ON(ev >= DISC_NUM_EVENTS); sas_queue_event(ev, &disc->disc_event_lock, &disc->pending, &disc->disc_work[ev].work, port->ha); return 0; } /** * sas_init_disc -- initialize the discovery struct in the port * @port: pointer to struct port * * Called when the ports are being initialized. */ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port) { int i; static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = { [DISCE_DISCOVER_DOMAIN] = sas_discover_domain, [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain, }; spin_lock_init(&disc->disc_event_lock); disc->pending = 0; for (i = 0; i < DISC_NUM_EVENTS; i++) { INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]); disc->disc_work[i].port = port; } }
gpl-2.0
matyushov/vs311
drivers/ide/opti621.c
4791
4568
/*
 * Copyright (C) 1996-1998 Linus Torvalds & authors (see below)
 */

/*
 * Authors:
 * Jaromir Koutek <miri@punknet.cz>,
 * Jan Harkes <jaharkes@cwi.nl>,
 * Mark Lord <mlord@pobox.com>
 * Some parts of code are from ali14xx.c and from rz1000.c.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/ide.h>

#include <asm/io.h>

#define DRV_NAME "opti621"

#define READ_REG 0	/* index of Read cycle timing register */
#define WRITE_REG 1	/* index of Write cycle timing register */
#define CNTRL_REG 3	/* index of Control register */
#define STRAP_REG 5	/* index of Strap register */
#define MISC_REG 6	/* index of Miscellaneous register */

/*
 * I/O base of the currently programmed interface; set by
 * opti621_set_pio_mode() under opti621_lock before any register access.
 */
static int reg_base;

/* Serializes the multi-step register access sequences below. */
static DEFINE_SPINLOCK(opti621_lock);

/* Write value to register reg, base of register
 * is at reg_base (0x1f0 primary, 0x170 secondary,
 * if not changed by PCI configuration).
 * This is from setupvic.exe program.
 */
static void write_reg(u8 value, int reg)
{
	/* Two dummy reads followed by the 0x03/0x83 writes form the
	 * chip's register-unlock/lock handshake (from setupvic.exe). */
	inw(reg_base + 1);
	inw(reg_base + 1);
	outb(3, reg_base + 2);
	outb(value, reg_base + reg);
	outb(0x83, reg_base + 2);
}

/* Read value from register reg, base of register
 * is at reg_base (0x1f0 primary, 0x170 secondary,
 * if not changed by PCI configuration).
 * This is from setupvic.exe program.
 */
static u8 read_reg(int reg)
{
	u8 ret = 0;

	/* Same unlock/read/lock sequence as write_reg(). */
	inw(reg_base + 1);
	inw(reg_base + 1);
	outb(3, reg_base + 2);
	ret = inb(reg_base + reg);
	outb(0x83, reg_base + 2);

	return ret;
}

/*
 * Program PIO cycle timings for @drive on @hwif.
 *
 * Address-setup timing is shared between the two drives on a channel, so
 * if the pair drive runs a slower PIO mode, its (slower) address timing is
 * used for both (addr_pio is lowered to the pair's mode).
 */
static void opti621_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	ide_drive_t *pair = ide_get_pair_dev(drive);
	unsigned long flags;
	unsigned long mode = drive->pio_mode, pair_mode;
	const u8 pio = mode - XFER_PIO_0;
	u8 tim, misc, addr_pio = pio, clk;

	/* DRDY is default 2 (by OPTi Databook) */
	static const u8 addr_timings[2][5] = {
		{ 0x20, 0x10, 0x00, 0x00, 0x00 },	/* 33 MHz */
		{ 0x10, 0x10, 0x00, 0x00, 0x00 },	/* 25 MHz */
	};
	static const u8 data_rec_timings[2][5] = {
		{ 0x5b, 0x45, 0x32, 0x21, 0x20 },	/* 33 MHz */
		{ 0x48, 0x34, 0x21, 0x10, 0x10 }	/* 25 MHz */
	};

	/* Remember this drive's mode so the pair logic above can see it. */
	ide_set_drivedata(drive, (void *)mode);

	if (pair) {
		pair_mode = (unsigned long)ide_get_drivedata(pair);

		if (pair_mode && pair_mode < mode)
			addr_pio = pair_mode - XFER_PIO_0;
	}

	spin_lock_irqsave(&opti621_lock, flags);

	reg_base = hwif->io_ports.data_addr;

	/* allow Register-B */
	outb(0xc0, reg_base + CNTRL_REG);
	/* hmm, setupvic.exe does this ;-) */
	outb(0xff, reg_base + 5);
	/* if reads 0xff, adapter not exist? */
	(void)inb(reg_base + CNTRL_REG);
	/* if reads 0xc0, no interface exist? */
	read_reg(CNTRL_REG);

	/* check CLK speed */
	clk = read_reg(STRAP_REG) & 1;

	printk(KERN_INFO "%s: CLK = %d MHz\n", hwif->name, clk ? 25 : 33);

	tim = data_rec_timings[clk][pio];
	misc = addr_timings[clk][addr_pio];

	/* select Index-0/1 for Register-A/B */
	write_reg(drive->dn & 1, MISC_REG);
	/* set read cycle timings */
	write_reg(tim, READ_REG);
	/* set write cycle timings */
	write_reg(tim, WRITE_REG);

	/* use Register-A for drive 0 */
	/* use Register-B for drive 1 */
	write_reg(0x85, CNTRL_REG);

	/* set address setup, DRDY timings, */
	/* and read prefetch for both drives */
	write_reg(misc, MISC_REG);

	spin_unlock_irqrestore(&opti621_lock, flags);
}

static const struct ide_port_ops opti621_port_ops = {
	.set_pio_mode		= opti621_set_pio_mode,
};

static const struct ide_port_info opti621_chipset = {
	.name		= DRV_NAME,
	.enablebits	= { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
	.port_ops	= &opti621_port_ops,
	.host_flags	= IDE_HFLAG_NO_DMA,
	.pio_mask	= ATA_PIO4,
};

/* PCI probe: register one OPTi 621/825 IDE host. */
static int opti621_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	return ide_pci_init_one(dev, &opti621_chipset, NULL);
}

static const struct pci_device_id opti621_pci_tbl[] = {
	{ PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C621), 0 },
	{ PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C825), 0 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, opti621_pci_tbl);

static struct pci_driver opti621_pci_driver = {
	.name		= "Opti621_IDE",
	.id_table	= opti621_pci_tbl,
	.probe		= opti621_init_one,
	.remove		= ide_pci_remove,
	.suspend	= ide_pci_suspend,
	.resume		= ide_pci_resume,
};

static int __init opti621_ide_init(void)
{
	return ide_pci_register_driver(&opti621_pci_driver);
}

static void __exit opti621_ide_exit(void)
{
	pci_unregister_driver(&opti621_pci_driver);
}

module_init(opti621_ide_init);
module_exit(opti621_ide_exit);

MODULE_AUTHOR("Jaromir Koutek, Jan Harkes, Mark Lord");
MODULE_DESCRIPTION("PCI driver module for Opti621 IDE");
MODULE_LICENSE("GPL");
gpl-2.0
TheMeddlingMonk/android_kernel_toshiba_tostab03
arch/cris/arch-v32/mach-a3/dma.c
4791
4404
/* Wrapper for DMA channel allocator that starts clocks etc */ #include <linux/kernel.h> #include <linux/spinlock.h> #include <mach/dma.h> #include <hwregs/reg_map.h> #include <hwregs/reg_rdwr.h> #include <hwregs/marb_defs.h> #include <hwregs/clkgen_defs.h> #include <hwregs/strmux_defs.h> #include <linux/errno.h> #include <asm/system.h> #include <arbiter.h> static char used_dma_channels[MAX_DMA_CHANNELS]; static const char *used_dma_channels_users[MAX_DMA_CHANNELS]; static DEFINE_SPINLOCK(dma_lock); int crisv32_request_dma(unsigned int dmanr, const char *device_id, unsigned options, unsigned int bandwidth, enum dma_owner owner) { unsigned long flags; reg_clkgen_rw_clk_ctrl clk_ctrl; reg_strmux_rw_cfg strmux_cfg; if (crisv32_arbiter_allocate_bandwidth(dmanr, options & DMA_INT_MEM ? INT_REGION : EXT_REGION, bandwidth)) return -ENOMEM; spin_lock_irqsave(&dma_lock, flags); if (used_dma_channels[dmanr]) { spin_unlock_irqrestore(&dma_lock, flags); if (options & DMA_VERBOSE_ON_ERROR) printk(KERN_ERR "Failed to request DMA %i for %s, " "already allocated by %s\n", dmanr, device_id, used_dma_channels_users[dmanr]); if (options & DMA_PANIC_ON_ERROR) panic("request_dma error!"); spin_unlock_irqrestore(&dma_lock, flags); return -EBUSY; } clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl); strmux_cfg = REG_RD(strmux, regi_strmux, rw_cfg); switch (dmanr) { case 0: case 1: clk_ctrl.dma0_1_eth = 1; break; case 2: case 3: clk_ctrl.dma2_3_strcop = 1; break; case 4: case 5: clk_ctrl.dma4_5_iop = 1; break; case 6: case 7: clk_ctrl.sser_ser_dma6_7 = 1; break; case 9: case 11: clk_ctrl.dma9_11 = 1; break; #if MAX_DMA_CHANNELS-1 != 11 #error Check dma.c #endif default: spin_unlock_irqrestore(&dma_lock, flags); if (options & DMA_VERBOSE_ON_ERROR) printk(KERN_ERR "Failed to request DMA %i for %s, " "only 0-%i valid)\n", dmanr, device_id, MAX_DMA_CHANNELS-1); if (options & DMA_PANIC_ON_ERROR) panic("request_dma error!"); return -EINVAL; } switch (owner) { case dma_eth: if (dmanr == 0) 
strmux_cfg.dma0 = regk_strmux_eth; else if (dmanr == 1) strmux_cfg.dma1 = regk_strmux_eth; else panic("Invalid DMA channel for eth\n"); break; case dma_ser0: if (dmanr == 0) strmux_cfg.dma0 = regk_strmux_ser0; else if (dmanr == 1) strmux_cfg.dma1 = regk_strmux_ser0; else panic("Invalid DMA channel for ser0\n"); break; case dma_ser3: if (dmanr == 2) strmux_cfg.dma2 = regk_strmux_ser3; else if (dmanr == 3) strmux_cfg.dma3 = regk_strmux_ser3; else panic("Invalid DMA channel for ser3\n"); break; case dma_strp: if (dmanr == 2) strmux_cfg.dma2 = regk_strmux_strcop; else if (dmanr == 3) strmux_cfg.dma3 = regk_strmux_strcop; else panic("Invalid DMA channel for strp\n"); break; case dma_ser1: if (dmanr == 4) strmux_cfg.dma4 = regk_strmux_ser1; else if (dmanr == 5) strmux_cfg.dma5 = regk_strmux_ser1; else panic("Invalid DMA channel for ser1\n"); break; case dma_iop: if (dmanr == 4) strmux_cfg.dma4 = regk_strmux_iop; else if (dmanr == 5) strmux_cfg.dma5 = regk_strmux_iop; else panic("Invalid DMA channel for iop\n"); break; case dma_ser2: if (dmanr == 6) strmux_cfg.dma6 = regk_strmux_ser2; else if (dmanr == 7) strmux_cfg.dma7 = regk_strmux_ser2; else panic("Invalid DMA channel for ser2\n"); break; case dma_sser: if (dmanr == 6) strmux_cfg.dma6 = regk_strmux_sser; else if (dmanr == 7) strmux_cfg.dma7 = regk_strmux_sser; else panic("Invalid DMA channel for sser\n"); break; case dma_ser4: if (dmanr == 9) strmux_cfg.dma9 = regk_strmux_ser4; else panic("Invalid DMA channel for ser4\n"); break; case dma_jpeg: if (dmanr == 9) strmux_cfg.dma9 = regk_strmux_jpeg; else panic("Invalid DMA channel for JPEG\n"); break; case dma_h264: if (dmanr == 11) strmux_cfg.dma11 = regk_strmux_h264; else panic("Invalid DMA channel for H264\n"); break; } used_dma_channels[dmanr] = 1; used_dma_channels_users[dmanr] = device_id; REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl); REG_WR(strmux, regi_strmux, rw_cfg, strmux_cfg); spin_unlock_irqrestore(&dma_lock, flags); return 0; } void 
crisv32_free_dma(unsigned int dmanr) { spin_lock(&dma_lock); used_dma_channels[dmanr] = 0; spin_unlock(&dma_lock); }
gpl-2.0
VanirAOSP/kernel_samsung_jf
arch/powerpc/platforms/8xx/mpc885ads_setup.c
6839
6887
/*
 * Platform setup for the Freescale mpc885ads board
 *
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Copyright 2005 MontaVista Software Inc.
 *
 * Heavily modified by Scott Wood <scottwood@freescale.com>
 * Copyright 2007 Freescale Semiconductor, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/delay.h>

#include <linux/fs_enet_pd.h>
#include <linux/fs_uart_pd.h>
#include <linux/fsl_devices.h>
#include <linux/mii.h>
#include <linux/of_platform.h>

#include <asm/delay.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/time.h>
#include <asm/mpc8xx.h>
#include <asm/8xx_immap.h>
#include <asm/cpm1.h>
#include <asm/fs_pd.h>
#include <asm/udbg.h>

#include "mpc885ads.h"
#include "mpc8xx.h"

/* Board control/status registers, mapped in mpc885ads_setup_arch(). */
static u32 __iomem *bcsr, *bcsr5;

#ifdef CONFIG_PCMCIA_M8XX
/* Enable/disable power to the PCMCIA slot (BCSR1 bit is active low). */
static void pcmcia_hw_setup(int slot, int enable)
{
	if (enable)
		clrbits32(&bcsr[1], BCSR1_PCCEN);
	else
		setbits32(&bcsr[1], BCSR1_PCCEN);
}

/*
 * Set PCMCIA Vcc/Vpp, values in tenths of a volt (33 = 3.3V, 50 = 5V,
 * 120 = 12V).  Returns 0 on success, 1 for an unsupported combination.
 */
static int pcmcia_set_voltage(int slot, int vcc, int vpp)
{
	u32 reg = 0;

	switch (vcc) {
	case 0:
		break;
	case 33:
		reg |= BCSR1_PCCVCC0;
		break;
	case 50:
		reg |= BCSR1_PCCVCC1;
		break;
	default:
		return 1;
	}

	switch (vpp) {
	case 0:
		break;
	case 33:
	case 50:
		if (vcc == vpp)
			reg |= BCSR1_PCCVPP1;
		else
			return 1;
		break;
	case 120:
		if ((vcc == 33) || (vcc == 50))
			reg |= BCSR1_PCCVPP0;
		else
			return 1;
		/*
		 * Bug fix: this case used to fall through to the default
		 * and return 1, so a valid 12V Vpp request always failed
		 * and the reg setting above was dead.
		 */
		break;
	default:
		return 1;
	}

	/* first, turn off all power */
	clrbits32(&bcsr[1], 0x00610000);

	/* enable new powersettings */
	setbits32(&bcsr[1], reg);

	return 0;
}
#endif

struct cpm_pin {
	int port, pin, flags;
};

/* CPM pin mux table: serial, ethernet, MII and I2C hook-ups. */
static struct cpm_pin mpc885ads_pins[] = {
	/* SMC1 */
	{CPM_PORTB, 24, CPM_PIN_INPUT}, /* RX */
	{CPM_PORTB, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */

	/* SMC2 */
#ifndef CONFIG_MPC8xx_SECOND_ETH_FEC2
	{CPM_PORTE, 21, CPM_PIN_INPUT}, /* RX */
	{CPM_PORTE, 20, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */
#endif

	/* SCC3 */
	{CPM_PORTA, 9, CPM_PIN_INPUT}, /* RX */
	{CPM_PORTA, 8, CPM_PIN_INPUT}, /* TX */
	{CPM_PORTC, 4, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO}, /* RENA */
	{CPM_PORTC, 5, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO}, /* CLSN */
	{CPM_PORTE, 27, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TENA */
	{CPM_PORTE, 17, CPM_PIN_INPUT}, /* CLK5 */
	{CPM_PORTE, 16, CPM_PIN_INPUT}, /* CLK6 */

	/* MII1 */
	{CPM_PORTA, 0, CPM_PIN_INPUT},
	{CPM_PORTA, 1, CPM_PIN_INPUT},
	{CPM_PORTA, 2, CPM_PIN_INPUT},
	{CPM_PORTA, 3, CPM_PIN_INPUT},
	{CPM_PORTA, 4, CPM_PIN_OUTPUT},
	{CPM_PORTA, 10, CPM_PIN_OUTPUT},
	{CPM_PORTA, 11, CPM_PIN_OUTPUT},
	{CPM_PORTB, 19, CPM_PIN_INPUT},
	{CPM_PORTB, 31, CPM_PIN_INPUT},
	{CPM_PORTC, 12, CPM_PIN_INPUT},
	{CPM_PORTC, 13, CPM_PIN_INPUT},
	{CPM_PORTE, 30, CPM_PIN_OUTPUT},
	{CPM_PORTE, 31, CPM_PIN_OUTPUT},

	/* MII2 */
#ifdef CONFIG_MPC8xx_SECOND_ETH_FEC2
	{CPM_PORTE, 14, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{CPM_PORTE, 15, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{CPM_PORTE, 16, CPM_PIN_OUTPUT},
	{CPM_PORTE, 17, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{CPM_PORTE, 18, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{CPM_PORTE, 19, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{CPM_PORTE, 20, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{CPM_PORTE, 21, CPM_PIN_OUTPUT},
	{CPM_PORTE, 22, CPM_PIN_OUTPUT},
	{CPM_PORTE, 23, CPM_PIN_OUTPUT},
	{CPM_PORTE, 24, CPM_PIN_OUTPUT},
	{CPM_PORTE, 25, CPM_PIN_OUTPUT},
	{CPM_PORTE, 26, CPM_PIN_OUTPUT},
	{CPM_PORTE, 27, CPM_PIN_OUTPUT},
	{CPM_PORTE, 28, CPM_PIN_OUTPUT},
	{CPM_PORTE, 29, CPM_PIN_OUTPUT},
#endif

	/* I2C */
	{CPM_PORTB, 26, CPM_PIN_INPUT | CPM_PIN_OPENDRAIN},
	{CPM_PORTB, 27, CPM_PIN_INPUT | CPM_PIN_OPENDRAIN},
};

/* Apply the pin mux table and CPM clock routing. */
static void __init init_ioports(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mpc885ads_pins); i++) {
		struct cpm_pin *pin = &mpc885ads_pins[i];
		cpm1_set_pin(pin->port, pin->pin, pin->flags);
	}

	cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_RTX);
	cpm1_clk_setup(CPM_CLK_SMC2, CPM_BRG2, CPM_CLK_RTX);
	cpm1_clk_setup(CPM_CLK_SCC3, CPM_CLK5, CPM_CLK_TX);
	cpm1_clk_setup(CPM_CLK_SCC3, CPM_CLK6, CPM_CLK_RX);

	/* Set FEC1 and FEC2 to MII mode */
	clrbits32(&mpc8xx_immr->im_cpm.cp_cptr, 0x00000180);
}

/*
 * Board setup: reset the CPM, program pins, map BCSR and bring the
 * on-board PHYs/transceivers out of reset.
 */
static void __init mpc885ads_setup_arch(void)
{
	struct device_node *np;

	cpm_reset();
	init_ioports();

	np = of_find_compatible_node(NULL, NULL, "fsl,mpc885ads-bcsr");
	if (!np) {
		printk(KERN_CRIT "Could not find fsl,mpc885ads-bcsr node\n");
		return;
	}

	bcsr = of_iomap(np, 0);
	bcsr5 = of_iomap(np, 1);
	of_node_put(np);

	if (!bcsr || !bcsr5) {
		printk(KERN_CRIT "Could not remap BCSR\n");
		return;
	}

	clrbits32(&bcsr[1], BCSR1_RS232EN_1);
#ifdef CONFIG_MPC8xx_SECOND_ETH_FEC2
	setbits32(&bcsr[1], BCSR1_RS232EN_2);
#else
	clrbits32(&bcsr[1], BCSR1_RS232EN_2);
#endif

	clrbits32(bcsr5, BCSR5_MII1_EN);
	setbits32(bcsr5, BCSR5_MII1_RST);
	udelay(1000);
	clrbits32(bcsr5, BCSR5_MII1_RST);

#ifdef CONFIG_MPC8xx_SECOND_ETH_FEC2
	clrbits32(bcsr5, BCSR5_MII2_EN);
	setbits32(bcsr5, BCSR5_MII2_RST);
	udelay(1000);
	clrbits32(bcsr5, BCSR5_MII2_RST);
#else
	setbits32(bcsr5, BCSR5_MII2_EN);
#endif

#ifdef CONFIG_MPC8xx_SECOND_ETH_SCC3
	clrbits32(&bcsr[4], BCSR4_ETH10_RST);
	udelay(1000);
	setbits32(&bcsr[4], BCSR4_ETH10_RST);

	setbits32(&bcsr[1], BCSR1_ETHEN);

	np = of_find_node_by_path("/soc@ff000000/cpm@9c0/serial@a80");
#else
	np = of_find_node_by_path("/soc@ff000000/cpm@9c0/ethernet@a40");
#endif

	/* The SCC3 enet registers overlap the SMC1 registers, so
	 * one of the two must be removed from the device tree.
	 */
	if (np) {
		of_detach_node(np);
		of_node_put(np);
	}

#ifdef CONFIG_PCMCIA_M8XX
	/* Set up board specific hook-ups.*/
	m8xx_pcmcia_ops.hw_ctrl = pcmcia_hw_setup;
	m8xx_pcmcia_ops.voltage_set = pcmcia_set_voltage;
#endif
}

static int __init mpc885ads_probe(void)
{
	unsigned long root = of_get_flat_dt_root();
	return of_flat_dt_is_compatible(root, "fsl,mpc885ads");
}

static struct of_device_id __initdata of_bus_ids[] = {
	{ .name = "soc", },
	{ .name = "cpm", },
	{ .name = "localbus", },
	{},
};

static int __init declare_of_platform_devices(void)
{
	/* Publish the QE devices */
	of_platform_bus_probe(NULL, of_bus_ids, NULL);

	return 0;
}
machine_device_initcall(mpc885_ads, declare_of_platform_devices);

define_machine(mpc885_ads) {
	.name			= "Freescale MPC885 ADS",
	.probe			= mpc885ads_probe,
	.setup_arch		= mpc885ads_setup_arch,
	.init_IRQ		= mpc8xx_pics_init,
	.get_irq		= mpc8xx_get_irq,
	.restart		= mpc8xx_restart,
	.calibrate_decr		= mpc8xx_calibrate_decr,
	.set_rtc_time		= mpc8xx_set_rtc_time,
	.get_rtc_time		= mpc8xx_get_rtc_time,
	.progress		= udbg_progress,
};
gpl-2.0
StelixROM/android_kernel_google_msm
sound/oss/pas2_pcm.c
8375
9582
/*
 * pas2_pcm.c Audio routines for PAS16
 *
 *
 * Copyright (C) by Hannu Savolainen 1993-1997
 *
 * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
 * Version 2 (June 1991). See the "COPYING" file distributed with this software
 * for more info.
 *
 *
 * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
 * Alan Cox : Swatted a double allocation of device bug. Made a few
 * more things module options.
 * Bartlomiej Zolnierkiewicz : Added __init to pas_pcm_init()
 */

#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/timex.h>
#include "sound_config.h"

#include "pas2.h"

#ifndef DEB
#define DEB(WHAT)
#endif

#define PAS_PCM_INTRBITS (0x08)
/*
 * Sample buffer timer interrupt enable
 */

#define PCM_NON 0
#define PCM_DAC 1
#define PCM_ADC 2

static unsigned long pcm_speed;		/* sampling rate */
static unsigned char pcm_channels = 1;	/* channels (1 or 2) */
static unsigned char pcm_bits = 8;	/* bits/sample (8 or 16) */
static unsigned char pcm_filter;	/* filter FLAG */
static unsigned char pcm_mode = PCM_NON;
static unsigned long pcm_count;
static unsigned short pcm_bitsok = 8;	/* mask of OK bits */
static int pcm_busy;
int pas_audiodev = -1;
static int open_mode;

extern spinlock_t pas_lock;

/*
 * Program the sample-rate timer; clamps to 5000..44100 Hz, quantizes to
 * the nearest achievable PIT divisor, and returns the rate actually set.
 */
static int pcm_set_speed(int arg)
{
	int foo, tmp;
	unsigned long flags;

	if (arg == 0)
		return pcm_speed;

	if (arg > 44100)
		arg = 44100;
	if (arg < 5000)
		arg = 5000;

	/* In stereo the timer runs at half rate per channel. */
	if (pcm_channels & 2) {
		foo = ((PIT_TICK_RATE / 2) + (arg / 2)) / arg;
		arg = ((PIT_TICK_RATE / 2) + (foo / 2)) / foo;
	} else {
		foo = (PIT_TICK_RATE + (arg / 2)) / arg;
		arg = (PIT_TICK_RATE + (foo / 2)) / foo;
	}

	pcm_speed = arg;

	tmp = pas_read(0x0B8A);

	/*
	 * Set anti-aliasing filters according to sample rate. You really *NEED*
	 * to enable this feature for all normal recording unless you want to
	 * experiment with aliasing effects.
	 * These filters apply to the selected "recording" source.
	 * I (pfw) don't know the encoding of these 5 bits. The values shown
	 * come from the SDK found on ftp.uwp.edu:/pub/msdos/proaudio/.
	 *
	 * I cleared bit 5 of these values, since that bit controls the master
	 * mute flag. (Olav Wölfelschneider)
	 *
	 */
#if !defined NO_AUTO_FILTER_SET
	tmp &= 0xe0;
	if (pcm_speed >= 2 * 17897)
		tmp |= 0x01;
	else if (pcm_speed >= 2 * 15909)
		tmp |= 0x02;
	else if (pcm_speed >= 2 * 11931)
		tmp |= 0x09;
	else if (pcm_speed >= 2 * 8948)
		tmp |= 0x11;
	else if (pcm_speed >= 2 * 5965)
		tmp |= 0x19;
	else if (pcm_speed >= 2 * 2982)
		tmp |= 0x04;
	pcm_filter = tmp;
#endif

	spin_lock_irqsave(&pas_lock, flags);

	pas_write(tmp & ~(0x40 | 0x80), 0x0B8A);
	pas_write(0x00 | 0x30 | 0x04, 0x138B);
	pas_write(foo & 0xff, 0x1388);
	pas_write((foo >> 8) & 0xff, 0x1388);
	pas_write(tmp, 0x0B8A);

	spin_unlock_irqrestore(&pas_lock, flags);

	return pcm_speed;
}

/* Set 1 (mono) or 2 (stereo) channels; invalid values are ignored. */
static int pcm_set_channels(int arg)
{
	if ((arg != 1) && (arg != 2))
		return pcm_channels;

	if (arg != pcm_channels) {
		pas_write(pas_read(0xF8A) ^ 0x20, 0xF8A);

		pcm_channels = arg;
		pcm_set_speed(pcm_speed);	/* The speed must be reinitialized */
	}
	return pcm_channels;
}

/* Set sample width (8 or 16, gated by pcm_bitsok); returns current width. */
static int pcm_set_bits(int arg)
{
	if (arg == 0)
		return pcm_bits;

	if ((arg & pcm_bitsok) != arg)
		return pcm_bits;

	if (arg != pcm_bits) {
		pas_write(pas_read(0x8389) ^ 0x04, 0x8389);

		pcm_bits = arg;
	}
	return pcm_bits;
}

/* OSS ioctl handler for rate/channels/format queries and changes. */
static int pas_audio_ioctl(int dev, unsigned int cmd, void __user *arg)
{
	int val, ret;
	int __user *p = arg;

	DEB(printk("pas2_pcm.c: static int pas_audio_ioctl(unsigned int cmd = %X, unsigned int arg = %X)\n", cmd, arg));

	switch (cmd) {
	case SOUND_PCM_WRITE_RATE:
		if (get_user(val, p))
			return -EFAULT;
		ret = pcm_set_speed(val);
		break;

	case SOUND_PCM_READ_RATE:
		ret = pcm_speed;
		break;

	case SNDCTL_DSP_STEREO:
		if (get_user(val, p))
			return -EFAULT;
		ret = pcm_set_channels(val + 1) - 1;
		break;

	case SOUND_PCM_WRITE_CHANNELS:
		if (get_user(val, p))
			return -EFAULT;
		ret = pcm_set_channels(val);
		break;

	case SOUND_PCM_READ_CHANNELS:
		ret = pcm_channels;
		break;

	case SNDCTL_DSP_SETFMT:
		if (get_user(val, p))
			return -EFAULT;
		ret = pcm_set_bits(val);
		break;

	case SOUND_PCM_READ_BITS:
		ret = pcm_bits;
		break;

	default:
		return -EINVAL;
	}
	return put_user(ret, p);
}

/* Stop any running PCM transfer. */
static void pas_audio_reset(int dev)
{
	DEB(printk("pas2_pcm.c: static void pas_audio_reset(void)\n"));

	pas_write(pas_read(0xF8A) & ~0x40, 0xF8A);	/* Disable PCM */
}

/* Claim the (single) PCM engine and enable its timer interrupt. */
static int pas_audio_open(int dev, int mode)
{
	int err;
	unsigned long flags;

	DEB(printk("pas2_pcm.c: static int pas_audio_open(int mode = %X)\n", mode));

	spin_lock_irqsave(&pas_lock, flags);
	if (pcm_busy) {
		spin_unlock_irqrestore(&pas_lock, flags);
		return -EBUSY;
	}
	pcm_busy = 1;
	spin_unlock_irqrestore(&pas_lock, flags);

	if ((err = pas_set_intr(PAS_PCM_INTRBITS)) < 0) {
		/*
		 * Bug fix: release the busy flag on failure, otherwise a
		 * failed open left the device permanently unopenable.
		 */
		pcm_busy = 0;
		return err;
	}

	pcm_count = 0;
	open_mode = mode;

	return 0;
}

/* Stop the transfer, disable the timer interrupt and release the device. */
static void pas_audio_close(int dev)
{
	unsigned long flags;

	DEB(printk("pas2_pcm.c: static void pas_audio_close(void)\n"));

	spin_lock_irqsave(&pas_lock, flags);

	pas_audio_reset(dev);
	pas_remove_intr(PAS_PCM_INTRBITS);
	pcm_mode = PCM_NON;

	pcm_busy = 0;
	spin_unlock_irqrestore(&pas_lock, flags);
}

/* Kick off (or re-arm) a DMA playback block of @count bytes. */
static void pas_audio_output_block(int dev, unsigned long buf, int count,
				   int intrflag)
{
	unsigned long flags, cnt;

	DEB(printk("pas2_pcm.c: static void pas_audio_output_block(char *buf = %P, int count = %X)\n", buf, count));

	cnt = count;
	if (audio_devs[dev]->dmap_out->dma > 3)
		cnt >>= 1;	/* 16-bit DMA channels count in words */

	if (audio_devs[dev]->flags & DMA_AUTOMODE &&
	    intrflag &&
	    cnt == pcm_count)
		return;		/* auto-mode DMA keeps running; nothing to do */

	spin_lock_irqsave(&pas_lock, flags);

	pas_write(pas_read(0xF8A) & ~0x40, 0xF8A);

	/* DMAbuf_start_dma (dev, buf, count, DMA_MODE_WRITE); */

	if (audio_devs[dev]->dmap_out->dma > 3)
		count >>= 1;

	if (count != pcm_count) {
		pas_write(pas_read(0x0B8A) & ~0x80, 0x0B8A);
		pas_write(0x40 | 0x30 | 0x04, 0x138B);
		pas_write(count & 0xff, 0x1389);
		pas_write((count >> 8) & 0xff, 0x1389);
		pas_write(pas_read(0x0B8A) | 0x80, 0x0B8A);

		pcm_count = count;
	}
	pas_write(pas_read(0x0B8A) | 0x80 | 0x40, 0x0B8A);
#ifdef NO_TRIGGER
	pas_write(pas_read(0xF8A) | 0x40 | 0x10, 0xF8A);
#endif

	pcm_mode = PCM_DAC;

	spin_unlock_irqrestore(&pas_lock, flags);
}

/* Kick off (or re-arm) a DMA capture block of @count bytes. */
static void pas_audio_start_input(int dev, unsigned long buf, int count,
				  int intrflag)
{
	unsigned long flags;
	int cnt;

	DEB(printk("pas2_pcm.c: static void pas_audio_start_input(char *buf = %P, int count = %X)\n", buf, count));

	cnt = count;
	if (audio_devs[dev]->dmap_out->dma > 3)
		cnt >>= 1;

	/* NOTE(review): uses audio_devs[pas_audiodev] here while the output
	 * path uses audio_devs[dev]; both should be the same device, but
	 * confirm before unifying. */
	if (audio_devs[pas_audiodev]->flags & DMA_AUTOMODE &&
	    intrflag &&
	    cnt == pcm_count)
		return;

	spin_lock_irqsave(&pas_lock, flags);

	/* DMAbuf_start_dma (dev, buf, count, DMA_MODE_READ); */

	if (audio_devs[dev]->dmap_out->dma > 3)
		count >>= 1;

	if (count != pcm_count) {
		pas_write(pas_read(0x0B8A) & ~0x80, 0x0B8A);
		pas_write(0x40 | 0x30 | 0x04, 0x138B);
		pas_write(count & 0xff, 0x1389);
		pas_write((count >> 8) & 0xff, 0x1389);
		pas_write(pas_read(0x0B8A) | 0x80, 0x0B8A);

		pcm_count = count;
	}
	pas_write(pas_read(0x0B8A) | 0x80 | 0x40, 0x0B8A);
#ifdef NO_TRIGGER
	pas_write((pas_read(0xF8A) | 0x40) & ~0x10, 0xF8A);
#endif

	pcm_mode = PCM_ADC;

	spin_unlock_irqrestore(&pas_lock, flags);
}

#ifndef NO_TRIGGER
/* Start/stop playback or capture according to @state (masked by open_mode). */
static void pas_audio_trigger(int dev, int state)
{
	unsigned long flags;

	spin_lock_irqsave(&pas_lock, flags);
	state &= open_mode;

	if (state & PCM_ENABLE_OUTPUT)
		pas_write(pas_read(0xF8A) | 0x40 | 0x10, 0xF8A);
	else if (state & PCM_ENABLE_INPUT)
		pas_write((pas_read(0xF8A) | 0x40) & ~0x10, 0xF8A);
	else
		pas_write(pas_read(0xF8A) & ~0x40, 0xF8A);

	spin_unlock_irqrestore(&pas_lock, flags);
}
#endif

static int pas_audio_prepare_for_input(int dev, int bsize, int bcount)
{
	pas_audio_reset(dev);
	return 0;
}

static int pas_audio_prepare_for_output(int dev, int bsize, int bcount)
{
	pas_audio_reset(dev);
	return 0;
}

static struct audio_driver pas_audio_driver = {
	.owner			= THIS_MODULE,
	.open			= pas_audio_open,
	.close			= pas_audio_close,
	.output_block		= pas_audio_output_block,
	.start_input		= pas_audio_start_input,
	.ioctl			= pas_audio_ioctl,
	.prepare_for_input	= pas_audio_prepare_for_input,
	.prepare_for_output	= pas_audio_prepare_for_output,
	.halt_io		= pas_audio_reset,
	.trigger		= pas_audio_trigger
};

/* Probe supported sample widths and register the audio device. */
void __init pas_pcm_init(struct address_info *hw_config)
{
	DEB(printk("pas2_pcm.c: long pas_pcm_init()\n"));

	pcm_bitsok = 8;
	if (pas_read(0xEF8B) & 0x08)
		pcm_bitsok |= 16;

	pcm_set_speed(DSP_DEFAULT_SPEED);

	if ((pas_audiodev = sound_install_audiodrv(AUDIO_DRIVER_VERSION,
					"Pro Audio Spectrum",
					&pas_audio_driver,
					sizeof(struct audio_driver),
					DMA_AUTOMODE,
					AFMT_U8 | AFMT_S16_LE,
					NULL,
					hw_config->dma,
					hw_config->dma)) < 0)
		printk(KERN_WARNING "PAS16: Too many PCM devices available\n");
}

/* Timer-interrupt handler: completes the current DAC/ADC DMA block. */
void pas_pcm_interrupt(unsigned char status, int cause)
{
	if (cause == 1) {
		/*
		 * Halt the PCM first. Otherwise we don't have time to start a new
		 * block before the PCM chip proceeds to the next sample
		 */
		if (!(audio_devs[pas_audiodev]->flags & DMA_AUTOMODE))
			pas_write(pas_read(0xF8A) & ~0x40, 0xF8A);

		switch (pcm_mode) {
		case PCM_DAC:
			DMAbuf_outputintr(pas_audiodev, 1);
			break;

		case PCM_ADC:
			DMAbuf_inputintr(pas_audiodev);
			break;

		default:
			printk(KERN_WARNING "PAS: Unexpected PCM interrupt\n");
		}
	}
}
gpl-2.0
uberlaggydarwin/useless
arch/arm/mm/copypage-v4mc.c
9655
3481
/*
 * linux/arch/arm/lib/copypage-armv4mc.S
 *
 * Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

/* Serializes use of the single COPYPAGE_MINICACHE mapping slot. */
static DEFINE_RAW_SPINLOCK(minicache_lock);

/*
 * ARMv4 mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_highpage that does the right thing.
 */
static void __naked
mc_copy_user_page(void *from, void *to)
{
	/*
	 * Copies one page, 64 bytes per loop iteration (4 ldm/stm pairs of
	 * 16 bytes); each destination line is invalidated (mcr c7,c6,1)
	 * just before being overwritten — see the comment above.
	 */
	asm volatile(
	"stmfd	sp!, {r4, lr}			@ 2\n\
	mov	r4, %2				@ 1\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
1:	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4+1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r4, r4, #1			@ 1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmneia	%0!, {r2, r3, ip, lr}		@ 4\n\
	bne	1b				@ 1\n\
	ldmfd	sp!, {r4, pc}			@ 3"
	:
	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
}

/*
 * Copy a user page: the source page is temporarily mapped at
 * COPYPAGE_MINICACHE with mini-cache attributes so the copy does not
 * pollute the main D-cache.  The source is flushed first if it still
 * has dirty cache state (PG_dcache_clean not yet set).
 */
void v4_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to);

	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	raw_spin_lock(&minicache_lock);

	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	raw_spin_unlock(&minicache_lock);

	kunmap_atomic(kto);
}

/*
 * ARMv4 optimised clear_user_page
 */
void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
	/* Zero the page 32 bytes at a time, invalidating each destination
	 * D line before writing so the zeros go via the write buffer. */
	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr);
}

struct cpu_user_fns v4_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= v4_mc_copy_user_highpage,
};
gpl-2.0
jderrick/linux-block
arch/arm/mm/copypage-v4mc.c
9655
3481
/*
 * linux/arch/arm/lib/copypage-armv4mc.S
 *
 * Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

/* Serializes use of the single COPYPAGE_MINICACHE mapping slot. */
static DEFINE_RAW_SPINLOCK(minicache_lock);

/*
 * ARMv4 mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_highpage that does the right thing.
 */
static void __naked
mc_copy_user_page(void *from, void *to)
{
	/*
	 * Copies one page, 64 bytes per loop iteration (4 ldm/stm pairs of
	 * 16 bytes); each destination line is invalidated (mcr c7,c6,1)
	 * just before being overwritten — see the comment above.
	 */
	asm volatile(
	"stmfd	sp!, {r4, lr}			@ 2\n\
	mov	r4, %2				@ 1\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
1:	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4+1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r4, r4, #1			@ 1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmneia	%0!, {r2, r3, ip, lr}		@ 4\n\
	bne	1b				@ 1\n\
	ldmfd	sp!, {r4, pc}			@ 3"
	:
	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
}

/*
 * Copy a user page: the source page is temporarily mapped at
 * COPYPAGE_MINICACHE with mini-cache attributes so the copy does not
 * pollute the main D-cache.  The source is flushed first if it still
 * has dirty cache state (PG_dcache_clean not yet set).
 */
void v4_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to);

	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	raw_spin_lock(&minicache_lock);

	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	raw_spin_unlock(&minicache_lock);

	kunmap_atomic(kto);
}

/*
 * ARMv4 optimised clear_user_page
 */
void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
	/* Zero the page 32 bytes at a time, invalidating each destination
	 * D line before writing so the zeros go via the write buffer. */
	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr);
}

struct cpu_user_fns v4_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= v4_mc_copy_user_highpage,
};
gpl-2.0
tejaswanjari/SMR_FS-EXT4
kernel/drivers/net/wireless/brcm80211/brcmsmac/rate.c
10935
16697
/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <brcmu_wifi.h>
#include <brcmu_utils.h>

#include "d11.h"
#include "pub.h"
#include "rate.h"

/*
 * Rate info per rate: It tells whether a rate is ofdm or not and its phy_rate
 * value.  Indexed by the MAC rate (500 kbps units); a zero entry marks an
 * unsupported rate.  (Bit layout of the entries is defined by rate.h /
 * is_ofdm_rate() -- presumably the top bit flags OFDM; confirm there.)
 */
const u8 rate_info[BRCM_MAXRATE + 1] = {
	/*  0     1     2     3     4     5     6     7     8     9 */
/*   0 */ 0x00, 0x00, 0x0a, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00,
/*  10 */ 0x00, 0x37, 0x8b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x00,
/*  20 */ 0x00, 0x00, 0x6e, 0x00, 0x8a, 0x00, 0x00, 0x00, 0x00, 0x00,
/*  30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x00, 0x00, 0x00,
/*  40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x00,
/*  50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/*  60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/*  70 */ 0x00, 0x00, 0x8d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/*  80 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/*  90 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00,
/* 100 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8c
};

/*
 * Per-MCS information (rates are in units of Kbps):
 * {20 MHz rate, 40 MHz rate, 20 MHz SGI rate, 40 MHz SGI rate,
 *  tx phy control byte 3, equivalent legacy rate}.
 * SGI rates are the long-GI rate scaled by 10/9 (GI 800ns -> 400ns).
 * MCS 0-7: 1 spatial stream, 8-15: 2, 16-23: 3, 24-31: 4; MCS 32 is the
 * 40 MHz duplicate BPSK rate.
 */
const struct brcms_mcs_info mcs_table[MCS_TABLE_SIZE] = {
	/* MCS 0: SS 1, MOD: BPSK, CR 1/2 */
	{6500, 13500, CEIL(6500 * 10, 9), CEIL(13500 * 10, 9), 0x00,
	 BRCM_RATE_6M},
	/* MCS 1: SS 1, MOD: QPSK, CR 1/2 */
	{13000, 27000, CEIL(13000 * 10, 9), CEIL(27000 * 10, 9), 0x08,
	 BRCM_RATE_12M},
	/* MCS 2: SS 1, MOD: QPSK, CR 3/4 */
	{19500, 40500, CEIL(19500 * 10, 9), CEIL(40500 * 10, 9), 0x0A,
	 BRCM_RATE_18M},
	/* MCS 3: SS 1, MOD: 16QAM, CR 1/2 */
	{26000, 54000, CEIL(26000 * 10, 9), CEIL(54000 * 10, 9), 0x10,
	 BRCM_RATE_24M},
	/* MCS 4: SS 1, MOD: 16QAM, CR 3/4 */
	{39000, 81000, CEIL(39000 * 10, 9), CEIL(81000 * 10, 9), 0x12,
	 BRCM_RATE_36M},
	/* MCS 5: SS 1, MOD: 64QAM, CR 2/3 */
	{52000, 108000, CEIL(52000 * 10, 9), CEIL(108000 * 10, 9), 0x19,
	 BRCM_RATE_48M},
	/* MCS 6: SS 1, MOD: 64QAM, CR 3/4 */
	{58500, 121500, CEIL(58500 * 10, 9), CEIL(121500 * 10, 9), 0x1A,
	 BRCM_RATE_54M},
	/* MCS 7: SS 1, MOD: 64QAM, CR 5/6 */
	{65000, 135000, CEIL(65000 * 10, 9), CEIL(135000 * 10, 9), 0x1C,
	 BRCM_RATE_54M},
	/* MCS 8: SS 2, MOD: BPSK, CR 1/2 */
	{13000, 27000, CEIL(13000 * 10, 9), CEIL(27000 * 10, 9), 0x40,
	 BRCM_RATE_6M},
	/* MCS 9: SS 2, MOD: QPSK, CR 1/2 */
	{26000, 54000, CEIL(26000 * 10, 9), CEIL(54000 * 10, 9), 0x48,
	 BRCM_RATE_12M},
	/* MCS 10: SS 2, MOD: QPSK, CR 3/4 */
	{39000, 81000, CEIL(39000 * 10, 9), CEIL(81000 * 10, 9), 0x4A,
	 BRCM_RATE_18M},
	/* MCS 11: SS 2, MOD: 16QAM, CR 1/2 */
	{52000, 108000, CEIL(52000 * 10, 9), CEIL(108000 * 10, 9), 0x50,
	 BRCM_RATE_24M},
	/* MCS 12: SS 2, MOD: 16QAM, CR 3/4 */
	{78000, 162000, CEIL(78000 * 10, 9), CEIL(162000 * 10, 9), 0x52,
	 BRCM_RATE_36M},
	/* MCS 13: SS 2, MOD: 64QAM, CR 2/3 */
	{104000, 216000, CEIL(104000 * 10, 9), CEIL(216000 * 10, 9), 0x59,
	 BRCM_RATE_48M},
	/* MCS 14: SS 2, MOD: 64QAM, CR 3/4 */
	{117000, 243000, CEIL(117000 * 10, 9), CEIL(243000 * 10, 9), 0x5A,
	 BRCM_RATE_54M},
	/* MCS 15: SS 2, MOD: 64QAM, CR 5/6 */
	{130000, 270000, CEIL(130000 * 10, 9), CEIL(270000 * 10, 9), 0x5C,
	 BRCM_RATE_54M},
	/* MCS 16: SS 3, MOD: BPSK, CR 1/2 */
	{19500, 40500, CEIL(19500 * 10, 9), CEIL(40500 * 10, 9), 0x80,
	 BRCM_RATE_6M},
	/* MCS 17: SS 3, MOD: QPSK, CR 1/2 */
	{39000, 81000, CEIL(39000 * 10, 9), CEIL(81000 * 10, 9), 0x88,
	 BRCM_RATE_12M},
	/* MCS 18: SS 3, MOD: QPSK, CR 3/4 */
	{58500, 121500, CEIL(58500 * 10, 9), CEIL(121500 * 10, 9), 0x8A,
	 BRCM_RATE_18M},
	/* MCS 19: SS 3, MOD: 16QAM, CR 1/2 */
	{78000, 162000, CEIL(78000 * 10, 9), CEIL(162000 * 10, 9), 0x90,
	 BRCM_RATE_24M},
	/* MCS 20: SS 3, MOD: 16QAM, CR 3/4 */
	{117000, 243000, CEIL(117000 * 10, 9), CEIL(243000 * 10, 9), 0x92,
	 BRCM_RATE_36M},
	/* MCS 21: SS 3, MOD: 64QAM, CR 2/3 */
	{156000, 324000, CEIL(156000 * 10, 9), CEIL(324000 * 10, 9), 0x99,
	 BRCM_RATE_48M},
	/* MCS 22: SS 3, MOD: 64QAM, CR 3/4 */
	{175500, 364500, CEIL(175500 * 10, 9), CEIL(364500 * 10, 9), 0x9A,
	 BRCM_RATE_54M},
	/* MCS 23: SS 3, MOD: 64QAM, CR 5/6 */
	{195000, 405000, CEIL(195000 * 10, 9), CEIL(405000 * 10, 9), 0x9B,
	 BRCM_RATE_54M},
	/* MCS 24: SS 4, MOD: BPSK, CR 1/2 */
	{26000, 54000, CEIL(26000 * 10, 9), CEIL(54000 * 10, 9), 0xC0,
	 BRCM_RATE_6M},
	/* MCS 25: SS 4, MOD: QPSK, CR 1/2 */
	{52000, 108000, CEIL(52000 * 10, 9), CEIL(108000 * 10, 9), 0xC8,
	 BRCM_RATE_12M},
	/* MCS 26: SS 4, MOD: QPSK, CR 3/4 */
	{78000, 162000, CEIL(78000 * 10, 9), CEIL(162000 * 10, 9), 0xCA,
	 BRCM_RATE_18M},
	/* MCS 27: SS 4, MOD: 16QAM, CR 1/2 */
	{104000, 216000, CEIL(104000 * 10, 9), CEIL(216000 * 10, 9), 0xD0,
	 BRCM_RATE_24M},
	/* MCS 28: SS 4, MOD: 16QAM, CR 3/4 */
	{156000, 324000, CEIL(156000 * 10, 9), CEIL(324000 * 10, 9), 0xD2,
	 BRCM_RATE_36M},
	/* MCS 29: SS 4, MOD: 64QAM, CR 2/3 */
	{208000, 432000, CEIL(208000 * 10, 9), CEIL(432000 * 10, 9), 0xD9,
	 BRCM_RATE_48M},
	/* MCS 30: SS 4, MOD: 64QAM, CR 3/4 */
	{234000, 486000, CEIL(234000 * 10, 9), CEIL(486000 * 10, 9), 0xDA,
	 BRCM_RATE_54M},
	/* MCS 31: SS 4, MOD: 64QAM, CR 5/6 */
	{260000, 540000, CEIL(260000 * 10, 9), CEIL(540000 * 10, 9), 0xDB,
	 BRCM_RATE_54M},
	/* MCS 32: SS 1, MOD: BPSK, CR 1/2 */
	{0, 6000, 0, CEIL(6000 * 10, 9), 0x00, BRCM_RATE_6M},
};

/*
 * phycfg for legacy OFDM frames: code rate, modulation scheme, spatial streams
 * Number of spatial streams: always 1 other fields: refer to table 78 of
 * section 17.3.2.2 of the original .11a standard
 */
struct legacy_phycfg {
	u32 rate_ofdm;	/* ofdm mac rate */
	/* phy ctl byte 3, code rate, modulation type, # of streams */
	u8 tx_phy_ctl3;
};

/* Number of legacy_rate_cfg entries in the table */
#define LEGACY_PHYCFG_TABLE_SIZE	12

/*
 * In CCK mode LPPHY overloads OFDM Modulation bits with CCK Data Rate
 * Eventually MIMOPHY would also be converted to this format
 * 0 = 1Mbps; 1 = 2Mbps; 2 = 5.5Mbps; 3 = 11Mbps
 */
static const struct
legacy_phycfg legacy_phycfg_table[LEGACY_PHYCFG_TABLE_SIZE] = {
	{BRCM_RATE_1M, 0x00},	/* CCK  1Mbps,  data rate  0 */
	{BRCM_RATE_2M, 0x08},	/* CCK  2Mbps,  data rate  1 */
	{BRCM_RATE_5M5, 0x10},	/* CCK  5.5Mbps,  data rate  2 */
	{BRCM_RATE_11M, 0x18},	/* CCK  11Mbps,  data rate   3 */
	/* OFDM  6Mbps, code rate 1/2, BPSK,   1 spatial stream */
	{BRCM_RATE_6M, 0x00},
	/* OFDM  9Mbps, code rate 3/4, BPSK,   1 spatial stream */
	{BRCM_RATE_9M, 0x02},
	/* OFDM  12Mbps, code rate 1/2, QPSK,   1 spatial stream */
	{BRCM_RATE_12M, 0x08},
	/* OFDM  18Mbps, code rate 3/4, QPSK,   1 spatial stream */
	{BRCM_RATE_18M, 0x0A},
	/* OFDM  24Mbps, code rate 1/2, 16-QAM, 1 spatial stream */
	{BRCM_RATE_24M, 0x10},
	/* OFDM  36Mbps, code rate 3/4, 16-QAM, 1 spatial stream */
	{BRCM_RATE_36M, 0x12},
	/* OFDM  48Mbps, code rate 2/3, 64-QAM, 1 spatial stream */
	{BRCM_RATE_48M, 0x19},
	/* OFDM  54Mbps, code rate 3/4, 64-QAM, 1 spatial stream */
	{BRCM_RATE_54M, 0x1A},
};

/* Hardware rates (also encodes default basic rates) */

const struct brcms_c_rateset cck_ofdm_mimo_rates = {
	12,
	/* 1b,   2b,   5.5b, 6,    9,    11b,  12,   18,   24,   36,   48, */
	{ 0x82, 0x84, 0x8b, 0x0c, 0x12, 0x96, 0x18, 0x24, 0x30, 0x48, 0x60,
	/* 54 Mbps */
	  0x6c},
	0x00,
	{ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	  0x00, 0x00, 0x00, 0x00, 0x00}
};

const struct brcms_c_rateset ofdm_mimo_rates = {
	8,
	/* 6b,   9,    12b,  18,   24b,  36,   48,   54 Mbps */
	{ 0x8c, 0x12, 0x98, 0x24, 0xb0, 0x48, 0x60, 0x6c},
	0x00,
	{ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	  0x00, 0x00, 0x00, 0x00, 0x00}
};

/* Default ratesets that include MCS32 for 40BW channels */
static const struct brcms_c_rateset cck_ofdm_40bw_mimo_rates = {
	12,
	/* 1b,   2b,   5.5b, 6,    9,    11b,  12,   18,   24,   36,   48 */
	{ 0x82, 0x84, 0x8b, 0x0c, 0x12, 0x96, 0x18, 0x24, 0x30, 0x48, 0x60,
	/* 54 Mbps */
	  0x6c},
	0x00,
	{ 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	  0x00, 0x00, 0x00, 0x00, 0x00}
};

static const struct brcms_c_rateset ofdm_40bw_mimo_rates = {
	8,
	/* 6b,   9,    12b,  18,   24b,  36,   48,   54 Mbps */
	{ 0x8c, 0x12, 0x98, 0x24, 0xb0, 0x48, 0x60, 0x6c},
	0x00,
	{ 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	  0x00, 0x00, 0x00, 0x00, 0x00}
};

const struct brcms_c_rateset cck_ofdm_rates = {
	12,
	/* 1b,   2b,   5.5b, 6,    9,    11b,  12,   18,   24,   36,   48,*/
	{ 0x82, 0x84, 0x8b, 0x0c, 0x12, 0x96, 0x18, 0x24, 0x30, 0x48, 0x60,
	/*54 Mbps */
	  0x6c},
	0x00,
	{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	  0x00, 0x00, 0x00, 0x00, 0x00}
};

const struct brcms_c_rateset gphy_legacy_rates = {
	4,
	/* 1b,   2b,   5.5b, 11b Mbps */
	{ 0x82, 0x84, 0x8b, 0x96},
	0x00,
	{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	  0x00, 0x00, 0x00, 0x00, 0x00}
};

const struct brcms_c_rateset ofdm_rates = {
	8,
	/* 6b,   9,    12b,  18,   24b,  36,   48,   54 Mbps */
	{ 0x8c, 0x12, 0x98, 0x24, 0xb0, 0x48, 0x60, 0x6c},
	0x00,
	{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	  0x00, 0x00, 0x00, 0x00, 0x00}
};

const struct brcms_c_rateset cck_rates = {
	4,
	/* 1b,   2b,   5.5,  11 Mbps */
	{ 0x82, 0x84, 0x0b, 0x16},
	0x00,
	{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	  0x00, 0x00, 0x00, 0x00, 0x00}
};

/* check if rateset is valid.
 * if check_brate is true, rateset without a basic rate is considered NOT valid.
 */
static bool brcms_c_rateset_valid(struct brcms_c_rateset *rs, bool check_brate)
{
	uint idx;

	if (!rs->count)
		return false;

	if (!check_brate)
		return true;

	/* error if no basic rates */
	for (idx = 0; idx < rs->count; idx++) {
		if (rs->rates[idx] & BRCMS_RATE_FLAG)
			return true;
	}
	return false;
}

/*
 * Zero the MCS bitmap bytes for spatial streams beyond txstreams.
 * (Each byte of the bitmap appears to cover one stream's 8 MCS indices,
 * matching mcs_table's SS1=0-7, SS2=8-15 layout -- confirm in rate.h.)
 */
void brcms_c_rateset_mcs_upd(struct brcms_c_rateset *rs, u8 txstreams)
{
	int i;
	for (i = txstreams; i < MAX_STREAMS_SUPPORTED; i++)
		rs->mcs[i] = 0;
}

/*
 * filter based on hardware rateset, and sort filtered rateset with basic
 * bit(s) preserved, and check if resulting rateset is valid.
 */
bool
brcms_c_rate_hwrs_filter_sort_validate(struct brcms_c_rateset *rs,
				       const struct brcms_c_rateset *hw_rs,
				       bool check_brate, u8 txstreams)
{
	u8 rateset[BRCM_MAXRATE + 1];	/* scratch map: rate -> rate+basic flag */
	u8 r;
	uint count;
	uint i;

	memset(rateset, 0, sizeof(rateset));
	count = rs->count;

	for (i = 0; i < count; i++) {
		/* mask off "basic rate" bit, BRCMS_RATE_FLAG */
		r = (int)rs->rates[i] & BRCMS_RATE_MASK;
		if ((r > BRCM_MAXRATE) || (rate_info[r] == 0))
			continue;
		rateset[r] = rs->rates[i];	/* preserve basic bit! */
	}

	/* fill out the rates in order, looking at only supported rates */
	count = 0;
	for (i = 0; i < hw_rs->count; i++) {
		r = hw_rs->rates[i] & BRCMS_RATE_MASK;
		if (rateset[r])
			rs->rates[count++] = rateset[r];
	}

	rs->count = count;

	/* only set the mcs rate bit if the equivalent hw mcs bit is set */
	for (i = 0; i < MCSSET_LEN; i++)
		rs->mcs[i] = (rs->mcs[i] & hw_rs->mcs[i]);

	/* NOTE(review): could simply "return brcms_c_rateset_valid(...)". */
	if (brcms_c_rateset_valid(rs, check_brate))
		return true;
	else
		return false;
}

/* calculate the rate of a rx'd frame and return it as a ratespec */
u32 brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp)
{
	int phy_type;
	u32 rspec = PHY_TXC1_BW_20MHZ << RSPEC_BW_SHIFT;

	phy_type =
	    ((rxh->RxChan & RXS_CHAN_PHYTYPE_MASK) >> RXS_CHAN_PHYTYPE_SHIFT);

	if ((phy_type == PHY_TYPE_N) || (phy_type == PHY_TYPE_SSN) ||
	    (phy_type == PHY_TYPE_LCN) || (phy_type == PHY_TYPE_HT)) {
		/* frame-type field of the rx status selects the decode path */
		switch (rxh->PhyRxStatus_0 & PRXS0_FT_MASK) {
		case PRXS0_CCK:
			rspec =
			    cck_phy2mac_rate(
				((struct cck_phy_hdr *) plcp)->signal);
			break;
		case PRXS0_OFDM:
			rspec =
			    ofdm_phy2mac_rate(
				((struct ofdm_phy_hdr *) plcp)->rlpt[0]);
			break;
		case PRXS0_PREN:
			rspec = (plcp[0] & MIMO_PLCP_MCS_MASK) | RSPEC_MIMORATE;
			if (plcp[0] & MIMO_PLCP_40MHZ) {
				/* indicate rspec is for 40 MHz mode */
				rspec &= ~RSPEC_BW_MASK;
				rspec |= (PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT);
			}
			break;
		case PRXS0_STDN:
			/* fallthru */
		default:
			/* not supported, error condition */
			break;
		}
		if (plcp3_issgi(plcp[3]))
			rspec |= RSPEC_SHORT_GI;
	} else if ((phy_type == PHY_TYPE_A) ||
		   (rxh->PhyRxStatus_0 & PRXS0_OFDM))
		rspec = ofdm_phy2mac_rate(((struct ofdm_phy_hdr *) plcp)->rlpt[0]);
	else
		rspec = cck_phy2mac_rate(((struct cck_phy_hdr *) plcp)->signal);

	return rspec;
}

/* copy rateset src to dst as-is (no masking or sorting) */
void brcms_c_rateset_copy(const struct brcms_c_rateset *src,
			  struct brcms_c_rateset *dst)
{
	memcpy(dst, src, sizeof(struct brcms_c_rateset));
}

/*
 * Copy and selectively filter one rateset to another.
 * 'basic_only' means only copy basic rates.
 * 'rates' indicates cck (11b) and ofdm rates combinations.
 *    - 0: cck and ofdm
 *    - 1: cck only
 *    - 2: ofdm only
 * 'xmask' is the copy mask (typically 0x7f or 0xff).
 */
void
brcms_c_rateset_filter(struct brcms_c_rateset *src, struct brcms_c_rateset *dst,
		       bool basic_only, u8 rates, uint xmask, bool mcsallow)
{
	uint i;
	uint r;
	uint count;

	count = 0;
	for (i = 0; i < src->count; i++) {
		r = src->rates[i];
		if (basic_only && !(r & BRCMS_RATE_FLAG))
			continue;
		if (rates == BRCMS_RATES_CCK &&
		    is_ofdm_rate((r & BRCMS_RATE_MASK)))
			continue;
		if (rates == BRCMS_RATES_OFDM &&
		    is_cck_rate((r & BRCMS_RATE_MASK)))
			continue;
		dst->rates[count++] = r & xmask;
	}
	dst->count = count;
	dst->htphy_membership = src->htphy_membership;

	/* MCS set only carried over when HT rates are allowed */
	if (mcsallow && rates != BRCMS_RATES_CCK)
		memcpy(&dst->mcs[0], &src->mcs[0], MCSSET_LEN);
	else
		brcms_c_rateset_mcs_clear(dst);
}

/* select rateset for a given phy_type and bandtype and filter it, sort it
 * and fill rs_tgt with result
 */
void
brcms_c_rateset_default(struct brcms_c_rateset *rs_tgt,
			const struct brcms_c_rateset *rs_hw,
			uint phy_type, int bandtype, bool cck_only,
			uint rate_mask, bool mcsallow, u8 bw, u8 txstreams)
{
	const struct brcms_c_rateset *rs_dflt;
	struct brcms_c_rateset rs_sel;

	/* pick the hardware default set for the PHY/band/bandwidth combo */
	if ((PHYTYPE_IS(phy_type, PHY_TYPE_HT)) ||
	    (PHYTYPE_IS(phy_type, PHY_TYPE_N)) ||
	    (PHYTYPE_IS(phy_type, PHY_TYPE_LCN)) ||
	    (PHYTYPE_IS(phy_type, PHY_TYPE_SSN))) {
		if (bandtype == BRCM_BAND_5G)
			rs_dflt = (bw == BRCMS_20_MHZ ?
				   &ofdm_mimo_rates : &ofdm_40bw_mimo_rates);
		else
			rs_dflt = (bw == BRCMS_20_MHZ ?
				   &cck_ofdm_mimo_rates :
				   &cck_ofdm_40bw_mimo_rates);
	} else if (PHYTYPE_IS(phy_type, PHY_TYPE_LP)) {
		rs_dflt = (bandtype == BRCM_BAND_5G) ?
			  &ofdm_rates : &cck_ofdm_rates;
	} else if (PHYTYPE_IS(phy_type, PHY_TYPE_A)) {
		rs_dflt = &ofdm_rates;
	} else if (PHYTYPE_IS(phy_type, PHY_TYPE_G)) {
		rs_dflt = &cck_ofdm_rates;
	} else {
		/* should not happen, error condition */
		rs_dflt = &cck_rates;	/* force cck */
	}

	/* if hw rateset is not supplied, assign selected rateset to it */
	if (!rs_hw)
		rs_hw = rs_dflt;

	brcms_c_rateset_copy(rs_dflt, &rs_sel);
	brcms_c_rateset_mcs_upd(&rs_sel, txstreams);
	brcms_c_rateset_filter(&rs_sel, rs_tgt, false,
			       cck_only ? BRCMS_RATES_CCK : BRCMS_RATES_CCK_OFDM,
			       rate_mask, mcsallow);
	brcms_c_rate_hwrs_filter_sort_validate(rs_tgt, rs_hw, false,
					       mcsallow ? txstreams : 1);
}

/*
 * Look up the legacy tx phy control byte 3 for an OFDM/CCK rate;
 * returns -1 if the rate is not in legacy_phycfg_table.
 */
s16 brcms_c_rate_legacy_phyctl(uint rate)
{
	uint i;
	for (i = 0; i < LEGACY_PHYCFG_TABLE_SIZE; i++)
		if (rate == legacy_phycfg_table[i].rate_ofdm)
			return legacy_phycfg_table[i].tx_phy_ctl3;

	return -1;
}

/* Clear the whole MCS bitmap of a rateset. */
void brcms_c_rateset_mcs_clear(struct brcms_c_rateset *rateset)
{
	uint i;
	for (i = 0; i < MCSSET_LEN; i++)
		rateset->mcs[i] = 0;
}

/* Fill the MCS bitmap from the hardware default, limited to txstreams. */
void brcms_c_rateset_mcs_build(struct brcms_c_rateset *rateset, u8 txstreams)
{
	memcpy(&rateset->mcs[0], &cck_ofdm_mimo_rates.mcs[0], MCSSET_LEN);
	brcms_c_rateset_mcs_upd(rateset, txstreams);
}

/* Based on bandwidth passed, allow/disallow MCS 32 in the rateset */
void brcms_c_rateset_bw_mcs_filter(struct brcms_c_rateset *rateset, u8 bw)
{
	if (bw == BRCMS_40_MHZ)
		setbit(rateset->mcs, 32);
	else
		clrbit(rateset->mcs, 32);
}
gpl-2.0
TenchiMasaki/android_kernel_asus_moorefield
fs/lockd/xdr4.c
12727
7513
/*
 * linux/fs/lockd/xdr4.c
 *
 * XDR support for lockd and the lock client.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 * Copyright (C) 1999, Trond Myklebust <trond.myklebust@fys.uio.no>
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/nfs.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/stats.h>
#include <linux/lockd/lockd.h>

#define NLMDBG_FACILITY		NLMDBG_XDR

/* Widen a signed 64-bit wire offset to loff_t (no range check needed). */
static inline loff_t
s64_to_loff_t(__s64 offset)
{
	return (loff_t)offset;
}

/* Clamp a loff_t into the NLMv4 wire range [-NLM4_OFFSET_MAX, NLM4_OFFSET_MAX]. */
static inline s64
loff_t_to_s64(loff_t offset)
{
	s64 res;

	if (offset > NLM4_OFFSET_MAX)
		res = NLM4_OFFSET_MAX;
	else if (offset < -NLM4_OFFSET_MAX)
		res = -NLM4_OFFSET_MAX;
	else
		res = offset;
	return res;
}

/*
 * XDR functions for basic NLM types
 */

/*
 * Decode an NLM cookie (opaque, at most NLM_MAXCOOKIELEN bytes).
 * Returns the advanced buffer position, or NULL on an oversized cookie.
 * A zero-length cookie is normalised to four zero bytes.
 */
static __be32 *
nlm4_decode_cookie(__be32 *p, struct nlm_cookie *c)
{
	unsigned int	len;

	len = ntohl(*p++);

	if (len == 0) {
		c->len = 4;
		memset(c->data, 0, 4);	/* hockeypux brain damage */
	} else if (len <= NLM_MAXCOOKIELEN) {
		c->len = len;
		memcpy(c->data, p, len);
		p += XDR_QUADLEN(len);
	} else {
		dprintk("lockd: bad cookie size %d (only cookies under "
			"%d bytes are supported.)\n",
				len, NLM_MAXCOOKIELEN);
		return NULL;
	}
	return p;
}

/* Encode an NLM cookie as a counted opaque. */
static __be32 *
nlm4_encode_cookie(__be32 *p, struct nlm_cookie *c)
{
	*p++ = htonl(c->len);
	memcpy(p, c->data, c->len);
	p += XDR_QUADLEN(c->len);
	return p;
}

/*
 * Decode an NFS file handle (counted opaque, at most NFS_MAXFHSIZE).
 * Returns NULL if the peer claims an oversized handle.
 */
static __be32 *
nlm4_decode_fh(__be32 *p, struct nfs_fh *f)
{
	memset(f->data, 0, sizeof(f->data));
	f->size = ntohl(*p++);
	if (f->size > NFS_MAXFHSIZE) {
		dprintk("lockd: bad fhandle size %d (should be <=%d)\n",
			f->size, NFS_MAXFHSIZE);
		return NULL;
	}
	memcpy(f->data, p, f->size);
	return p + XDR_QUADLEN(f->size);
}

/*
 * Encode and decode owner handle
 */
static __be32 *
nlm4_decode_oh(__be32 *p, struct xdr_netobj *oh)
{
	return xdr_decode_netobj(p, oh);
}

/*
 * Decode an nlm_lock: caller name, file handle, owner handle, svid,
 * then the 64-bit start/length pair, which is converted to the kernel's
 * [fl_start, fl_end] convention (len 0 or wrap -> lock to OFFSET_MAX).
 */
static __be32 *
nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
{
	struct file_lock	*fl = &lock->fl;
	__u64			len, start;
	__s64			end;

	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
					    &lock->len, NLM_MAXSTRLEN))
	 || !(p = nlm4_decode_fh(p, &lock->fh))
	 || !(p = nlm4_decode_oh(p, &lock->oh)))
		return NULL;
	lock->svid  = ntohl(*p++);

	locks_init_lock(fl);
	fl->fl_owner = current->files;
	fl->fl_pid   = (pid_t)lock->svid;
	fl->fl_flags = FL_POSIX;
	fl->fl_type  = F_RDLCK;		/* as good as anything else */
	p = xdr_decode_hyper(p, &start);
	p = xdr_decode_hyper(p, &len);
	end = start + len - 1;

	fl->fl_start = s64_to_loff_t(start);

	if (len == 0 || end < 0)
		fl->fl_end = OFFSET_MAX;
	else
		fl->fl_end = s64_to_loff_t(end);
	return p;
}

/*
 * Encode result of a TEST/TEST_MSG call
 */
static __be32 *
nlm4_encode_testres(__be32 *p, struct nlm_res *resp)
{
	s64		start, len;

	dprintk("xdr: before encode_testres (p %p resp %p)\n", p, resp);
	if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
		return NULL;
	*p++ = resp->status;

	if (resp->status == nlm_lck_denied) {
		/* on denial, describe the conflicting holder's lock */
		struct file_lock	*fl = &resp->lock.fl;

		*p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one;
		*p++ = htonl(resp->lock.svid);

		/* Encode owner handle. */
		if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
			return NULL;

		start = loff_t_to_s64(fl->fl_start);
		if (fl->fl_end == OFFSET_MAX)
			len = 0;	/* "to end of file" on the wire */
		else
			len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);

		p = xdr_encode_hyper(p, start);
		p = xdr_encode_hyper(p, len);
		dprintk("xdr: encode_testres (status %u pid %d type %d start %Ld end %Ld)\n",
			resp->status, (int)resp->lock.svid, fl->fl_type,
			(long long)fl->fl_start, (long long)fl->fl_end);
	}

	dprintk("xdr: after encode_testres (p %p resp %p)\n", p, resp);
	return p;
}

/*
 * First, the server side XDR functions
 */
int
nlm4svc_decode_testargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
	u32	exclusive;

	if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
		return 0;

	exclusive = ntohl(*p++);
	if (!(p = nlm4_decode_lock(p, &argp->lock)))
		return 0;
	if (exclusive)
		argp->lock.fl.fl_type = F_WRLCK;

	return xdr_argsize_check(rqstp, p);
}

int
nlm4svc_encode_testres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
{
	if (!(p = nlm4_encode_testres(p, resp)))
		return 0;
	return xdr_ressize_check(rqstp, p);
}

int
nlm4svc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
	u32	exclusive;

	if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
		return 0;
	argp->block = ntohl(*p++);
	exclusive = ntohl(*p++);
	if (!(p = nlm4_decode_lock(p, &argp->lock)))
		return 0;
	if (exclusive)
		argp->lock.fl.fl_type = F_WRLCK;
	argp->reclaim = ntohl(*p++);
	argp->state = ntohl(*p++);
	argp->monitor = 1;		/* monitor client by default */

	return xdr_argsize_check(rqstp, p);
}

int
nlm4svc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
	u32	exclusive;

	if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
		return 0;
	argp->block = ntohl(*p++);
	exclusive = ntohl(*p++);
	if (!(p = nlm4_decode_lock(p, &argp->lock)))
		return 0;
	if (exclusive)
		argp->lock.fl.fl_type = F_WRLCK;
	return xdr_argsize_check(rqstp, p);
}

int
nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
	if (!(p = nlm4_decode_cookie(p, &argp->cookie))
	 || !(p = nlm4_decode_lock(p, &argp->lock)))
		return 0;
	argp->lock.fl.fl_type = F_UNLCK;
	return xdr_argsize_check(rqstp, p);
}

int
nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
	struct nlm_lock	*lock = &argp->lock;

	memset(lock, 0, sizeof(*lock));
	locks_init_lock(&lock->fl);
	lock->svid = ~(u32) 0;		/* share requests carry no svid */
	lock->fl.fl_pid = (pid_t)lock->svid;

	if (!(p = nlm4_decode_cookie(p, &argp->cookie))
	 || !(p = xdr_decode_string_inplace(p, &lock->caller,
					    &lock->len, NLM_MAXSTRLEN))
	 || !(p = nlm4_decode_fh(p, &lock->fh))
	 || !(p = nlm4_decode_oh(p, &lock->oh)))
		return 0;
	argp->fsm_mode = ntohl(*p++);
	argp->fsm_access = ntohl(*p++);
	return xdr_argsize_check(rqstp, p);
}

int
nlm4svc_encode_shareres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
{
	if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
		return 0;
	*p++ = resp->status;
	*p++ = xdr_zero;		/* sequence argument */
	return xdr_ressize_check(rqstp, p);
}

int
nlm4svc_encode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
{
	if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
		return 0;
	*p++ = resp->status;
	return xdr_ressize_check(rqstp, p);
}

int
nlm4svc_decode_notify(struct svc_rqst *rqstp, __be32 *p, struct nlm_args *argp)
{
	struct nlm_lock	*lock = &argp->lock;

	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
					    &lock->len, NLM_MAXSTRLEN)))
		return 0;
	argp->state = ntohl(*p++);
	return xdr_argsize_check(rqstp, p);
}

/* Decode an SM_NOTIFY reboot indication: monitor name, state, private cookie. */
int
nlm4svc_decode_reboot(struct svc_rqst *rqstp, __be32 *p, struct nlm_reboot *argp)
{
	if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len,
					    SM_MAXSTRLEN)))
		return 0;
	argp->state = ntohl(*p++);
	memcpy(&argp->priv.data, p, sizeof(argp->priv.data));
	p += XDR_QUADLEN(SM_PRIV_SIZE);
	return xdr_argsize_check(rqstp, p);
}

int
nlm4svc_decode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
{
	if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
		return 0;
	resp->status = *p++;	/* kept in network byte order */
	return xdr_argsize_check(rqstp, p);
}

int
nlm4svc_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
	return xdr_argsize_check(rqstp, p);
}

int
nlm4svc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
	return xdr_ressize_check(rqstp, p);
}
gpl-2.0
mpokwsths/hammerhead_kernel
arch/sh/boards/mach-sdk7780/irq.c
13239
1186
/*
 * linux/arch/sh/boards/renesas/sdk7780/irq.c
 *
 * Renesas Technology Europe SDK7780 Support.
 *
 * Copyright (C) 2008  Nicholas Beck <nbeck@mpc-data.co.uk>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <mach/sdk7780.h>

enum {
	UNUSED = 0,

	/* board specific interrupt sources */
	SMC91C111,	/* Ethernet controller */
};

/* Map the one FPGA interrupt source to its fixed IRQ number. */
static struct intc_vect fpga_vectors[] __initdata = {
	INTC_IRQ(SMC91C111, IRQ_ETHERNET),
};

/*
 * Single 16-bit mask register (FPGA_IRQ0MR).  Only bit 11 (the SMC91C111
 * Ethernet controller) is populated; all other bits are unused.
 */
static struct intc_mask_reg fpga_mask_registers[] __initdata = {
	{ 0, FPGA_IRQ0MR, 16,
	  { 0, 0, 0, 0, 0, 0, 0, 0,
	    0, 0, 0, SMC91C111, 0, 0, 0, 0 } },
};

static DECLARE_INTC_DESC(fpga_intc_desc, "sdk7780-irq", fpga_vectors,
			 NULL, fpga_mask_registers, NULL, NULL);

/*
 * Board IRQ setup: mask every FPGA interrupt first, configure the IRL
 * lines, then hand the descriptor to the generic intc core.
 */
void __init init_sdk7780_IRQ(void)
{
	printk(KERN_INFO "Using SDK7780 interrupt controller.\n");

	__raw_writew(0xFFFF, FPGA_IRQ0MR);	/* mask all sources initially */
	/* Setup IRL 0-3 */
	__raw_writew(0x0003, FPGA_IMSR);
	plat_irq_setup_pins(IRQ_MODE_IRL3210);

	register_intc_controller(&fpga_intc_desc);
}
gpl-2.0
hexiaolong2008/linux-2.6.32
arch/sh/kernel/vsyscall/vsyscall.c
696
2504
/*
 * arch/sh/kernel/vsyscall/vsyscall.c
 *
 *  Copyright (C) 2006 Paul Mundt
 *
 * vDSO randomization
 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/elf.h>
#include <linux/sched.h>
#include <linux/err.h>

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;
EXPORT_SYMBOL_GPL(vdso_enabled);

/* Parse the "vdso=" boot parameter (numeric; 0 disables the mapping). */
static int __init vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 1;
}
__setup("vdso=", vdso_setup);

/*
 * These symbols are defined by vsyscall.o to mark the bounds
 * of the ELF DSO images included therein.
 */
extern const char vsyscall_trapa_start, vsyscall_trapa_end;
static struct page *syscall_pages[1];

/*
 * Copy the trapa-based syscall stub into a freshly allocated page that
 * arch_setup_additional_pages() later maps into each new process.
 * NOTE(review): the get_zeroed_page() result is not checked for NULL
 * before virt_to_page()/memcpy() -- confirm this is acceptable at
 * early-init time.
 */
int __init vsyscall_init(void)
{
	void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);

	syscall_pages[0] = virt_to_page(syscall_page);

	/*
	 * XXX: Map this page to a fixmap entry if we get around
	 * to adding the page to ELF core dumps
	 */

	memcpy(syscall_page,
	       &vsyscall_trapa_start,
	       &vsyscall_trapa_end - &vsyscall_trapa_start);

	return 0;
}

/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	down_write(&mm->mmap_sem);
	/* let the kernel choose any free address for the single vDSO page */
	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	ret = install_special_mapping(mm, addr, PAGE_SIZE,
				      VM_READ | VM_EXEC |
				      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |
				      VM_ALWAYSDUMP,
				      syscall_pages);
	if (unlikely(ret))
		goto up_fail;

	current->mm->context.vdso = (void *)addr;

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}
const char *arch_vma_name(struct vm_area_struct *vma) { if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) return "[vdso]"; return NULL; } struct vm_area_struct *get_gate_vma(struct task_struct *task) { return NULL; } int in_gate_area(struct task_struct *task, unsigned long address) { return 0; } int in_gate_area_no_task(unsigned long address) { return 0; }
gpl-2.0
wangxingchao/spi-omap
drivers/video/omap2/dss/sdi.c
696
3894
/*
 * linux/drivers/video/omap2/dss/sdi.c
 *
 * Copyright (C) 2009 Nokia Corporation
 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define DSS_SUBSYS_NAME "SDI"

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

#include <video/omapdss.h>
#include "dss.h"

/* Module-local state: the SDI supply regulator, fetched once lazily. */
static struct {
	bool update_enabled;
	struct regulator *vdds_sdi_reg;
} sdi;

/* Program the DISPC manager for SDI: bypass mode, TFT, 24 data lines. */
static void sdi_basic_init(struct omap_dss_device *dssdev)
{
	dispc_set_parallel_interface_mode(dssdev->manager->id,
			OMAP_DSS_PARALLELMODE_BYPASS);

	dispc_set_lcd_display_type(dssdev->manager->id,
			OMAP_DSS_LCD_DISPLAY_TFT);

	dispc_set_tft_data_lines(dssdev->manager->id, 24);
	dispc_lcd_enable_signal_polarity(1);
}

/*
 * Full SDI power-up sequence: regulator -> DSS/DISPC runtime PM ->
 * clock dividers -> SDI PLL/enable -> display manager.  Resources are
 * released in reverse order via the goto cleanup chain on any failure.
 * Returns 0 on success or a negative error code.
 */
int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
{
	struct omap_video_timings *t = &dssdev->panel.timings;
	struct dss_clock_info dss_cinfo;
	struct dispc_clock_info dispc_cinfo;
	u16 lck_div, pck_div;
	unsigned long fck;
	unsigned long pck;
	int r;

	r = omap_dss_start_device(dssdev);
	if (r) {
		DSSERR("failed to start device\n");
		goto err_start_dev;
	}

	r = regulator_enable(sdi.vdds_sdi_reg);
	if (r)
		goto err_reg_enable;

	r = dss_runtime_get();
	if (r)
		goto err_get_dss;

	r = dispc_runtime_get();
	if (r)
		goto err_get_dispc;

	sdi_basic_init(dssdev);

	/* 15.5.9.1.2 */
	dssdev->panel.config |= OMAP_DSS_LCD_RF | OMAP_DSS_LCD_ONOFF;

	dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
			dssdev->panel.acbi, dssdev->panel.acb);

	r = dss_calc_clock_div(1, t->pixel_clock * 1000,
			&dss_cinfo, &dispc_cinfo);
	if (r)
		goto err_calc_clock_div;

	fck = dss_cinfo.fck;
	lck_div = dispc_cinfo.lck_div;
	pck_div = dispc_cinfo.pck_div;

	/* report (and adopt) the actually achievable pixel clock */
	pck = fck / lck_div / pck_div / 1000;

	if (pck != t->pixel_clock) {
		DSSWARN("Could not find exact pixel clock. Requested %d kHz, "
				"got %lu kHz\n",
				t->pixel_clock, pck);

		t->pixel_clock = pck;
	}


	dispc_set_lcd_timings(dssdev->manager->id, t);

	r = dss_set_clock_div(&dss_cinfo);
	if (r)
		goto err_set_dss_clock_div;

	r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
	if (r)
		goto err_set_dispc_clock_div;

	dss_sdi_init(dssdev->phy.sdi.datapairs);
	r = dss_sdi_enable();
	if (r)
		goto err_sdi_enable;
	mdelay(2);	/* let the SDI PLL lock before enabling the manager */

	dssdev->manager->enable(dssdev->manager);

	return 0;

err_sdi_enable:
err_set_dispc_clock_div:
err_set_dss_clock_div:
err_calc_clock_div:
	dispc_runtime_put();
err_get_dispc:
	dss_runtime_put();
err_get_dss:
	regulator_disable(sdi.vdds_sdi_reg);
err_reg_enable:
	omap_dss_stop_device(dssdev);
err_start_dev:
	return r;
}
EXPORT_SYMBOL(omapdss_sdi_display_enable);

/* Power-down in the exact reverse order of omapdss_sdi_display_enable(). */
void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
{
	dssdev->manager->disable(dssdev->manager);

	dss_sdi_disable();

	dispc_runtime_put();
	dss_runtime_put();

	regulator_disable(sdi.vdds_sdi_reg);

	omap_dss_stop_device(dssdev);
}
EXPORT_SYMBOL(omapdss_sdi_display_disable);

/* Per-display init: look up the VDDS_SDI regulator on first use. */
int sdi_init_display(struct omap_dss_device *dssdev)
{
	DSSDBG("SDI init\n");

	if (sdi.vdds_sdi_reg == NULL) {
		struct regulator *vdds_sdi;

		vdds_sdi = dss_get_vdds_sdi();

		if (IS_ERR(vdds_sdi)) {
			DSSERR("can't get VDDS_SDI regulator\n");
			return PTR_ERR(vdds_sdi);
		}

		sdi.vdds_sdi_reg = vdds_sdi;
	}

	return 0;
}

/* Subsystem-level init/exit hooks; nothing to do for SDI. */
int sdi_init(void)
{
	return 0;
}

void sdi_exit(void)
{
}
gpl-2.0
dummie999/android_kernel_htc_z4u
sound/usb/misc/ua101.c
952
38417
/* * Edirol UA-101/UA-1000 driver * Copyright (c) Clemens Ladisch <clemens@ladisch.de> * * This driver is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this driver. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/audio.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include "../usbaudio.h" #include "../midi.h" MODULE_DESCRIPTION("Edirol UA-101/1000 driver"); MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); MODULE_LICENSE("GPL v2"); MODULE_SUPPORTED_DEVICE("{{Edirol,UA-101},{Edirol,UA-1000}}"); /* * Should not be lower than the minimum scheduling delay of the host * controller. Some Intel controllers need more than one frame; as long as * that driver doesn't tell us about this, use 1.5 frames just to be sure. */ #define MIN_QUEUE_LENGTH 12 /* Somewhat random. */ #define MAX_QUEUE_LENGTH 30 /* * This magic value optimizes memory usage efficiency for the UA-101's packet * sizes at all sample rates, taking into account the stupid cache pool sizes * that usb_alloc_coherent() uses. 
*/ #define DEFAULT_QUEUE_LENGTH 21 #define MAX_PACKET_SIZE 672 /* hardware specific */ #define MAX_MEMORY_BUFFERS DIV_ROUND_UP(MAX_QUEUE_LENGTH, \ PAGE_SIZE / MAX_PACKET_SIZE) static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; static unsigned int queue_length = 21; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "card index"); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string"); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "enable card"); module_param(queue_length, uint, 0644); MODULE_PARM_DESC(queue_length, "USB queue length in microframes, " __stringify(MIN_QUEUE_LENGTH)"-"__stringify(MAX_QUEUE_LENGTH)); enum { INTF_PLAYBACK, INTF_CAPTURE, INTF_MIDI, INTF_COUNT }; /* bits in struct ua101::states */ enum { USB_CAPTURE_RUNNING, USB_PLAYBACK_RUNNING, ALSA_CAPTURE_OPEN, ALSA_PLAYBACK_OPEN, ALSA_CAPTURE_RUNNING, ALSA_PLAYBACK_RUNNING, CAPTURE_URB_COMPLETED, PLAYBACK_URB_COMPLETED, DISCONNECTED, }; struct ua101 { struct usb_device *dev; struct snd_card *card; struct usb_interface *intf[INTF_COUNT]; int card_index; struct snd_pcm *pcm; struct list_head midi_list; u64 format_bit; unsigned int rate; unsigned int packets_per_second; spinlock_t lock; struct mutex mutex; unsigned long states; /* FIFO to synchronize playback rate to capture rate */ unsigned int rate_feedback_start; unsigned int rate_feedback_count; u8 rate_feedback[MAX_QUEUE_LENGTH]; struct list_head ready_playback_urbs; struct tasklet_struct playback_tasklet; wait_queue_head_t alsa_capture_wait; wait_queue_head_t rate_feedback_wait; wait_queue_head_t alsa_playback_wait; struct ua101_stream { struct snd_pcm_substream *substream; unsigned int usb_pipe; unsigned int channels; unsigned int frame_bytes; unsigned int max_packet_bytes; unsigned int period_pos; unsigned int buffer_pos; unsigned int queue_length; struct ua101_urb { struct 
urb urb; struct usb_iso_packet_descriptor iso_frame_desc[1]; struct list_head ready_list; } *urbs[MAX_QUEUE_LENGTH]; struct { unsigned int size; void *addr; dma_addr_t dma; } buffers[MAX_MEMORY_BUFFERS]; } capture, playback; }; static DEFINE_MUTEX(devices_mutex); static unsigned int devices_used; static struct usb_driver ua101_driver; static void abort_alsa_playback(struct ua101 *ua); static void abort_alsa_capture(struct ua101 *ua); static const char *usb_error_string(int err) { switch (err) { case -ENODEV: return "no device"; case -ENOENT: return "endpoint not enabled"; case -EPIPE: return "endpoint stalled"; case -ENOSPC: return "not enough bandwidth"; case -ESHUTDOWN: return "device disabled"; case -EHOSTUNREACH: return "device suspended"; case -EINVAL: case -EAGAIN: case -EFBIG: case -EMSGSIZE: return "internal error"; default: return "unknown error"; } } static void abort_usb_capture(struct ua101 *ua) { if (test_and_clear_bit(USB_CAPTURE_RUNNING, &ua->states)) { wake_up(&ua->alsa_capture_wait); wake_up(&ua->rate_feedback_wait); } } static void abort_usb_playback(struct ua101 *ua) { if (test_and_clear_bit(USB_PLAYBACK_RUNNING, &ua->states)) wake_up(&ua->alsa_playback_wait); } static void playback_urb_complete(struct urb *usb_urb) { struct ua101_urb *urb = (struct ua101_urb *)usb_urb; struct ua101 *ua = urb->urb.context; unsigned long flags; if (unlikely(urb->urb.status == -ENOENT || /* unlinked */ urb->urb.status == -ENODEV || /* device removed */ urb->urb.status == -ECONNRESET || /* unlinked */ urb->urb.status == -ESHUTDOWN)) { /* device disabled */ abort_usb_playback(ua); abort_alsa_playback(ua); return; } if (test_bit(USB_PLAYBACK_RUNNING, &ua->states)) { /* append URB to FIFO */ spin_lock_irqsave(&ua->lock, flags); list_add_tail(&urb->ready_list, &ua->ready_playback_urbs); if (ua->rate_feedback_count > 0) tasklet_schedule(&ua->playback_tasklet); ua->playback.substream->runtime->delay -= urb->urb.iso_frame_desc[0].length / ua->playback.frame_bytes; 
spin_unlock_irqrestore(&ua->lock, flags); } } static void first_playback_urb_complete(struct urb *urb) { struct ua101 *ua = urb->context; urb->complete = playback_urb_complete; playback_urb_complete(urb); set_bit(PLAYBACK_URB_COMPLETED, &ua->states); wake_up(&ua->alsa_playback_wait); } /* copy data from the ALSA ring buffer into the URB buffer */ static bool copy_playback_data(struct ua101_stream *stream, struct urb *urb, unsigned int frames) { struct snd_pcm_runtime *runtime; unsigned int frame_bytes, frames1; const u8 *source; runtime = stream->substream->runtime; frame_bytes = stream->frame_bytes; source = runtime->dma_area + stream->buffer_pos * frame_bytes; if (stream->buffer_pos + frames <= runtime->buffer_size) { memcpy(urb->transfer_buffer, source, frames * frame_bytes); } else { /* wrap around at end of ring buffer */ frames1 = runtime->buffer_size - stream->buffer_pos; memcpy(urb->transfer_buffer, source, frames1 * frame_bytes); memcpy(urb->transfer_buffer + frames1 * frame_bytes, runtime->dma_area, (frames - frames1) * frame_bytes); } stream->buffer_pos += frames; if (stream->buffer_pos >= runtime->buffer_size) stream->buffer_pos -= runtime->buffer_size; stream->period_pos += frames; if (stream->period_pos >= runtime->period_size) { stream->period_pos -= runtime->period_size; return true; } return false; } static inline void add_with_wraparound(struct ua101 *ua, unsigned int *value, unsigned int add) { *value += add; if (*value >= ua->playback.queue_length) *value -= ua->playback.queue_length; } static void playback_tasklet(unsigned long data) { struct ua101 *ua = (void *)data; unsigned long flags; unsigned int frames; struct ua101_urb *urb; bool do_period_elapsed = false; int err; if (unlikely(!test_bit(USB_PLAYBACK_RUNNING, &ua->states))) return; /* * Synchronizing the playback rate to the capture rate is done by using * the same sequence of packet sizes for both streams. 
* Submitting a playback URB therefore requires both a ready URB and * the size of the corresponding capture packet, i.e., both playback * and capture URBs must have been completed. Since the USB core does * not guarantee that playback and capture complete callbacks are * called alternately, we use two FIFOs for packet sizes and read URBs; * submitting playback URBs is possible as long as both FIFOs are * nonempty. */ spin_lock_irqsave(&ua->lock, flags); while (ua->rate_feedback_count > 0 && !list_empty(&ua->ready_playback_urbs)) { /* take packet size out of FIFO */ frames = ua->rate_feedback[ua->rate_feedback_start]; add_with_wraparound(ua, &ua->rate_feedback_start, 1); ua->rate_feedback_count--; /* take URB out of FIFO */ urb = list_first_entry(&ua->ready_playback_urbs, struct ua101_urb, ready_list); list_del(&urb->ready_list); /* fill packet with data or silence */ urb->urb.iso_frame_desc[0].length = frames * ua->playback.frame_bytes; if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states)) do_period_elapsed |= copy_playback_data(&ua->playback, &urb->urb, frames); else memset(urb->urb.transfer_buffer, 0, urb->urb.iso_frame_desc[0].length); /* and off you go ... 
*/ err = usb_submit_urb(&urb->urb, GFP_ATOMIC); if (unlikely(err < 0)) { spin_unlock_irqrestore(&ua->lock, flags); abort_usb_playback(ua); abort_alsa_playback(ua); dev_err(&ua->dev->dev, "USB request error %d: %s\n", err, usb_error_string(err)); return; } ua->playback.substream->runtime->delay += frames; } spin_unlock_irqrestore(&ua->lock, flags); if (do_period_elapsed) snd_pcm_period_elapsed(ua->playback.substream); } /* copy data from the URB buffer into the ALSA ring buffer */ static bool copy_capture_data(struct ua101_stream *stream, struct urb *urb, unsigned int frames) { struct snd_pcm_runtime *runtime; unsigned int frame_bytes, frames1; u8 *dest; runtime = stream->substream->runtime; frame_bytes = stream->frame_bytes; dest = runtime->dma_area + stream->buffer_pos * frame_bytes; if (stream->buffer_pos + frames <= runtime->buffer_size) { memcpy(dest, urb->transfer_buffer, frames * frame_bytes); } else { /* wrap around at end of ring buffer */ frames1 = runtime->buffer_size - stream->buffer_pos; memcpy(dest, urb->transfer_buffer, frames1 * frame_bytes); memcpy(runtime->dma_area, urb->transfer_buffer + frames1 * frame_bytes, (frames - frames1) * frame_bytes); } stream->buffer_pos += frames; if (stream->buffer_pos >= runtime->buffer_size) stream->buffer_pos -= runtime->buffer_size; stream->period_pos += frames; if (stream->period_pos >= runtime->period_size) { stream->period_pos -= runtime->period_size; return true; } return false; } static void capture_urb_complete(struct urb *urb) { struct ua101 *ua = urb->context; struct ua101_stream *stream = &ua->capture; unsigned long flags; unsigned int frames, write_ptr; bool do_period_elapsed; int err; if (unlikely(urb->status == -ENOENT || /* unlinked */ urb->status == -ENODEV || /* device removed */ urb->status == -ECONNRESET || /* unlinked */ urb->status == -ESHUTDOWN)) /* device disabled */ goto stream_stopped; if (urb->status >= 0 && urb->iso_frame_desc[0].status >= 0) frames = urb->iso_frame_desc[0].actual_length / 
stream->frame_bytes; else frames = 0; spin_lock_irqsave(&ua->lock, flags); if (frames > 0 && test_bit(ALSA_CAPTURE_RUNNING, &ua->states)) do_period_elapsed = copy_capture_data(stream, urb, frames); else do_period_elapsed = false; if (test_bit(USB_CAPTURE_RUNNING, &ua->states)) { err = usb_submit_urb(urb, GFP_ATOMIC); if (unlikely(err < 0)) { spin_unlock_irqrestore(&ua->lock, flags); dev_err(&ua->dev->dev, "USB request error %d: %s\n", err, usb_error_string(err)); goto stream_stopped; } /* append packet size to FIFO */ write_ptr = ua->rate_feedback_start; add_with_wraparound(ua, &write_ptr, ua->rate_feedback_count); ua->rate_feedback[write_ptr] = frames; if (ua->rate_feedback_count < ua->playback.queue_length) { ua->rate_feedback_count++; if (ua->rate_feedback_count == ua->playback.queue_length) wake_up(&ua->rate_feedback_wait); } else { /* * Ring buffer overflow; this happens when the playback * stream is not running. Throw away the oldest entry, * so that the playback stream, when it starts, sees * the most recent packet sizes. 
*/ add_with_wraparound(ua, &ua->rate_feedback_start, 1); } if (test_bit(USB_PLAYBACK_RUNNING, &ua->states) && !list_empty(&ua->ready_playback_urbs)) tasklet_schedule(&ua->playback_tasklet); } spin_unlock_irqrestore(&ua->lock, flags); if (do_period_elapsed) snd_pcm_period_elapsed(stream->substream); return; stream_stopped: abort_usb_playback(ua); abort_usb_capture(ua); abort_alsa_playback(ua); abort_alsa_capture(ua); } static void first_capture_urb_complete(struct urb *urb) { struct ua101 *ua = urb->context; urb->complete = capture_urb_complete; capture_urb_complete(urb); set_bit(CAPTURE_URB_COMPLETED, &ua->states); wake_up(&ua->alsa_capture_wait); } static int submit_stream_urbs(struct ua101 *ua, struct ua101_stream *stream) { unsigned int i; for (i = 0; i < stream->queue_length; ++i) { int err = usb_submit_urb(&stream->urbs[i]->urb, GFP_KERNEL); if (err < 0) { dev_err(&ua->dev->dev, "USB request error %d: %s\n", err, usb_error_string(err)); return err; } } return 0; } static void kill_stream_urbs(struct ua101_stream *stream) { unsigned int i; for (i = 0; i < stream->queue_length; ++i) if (stream->urbs[i]) usb_kill_urb(&stream->urbs[i]->urb); } static int enable_iso_interface(struct ua101 *ua, unsigned int intf_index) { struct usb_host_interface *alts; alts = ua->intf[intf_index]->cur_altsetting; if (alts->desc.bAlternateSetting != 1) { int err = usb_set_interface(ua->dev, alts->desc.bInterfaceNumber, 1); if (err < 0) { dev_err(&ua->dev->dev, "cannot initialize interface; error %d: %s\n", err, usb_error_string(err)); return err; } } return 0; } static void disable_iso_interface(struct ua101 *ua, unsigned int intf_index) { struct usb_host_interface *alts; if (!ua->intf[intf_index]) return; alts = ua->intf[intf_index]->cur_altsetting; if (alts->desc.bAlternateSetting != 0) { int err = usb_set_interface(ua->dev, alts->desc.bInterfaceNumber, 0); if (err < 0 && !test_bit(DISCONNECTED, &ua->states)) dev_warn(&ua->dev->dev, "interface reset failed; error %d: %s\n", err, 
usb_error_string(err)); } } static void stop_usb_capture(struct ua101 *ua) { clear_bit(USB_CAPTURE_RUNNING, &ua->states); kill_stream_urbs(&ua->capture); disable_iso_interface(ua, INTF_CAPTURE); } static int start_usb_capture(struct ua101 *ua) { int err; if (test_bit(DISCONNECTED, &ua->states)) return -ENODEV; if (test_bit(USB_CAPTURE_RUNNING, &ua->states)) return 0; kill_stream_urbs(&ua->capture); err = enable_iso_interface(ua, INTF_CAPTURE); if (err < 0) return err; clear_bit(CAPTURE_URB_COMPLETED, &ua->states); ua->capture.urbs[0]->urb.complete = first_capture_urb_complete; ua->rate_feedback_start = 0; ua->rate_feedback_count = 0; set_bit(USB_CAPTURE_RUNNING, &ua->states); err = submit_stream_urbs(ua, &ua->capture); if (err < 0) stop_usb_capture(ua); return err; } static void stop_usb_playback(struct ua101 *ua) { clear_bit(USB_PLAYBACK_RUNNING, &ua->states); kill_stream_urbs(&ua->playback); tasklet_kill(&ua->playback_tasklet); disable_iso_interface(ua, INTF_PLAYBACK); } static int start_usb_playback(struct ua101 *ua) { unsigned int i, frames; struct urb *urb; int err = 0; if (test_bit(DISCONNECTED, &ua->states)) return -ENODEV; if (test_bit(USB_PLAYBACK_RUNNING, &ua->states)) return 0; kill_stream_urbs(&ua->playback); tasklet_kill(&ua->playback_tasklet); err = enable_iso_interface(ua, INTF_PLAYBACK); if (err < 0) return err; clear_bit(PLAYBACK_URB_COMPLETED, &ua->states); ua->playback.urbs[0]->urb.complete = first_playback_urb_complete; spin_lock_irq(&ua->lock); INIT_LIST_HEAD(&ua->ready_playback_urbs); spin_unlock_irq(&ua->lock); /* * We submit the initial URBs all at once, so we have to wait for the * packet size FIFO to be full. 
*/ wait_event(ua->rate_feedback_wait, ua->rate_feedback_count >= ua->playback.queue_length || !test_bit(USB_CAPTURE_RUNNING, &ua->states) || test_bit(DISCONNECTED, &ua->states)); if (test_bit(DISCONNECTED, &ua->states)) { stop_usb_playback(ua); return -ENODEV; } if (!test_bit(USB_CAPTURE_RUNNING, &ua->states)) { stop_usb_playback(ua); return -EIO; } for (i = 0; i < ua->playback.queue_length; ++i) { /* all initial URBs contain silence */ spin_lock_irq(&ua->lock); frames = ua->rate_feedback[ua->rate_feedback_start]; add_with_wraparound(ua, &ua->rate_feedback_start, 1); ua->rate_feedback_count--; spin_unlock_irq(&ua->lock); urb = &ua->playback.urbs[i]->urb; urb->iso_frame_desc[0].length = frames * ua->playback.frame_bytes; memset(urb->transfer_buffer, 0, urb->iso_frame_desc[0].length); } set_bit(USB_PLAYBACK_RUNNING, &ua->states); err = submit_stream_urbs(ua, &ua->playback); if (err < 0) stop_usb_playback(ua); return err; } static void abort_alsa_capture(struct ua101 *ua) { unsigned long flags; if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states)) { snd_pcm_stream_lock_irqsave(ua->capture.substream, flags); snd_pcm_stop(ua->capture.substream, SNDRV_PCM_STATE_XRUN); snd_pcm_stream_unlock_irqrestore(ua->capture.substream, flags); } } static void abort_alsa_playback(struct ua101 *ua) { unsigned long flags; if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states)) { snd_pcm_stream_lock_irqsave(ua->playback.substream, flags); snd_pcm_stop(ua->playback.substream, SNDRV_PCM_STATE_XRUN); snd_pcm_stream_unlock_irqrestore(ua->playback.substream, flags); } } static int set_stream_hw(struct ua101 *ua, struct snd_pcm_substream *substream, unsigned int channels) { int err; substream->runtime->hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_FIFO_IN_FRAMES; substream->runtime->hw.formats = ua->format_bit; substream->runtime->hw.rates = snd_pcm_rate_to_rate_bit(ua->rate); 
substream->runtime->hw.rate_min = ua->rate; substream->runtime->hw.rate_max = ua->rate; substream->runtime->hw.channels_min = channels; substream->runtime->hw.channels_max = channels; substream->runtime->hw.buffer_bytes_max = 45000 * 1024; substream->runtime->hw.period_bytes_min = 1; substream->runtime->hw.period_bytes_max = UINT_MAX; substream->runtime->hw.periods_min = 2; substream->runtime->hw.periods_max = UINT_MAX; err = snd_pcm_hw_constraint_minmax(substream->runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME, 1500000 / ua->packets_per_second, UINT_MAX); if (err < 0) return err; err = snd_pcm_hw_constraint_msbits(substream->runtime, 0, 32, 24); return err; } static int capture_pcm_open(struct snd_pcm_substream *substream) { struct ua101 *ua = substream->private_data; int err; ua->capture.substream = substream; err = set_stream_hw(ua, substream, ua->capture.channels); if (err < 0) return err; substream->runtime->hw.fifo_size = DIV_ROUND_CLOSEST(ua->rate, ua->packets_per_second); substream->runtime->delay = substream->runtime->hw.fifo_size; mutex_lock(&ua->mutex); err = start_usb_capture(ua); if (err >= 0) set_bit(ALSA_CAPTURE_OPEN, &ua->states); mutex_unlock(&ua->mutex); return err; } static int playback_pcm_open(struct snd_pcm_substream *substream) { struct ua101 *ua = substream->private_data; int err; ua->playback.substream = substream; err = set_stream_hw(ua, substream, ua->playback.channels); if (err < 0) return err; substream->runtime->hw.fifo_size = DIV_ROUND_CLOSEST(ua->rate * ua->playback.queue_length, ua->packets_per_second); mutex_lock(&ua->mutex); err = start_usb_capture(ua); if (err < 0) goto error; err = start_usb_playback(ua); if (err < 0) { if (!test_bit(ALSA_CAPTURE_OPEN, &ua->states)) stop_usb_capture(ua); goto error; } set_bit(ALSA_PLAYBACK_OPEN, &ua->states); error: mutex_unlock(&ua->mutex); return err; } static int capture_pcm_close(struct snd_pcm_substream *substream) { struct ua101 *ua = substream->private_data; mutex_lock(&ua->mutex); 
clear_bit(ALSA_CAPTURE_OPEN, &ua->states); if (!test_bit(ALSA_PLAYBACK_OPEN, &ua->states)) stop_usb_capture(ua); mutex_unlock(&ua->mutex); return 0; } static int playback_pcm_close(struct snd_pcm_substream *substream) { struct ua101 *ua = substream->private_data; mutex_lock(&ua->mutex); stop_usb_playback(ua); clear_bit(ALSA_PLAYBACK_OPEN, &ua->states); if (!test_bit(ALSA_CAPTURE_OPEN, &ua->states)) stop_usb_capture(ua); mutex_unlock(&ua->mutex); return 0; } static int capture_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct ua101 *ua = substream->private_data; int err; mutex_lock(&ua->mutex); err = start_usb_capture(ua); mutex_unlock(&ua->mutex); if (err < 0) return err; return snd_pcm_lib_alloc_vmalloc_buffer(substream, params_buffer_bytes(hw_params)); } static int playback_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct ua101 *ua = substream->private_data; int err; mutex_lock(&ua->mutex); err = start_usb_capture(ua); if (err >= 0) err = start_usb_playback(ua); mutex_unlock(&ua->mutex); if (err < 0) return err; return snd_pcm_lib_alloc_vmalloc_buffer(substream, params_buffer_bytes(hw_params)); } static int ua101_pcm_hw_free(struct snd_pcm_substream *substream) { return snd_pcm_lib_free_vmalloc_buffer(substream); } static int capture_pcm_prepare(struct snd_pcm_substream *substream) { struct ua101 *ua = substream->private_data; int err; mutex_lock(&ua->mutex); err = start_usb_capture(ua); mutex_unlock(&ua->mutex); if (err < 0) return err; /* * The EHCI driver schedules the first packet of an iso stream at 10 ms * in the future, i.e., no data is actually captured for that long. * Take the wait here so that the stream is known to be actually * running when the start trigger has been called. 
*/ wait_event(ua->alsa_capture_wait, test_bit(CAPTURE_URB_COMPLETED, &ua->states) || !test_bit(USB_CAPTURE_RUNNING, &ua->states)); if (test_bit(DISCONNECTED, &ua->states)) return -ENODEV; if (!test_bit(USB_CAPTURE_RUNNING, &ua->states)) return -EIO; ua->capture.period_pos = 0; ua->capture.buffer_pos = 0; return 0; } static int playback_pcm_prepare(struct snd_pcm_substream *substream) { struct ua101 *ua = substream->private_data; int err; mutex_lock(&ua->mutex); err = start_usb_capture(ua); if (err >= 0) err = start_usb_playback(ua); mutex_unlock(&ua->mutex); if (err < 0) return err; /* see the comment in capture_pcm_prepare() */ wait_event(ua->alsa_playback_wait, test_bit(PLAYBACK_URB_COMPLETED, &ua->states) || !test_bit(USB_PLAYBACK_RUNNING, &ua->states)); if (test_bit(DISCONNECTED, &ua->states)) return -ENODEV; if (!test_bit(USB_PLAYBACK_RUNNING, &ua->states)) return -EIO; substream->runtime->delay = 0; ua->playback.period_pos = 0; ua->playback.buffer_pos = 0; return 0; } static int capture_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct ua101 *ua = substream->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: if (!test_bit(USB_CAPTURE_RUNNING, &ua->states)) return -EIO; set_bit(ALSA_CAPTURE_RUNNING, &ua->states); return 0; case SNDRV_PCM_TRIGGER_STOP: clear_bit(ALSA_CAPTURE_RUNNING, &ua->states); return 0; default: return -EINVAL; } } static int playback_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct ua101 *ua = substream->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: if (!test_bit(USB_PLAYBACK_RUNNING, &ua->states)) return -EIO; set_bit(ALSA_PLAYBACK_RUNNING, &ua->states); return 0; case SNDRV_PCM_TRIGGER_STOP: clear_bit(ALSA_PLAYBACK_RUNNING, &ua->states); return 0; default: return -EINVAL; } } static inline snd_pcm_uframes_t ua101_pcm_pointer(struct ua101 *ua, struct ua101_stream *stream) { unsigned long flags; unsigned int pos; spin_lock_irqsave(&ua->lock, flags); pos = stream->buffer_pos; 
spin_unlock_irqrestore(&ua->lock, flags); return pos; } static snd_pcm_uframes_t capture_pcm_pointer(struct snd_pcm_substream *subs) { struct ua101 *ua = subs->private_data; return ua101_pcm_pointer(ua, &ua->capture); } static snd_pcm_uframes_t playback_pcm_pointer(struct snd_pcm_substream *subs) { struct ua101 *ua = subs->private_data; return ua101_pcm_pointer(ua, &ua->playback); } static struct snd_pcm_ops capture_pcm_ops = { .open = capture_pcm_open, .close = capture_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = capture_pcm_hw_params, .hw_free = ua101_pcm_hw_free, .prepare = capture_pcm_prepare, .trigger = capture_pcm_trigger, .pointer = capture_pcm_pointer, .page = snd_pcm_lib_get_vmalloc_page, .mmap = snd_pcm_lib_mmap_vmalloc, }; static struct snd_pcm_ops playback_pcm_ops = { .open = playback_pcm_open, .close = playback_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = playback_pcm_hw_params, .hw_free = ua101_pcm_hw_free, .prepare = playback_pcm_prepare, .trigger = playback_pcm_trigger, .pointer = playback_pcm_pointer, .page = snd_pcm_lib_get_vmalloc_page, .mmap = snd_pcm_lib_mmap_vmalloc, }; static const struct uac_format_type_i_discrete_descriptor * find_format_descriptor(struct usb_interface *interface) { struct usb_host_interface *alt; u8 *extra; int extralen; if (interface->num_altsetting != 2) { dev_err(&interface->dev, "invalid num_altsetting\n"); return NULL; } alt = &interface->altsetting[0]; if (alt->desc.bNumEndpoints != 0) { dev_err(&interface->dev, "invalid bNumEndpoints\n"); return NULL; } alt = &interface->altsetting[1]; if (alt->desc.bNumEndpoints != 1) { dev_err(&interface->dev, "invalid bNumEndpoints\n"); return NULL; } extra = alt->extra; extralen = alt->extralen; while (extralen >= sizeof(struct usb_descriptor_header)) { struct uac_format_type_i_discrete_descriptor *desc; desc = (struct uac_format_type_i_discrete_descriptor *)extra; if (desc->bLength > extralen) { dev_err(&interface->dev, "descriptor overflow\n"); return NULL; } if 
(desc->bLength == UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1) && desc->bDescriptorType == USB_DT_CS_INTERFACE && desc->bDescriptorSubtype == UAC_FORMAT_TYPE) { if (desc->bFormatType != UAC_FORMAT_TYPE_I_PCM || desc->bSamFreqType != 1) { dev_err(&interface->dev, "invalid format type\n"); return NULL; } return desc; } extralen -= desc->bLength; extra += desc->bLength; } dev_err(&interface->dev, "sample format descriptor not found\n"); return NULL; } static int detect_usb_format(struct ua101 *ua) { const struct uac_format_type_i_discrete_descriptor *fmt_capture; const struct uac_format_type_i_discrete_descriptor *fmt_playback; const struct usb_endpoint_descriptor *epd; unsigned int rate2; fmt_capture = find_format_descriptor(ua->intf[INTF_CAPTURE]); fmt_playback = find_format_descriptor(ua->intf[INTF_PLAYBACK]); if (!fmt_capture || !fmt_playback) return -ENXIO; switch (fmt_capture->bSubframeSize) { case 3: ua->format_bit = SNDRV_PCM_FMTBIT_S24_3LE; break; case 4: ua->format_bit = SNDRV_PCM_FMTBIT_S32_LE; break; default: dev_err(&ua->dev->dev, "sample width is not 24 or 32 bits\n"); return -ENXIO; } if (fmt_capture->bSubframeSize != fmt_playback->bSubframeSize) { dev_err(&ua->dev->dev, "playback/capture sample widths do not match\n"); return -ENXIO; } if (fmt_capture->bBitResolution != 24 || fmt_playback->bBitResolution != 24) { dev_err(&ua->dev->dev, "sample width is not 24 bits\n"); return -ENXIO; } ua->rate = combine_triple(fmt_capture->tSamFreq[0]); rate2 = combine_triple(fmt_playback->tSamFreq[0]); if (ua->rate != rate2) { dev_err(&ua->dev->dev, "playback/capture rates do not match: %u/%u\n", rate2, ua->rate); return -ENXIO; } switch (ua->dev->speed) { case USB_SPEED_FULL: ua->packets_per_second = 1000; break; case USB_SPEED_HIGH: ua->packets_per_second = 8000; break; default: dev_err(&ua->dev->dev, "unknown device speed\n"); return -ENXIO; } ua->capture.channels = fmt_capture->bNrChannels; ua->playback.channels = fmt_playback->bNrChannels; ua->capture.frame_bytes = 
fmt_capture->bSubframeSize * ua->capture.channels; ua->playback.frame_bytes = fmt_playback->bSubframeSize * ua->playback.channels; epd = &ua->intf[INTF_CAPTURE]->altsetting[1].endpoint[0].desc; if (!usb_endpoint_is_isoc_in(epd)) { dev_err(&ua->dev->dev, "invalid capture endpoint\n"); return -ENXIO; } ua->capture.usb_pipe = usb_rcvisocpipe(ua->dev, usb_endpoint_num(epd)); ua->capture.max_packet_bytes = le16_to_cpu(epd->wMaxPacketSize); epd = &ua->intf[INTF_PLAYBACK]->altsetting[1].endpoint[0].desc; if (!usb_endpoint_is_isoc_out(epd)) { dev_err(&ua->dev->dev, "invalid playback endpoint\n"); return -ENXIO; } ua->playback.usb_pipe = usb_sndisocpipe(ua->dev, usb_endpoint_num(epd)); ua->playback.max_packet_bytes = le16_to_cpu(epd->wMaxPacketSize); return 0; } static int alloc_stream_buffers(struct ua101 *ua, struct ua101_stream *stream) { unsigned int remaining_packets, packets, packets_per_page, i; size_t size; stream->queue_length = queue_length; stream->queue_length = max(stream->queue_length, (unsigned int)MIN_QUEUE_LENGTH); stream->queue_length = min(stream->queue_length, (unsigned int)MAX_QUEUE_LENGTH); /* * The cache pool sizes used by usb_alloc_coherent() (128, 512, 2048) are * quite bad when used with the packet sizes of this device (e.g. 280, * 520, 624). Therefore, we allocate and subdivide entire pages, using * a smaller buffer only for the last chunk. 
*/ remaining_packets = stream->queue_length; packets_per_page = PAGE_SIZE / stream->max_packet_bytes; for (i = 0; i < ARRAY_SIZE(stream->buffers); ++i) { packets = min(remaining_packets, packets_per_page); size = packets * stream->max_packet_bytes; stream->buffers[i].addr = usb_alloc_coherent(ua->dev, size, GFP_KERNEL, &stream->buffers[i].dma); if (!stream->buffers[i].addr) return -ENOMEM; stream->buffers[i].size = size; remaining_packets -= packets; if (!remaining_packets) break; } if (remaining_packets) { dev_err(&ua->dev->dev, "too many packets\n"); return -ENXIO; } return 0; } static void free_stream_buffers(struct ua101 *ua, struct ua101_stream *stream) { unsigned int i; for (i = 0; i < ARRAY_SIZE(stream->buffers); ++i) usb_free_coherent(ua->dev, stream->buffers[i].size, stream->buffers[i].addr, stream->buffers[i].dma); } static int alloc_stream_urbs(struct ua101 *ua, struct ua101_stream *stream, void (*urb_complete)(struct urb *)) { unsigned max_packet_size = stream->max_packet_bytes; struct ua101_urb *urb; unsigned int b, u = 0; for (b = 0; b < ARRAY_SIZE(stream->buffers); ++b) { unsigned int size = stream->buffers[b].size; u8 *addr = stream->buffers[b].addr; dma_addr_t dma = stream->buffers[b].dma; while (size >= max_packet_size) { if (u >= stream->queue_length) goto bufsize_error; urb = kmalloc(sizeof(*urb), GFP_KERNEL); if (!urb) return -ENOMEM; usb_init_urb(&urb->urb); urb->urb.dev = ua->dev; urb->urb.pipe = stream->usb_pipe; urb->urb.transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; urb->urb.transfer_buffer = addr; urb->urb.transfer_dma = dma; urb->urb.transfer_buffer_length = max_packet_size; urb->urb.number_of_packets = 1; urb->urb.interval = 1; urb->urb.context = ua; urb->urb.complete = urb_complete; urb->urb.iso_frame_desc[0].offset = 0; urb->urb.iso_frame_desc[0].length = max_packet_size; stream->urbs[u++] = urb; size -= max_packet_size; addr += max_packet_size; dma += max_packet_size; } } if (u == stream->queue_length) return 0; 
bufsize_error: dev_err(&ua->dev->dev, "internal buffer size error\n"); return -ENXIO; } static void free_stream_urbs(struct ua101_stream *stream) { unsigned int i; for (i = 0; i < stream->queue_length; ++i) { kfree(stream->urbs[i]); stream->urbs[i] = NULL; } } static void free_usb_related_resources(struct ua101 *ua, struct usb_interface *interface) { unsigned int i; struct usb_interface *intf; mutex_lock(&ua->mutex); free_stream_urbs(&ua->capture); free_stream_urbs(&ua->playback); mutex_unlock(&ua->mutex); free_stream_buffers(ua, &ua->capture); free_stream_buffers(ua, &ua->playback); for (i = 0; i < ARRAY_SIZE(ua->intf); ++i) { mutex_lock(&ua->mutex); intf = ua->intf[i]; ua->intf[i] = NULL; mutex_unlock(&ua->mutex); if (intf) { usb_set_intfdata(intf, NULL); if (intf != interface) usb_driver_release_interface(&ua101_driver, intf); } } } static void ua101_card_free(struct snd_card *card) { struct ua101 *ua = card->private_data; mutex_destroy(&ua->mutex); } static int ua101_probe(struct usb_interface *interface, const struct usb_device_id *usb_id) { static const struct snd_usb_midi_endpoint_info midi_ep = { .out_cables = 0x0001, .in_cables = 0x0001 }; static const struct snd_usb_audio_quirk midi_quirk = { .type = QUIRK_MIDI_FIXED_ENDPOINT, .data = &midi_ep }; static const int intf_numbers[2][3] = { { /* UA-101 */ [INTF_PLAYBACK] = 0, [INTF_CAPTURE] = 1, [INTF_MIDI] = 2, }, { /* UA-1000 */ [INTF_CAPTURE] = 1, [INTF_PLAYBACK] = 2, [INTF_MIDI] = 3, }, }; struct snd_card *card; struct ua101 *ua; unsigned int card_index, i; int is_ua1000; const char *name; char usb_path[32]; int err; is_ua1000 = usb_id->idProduct == 0x0044; if (interface->altsetting->desc.bInterfaceNumber != intf_numbers[is_ua1000][0]) return -ENODEV; mutex_lock(&devices_mutex); for (card_index = 0; card_index < SNDRV_CARDS; ++card_index) if (enable[card_index] && !(devices_used & (1 << card_index))) break; if (card_index >= SNDRV_CARDS) { mutex_unlock(&devices_mutex); return -ENOENT; } err = 
snd_card_create(index[card_index], id[card_index], THIS_MODULE, sizeof(*ua), &card); if (err < 0) { mutex_unlock(&devices_mutex); return err; } card->private_free = ua101_card_free; ua = card->private_data; ua->dev = interface_to_usbdev(interface); ua->card = card; ua->card_index = card_index; INIT_LIST_HEAD(&ua->midi_list); spin_lock_init(&ua->lock); mutex_init(&ua->mutex); INIT_LIST_HEAD(&ua->ready_playback_urbs); tasklet_init(&ua->playback_tasklet, playback_tasklet, (unsigned long)ua); init_waitqueue_head(&ua->alsa_capture_wait); init_waitqueue_head(&ua->rate_feedback_wait); init_waitqueue_head(&ua->alsa_playback_wait); ua->intf[0] = interface; for (i = 1; i < ARRAY_SIZE(ua->intf); ++i) { ua->intf[i] = usb_ifnum_to_if(ua->dev, intf_numbers[is_ua1000][i]); if (!ua->intf[i]) { dev_err(&ua->dev->dev, "interface %u not found\n", intf_numbers[is_ua1000][i]); err = -ENXIO; goto probe_error; } err = usb_driver_claim_interface(&ua101_driver, ua->intf[i], ua); if (err < 0) { ua->intf[i] = NULL; err = -EBUSY; goto probe_error; } } snd_card_set_dev(card, &interface->dev); err = detect_usb_format(ua); if (err < 0) goto probe_error; name = usb_id->idProduct == 0x0044 ? "UA-1000" : "UA-101"; strcpy(card->driver, "UA-101"); strcpy(card->shortname, name); usb_make_path(ua->dev, usb_path, sizeof(usb_path)); snprintf(ua->card->longname, sizeof(ua->card->longname), "EDIROL %s (serial %s), %u Hz at %s, %s speed", name, ua->dev->serial ? ua->dev->serial : "?", ua->rate, usb_path, ua->dev->speed == USB_SPEED_HIGH ? 
"high" : "full"); err = alloc_stream_buffers(ua, &ua->capture); if (err < 0) goto probe_error; err = alloc_stream_buffers(ua, &ua->playback); if (err < 0) goto probe_error; err = alloc_stream_urbs(ua, &ua->capture, capture_urb_complete); if (err < 0) goto probe_error; err = alloc_stream_urbs(ua, &ua->playback, playback_urb_complete); if (err < 0) goto probe_error; err = snd_pcm_new(card, name, 0, 1, 1, &ua->pcm); if (err < 0) goto probe_error; ua->pcm->private_data = ua; strcpy(ua->pcm->name, name); snd_pcm_set_ops(ua->pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_pcm_ops); snd_pcm_set_ops(ua->pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_pcm_ops); err = snd_usbmidi_create(card, ua->intf[INTF_MIDI], &ua->midi_list, &midi_quirk); if (err < 0) goto probe_error; err = snd_card_register(card); if (err < 0) goto probe_error; usb_set_intfdata(interface, ua); devices_used |= 1 << card_index; mutex_unlock(&devices_mutex); return 0; probe_error: free_usb_related_resources(ua, interface); snd_card_free(card); mutex_unlock(&devices_mutex); return err; } static void ua101_disconnect(struct usb_interface *interface) { struct ua101 *ua = usb_get_intfdata(interface); struct list_head *midi; if (!ua) return; mutex_lock(&devices_mutex); set_bit(DISCONNECTED, &ua->states); wake_up(&ua->rate_feedback_wait); /* make sure that userspace cannot create new requests */ snd_card_disconnect(ua->card); /* make sure that there are no pending USB requests */ __list_for_each(midi, &ua->midi_list) snd_usbmidi_disconnect(midi); abort_alsa_playback(ua); abort_alsa_capture(ua); mutex_lock(&ua->mutex); stop_usb_playback(ua); stop_usb_capture(ua); mutex_unlock(&ua->mutex); free_usb_related_resources(ua, interface); devices_used &= ~(1 << ua->card_index); snd_card_free_when_closed(ua->card); mutex_unlock(&devices_mutex); } static struct usb_device_id ua101_ids[] = { { USB_DEVICE(0x0582, 0x0044) }, /* UA-1000 high speed */ { USB_DEVICE(0x0582, 0x007d) }, /* UA-101 high speed */ { USB_DEVICE(0x0582, 0x008d) }, /* 
UA-101 full speed */ { } }; MODULE_DEVICE_TABLE(usb, ua101_ids); static struct usb_driver ua101_driver = { .name = "snd-ua101", .id_table = ua101_ids, .probe = ua101_probe, .disconnect = ua101_disconnect, #if 0 .suspend = ua101_suspend, .resume = ua101_resume, #endif }; module_usb_driver(ua101_driver);
gpl-2.0
TeamExodus/kernel_yu_tomato
drivers/tty/serial/sunsab.c
1208
29895
/* sunsab.c: ASYNC Driver for the SIEMENS SAB82532 DUSCC. * * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 2002, 2006 David S. Miller (davem@davemloft.net) * * Rewrote buffer handling to use CIRC(Circular Buffer) macros. * Maxim Krasnyanskiy <maxk@qualcomm.com> * * Fixed to use tty_get_baud_rate, and to allow for arbitrary baud * rates to be programmed into the UART. Also eliminated a lot of * duplicated code in the console setup. * Theodore Ts'o <tytso@mit.edu>, 2001-Oct-12 * * Ported to new 2.5.x UART layer. * David S. Miller <davem@davemloft.net> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/major.h> #include <linux/string.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/circ_buf.h> #include <linux/serial.h> #include <linux/sysrq.h> #include <linux/console.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/of_device.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/prom.h> #include <asm/setup.h> #if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/serial_core.h> #include <linux/sunserialcore.h> #include "sunsab.h" struct uart_sunsab_port { struct uart_port port; /* Generic UART port */ union sab82532_async_regs __iomem *regs; /* Chip registers */ unsigned long irqflags; /* IRQ state flags */ int dsr; /* Current DSR state */ unsigned int cec_timeout; /* Chip poll timeout... 
*/ unsigned int tec_timeout; /* likewise */ unsigned char interrupt_mask0;/* ISR0 masking */ unsigned char interrupt_mask1;/* ISR1 masking */ unsigned char pvr_dtr_bit; /* Which PVR bit is DTR */ unsigned char pvr_dsr_bit; /* Which PVR bit is DSR */ unsigned int gis_shift; int type; /* SAB82532 version */ /* Setting configuration bits while the transmitter is active * can cause garbage characters to get emitted by the chip. * Therefore, we cache such writes here and do the real register * write the next time the transmitter becomes idle. */ unsigned int cached_ebrg; unsigned char cached_mode; unsigned char cached_pvr; unsigned char cached_dafo; }; /* * This assumes you have a 29.4912 MHz clock for your UART. */ #define SAB_BASE_BAUD ( 29491200 / 16 ) static char *sab82532_version[16] = { "V1.0", "V2.0", "V3.2", "V(0x03)", "V(0x04)", "V(0x05)", "V(0x06)", "V(0x07)", "V(0x08)", "V(0x09)", "V(0x0a)", "V(0x0b)", "V(0x0c)", "V(0x0d)", "V(0x0e)", "V(0x0f)" }; #define SAB82532_MAX_TEC_TIMEOUT 200000 /* 1 character time (at 50 baud) */ #define SAB82532_MAX_CEC_TIMEOUT 50000 /* 2.5 TX CLKs (at 50 baud) */ #define SAB82532_RECV_FIFO_SIZE 32 /* Standard async fifo sizes */ #define SAB82532_XMIT_FIFO_SIZE 32 static __inline__ void sunsab_tec_wait(struct uart_sunsab_port *up) { int timeout = up->tec_timeout; while ((readb(&up->regs->r.star) & SAB82532_STAR_TEC) && --timeout) udelay(1); } static __inline__ void sunsab_cec_wait(struct uart_sunsab_port *up) { int timeout = up->cec_timeout; while ((readb(&up->regs->r.star) & SAB82532_STAR_CEC) && --timeout) udelay(1); } static struct tty_port * receive_chars(struct uart_sunsab_port *up, union sab82532_irq_status *stat) { struct tty_port *port = NULL; unsigned char buf[32]; int saw_console_brk = 0; int free_fifo = 0; int count = 0; int i; if (up->port.state != NULL) /* Unopened serial console */ port = &up->port.state->port; /* Read number of BYTES (Character + Status) available. 
*/ if (stat->sreg.isr0 & SAB82532_ISR0_RPF) { count = SAB82532_RECV_FIFO_SIZE; free_fifo++; } if (stat->sreg.isr0 & SAB82532_ISR0_TCD) { count = readb(&up->regs->r.rbcl) & (SAB82532_RECV_FIFO_SIZE - 1); free_fifo++; } /* Issue a FIFO read command in case we where idle. */ if (stat->sreg.isr0 & SAB82532_ISR0_TIME) { sunsab_cec_wait(up); writeb(SAB82532_CMDR_RFRD, &up->regs->w.cmdr); return port; } if (stat->sreg.isr0 & SAB82532_ISR0_RFO) free_fifo++; /* Read the FIFO. */ for (i = 0; i < count; i++) buf[i] = readb(&up->regs->r.rfifo[i]); /* Issue Receive Message Complete command. */ if (free_fifo) { sunsab_cec_wait(up); writeb(SAB82532_CMDR_RMC, &up->regs->w.cmdr); } /* Count may be zero for BRK, so we check for it here */ if ((stat->sreg.isr1 & SAB82532_ISR1_BRK) && (up->port.line == up->port.cons->index)) saw_console_brk = 1; for (i = 0; i < count; i++) { unsigned char ch = buf[i], flag; flag = TTY_NORMAL; up->port.icount.rx++; if (unlikely(stat->sreg.isr0 & (SAB82532_ISR0_PERR | SAB82532_ISR0_FERR | SAB82532_ISR0_RFO)) || unlikely(stat->sreg.isr1 & SAB82532_ISR1_BRK)) { /* * For statistics only */ if (stat->sreg.isr1 & SAB82532_ISR1_BRK) { stat->sreg.isr0 &= ~(SAB82532_ISR0_PERR | SAB82532_ISR0_FERR); up->port.icount.brk++; /* * We do the SysRQ and SAK checking * here because otherwise the break * may get masked by ignore_status_mask * or read_status_mask. */ if (uart_handle_break(&up->port)) continue; } else if (stat->sreg.isr0 & SAB82532_ISR0_PERR) up->port.icount.parity++; else if (stat->sreg.isr0 & SAB82532_ISR0_FERR) up->port.icount.frame++; if (stat->sreg.isr0 & SAB82532_ISR0_RFO) up->port.icount.overrun++; /* * Mask off conditions which should be ingored. 
*/ stat->sreg.isr0 &= (up->port.read_status_mask & 0xff); stat->sreg.isr1 &= ((up->port.read_status_mask >> 8) & 0xff); if (stat->sreg.isr1 & SAB82532_ISR1_BRK) { flag = TTY_BREAK; } else if (stat->sreg.isr0 & SAB82532_ISR0_PERR) flag = TTY_PARITY; else if (stat->sreg.isr0 & SAB82532_ISR0_FERR) flag = TTY_FRAME; } if (uart_handle_sysrq_char(&up->port, ch) || !port) continue; if ((stat->sreg.isr0 & (up->port.ignore_status_mask & 0xff)) == 0 && (stat->sreg.isr1 & ((up->port.ignore_status_mask >> 8) & 0xff)) == 0) tty_insert_flip_char(port, ch, flag); if (stat->sreg.isr0 & SAB82532_ISR0_RFO) tty_insert_flip_char(port, 0, TTY_OVERRUN); } if (saw_console_brk) sun_do_break(); return port; } static void sunsab_stop_tx(struct uart_port *); static void sunsab_tx_idle(struct uart_sunsab_port *); static void transmit_chars(struct uart_sunsab_port *up, union sab82532_irq_status *stat) { struct circ_buf *xmit = &up->port.state->xmit; int i; if (stat->sreg.isr1 & SAB82532_ISR1_ALLS) { up->interrupt_mask1 |= SAB82532_IMR1_ALLS; writeb(up->interrupt_mask1, &up->regs->w.imr1); set_bit(SAB82532_ALLS, &up->irqflags); } #if 0 /* bde@nwlink.com says this check causes problems */ if (!(stat->sreg.isr1 & SAB82532_ISR1_XPR)) return; #endif if (!(readb(&up->regs->r.star) & SAB82532_STAR_XFW)) return; set_bit(SAB82532_XPR, &up->irqflags); sunsab_tx_idle(up); if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { up->interrupt_mask1 |= SAB82532_IMR1_XPR; writeb(up->interrupt_mask1, &up->regs->w.imr1); return; } up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR); writeb(up->interrupt_mask1, &up->regs->w.imr1); clear_bit(SAB82532_ALLS, &up->irqflags); /* Stuff 32 bytes into Transmit FIFO. */ clear_bit(SAB82532_XPR, &up->irqflags); for (i = 0; i < up->port.fifosize; i++) { writeb(xmit->buf[xmit->tail], &up->regs->w.xfifo[i]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); up->port.icount.tx++; if (uart_circ_empty(xmit)) break; } /* Issue a Transmit Frame command. 
*/ sunsab_cec_wait(up); writeb(SAB82532_CMDR_XF, &up->regs->w.cmdr); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&up->port); if (uart_circ_empty(xmit)) sunsab_stop_tx(&up->port); } static void check_status(struct uart_sunsab_port *up, union sab82532_irq_status *stat) { if (stat->sreg.isr0 & SAB82532_ISR0_CDSC) uart_handle_dcd_change(&up->port, !(readb(&up->regs->r.vstr) & SAB82532_VSTR_CD)); if (stat->sreg.isr1 & SAB82532_ISR1_CSC) uart_handle_cts_change(&up->port, (readb(&up->regs->r.star) & SAB82532_STAR_CTS)); if ((readb(&up->regs->r.pvr) & up->pvr_dsr_bit) ^ up->dsr) { up->dsr = (readb(&up->regs->r.pvr) & up->pvr_dsr_bit) ? 0 : 1; up->port.icount.dsr++; } wake_up_interruptible(&up->port.state->port.delta_msr_wait); } static irqreturn_t sunsab_interrupt(int irq, void *dev_id) { struct uart_sunsab_port *up = dev_id; struct tty_port *port = NULL; union sab82532_irq_status status; unsigned long flags; unsigned char gis; spin_lock_irqsave(&up->port.lock, flags); status.stat = 0; gis = readb(&up->regs->r.gis) >> up->gis_shift; if (gis & 1) status.sreg.isr0 = readb(&up->regs->r.isr0); if (gis & 2) status.sreg.isr1 = readb(&up->regs->r.isr1); if (status.stat) { if ((status.sreg.isr0 & (SAB82532_ISR0_TCD | SAB82532_ISR0_TIME | SAB82532_ISR0_RFO | SAB82532_ISR0_RPF)) || (status.sreg.isr1 & SAB82532_ISR1_BRK)) port = receive_chars(up, &status); if ((status.sreg.isr0 & SAB82532_ISR0_CDSC) || (status.sreg.isr1 & SAB82532_ISR1_CSC)) check_status(up, &status); if (status.sreg.isr1 & (SAB82532_ISR1_ALLS | SAB82532_ISR1_XPR)) transmit_chars(up, &status); } spin_unlock_irqrestore(&up->port.lock, flags); if (port) tty_flip_buffer_push(port); return IRQ_HANDLED; } /* port->lock is not held. */ static unsigned int sunsab_tx_empty(struct uart_port *port) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; int ret; /* Do not need a lock for a state test like this. 
*/ if (test_bit(SAB82532_ALLS, &up->irqflags)) ret = TIOCSER_TEMT; else ret = 0; return ret; } /* port->lock held by caller. */ static void sunsab_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; if (mctrl & TIOCM_RTS) { up->cached_mode &= ~SAB82532_MODE_FRTS; up->cached_mode |= SAB82532_MODE_RTS; } else { up->cached_mode |= (SAB82532_MODE_FRTS | SAB82532_MODE_RTS); } if (mctrl & TIOCM_DTR) { up->cached_pvr &= ~(up->pvr_dtr_bit); } else { up->cached_pvr |= up->pvr_dtr_bit; } set_bit(SAB82532_REGS_PENDING, &up->irqflags); if (test_bit(SAB82532_XPR, &up->irqflags)) sunsab_tx_idle(up); } /* port->lock is held by caller and interrupts are disabled. */ static unsigned int sunsab_get_mctrl(struct uart_port *port) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; unsigned char val; unsigned int result; result = 0; val = readb(&up->regs->r.pvr); result |= (val & up->pvr_dsr_bit) ? 0 : TIOCM_DSR; val = readb(&up->regs->r.vstr); result |= (val & SAB82532_VSTR_CD) ? 0 : TIOCM_CAR; val = readb(&up->regs->r.star); result |= (val & SAB82532_STAR_CTS) ? TIOCM_CTS : 0; return result; } /* port->lock held by caller. */ static void sunsab_stop_tx(struct uart_port *port) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; up->interrupt_mask1 |= SAB82532_IMR1_XPR; writeb(up->interrupt_mask1, &up->regs->w.imr1); } /* port->lock held by caller. */ static void sunsab_tx_idle(struct uart_sunsab_port *up) { if (test_bit(SAB82532_REGS_PENDING, &up->irqflags)) { u8 tmp; clear_bit(SAB82532_REGS_PENDING, &up->irqflags); writeb(up->cached_mode, &up->regs->rw.mode); writeb(up->cached_pvr, &up->regs->rw.pvr); writeb(up->cached_dafo, &up->regs->w.dafo); writeb(up->cached_ebrg & 0xff, &up->regs->w.bgr); tmp = readb(&up->regs->rw.ccr2); tmp &= ~0xc0; tmp |= (up->cached_ebrg >> 2) & 0xc0; writeb(tmp, &up->regs->rw.ccr2); } } /* port->lock held by caller. 
*/ static void sunsab_start_tx(struct uart_port *port) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; struct circ_buf *xmit = &up->port.state->xmit; int i; up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR); writeb(up->interrupt_mask1, &up->regs->w.imr1); if (!test_bit(SAB82532_XPR, &up->irqflags)) return; clear_bit(SAB82532_ALLS, &up->irqflags); clear_bit(SAB82532_XPR, &up->irqflags); for (i = 0; i < up->port.fifosize; i++) { writeb(xmit->buf[xmit->tail], &up->regs->w.xfifo[i]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); up->port.icount.tx++; if (uart_circ_empty(xmit)) break; } /* Issue a Transmit Frame command. */ sunsab_cec_wait(up); writeb(SAB82532_CMDR_XF, &up->regs->w.cmdr); } /* port->lock is not held. */ static void sunsab_send_xchar(struct uart_port *port, char ch) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; unsigned long flags; spin_lock_irqsave(&up->port.lock, flags); sunsab_tec_wait(up); writeb(ch, &up->regs->w.tic); spin_unlock_irqrestore(&up->port.lock, flags); } /* port->lock held by caller. */ static void sunsab_stop_rx(struct uart_port *port) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; up->interrupt_mask0 |= SAB82532_IMR0_TCD; writeb(up->interrupt_mask1, &up->regs->w.imr0); } /* port->lock held by caller. */ static void sunsab_enable_ms(struct uart_port *port) { /* For now we always receive these interrupts. */ } /* port->lock is not held. */ static void sunsab_break_ctl(struct uart_port *port, int break_state) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; unsigned long flags; unsigned char val; spin_lock_irqsave(&up->port.lock, flags); val = up->cached_dafo; if (break_state) val |= SAB82532_DAFO_XBRK; else val &= ~SAB82532_DAFO_XBRK; up->cached_dafo = val; set_bit(SAB82532_REGS_PENDING, &up->irqflags); if (test_bit(SAB82532_XPR, &up->irqflags)) sunsab_tx_idle(up); spin_unlock_irqrestore(&up->port.lock, flags); } /* port->lock is not held. 
*/ static int sunsab_startup(struct uart_port *port) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; unsigned long flags; unsigned char tmp; int err = request_irq(up->port.irq, sunsab_interrupt, IRQF_SHARED, "sab", up); if (err) return err; spin_lock_irqsave(&up->port.lock, flags); /* * Wait for any commands or immediate characters */ sunsab_cec_wait(up); sunsab_tec_wait(up); /* * Clear the FIFO buffers. */ writeb(SAB82532_CMDR_RRES, &up->regs->w.cmdr); sunsab_cec_wait(up); writeb(SAB82532_CMDR_XRES, &up->regs->w.cmdr); /* * Clear the interrupt registers. */ (void) readb(&up->regs->r.isr0); (void) readb(&up->regs->r.isr1); /* * Now, initialize the UART */ writeb(0, &up->regs->w.ccr0); /* power-down */ writeb(SAB82532_CCR0_MCE | SAB82532_CCR0_SC_NRZ | SAB82532_CCR0_SM_ASYNC, &up->regs->w.ccr0); writeb(SAB82532_CCR1_ODS | SAB82532_CCR1_BCR | 7, &up->regs->w.ccr1); writeb(SAB82532_CCR2_BDF | SAB82532_CCR2_SSEL | SAB82532_CCR2_TOE, &up->regs->w.ccr2); writeb(0, &up->regs->w.ccr3); writeb(SAB82532_CCR4_MCK4 | SAB82532_CCR4_EBRG, &up->regs->w.ccr4); up->cached_mode = (SAB82532_MODE_RTS | SAB82532_MODE_FCTS | SAB82532_MODE_RAC); writeb(up->cached_mode, &up->regs->w.mode); writeb(SAB82532_RFC_DPS|SAB82532_RFC_RFTH_32, &up->regs->w.rfc); tmp = readb(&up->regs->rw.ccr0); tmp |= SAB82532_CCR0_PU; /* power-up */ writeb(tmp, &up->regs->rw.ccr0); /* * Finally, enable interrupts */ up->interrupt_mask0 = (SAB82532_IMR0_PERR | SAB82532_IMR0_FERR | SAB82532_IMR0_PLLA); writeb(up->interrupt_mask0, &up->regs->w.imr0); up->interrupt_mask1 = (SAB82532_IMR1_BRKT | SAB82532_IMR1_ALLS | SAB82532_IMR1_XOFF | SAB82532_IMR1_TIN | SAB82532_IMR1_CSC | SAB82532_IMR1_XON | SAB82532_IMR1_XPR); writeb(up->interrupt_mask1, &up->regs->w.imr1); set_bit(SAB82532_ALLS, &up->irqflags); set_bit(SAB82532_XPR, &up->irqflags); spin_unlock_irqrestore(&up->port.lock, flags); return 0; } /* port->lock is not held. 
*/
static void sunsab_shutdown(struct uart_port *port)
{
	struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
	unsigned long flags;

	spin_lock_irqsave(&up->port.lock, flags);

	/* Disable Interrupts: mask every bit in both interrupt mask regs. */
	up->interrupt_mask0 = 0xff;
	writeb(up->interrupt_mask0, &up->regs->w.imr0);
	up->interrupt_mask1 = 0xff;
	writeb(up->interrupt_mask1, &up->regs->w.imr1);

	/* Disable break condition (clear XBRK in the data format register). */
	up->cached_dafo = readb(&up->regs->rw.dafo);
	up->cached_dafo &= ~SAB82532_DAFO_XBRK;
	writeb(up->cached_dafo, &up->regs->rw.dafo);

	/* Disable Receiver (clear receiver-active bit in MODE). */
	up->cached_mode &= ~SAB82532_MODE_RAC;
	writeb(up->cached_mode, &up->regs->rw.mode);

	/*
	 * XXX FIXME
	 *
	 * If the chip is powered down here the system hangs/crashes during
	 * reboot or shutdown.  This needs to be investigated further,
	 * similar behaviour occurs in 2.4 when the driver is configured
	 * as a module only.  One hint may be that data is sometimes
	 * transmitted at 9600 baud during shutdown (regardless of the
	 * speed the chip was configured for when the port was open).
	 */
#if 0
	/* Power Down */
	tmp = readb(&up->regs->rw.ccr0);
	tmp &= ~SAB82532_CCR0_PU;
	writeb(tmp, &up->regs->rw.ccr0);
#endif

	spin_unlock_irqrestore(&up->port.lock, flags);
	/* Release the (shared) IRQ acquired in sunsab_startup(). */
	free_irq(up->port.irq, up);
}

/*
 * This is used to figure out the divisor speeds.
 *
 * The formula is:    Baud = SAB_BASE_BAUD / ((N + 1) * (1 << M)),
 *
 * with               0 <= N < 64 and 0 <= M < 16
 *
 * Computes the (N, M) pair for the requested baud rate; N is returned
 * already decremented (i.e. the raw register value), M as-is.
 * baud == 0 yields N = M = 0.
 */
static void calc_ebrg(int baud, int *n_ret, int *m_ret)
{
	int	n, m;

	if (baud == 0) {
		*n_ret = 0;
		*m_ret = 0;
		return;
	}

	/*
	 * We scale numbers by 10 so that we get better accuracy
	 * without having to use floating point.  Here we increment m
	 * until n is within the valid range.
	 */
	n = (SAB_BASE_BAUD * 10) / baud;
	m = 0;
	while (n >= 640) {
		n = n / 2;
		m++;
	}
	n = (n+5) / 10;
	/*
	 * We try very hard to avoid speeds with M == 0 since they may
	 * not work correctly for XTAL frequences above 10 MHz.
*/ if ((m == 0) && ((n & 1) == 0)) { n = n / 2; m++; } *n_ret = n - 1; *m_ret = m; } /* Internal routine, port->lock is held and local interrupts are disabled. */ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cflag, unsigned int iflag, unsigned int baud, unsigned int quot) { unsigned char dafo; int bits, n, m; /* Byte size and parity */ switch (cflag & CSIZE) { case CS5: dafo = SAB82532_DAFO_CHL5; bits = 7; break; case CS6: dafo = SAB82532_DAFO_CHL6; bits = 8; break; case CS7: dafo = SAB82532_DAFO_CHL7; bits = 9; break; case CS8: dafo = SAB82532_DAFO_CHL8; bits = 10; break; /* Never happens, but GCC is too dumb to figure it out */ default: dafo = SAB82532_DAFO_CHL5; bits = 7; break; } if (cflag & CSTOPB) { dafo |= SAB82532_DAFO_STOP; bits++; } if (cflag & PARENB) { dafo |= SAB82532_DAFO_PARE; bits++; } if (cflag & PARODD) { dafo |= SAB82532_DAFO_PAR_ODD; } else { dafo |= SAB82532_DAFO_PAR_EVEN; } up->cached_dafo = dafo; calc_ebrg(baud, &n, &m); up->cached_ebrg = n | (m << 6); up->tec_timeout = (10 * 1000000) / baud; up->cec_timeout = up->tec_timeout >> 2; /* CTS flow control flags */ /* We encode read_status_mask and ignore_status_mask like so: * * --------------------- * | ... | ISR1 | ISR0 | * --------------------- * .. 
15 8 7 0 */ up->port.read_status_mask = (SAB82532_ISR0_TCD | SAB82532_ISR0_TIME | SAB82532_ISR0_RFO | SAB82532_ISR0_RPF | SAB82532_ISR0_CDSC); up->port.read_status_mask |= (SAB82532_ISR1_CSC | SAB82532_ISR1_ALLS | SAB82532_ISR1_XPR) << 8; if (iflag & INPCK) up->port.read_status_mask |= (SAB82532_ISR0_PERR | SAB82532_ISR0_FERR); if (iflag & (BRKINT | PARMRK)) up->port.read_status_mask |= (SAB82532_ISR1_BRK << 8); /* * Characteres to ignore */ up->port.ignore_status_mask = 0; if (iflag & IGNPAR) up->port.ignore_status_mask |= (SAB82532_ISR0_PERR | SAB82532_ISR0_FERR); if (iflag & IGNBRK) { up->port.ignore_status_mask |= (SAB82532_ISR1_BRK << 8); /* * If we're ignoring parity and break indicators, * ignore overruns too (for real raw support). */ if (iflag & IGNPAR) up->port.ignore_status_mask |= SAB82532_ISR0_RFO; } /* * ignore all characters if CREAD is not set */ if ((cflag & CREAD) == 0) up->port.ignore_status_mask |= (SAB82532_ISR0_RPF | SAB82532_ISR0_TCD); uart_update_timeout(&up->port, cflag, (up->port.uartclk / (16 * quot))); /* Now schedule a register update when the chip's * transmitter is idle. */ up->cached_mode |= SAB82532_MODE_RAC; set_bit(SAB82532_REGS_PENDING, &up->irqflags); if (test_bit(SAB82532_XPR, &up->irqflags)) sunsab_tx_idle(up); } /* port->lock is not held. 
*/ static void sunsab_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; unsigned long flags; unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000); unsigned int quot = uart_get_divisor(port, baud); spin_lock_irqsave(&up->port.lock, flags); sunsab_convert_to_sab(up, termios->c_cflag, termios->c_iflag, baud, quot); spin_unlock_irqrestore(&up->port.lock, flags); } static const char *sunsab_type(struct uart_port *port) { struct uart_sunsab_port *up = (void *)port; static char buf[36]; sprintf(buf, "SAB82532 %s", sab82532_version[up->type]); return buf; } static void sunsab_release_port(struct uart_port *port) { } static int sunsab_request_port(struct uart_port *port) { return 0; } static void sunsab_config_port(struct uart_port *port, int flags) { } static int sunsab_verify_port(struct uart_port *port, struct serial_struct *ser) { return -EINVAL; } static struct uart_ops sunsab_pops = { .tx_empty = sunsab_tx_empty, .set_mctrl = sunsab_set_mctrl, .get_mctrl = sunsab_get_mctrl, .stop_tx = sunsab_stop_tx, .start_tx = sunsab_start_tx, .send_xchar = sunsab_send_xchar, .stop_rx = sunsab_stop_rx, .enable_ms = sunsab_enable_ms, .break_ctl = sunsab_break_ctl, .startup = sunsab_startup, .shutdown = sunsab_shutdown, .set_termios = sunsab_set_termios, .type = sunsab_type, .release_port = sunsab_release_port, .request_port = sunsab_request_port, .config_port = sunsab_config_port, .verify_port = sunsab_verify_port, }; static struct uart_driver sunsab_reg = { .owner = THIS_MODULE, .driver_name = "sunsab", .dev_name = "ttyS", .major = TTY_MAJOR, }; static struct uart_sunsab_port *sunsab_ports; #ifdef CONFIG_SERIAL_SUNSAB_CONSOLE static void sunsab_console_putchar(struct uart_port *port, int c) { struct uart_sunsab_port *up = (struct uart_sunsab_port *)port; sunsab_tec_wait(up); writeb(c, &up->regs->w.tic); } static void sunsab_console_write(struct console *con, const 
char *s, unsigned n) { struct uart_sunsab_port *up = &sunsab_ports[con->index]; unsigned long flags; int locked = 1; local_irq_save(flags); if (up->port.sysrq) { locked = 0; } else if (oops_in_progress) { locked = spin_trylock(&up->port.lock); } else spin_lock(&up->port.lock); uart_console_write(&up->port, s, n, sunsab_console_putchar); sunsab_tec_wait(up); if (locked) spin_unlock(&up->port.lock); local_irq_restore(flags); } static int sunsab_console_setup(struct console *con, char *options) { struct uart_sunsab_port *up = &sunsab_ports[con->index]; unsigned long flags; unsigned int baud, quot; /* * The console framework calls us for each and every port * registered. Defer the console setup until the requested * port has been properly discovered. A bit of a hack, * though... */ if (up->port.type != PORT_SUNSAB) return -1; printk("Console: ttyS%d (SAB82532)\n", (sunsab_reg.minor - 64) + con->index); sunserial_console_termios(con, up->port.dev->of_node); switch (con->cflag & CBAUD) { case B150: baud = 150; break; case B300: baud = 300; break; case B600: baud = 600; break; case B1200: baud = 1200; break; case B2400: baud = 2400; break; case B4800: baud = 4800; break; default: case B9600: baud = 9600; break; case B19200: baud = 19200; break; case B38400: baud = 38400; break; case B57600: baud = 57600; break; case B115200: baud = 115200; break; case B230400: baud = 230400; break; case B460800: baud = 460800; break; }; /* * Temporary fix. 
*/ spin_lock_init(&up->port.lock); /* * Initialize the hardware */ sunsab_startup(&up->port); spin_lock_irqsave(&up->port.lock, flags); /* * Finally, enable interrupts */ up->interrupt_mask0 = SAB82532_IMR0_PERR | SAB82532_IMR0_FERR | SAB82532_IMR0_PLLA | SAB82532_IMR0_CDSC; writeb(up->interrupt_mask0, &up->regs->w.imr0); up->interrupt_mask1 = SAB82532_IMR1_BRKT | SAB82532_IMR1_ALLS | SAB82532_IMR1_XOFF | SAB82532_IMR1_TIN | SAB82532_IMR1_CSC | SAB82532_IMR1_XON | SAB82532_IMR1_XPR; writeb(up->interrupt_mask1, &up->regs->w.imr1); quot = uart_get_divisor(&up->port, baud); sunsab_convert_to_sab(up, con->cflag, 0, baud, quot); sunsab_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS); spin_unlock_irqrestore(&up->port.lock, flags); return 0; } static struct console sunsab_console = { .name = "ttyS", .write = sunsab_console_write, .device = uart_console_device, .setup = sunsab_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &sunsab_reg, }; static inline struct console *SUNSAB_CONSOLE(void) { return &sunsab_console; } #else #define SUNSAB_CONSOLE() (NULL) #define sunsab_console_init() do { } while (0) #endif static int sunsab_init_one(struct uart_sunsab_port *up, struct platform_device *op, unsigned long offset, int line) { up->port.line = line; up->port.dev = &op->dev; up->port.mapbase = op->resource[0].start + offset; up->port.membase = of_ioremap(&op->resource[0], offset, sizeof(union sab82532_async_regs), "sab"); if (!up->port.membase) return -ENOMEM; up->regs = (union sab82532_async_regs __iomem *) up->port.membase; up->port.irq = op->archdata.irqs[0]; up->port.fifosize = SAB82532_XMIT_FIFO_SIZE; up->port.iotype = UPIO_MEM; writeb(SAB82532_IPC_IC_ACT_LOW, &up->regs->w.ipc); up->port.ops = &sunsab_pops; up->port.type = PORT_SUNSAB; up->port.uartclk = SAB_BASE_BAUD; up->type = readb(&up->regs->r.vstr) & 0x0f; writeb(~((1 << 1) | (1 << 2) | (1 << 4)), &up->regs->w.pcr); writeb(0xff, &up->regs->w.pim); if ((up->port.line & 0x1) == 0) { up->pvr_dsr_bit = (1 << 0); 
up->pvr_dtr_bit = (1 << 1); up->gis_shift = 2; } else { up->pvr_dsr_bit = (1 << 3); up->pvr_dtr_bit = (1 << 2); up->gis_shift = 0; } up->cached_pvr = (1 << 1) | (1 << 2) | (1 << 4); writeb(up->cached_pvr, &up->regs->w.pvr); up->cached_mode = readb(&up->regs->rw.mode); up->cached_mode |= SAB82532_MODE_FRTS; writeb(up->cached_mode, &up->regs->rw.mode); up->cached_mode |= SAB82532_MODE_RTS; writeb(up->cached_mode, &up->regs->rw.mode); up->tec_timeout = SAB82532_MAX_TEC_TIMEOUT; up->cec_timeout = SAB82532_MAX_CEC_TIMEOUT; return 0; } static int sab_probe(struct platform_device *op) { static int inst; struct uart_sunsab_port *up; int err; up = &sunsab_ports[inst * 2]; err = sunsab_init_one(&up[0], op, 0, (inst * 2) + 0); if (err) goto out; err = sunsab_init_one(&up[1], op, sizeof(union sab82532_async_regs), (inst * 2) + 1); if (err) goto out1; sunserial_console_match(SUNSAB_CONSOLE(), op->dev.of_node, &sunsab_reg, up[0].port.line, false); sunserial_console_match(SUNSAB_CONSOLE(), op->dev.of_node, &sunsab_reg, up[1].port.line, false); err = uart_add_one_port(&sunsab_reg, &up[0].port); if (err) goto out2; err = uart_add_one_port(&sunsab_reg, &up[1].port); if (err) goto out3; dev_set_drvdata(&op->dev, &up[0]); inst++; return 0; out3: uart_remove_one_port(&sunsab_reg, &up[0].port); out2: of_iounmap(&op->resource[0], up[1].port.membase, sizeof(union sab82532_async_regs)); out1: of_iounmap(&op->resource[0], up[0].port.membase, sizeof(union sab82532_async_regs)); out: return err; } static int sab_remove(struct platform_device *op) { struct uart_sunsab_port *up = dev_get_drvdata(&op->dev); uart_remove_one_port(&sunsab_reg, &up[1].port); uart_remove_one_port(&sunsab_reg, &up[0].port); of_iounmap(&op->resource[0], up[1].port.membase, sizeof(union sab82532_async_regs)); of_iounmap(&op->resource[0], up[0].port.membase, sizeof(union sab82532_async_regs)); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id sab_match[] = { { .name = "se", }, { .name = 
"serial", .compatible = "sab82532", }, {}, }; MODULE_DEVICE_TABLE(of, sab_match); static struct platform_driver sab_driver = { .driver = { .name = "sab", .owner = THIS_MODULE, .of_match_table = sab_match, }, .probe = sab_probe, .remove = sab_remove, }; static int __init sunsab_init(void) { struct device_node *dp; int err; int num_channels = 0; for_each_node_by_name(dp, "se") num_channels += 2; for_each_node_by_name(dp, "serial") { if (of_device_is_compatible(dp, "sab82532")) num_channels += 2; } if (num_channels) { sunsab_ports = kzalloc(sizeof(struct uart_sunsab_port) * num_channels, GFP_KERNEL); if (!sunsab_ports) return -ENOMEM; err = sunserial_register_minors(&sunsab_reg, num_channels); if (err) { kfree(sunsab_ports); sunsab_ports = NULL; return err; } } return platform_driver_register(&sab_driver); } static void __exit sunsab_exit(void) { platform_driver_unregister(&sab_driver); if (sunsab_reg.nr) { sunserial_unregister_minors(&sunsab_reg, sunsab_reg.nr); } kfree(sunsab_ports); sunsab_ports = NULL; } module_init(sunsab_init); module_exit(sunsab_exit); MODULE_AUTHOR("Eddie C. Dost and David S. Miller"); MODULE_DESCRIPTION("Sun SAB82532 serial port driver"); MODULE_LICENSE("GPL");
gpl-2.0
zhangshenglin/linux-2.6.32.2
drivers/media/video/saa7164/saa7164-i2c.c
1464
3665
/* * Driver for the NXP SAA7164 PCIe bridge * * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <asm/io.h> #include "saa7164.h" static int i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) { struct saa7164_i2c *bus = i2c_adap->algo_data; struct saa7164_dev *dev = bus->dev; int i, retval = 0; dprintk(DBGLVL_I2C, "%s(num = %d)\n", __func__, num); for (i = 0 ; i < num; i++) { dprintk(DBGLVL_I2C, "%s(num = %d) addr = 0x%02x len = 0x%x\n", __func__, num, msgs[i].addr, msgs[i].len); if (msgs[i].flags & I2C_M_RD) { /* Unsupported - Yet*/ printk(KERN_ERR "%s() Unsupported - Yet\n", __func__); continue; } else if (i + 1 < num && (msgs[i + 1].flags & I2C_M_RD) && msgs[i].addr == msgs[i + 1].addr) { /* write then read from same address */ retval = saa7164_api_i2c_read(bus, msgs[i].addr, msgs[i].len, msgs[i].buf, msgs[i+1].len, msgs[i+1].buf ); i++; if (retval < 0) goto err; } else { /* write */ retval = saa7164_api_i2c_write(bus, msgs[i].addr, msgs[i].len, msgs[i].buf); } if (retval < 0) goto err; } return num; err: return retval; } void saa7164_call_i2c_clients(struct saa7164_i2c *bus, unsigned int cmd, void *arg) { if (bus->i2c_rc != 0) return; 
i2c_clients_command(&bus->i2c_adap, cmd, arg); } static u32 saa7164_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C; } static struct i2c_algorithm saa7164_i2c_algo_template = { .master_xfer = i2c_xfer, .functionality = saa7164_functionality, }; /* ----------------------------------------------------------------------- */ static struct i2c_adapter saa7164_i2c_adap_template = { .name = "saa7164", .owner = THIS_MODULE, .algo = &saa7164_i2c_algo_template, }; static struct i2c_client saa7164_i2c_client_template = { .name = "saa7164 internal", }; int saa7164_i2c_register(struct saa7164_i2c *bus) { struct saa7164_dev *dev = bus->dev; dprintk(DBGLVL_I2C, "%s(bus = %d)\n", __func__, bus->nr); memcpy(&bus->i2c_adap, &saa7164_i2c_adap_template, sizeof(bus->i2c_adap)); memcpy(&bus->i2c_algo, &saa7164_i2c_algo_template, sizeof(bus->i2c_algo)); memcpy(&bus->i2c_client, &saa7164_i2c_client_template, sizeof(bus->i2c_client)); bus->i2c_adap.dev.parent = &dev->pci->dev; strlcpy(bus->i2c_adap.name, bus->dev->name, sizeof(bus->i2c_adap.name)); bus->i2c_algo.data = bus; bus->i2c_adap.algo_data = bus; i2c_set_adapdata(&bus->i2c_adap, bus); i2c_add_adapter(&bus->i2c_adap); bus->i2c_client.adapter = &bus->i2c_adap; if (0 != bus->i2c_rc) printk(KERN_ERR "%s: i2c bus %d register FAILED\n", dev->name, bus->nr); return bus->i2c_rc; } int saa7164_i2c_unregister(struct saa7164_i2c *bus) { i2c_del_adapter(&bus->i2c_adap); return 0; }
gpl-2.0
zanezam/boeffla-kernel-oos-bacon
kernel/audit_tree.c
3512
22232
#include "audit.h" #include <linux/fsnotify_backend.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/kthread.h> #include <linux/slab.h> struct audit_tree; struct audit_chunk; struct audit_tree { atomic_t count; int goner; struct audit_chunk *root; struct list_head chunks; struct list_head rules; struct list_head list; struct list_head same_root; struct rcu_head head; char pathname[]; }; struct audit_chunk { struct list_head hash; struct fsnotify_mark mark; struct list_head trees; /* with root here */ int dead; int count; atomic_long_t refs; struct rcu_head head; struct node { struct list_head list; struct audit_tree *owner; unsigned index; /* index; upper bit indicates 'will prune' */ } owners[]; }; static LIST_HEAD(tree_list); static LIST_HEAD(prune_list); /* * One struct chunk is attached to each inode of interest. * We replace struct chunk on tagging/untagging. * Rules have pointer to struct audit_tree. * Rules have struct list_head rlist forming a list of rules over * the same tree. * References to struct chunk are collected at audit_inode{,_child}() * time and used in AUDIT_TREE rule matching. * These references are dropped at the same time we are calling * audit_free_names(), etc. * * Cyclic lists galore: * tree.chunks anchors chunk.owners[].list hash_lock * tree.rules anchors rule.rlist audit_filter_mutex * chunk.trees anchors tree.same_root hash_lock * chunk.hash is a hash with middle bits of watch.inode as * a hash function. RCU, hash_lock * * tree is refcounted; one reference for "some rules on rules_list refer to * it", one for each chunk with pointer to it. * * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount * of watch contributes 1 to .refs). * * node.index allows to get from node.list to containing chunk. * MSB of that sucker is stolen to mark taggings that we might have to * revert - several operations have very unpleasant cleanup logics and * that makes a difference. Some. 
*/ static struct fsnotify_group *audit_tree_group; static struct audit_tree *alloc_tree(const char *s) { struct audit_tree *tree; tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL); if (tree) { atomic_set(&tree->count, 1); tree->goner = 0; INIT_LIST_HEAD(&tree->chunks); INIT_LIST_HEAD(&tree->rules); INIT_LIST_HEAD(&tree->list); INIT_LIST_HEAD(&tree->same_root); tree->root = NULL; strcpy(tree->pathname, s); } return tree; } static inline void get_tree(struct audit_tree *tree) { atomic_inc(&tree->count); } static inline void put_tree(struct audit_tree *tree) { if (atomic_dec_and_test(&tree->count)) kfree_rcu(tree, head); } /* to avoid bringing the entire thing in audit.h */ const char *audit_tree_path(struct audit_tree *tree) { return tree->pathname; } static void free_chunk(struct audit_chunk *chunk) { int i; for (i = 0; i < chunk->count; i++) { if (chunk->owners[i].owner) put_tree(chunk->owners[i].owner); } kfree(chunk); } void audit_put_chunk(struct audit_chunk *chunk) { if (atomic_long_dec_and_test(&chunk->refs)) free_chunk(chunk); } static void __put_chunk(struct rcu_head *rcu) { struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head); audit_put_chunk(chunk); } static void audit_tree_destroy_watch(struct fsnotify_mark *entry) { struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark); call_rcu(&chunk->head, __put_chunk); } static struct audit_chunk *alloc_chunk(int count) { struct audit_chunk *chunk; size_t size; int i; size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node); chunk = kzalloc(size, GFP_KERNEL); if (!chunk) return NULL; INIT_LIST_HEAD(&chunk->hash); INIT_LIST_HEAD(&chunk->trees); chunk->count = count; atomic_long_set(&chunk->refs, 1); for (i = 0; i < count; i++) { INIT_LIST_HEAD(&chunk->owners[i].list); chunk->owners[i].index = i; } fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch); return chunk; } enum {HASH_SIZE = 128}; static struct list_head 
chunk_hash_heads[HASH_SIZE]; static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock); static inline struct list_head *chunk_hash(const struct inode *inode) { unsigned long n = (unsigned long)inode / L1_CACHE_BYTES; return chunk_hash_heads + n % HASH_SIZE; } /* hash_lock & entry->lock is held by caller */ static void insert_hash(struct audit_chunk *chunk) { struct fsnotify_mark *entry = &chunk->mark; struct list_head *list; if (!entry->i.inode) return; list = chunk_hash(entry->i.inode); list_add_rcu(&chunk->hash, list); } /* called under rcu_read_lock */ struct audit_chunk *audit_tree_lookup(const struct inode *inode) { struct list_head *list = chunk_hash(inode); struct audit_chunk *p; list_for_each_entry_rcu(p, list, hash) { /* mark.inode may have gone NULL, but who cares? */ if (p->mark.i.inode == inode) { atomic_long_inc(&p->refs); return p; } } return NULL; } int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree) { int n; for (n = 0; n < chunk->count; n++) if (chunk->owners[n].owner == tree) return 1; return 0; } /* tagging and untagging inodes with trees */ static struct audit_chunk *find_chunk(struct node *p) { int index = p->index & ~(1U<<31); p -= index; return container_of(p, struct audit_chunk, owners[0]); } static void untag_chunk(struct node *p) { struct audit_chunk *chunk = find_chunk(p); struct fsnotify_mark *entry = &chunk->mark; struct audit_chunk *new = NULL; struct audit_tree *owner; int size = chunk->count - 1; int i, j; fsnotify_get_mark(entry); spin_unlock(&hash_lock); if (size) new = alloc_chunk(size); spin_lock(&entry->lock); if (chunk->dead || !entry->i.inode) { spin_unlock(&entry->lock); if (new) free_chunk(new); goto out; } owner = p->owner; if (!size) { chunk->dead = 1; spin_lock(&hash_lock); list_del_init(&chunk->trees); if (owner->root == chunk) owner->root = NULL; list_del_init(&p->list); list_del_rcu(&chunk->hash); spin_unlock(&hash_lock); spin_unlock(&entry->lock); fsnotify_destroy_mark(entry); 
fsnotify_put_mark(entry); goto out; } if (!new) goto Fallback; fsnotify_duplicate_mark(&new->mark, entry); if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) { free_chunk(new); goto Fallback; } chunk->dead = 1; spin_lock(&hash_lock); list_replace_init(&chunk->trees, &new->trees); if (owner->root == chunk) { list_del_init(&owner->same_root); owner->root = NULL; } for (i = j = 0; j <= size; i++, j++) { struct audit_tree *s; if (&chunk->owners[j] == p) { list_del_init(&p->list); i--; continue; } s = chunk->owners[j].owner; new->owners[i].owner = s; new->owners[i].index = chunk->owners[j].index - j + i; if (!s) /* result of earlier fallback */ continue; get_tree(s); list_replace_init(&chunk->owners[j].list, &new->owners[i].list); } list_replace_rcu(&chunk->hash, &new->hash); list_for_each_entry(owner, &new->trees, same_root) owner->root = new; spin_unlock(&hash_lock); spin_unlock(&entry->lock); fsnotify_destroy_mark(entry); fsnotify_put_mark(entry); goto out; Fallback: // do the best we can spin_lock(&hash_lock); if (owner->root == chunk) { list_del_init(&owner->same_root); owner->root = NULL; } list_del_init(&p->list); p->owner = NULL; put_tree(owner); spin_unlock(&hash_lock); spin_unlock(&entry->lock); out: fsnotify_put_mark(entry); spin_lock(&hash_lock); } static int create_chunk(struct inode *inode, struct audit_tree *tree) { struct fsnotify_mark *entry; struct audit_chunk *chunk = alloc_chunk(1); if (!chunk) return -ENOMEM; entry = &chunk->mark; if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) { free_chunk(chunk); return -ENOSPC; } spin_lock(&entry->lock); spin_lock(&hash_lock); if (tree->goner) { spin_unlock(&hash_lock); chunk->dead = 1; spin_unlock(&entry->lock); fsnotify_destroy_mark(entry); fsnotify_put_mark(entry); return 0; } chunk->owners[0].index = (1U << 31); chunk->owners[0].owner = tree; get_tree(tree); list_add(&chunk->owners[0].list, &tree->chunks); if (!tree->root) { tree->root = chunk; 
list_add(&tree->same_root, &chunk->trees); } insert_hash(chunk); spin_unlock(&hash_lock); spin_unlock(&entry->lock); return 0; } /* the first tagged inode becomes root of tree */ static int tag_chunk(struct inode *inode, struct audit_tree *tree) { struct fsnotify_mark *old_entry, *chunk_entry; struct audit_tree *owner; struct audit_chunk *chunk, *old; struct node *p; int n; old_entry = fsnotify_find_inode_mark(audit_tree_group, inode); if (!old_entry) return create_chunk(inode, tree); old = container_of(old_entry, struct audit_chunk, mark); /* are we already there? */ spin_lock(&hash_lock); for (n = 0; n < old->count; n++) { if (old->owners[n].owner == tree) { spin_unlock(&hash_lock); fsnotify_put_mark(old_entry); return 0; } } spin_unlock(&hash_lock); chunk = alloc_chunk(old->count + 1); if (!chunk) { fsnotify_put_mark(old_entry); return -ENOMEM; } chunk_entry = &chunk->mark; spin_lock(&old_entry->lock); if (!old_entry->i.inode) { /* old_entry is being shot, lets just lie */ spin_unlock(&old_entry->lock); fsnotify_put_mark(old_entry); free_chunk(chunk); return -ENOENT; } fsnotify_duplicate_mark(chunk_entry, old_entry); if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) { spin_unlock(&old_entry->lock); free_chunk(chunk); fsnotify_put_mark(old_entry); return -ENOSPC; } /* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */ spin_lock(&chunk_entry->lock); spin_lock(&hash_lock); /* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */ if (tree->goner) { spin_unlock(&hash_lock); chunk->dead = 1; spin_unlock(&chunk_entry->lock); spin_unlock(&old_entry->lock); fsnotify_destroy_mark(chunk_entry); fsnotify_put_mark(chunk_entry); fsnotify_put_mark(old_entry); return 0; } list_replace_init(&old->trees, &chunk->trees); for (n = 0, p = chunk->owners; n < old->count; n++, p++) { struct audit_tree *s = old->owners[n].owner; p->owner = s; p->index = old->owners[n].index; if 
(!s) /* result of fallback in untag */ continue; get_tree(s); list_replace_init(&old->owners[n].list, &p->list); } p->index = (chunk->count - 1) | (1U<<31); p->owner = tree; get_tree(tree); list_add(&p->list, &tree->chunks); list_replace_rcu(&old->hash, &chunk->hash); list_for_each_entry(owner, &chunk->trees, same_root) owner->root = chunk; old->dead = 1; if (!tree->root) { tree->root = chunk; list_add(&tree->same_root, &chunk->trees); } spin_unlock(&hash_lock); spin_unlock(&chunk_entry->lock); spin_unlock(&old_entry->lock); fsnotify_destroy_mark(old_entry); fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */ fsnotify_put_mark(old_entry); /* and kill it */ return 0; } static void kill_rules(struct audit_tree *tree) { struct audit_krule *rule, *next; struct audit_entry *entry; struct audit_buffer *ab; list_for_each_entry_safe(rule, next, &tree->rules, rlist) { entry = container_of(rule, struct audit_entry, rule); list_del_init(&rule->rlist); if (rule->tree) { /* not a half-baked one */ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE); audit_log_format(ab, "op="); audit_log_string(ab, "remove rule"); audit_log_format(ab, " dir="); audit_log_untrustedstring(ab, rule->tree->pathname); audit_log_key(ab, rule->filterkey); audit_log_format(ab, " list=%d res=1", rule->listnr); audit_log_end(ab); rule->tree = NULL; list_del_rcu(&entry->list); list_del(&entry->rule.list); call_rcu(&entry->rcu, audit_free_rule_rcu); } } } /* * finish killing struct audit_tree */ static void prune_one(struct audit_tree *victim) { spin_lock(&hash_lock); while (!list_empty(&victim->chunks)) { struct node *p; p = list_entry(victim->chunks.next, struct node, list); untag_chunk(p); } spin_unlock(&hash_lock); put_tree(victim); } /* trim the uncommitted chunks from tree */ static void trim_marked(struct audit_tree *tree) { struct list_head *p, *q; spin_lock(&hash_lock); if (tree->goner) { spin_unlock(&hash_lock); return; } /* reorder */ for (p = tree->chunks.next; p != 
&tree->chunks; p = q) { struct node *node = list_entry(p, struct node, list); q = p->next; if (node->index & (1U<<31)) { list_del_init(p); list_add(p, &tree->chunks); } } while (!list_empty(&tree->chunks)) { struct node *node; node = list_entry(tree->chunks.next, struct node, list); /* have we run out of marked? */ if (!(node->index & (1U<<31))) break; untag_chunk(node); } if (!tree->root && !tree->goner) { tree->goner = 1; spin_unlock(&hash_lock); mutex_lock(&audit_filter_mutex); kill_rules(tree); list_del_init(&tree->list); mutex_unlock(&audit_filter_mutex); prune_one(tree); } else { spin_unlock(&hash_lock); } } static void audit_schedule_prune(void); /* called with audit_filter_mutex */ int audit_remove_tree_rule(struct audit_krule *rule) { struct audit_tree *tree; tree = rule->tree; if (tree) { spin_lock(&hash_lock); list_del_init(&rule->rlist); if (list_empty(&tree->rules) && !tree->goner) { tree->root = NULL; list_del_init(&tree->same_root); tree->goner = 1; list_move(&tree->list, &prune_list); rule->tree = NULL; spin_unlock(&hash_lock); audit_schedule_prune(); return 1; } rule->tree = NULL; spin_unlock(&hash_lock); return 1; } return 0; } static int compare_root(struct vfsmount *mnt, void *arg) { return mnt->mnt_root->d_inode == arg; } void audit_trim_trees(void) { struct list_head cursor; mutex_lock(&audit_filter_mutex); list_add(&cursor, &tree_list); while (cursor.next != &tree_list) { struct audit_tree *tree; struct path path; struct vfsmount *root_mnt; struct node *node; int err; tree = container_of(cursor.next, struct audit_tree, list); get_tree(tree); list_del(&cursor); list_add(&cursor, &tree->list); mutex_unlock(&audit_filter_mutex); err = kern_path(tree->pathname, 0, &path); if (err) goto skip_it; root_mnt = collect_mounts(&path); path_put(&path); if (!root_mnt) goto skip_it; spin_lock(&hash_lock); list_for_each_entry(node, &tree->chunks, list) { struct audit_chunk *chunk = find_chunk(node); /* this could be NULL if the watch is dying else where... 
*/ struct inode *inode = chunk->mark.i.inode; node->index |= 1U<<31; if (iterate_mounts(compare_root, inode, root_mnt)) node->index &= ~(1U<<31); } spin_unlock(&hash_lock); trim_marked(tree); put_tree(tree); drop_collected_mounts(root_mnt); skip_it: mutex_lock(&audit_filter_mutex); } list_del(&cursor); mutex_unlock(&audit_filter_mutex); } int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op) { if (pathname[0] != '/' || rule->listnr != AUDIT_FILTER_EXIT || op != Audit_equal || rule->inode_f || rule->watch || rule->tree) return -EINVAL; rule->tree = alloc_tree(pathname); if (!rule->tree) return -ENOMEM; return 0; } void audit_put_tree(struct audit_tree *tree) { put_tree(tree); } static int tag_mount(struct vfsmount *mnt, void *arg) { return tag_chunk(mnt->mnt_root->d_inode, arg); } /* called with audit_filter_mutex */ int audit_add_tree_rule(struct audit_krule *rule) { struct audit_tree *seed = rule->tree, *tree; struct path path; struct vfsmount *mnt; int err; list_for_each_entry(tree, &tree_list, list) { if (!strcmp(seed->pathname, tree->pathname)) { put_tree(seed); rule->tree = tree; list_add(&rule->rlist, &tree->rules); return 0; } } tree = seed; list_add(&tree->list, &tree_list); list_add(&rule->rlist, &tree->rules); /* do not set rule->tree yet */ mutex_unlock(&audit_filter_mutex); err = kern_path(tree->pathname, 0, &path); if (err) goto Err; mnt = collect_mounts(&path); path_put(&path); if (!mnt) { err = -ENOMEM; goto Err; } get_tree(tree); err = iterate_mounts(tag_mount, tree, mnt); drop_collected_mounts(mnt); if (!err) { struct node *node; spin_lock(&hash_lock); list_for_each_entry(node, &tree->chunks, list) node->index &= ~(1U<<31); spin_unlock(&hash_lock); } else { trim_marked(tree); goto Err; } mutex_lock(&audit_filter_mutex); if (list_empty(&rule->rlist)) { put_tree(tree); return -ENOENT; } rule->tree = tree; put_tree(tree); return 0; Err: mutex_lock(&audit_filter_mutex); list_del_init(&tree->list); list_del_init(&tree->rules); 
put_tree(tree); return err; } int audit_tag_tree(char *old, char *new) { struct list_head cursor, barrier; int failed = 0; struct path path1, path2; struct vfsmount *tagged; int err; err = kern_path(new, 0, &path2); if (err) return err; tagged = collect_mounts(&path2); path_put(&path2); if (!tagged) return -ENOMEM; err = kern_path(old, 0, &path1); if (err) { drop_collected_mounts(tagged); return err; } mutex_lock(&audit_filter_mutex); list_add(&barrier, &tree_list); list_add(&cursor, &barrier); while (cursor.next != &tree_list) { struct audit_tree *tree; int good_one = 0; tree = container_of(cursor.next, struct audit_tree, list); get_tree(tree); list_del(&cursor); list_add(&cursor, &tree->list); mutex_unlock(&audit_filter_mutex); err = kern_path(tree->pathname, 0, &path2); if (!err) { good_one = path_is_under(&path1, &path2); path_put(&path2); } if (!good_one) { put_tree(tree); mutex_lock(&audit_filter_mutex); continue; } failed = iterate_mounts(tag_mount, tree, tagged); if (failed) { put_tree(tree); mutex_lock(&audit_filter_mutex); break; } mutex_lock(&audit_filter_mutex); spin_lock(&hash_lock); if (!tree->goner) { list_del(&tree->list); list_add(&tree->list, &tree_list); } spin_unlock(&hash_lock); put_tree(tree); } while (barrier.prev != &tree_list) { struct audit_tree *tree; tree = container_of(barrier.prev, struct audit_tree, list); get_tree(tree); list_del(&tree->list); list_add(&tree->list, &barrier); mutex_unlock(&audit_filter_mutex); if (!failed) { struct node *node; spin_lock(&hash_lock); list_for_each_entry(node, &tree->chunks, list) node->index &= ~(1U<<31); spin_unlock(&hash_lock); } else { trim_marked(tree); } put_tree(tree); mutex_lock(&audit_filter_mutex); } list_del(&barrier); list_del(&cursor); mutex_unlock(&audit_filter_mutex); path_put(&path1); drop_collected_mounts(tagged); return failed; } /* * That gets run when evict_chunk() ends up needing to kill audit_tree. * Runs from a separate thread. 
*/ static int prune_tree_thread(void *unused) { mutex_lock(&audit_cmd_mutex); mutex_lock(&audit_filter_mutex); while (!list_empty(&prune_list)) { struct audit_tree *victim; victim = list_entry(prune_list.next, struct audit_tree, list); list_del_init(&victim->list); mutex_unlock(&audit_filter_mutex); prune_one(victim); mutex_lock(&audit_filter_mutex); } mutex_unlock(&audit_filter_mutex); mutex_unlock(&audit_cmd_mutex); return 0; } static void audit_schedule_prune(void) { kthread_run(prune_tree_thread, NULL, "audit_prune_tree"); } /* * ... and that one is done if evict_chunk() decides to delay until the end * of syscall. Runs synchronously. */ void audit_kill_trees(struct list_head *list) { mutex_lock(&audit_cmd_mutex); mutex_lock(&audit_filter_mutex); while (!list_empty(list)) { struct audit_tree *victim; victim = list_entry(list->next, struct audit_tree, list); kill_rules(victim); list_del_init(&victim->list); mutex_unlock(&audit_filter_mutex); prune_one(victim); mutex_lock(&audit_filter_mutex); } mutex_unlock(&audit_filter_mutex); mutex_unlock(&audit_cmd_mutex); } /* * Here comes the stuff asynchronous to auditctl operations */ static void evict_chunk(struct audit_chunk *chunk) { struct audit_tree *owner; struct list_head *postponed = audit_killed_trees(); int need_prune = 0; int n; if (chunk->dead) return; chunk->dead = 1; mutex_lock(&audit_filter_mutex); spin_lock(&hash_lock); while (!list_empty(&chunk->trees)) { owner = list_entry(chunk->trees.next, struct audit_tree, same_root); owner->goner = 1; owner->root = NULL; list_del_init(&owner->same_root); spin_unlock(&hash_lock); if (!postponed) { kill_rules(owner); list_move(&owner->list, &prune_list); need_prune = 1; } else { list_move(&owner->list, postponed); } spin_lock(&hash_lock); } list_del_rcu(&chunk->hash); for (n = 0; n < chunk->count; n++) list_del_init(&chunk->owners[n].list); spin_unlock(&hash_lock); if (need_prune) audit_schedule_prune(); mutex_unlock(&audit_filter_mutex); } static int 
audit_tree_handle_event(struct fsnotify_group *group, struct fsnotify_mark *inode_mark, struct fsnotify_mark *vfsmonut_mark, struct fsnotify_event *event) { BUG(); return -EOPNOTSUPP; } static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group) { struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark); evict_chunk(chunk); fsnotify_put_mark(entry); } static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode, struct fsnotify_mark *inode_mark, struct fsnotify_mark *vfsmount_mark, __u32 mask, void *data, int data_type) { return false; } static const struct fsnotify_ops audit_tree_ops = { .handle_event = audit_tree_handle_event, .should_send_event = audit_tree_send_event, .free_group_priv = NULL, .free_event_priv = NULL, .freeing_mark = audit_tree_freeing_mark, }; static int __init audit_tree_init(void) { int i; audit_tree_group = fsnotify_alloc_group(&audit_tree_ops); if (IS_ERR(audit_tree_group)) audit_panic("cannot initialize fsnotify group for rectree watches"); for (i = 0; i < HASH_SIZE; i++) INIT_LIST_HEAD(&chunk_hash_heads[i]); return 0; } __initcall(audit_tree_init);
gpl-2.0
LeMaker/linux-sunxi
drivers/net/ethernet/amd/pcnet32.c
4536
82553
/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */ /* * Copyright 1996-1999 Thomas Bogendoerfer * * Derived from the lance driver written 1993,1994,1995 by Donald Becker. * * Copyright 1993 United States Government as represented by the * Director, National Security Agency. * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * This driver is for PCnet32 and PCnetPCI based ethercards */ /************************************************************************** * 23 Oct, 2000. * Fixed a few bugs, related to running the controller in 32bit mode. * * Carsten Langgaard, carstenl@mips.com * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. * *************************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DRV_NAME "pcnet32" #define DRV_VERSION "1.35" #define DRV_RELDATE "21.Apr.2008" #define PFX DRV_NAME ": " static const char *const version = DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n"; #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/crc32.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_ether.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/moduleparam.h> #include <linux/bitops.h> #include <linux/io.h> #include <linux/uaccess.h> #include <asm/dma.h> #include <asm/irq.h> /* * PCI device identifiers for "new style" Linux PCI Device Drivers */ static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), }, /* 
* Adapters that were sold with IBM's RS/6000 or pSeries hardware have * the incorrect vendor id. */ { PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE), .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, }, { } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl); static int cards_found; /* * VLB I/O addresses */ static unsigned int pcnet32_portlist[] = { 0x300, 0x320, 0x340, 0x360, 0 }; static int pcnet32_debug; static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */ static int pcnet32vlb; /* check for VLB cards ? */ static struct net_device *pcnet32_dev; static int max_interrupt_work = 2; static int rx_copybreak = 200; #define PCNET32_PORT_AUI 0x00 #define PCNET32_PORT_10BT 0x01 #define PCNET32_PORT_GPSI 0x02 #define PCNET32_PORT_MII 0x03 #define PCNET32_PORT_PORTSEL 0x03 #define PCNET32_PORT_ASEL 0x04 #define PCNET32_PORT_100 0x40 #define PCNET32_PORT_FD 0x80 #define PCNET32_DMA_MASK 0xffffffff #define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ)) #define PCNET32_BLINK_TIMEOUT (jiffies + (HZ/4)) /* * table to translate option values from tulip * to internal options */ static const unsigned char options_mapping[] = { PCNET32_PORT_ASEL, /* 0 Auto-select */ PCNET32_PORT_AUI, /* 1 BNC/AUI */ PCNET32_PORT_AUI, /* 2 AUI/BNC */ PCNET32_PORT_ASEL, /* 3 not supported */ PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */ PCNET32_PORT_ASEL, /* 5 not supported */ PCNET32_PORT_ASEL, /* 6 not supported */ PCNET32_PORT_ASEL, /* 7 not supported */ PCNET32_PORT_ASEL, /* 8 not supported */ PCNET32_PORT_MII, /* 9 MII 10baseT */ PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */ PCNET32_PORT_MII, /* 11 MII (autosel) */ PCNET32_PORT_10BT, /* 12 10BaseT */ PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */ /* 14 MII 100BaseTx-FD */ PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD, PCNET32_PORT_ASEL /* 15 not supported */ }; static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = 
{ "Loopback test (offline)" }; #define PCNET32_TEST_LEN ARRAY_SIZE(pcnet32_gstrings_test) #define PCNET32_NUM_REGS 136 #define MAX_UNITS 8 /* More are supported, limit only on options */ static int options[MAX_UNITS]; static int full_duplex[MAX_UNITS]; static int homepna[MAX_UNITS]; /* * Theory of Operation * * This driver uses the same software structure as the normal lance * driver. So look for a verbose description in lance.c. The differences * to the normal lance driver is the use of the 32bit mode of PCnet32 * and PCnetPCI chips. Because these chips are 32bit chips, there is no * 16MB limitation and we don't need bounce buffers. */ /* * Set the number of Tx and Rx buffers, using Log_2(# buffers). * Reasonable default values are 4 Tx buffers, and 16 Rx buffers. * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4). */ #ifndef PCNET32_LOG_TX_BUFFERS #define PCNET32_LOG_TX_BUFFERS 4 #define PCNET32_LOG_RX_BUFFERS 5 #define PCNET32_LOG_MAX_TX_BUFFERS 9 /* 2^9 == 512 */ #define PCNET32_LOG_MAX_RX_BUFFERS 9 #endif #define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS)) #define TX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_TX_BUFFERS)) #define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS)) #define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS)) #define PKT_BUF_SKB 1544 /* actual buffer length after being aligned */ #define PKT_BUF_SIZE (PKT_BUF_SKB - NET_IP_ALIGN) /* chip wants twos complement of the (aligned) buffer length */ #define NEG_BUF_SIZE (NET_IP_ALIGN - PKT_BUF_SKB) /* Offsets from base I/O address. 
*/ #define PCNET32_WIO_RDP 0x10 #define PCNET32_WIO_RAP 0x12 #define PCNET32_WIO_RESET 0x14 #define PCNET32_WIO_BDP 0x16 #define PCNET32_DWIO_RDP 0x10 #define PCNET32_DWIO_RAP 0x14 #define PCNET32_DWIO_RESET 0x18 #define PCNET32_DWIO_BDP 0x1C #define PCNET32_TOTAL_SIZE 0x20 #define CSR0 0 #define CSR0_INIT 0x1 #define CSR0_START 0x2 #define CSR0_STOP 0x4 #define CSR0_TXPOLL 0x8 #define CSR0_INTEN 0x40 #define CSR0_IDON 0x0100 #define CSR0_NORMAL (CSR0_START | CSR0_INTEN) #define PCNET32_INIT_LOW 1 #define PCNET32_INIT_HIGH 2 #define CSR3 3 #define CSR4 4 #define CSR5 5 #define CSR5_SUSPEND 0x0001 #define CSR15 15 #define PCNET32_MC_FILTER 8 #define PCNET32_79C970A 0x2621 /* The PCNET32 Rx and Tx ring descriptors. */ struct pcnet32_rx_head { __le32 base; __le16 buf_length; /* two`s complement of length */ __le16 status; __le32 msg_length; __le32 reserved; }; struct pcnet32_tx_head { __le32 base; __le16 length; /* two`s complement of length */ __le16 status; __le32 misc; __le32 reserved; }; /* The PCNET32 32-Bit initialization block, described in databook. */ struct pcnet32_init_block { __le16 mode; __le16 tlen_rlen; u8 phys_addr[6]; __le16 reserved; __le32 filter[2]; /* Receive and transmit ring base, along with extra bits. */ __le32 rx_ring; __le32 tx_ring; }; /* PCnet32 access functions */ struct pcnet32_access { u16 (*read_csr) (unsigned long, int); void (*write_csr) (unsigned long, int, u16); u16 (*read_bcr) (unsigned long, int); void (*write_bcr) (unsigned long, int, u16); u16 (*read_rap) (unsigned long); void (*write_rap) (unsigned long, u16); void (*reset) (unsigned long); }; /* * The first field of pcnet32_private is read by the ethernet device * so the structure should be allocated using pci_alloc_consistent(). */ struct pcnet32_private { struct pcnet32_init_block *init_block; /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. 
 */
	/* descriptor rings (consistent/coherent DMA memory) */
	struct pcnet32_rx_head	*rx_ring;
	struct pcnet32_tx_head	*tx_ring;
	dma_addr_t		init_dma_addr;	/* DMA address of beginning of
						 * the init block, returned by
						 * pci_alloc_consistent */
	struct pci_dev		*pci_dev;
	const char		*name;
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff		**tx_skbuff;
	struct sk_buff		**rx_skbuff;
	dma_addr_t		*tx_dma_addr;	/* per-descriptor mapping of skb data */
	dma_addr_t		*rx_dma_addr;
	const struct pcnet32_access *a;		/* 16- or 32-bit register accessors */
	spinlock_t		lock;		/* Guard lock */
	unsigned int		cur_rx, cur_tx;	/* The next free ring entry */
	unsigned int		rx_ring_size;	/* current rx ring size */
	unsigned int		tx_ring_size;	/* current tx ring size */
	unsigned int		rx_mod_mask;	/* rx ring modular mask */
	unsigned int		tx_mod_mask;	/* tx ring modular mask */
	unsigned short		rx_len_bits;
	unsigned short		tx_len_bits;
	dma_addr_t		rx_ring_dma_addr;
	dma_addr_t		tx_ring_dma_addr;
	unsigned int		dirty_rx,	/* ring entries to be freed. */
				dirty_tx;
	struct net_device	*dev;
	struct napi_struct	napi;
	char			tx_full;
	char			phycount;	/* number of phys found */
	int			options;
	unsigned int		shared_irq:1,	/* shared irq possible */
				dxsuflo:1,	/* disable transmit stop on uflo */
				mii:1;		/* mii port available */
	struct net_device	*next;
	struct mii_if_info	mii_if;
	struct timer_list	watchdog_timer;
	u32			msg_enable;	/* debug message level */
	/* each bit indicates an available PHY */
	u32			phymask;
	unsigned short		chip_version;	/* which variant this is */

	/* saved registers during ethtool blink */
	u16			save_regs[4];
};

/* forward declarations */
static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
static int pcnet32_open(struct net_device *);
static int pcnet32_init_ring(struct net_device *);
static netdev_tx_t pcnet32_start_xmit(struct sk_buff *,
				      struct net_device *);
static void pcnet32_tx_timeout(struct net_device *dev);
static irqreturn_t pcnet32_interrupt(int, void *);
static int pcnet32_close(struct net_device *);
static struct net_device_stats *pcnet32_get_stats(struct net_device *);
static void pcnet32_load_multicast(struct net_device *dev);
static void pcnet32_set_multicast_list(struct net_device *);
static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
static void pcnet32_watchdog(struct net_device *);
static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
		       int val);
static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
static void pcnet32_ethtool_test(struct net_device *dev,
				 struct ethtool_test *eth_test, u64 * data);
static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1);
static int pcnet32_get_regs_len(struct net_device *dev);
static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *ptr);
static void pcnet32_purge_tx_ring(struct net_device *dev);
static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
static void pcnet32_free_ring(struct net_device *dev);
static void pcnet32_check_media(struct net_device *dev, int verbose);

/*
 * 16-bit (word I/O) register accessors.  CSRs and BCRs are reached
 * indirectly: write the register index to the RAP (register address
 * port), then read/write the data port (RDP for CSRs, BDP for BCRs).
 */
static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
{
	outw(index, addr + PCNET32_WIO_RAP);
	return inw(addr + PCNET32_WIO_RDP);
}

static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
{
	outw(index, addr + PCNET32_WIO_RAP);
	outw(val, addr + PCNET32_WIO_RDP);
}

static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
{
	outw(index, addr + PCNET32_WIO_RAP);
	return inw(addr + PCNET32_WIO_BDP);
}

static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
{
	outw(index, addr + PCNET32_WIO_RAP);
	outw(val, addr + PCNET32_WIO_BDP);
}

static u16 pcnet32_wio_read_rap(unsigned long addr)
{
	return inw(addr + PCNET32_WIO_RAP);
}

static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
{
	outw(val, addr + PCNET32_WIO_RAP);
}

/* reading the reset port triggers a chip reset (value is discarded) */
static void pcnet32_wio_reset(unsigned long addr)
{
	inw(addr + PCNET32_WIO_RESET);
}

/* probe whether word I/O works: write 88 to RAP and read it back */
static int pcnet32_wio_check(unsigned long addr)
{
	outw(88, addr + PCNET32_WIO_RAP);
	return inw(addr + PCNET32_WIO_RAP) == 88;
}

static const struct pcnet32_access pcnet32_wio = {
	.read_csr = pcnet32_wio_read_csr,
	.write_csr = pcnet32_wio_write_csr,
	.read_bcr = pcnet32_wio_read_bcr,
	.write_bcr = pcnet32_wio_write_bcr,
	.read_rap = pcnet32_wio_read_rap,
	.write_rap = pcnet32_wio_write_rap,
	.reset = pcnet32_wio_reset
};

/*
 * 32-bit (dword I/O) register accessors — same RAP/RDP/BDP indirection
 * as above, but using 32-bit port accesses; registers are still 16 bits
 * wide, hence the 0xffff masking on reads.
 */
static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
{
	outl(index, addr + PCNET32_DWIO_RAP);
	return inl(addr + PCNET32_DWIO_RDP) & 0xffff;
}

static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
{
	outl(index, addr + PCNET32_DWIO_RAP);
	outl(val, addr + PCNET32_DWIO_RDP);
}

static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
{
	outl(index, addr + PCNET32_DWIO_RAP);
	return inl(addr + PCNET32_DWIO_BDP) & 0xffff;
}

static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
{
	outl(index, addr + PCNET32_DWIO_RAP);
	outl(val, addr + PCNET32_DWIO_BDP);
}

static u16 pcnet32_dwio_read_rap(unsigned long addr)
{
	return inl(addr + PCNET32_DWIO_RAP) & 0xffff;
}

static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
{
	outl(val, addr + PCNET32_DWIO_RAP);
}

static void pcnet32_dwio_reset(unsigned long addr)
{
	inl(addr + PCNET32_DWIO_RESET);
}

/* probe whether dword I/O works: write 88 to RAP and read it back */
static int pcnet32_dwio_check(unsigned long addr)
{
	outl(88, addr + PCNET32_DWIO_RAP);
	return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88;
}

static const struct pcnet32_access pcnet32_dwio = {
	.read_csr = pcnet32_dwio_read_csr,
	.write_csr = pcnet32_dwio_write_csr,
	.read_bcr = pcnet32_dwio_read_bcr,
	.write_bcr = pcnet32_dwio_write_bcr,
	.read_rap = pcnet32_dwio_read_rap,
	.write_rap = pcnet32_dwio_write_rap,
	.reset = pcnet32_dwio_reset
};

/* quiesce NAPI and the transmit queue (e.g. around a ring reconfig/test) */
static void pcnet32_netif_stop(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);

	dev->trans_start = jiffies; /* prevent tx timeout */
	napi_disable(&lp->napi);
	netif_tx_disable(dev);
}

/* counterpart of pcnet32_netif_stop: re-enable tx queue, irq mask, NAPI */
static void pcnet32_netif_start(struct net_device *dev)
{
	struct pcnet32_private *lp =
netdev_priv(dev); ulong ioaddr = dev->base_addr; u16 val; netif_wake_queue(dev); val = lp->a->read_csr(ioaddr, CSR3); val &= 0x00ff; lp->a->write_csr(ioaddr, CSR3, val); napi_enable(&lp->napi); } /* * Allocate space for the new sized tx ring. * Free old resources * Save new resources. * Any failure keeps old resources. * Must be called with lp->lock held. */ static void pcnet32_realloc_tx_ring(struct net_device *dev, struct pcnet32_private *lp, unsigned int size) { dma_addr_t new_ring_dma_addr; dma_addr_t *new_dma_addr_list; struct pcnet32_tx_head *new_tx_ring; struct sk_buff **new_skb_list; pcnet32_purge_tx_ring(dev); new_tx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * (1 << size), &new_ring_dma_addr); if (new_tx_ring == NULL) { netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); return; } memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size)); new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t), GFP_ATOMIC); if (!new_dma_addr_list) { netif_err(lp, drv, dev, "Memory allocation failed\n"); goto free_new_tx_ring; } new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *), GFP_ATOMIC); if (!new_skb_list) { netif_err(lp, drv, dev, "Memory allocation failed\n"); goto free_new_lists; } kfree(lp->tx_skbuff); kfree(lp->tx_dma_addr); pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, lp->tx_ring, lp->tx_ring_dma_addr); lp->tx_ring_size = (1 << size); lp->tx_mod_mask = lp->tx_ring_size - 1; lp->tx_len_bits = (size << 12); lp->tx_ring = new_tx_ring; lp->tx_ring_dma_addr = new_ring_dma_addr; lp->tx_dma_addr = new_dma_addr_list; lp->tx_skbuff = new_skb_list; return; free_new_lists: kfree(new_dma_addr_list); free_new_tx_ring: pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * (1 << size), new_tx_ring, new_ring_dma_addr); } /* * Allocate space for the new sized rx ring. * Re-use old receive buffers. 
* alloc extra buffers * free unneeded buffers * free unneeded buffers * Save new resources. * Any failure keeps old resources. * Must be called with lp->lock held. */ static void pcnet32_realloc_rx_ring(struct net_device *dev, struct pcnet32_private *lp, unsigned int size) { dma_addr_t new_ring_dma_addr; dma_addr_t *new_dma_addr_list; struct pcnet32_rx_head *new_rx_ring; struct sk_buff **new_skb_list; int new, overlap; new_rx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * (1 << size), &new_ring_dma_addr); if (new_rx_ring == NULL) { netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); return; } memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size)); new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t), GFP_ATOMIC); if (!new_dma_addr_list) { netif_err(lp, drv, dev, "Memory allocation failed\n"); goto free_new_rx_ring; } new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *), GFP_ATOMIC); if (!new_skb_list) { netif_err(lp, drv, dev, "Memory allocation failed\n"); goto free_new_lists; } /* first copy the current receive buffers */ overlap = min(size, lp->rx_ring_size); for (new = 0; new < overlap; new++) { new_rx_ring[new] = lp->rx_ring[new]; new_dma_addr_list[new] = lp->rx_dma_addr[new]; new_skb_list[new] = lp->rx_skbuff[new]; } /* now allocate any new buffers needed */ for (; new < size; new++) { struct sk_buff *rx_skbuff; new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB); rx_skbuff = new_skb_list[new]; if (!rx_skbuff) { /* keep the original lists and buffers */ netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n", __func__); goto free_all_new; } skb_reserve(rx_skbuff, NET_IP_ALIGN); new_dma_addr_list[new] = pci_map_single(lp->pci_dev, rx_skbuff->data, PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]); new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE); new_rx_ring[new].status = cpu_to_le16(0x8000); } /* and free any unneeded buffers */ for (; 
new < lp->rx_ring_size; new++) { if (lp->rx_skbuff[new]) { pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new], PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); dev_kfree_skb(lp->rx_skbuff[new]); } } kfree(lp->rx_skbuff); kfree(lp->rx_dma_addr); pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, lp->rx_ring, lp->rx_ring_dma_addr); lp->rx_ring_size = (1 << size); lp->rx_mod_mask = lp->rx_ring_size - 1; lp->rx_len_bits = (size << 4); lp->rx_ring = new_rx_ring; lp->rx_ring_dma_addr = new_ring_dma_addr; lp->rx_dma_addr = new_dma_addr_list; lp->rx_skbuff = new_skb_list; return; free_all_new: while (--new >= lp->rx_ring_size) { if (new_skb_list[new]) { pci_unmap_single(lp->pci_dev, new_dma_addr_list[new], PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); dev_kfree_skb(new_skb_list[new]); } } kfree(new_skb_list); free_new_lists: kfree(new_dma_addr_list); free_new_rx_ring: pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * (1 << size), new_rx_ring, new_ring_dma_addr); } static void pcnet32_purge_rx_ring(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); int i; /* free all allocated skbuffs */ for (i = 0; i < lp->rx_ring_size; i++) { lp->rx_ring[i].status = 0; /* CPU owns buffer */ wmb(); /* Make sure adapter sees owner change */ if (lp->rx_skbuff[i]) { pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); dev_kfree_skb_any(lp->rx_skbuff[i]); } lp->rx_skbuff[i] = NULL; lp->rx_dma_addr[i] = 0; } } #ifdef CONFIG_NET_POLL_CONTROLLER static void pcnet32_poll_controller(struct net_device *dev) { disable_irq(dev->irq); pcnet32_interrupt(0, dev); enable_irq(dev->irq); } #endif static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct pcnet32_private *lp = netdev_priv(dev); unsigned long flags; int r = -EOPNOTSUPP; if (lp->mii) { spin_lock_irqsave(&lp->lock, flags); mii_ethtool_gset(&lp->mii_if, cmd); spin_unlock_irqrestore(&lp->lock, flags); r = 0; } return r; } static int 
pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct pcnet32_private *lp = netdev_priv(dev); unsigned long flags; int r = -EOPNOTSUPP; if (lp->mii) { spin_lock_irqsave(&lp->lock, flags); r = mii_ethtool_sset(&lp->mii_if, cmd); spin_unlock_irqrestore(&lp->lock, flags); } return r; } static void pcnet32_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct pcnet32_private *lp = netdev_priv(dev); strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); if (lp->pci_dev) strlcpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info)); else snprintf(info->bus_info, sizeof(info->bus_info), "VLB 0x%lx", dev->base_addr); } static u32 pcnet32_get_link(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); unsigned long flags; int r; spin_lock_irqsave(&lp->lock, flags); if (lp->mii) { r = mii_link_ok(&lp->mii_if); } else if (lp->chip_version >= PCNET32_79C970A) { ulong ioaddr = dev->base_addr; /* card base I/O address */ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0); } else { /* can not detect link on really old chips */ r = 1; } spin_unlock_irqrestore(&lp->lock, flags); return r; } static u32 pcnet32_get_msglevel(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); return lp->msg_enable; } static void pcnet32_set_msglevel(struct net_device *dev, u32 value) { struct pcnet32_private *lp = netdev_priv(dev); lp->msg_enable = value; } static int pcnet32_nway_reset(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); unsigned long flags; int r = -EOPNOTSUPP; if (lp->mii) { spin_lock_irqsave(&lp->lock, flags); r = mii_nway_restart(&lp->mii_if); spin_unlock_irqrestore(&lp->lock, flags); } return r; } static void pcnet32_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { struct pcnet32_private *lp = netdev_priv(dev); ering->tx_max_pending = TX_MAX_RING_SIZE; ering->tx_pending = lp->tx_ring_size; 
ering->rx_max_pending = RX_MAX_RING_SIZE; ering->rx_pending = lp->rx_ring_size; } static int pcnet32_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { struct pcnet32_private *lp = netdev_priv(dev); unsigned long flags; unsigned int size; ulong ioaddr = dev->base_addr; int i; if (ering->rx_mini_pending || ering->rx_jumbo_pending) return -EINVAL; if (netif_running(dev)) pcnet32_netif_stop(dev); spin_lock_irqsave(&lp->lock, flags); lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE); /* set the minimum ring size to 4, to allow the loopback test to work * unchanged. */ for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) { if (size <= (1 << i)) break; } if ((1 << i) != lp->tx_ring_size) pcnet32_realloc_tx_ring(dev, lp, i); size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE); for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) { if (size <= (1 << i)) break; } if ((1 << i) != lp->rx_ring_size) pcnet32_realloc_rx_ring(dev, lp, i); lp->napi.weight = lp->rx_ring_size / 2; if (netif_running(dev)) { pcnet32_netif_start(dev); pcnet32_restart(dev, CSR0_NORMAL); } spin_unlock_irqrestore(&lp->lock, flags); netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n", lp->rx_ring_size, lp->tx_ring_size); return 0; } static void pcnet32_get_strings(struct net_device *dev, u32 stringset, u8 *data) { memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test)); } static int pcnet32_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_TEST: return PCNET32_TEST_LEN; default: return -EOPNOTSUPP; } } static void pcnet32_ethtool_test(struct net_device *dev, struct ethtool_test *test, u64 * data) { struct pcnet32_private *lp = netdev_priv(dev); int rc; if (test->flags == ETH_TEST_FL_OFFLINE) { rc = pcnet32_loopback_test(dev, data); if (rc) { netif_printk(lp, hw, KERN_DEBUG, dev, "Loopback test failed\n"); test->flags |= ETH_TEST_FL_FAILED; } else 
			netif_printk(lp, hw, KERN_DEBUG, dev,
				     "Loopback test passed\n");
	} else
		netif_printk(lp, hw, KERN_DEBUG, dev,
			     "No tests to run (specify 'Offline' on ethtool)\n");
}				/* end pcnet32_ethtool_test */

/*
 * Internal-loopback self test: build a handful of tx frames addressed to
 * ourselves, enable internal loopback (BCR32 / CSR15), start the chip,
 * and verify each received frame matches the one transmitted.
 * Returns 0 on success, 1 on failure; the result is also stored in *data1.
 */
static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	const struct pcnet32_access *a = lp->a;	/* access to registers */
	ulong ioaddr = dev->base_addr;	/* card base I/O address */
	struct sk_buff *skb;	/* sk buff */
	int x, i;		/* counters */
	int numbuffs = 4;	/* number of TX/RX buffers and descs */
	u16 status = 0x8300;	/* TX ring status */
	__le16 teststatus;	/* test of ring status */
	int rc;			/* return code */
	int size;		/* size of packets */
	unsigned char *packet;	/* source packet data */
	static const int data_len = 60;	/* length of source packets */
	unsigned long flags;
	unsigned long ticks;

	rc = 1;			/* default to fail */

	if (netif_running(dev))
		pcnet32_netif_stop(dev);

	spin_lock_irqsave(&lp->lock, flags);
	lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);	/* stop the chip */

	numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));

	/* Reset the PCNET32 */
	lp->a->reset(ioaddr);
	lp->a->write_csr(ioaddr, CSR4, 0x0915);	/* auto tx pad */

	/* switch pcnet32 to 32bit mode */
	lp->a->write_bcr(ioaddr, 20, 2);

	/* purge & init rings but don't actually restart */
	pcnet32_restart(dev, 0x0000);
	lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);	/* Set STOP bit */

	/* Initialize Transmit buffers. */
	size = data_len + 15;
	for (x = 0; x < numbuffs; x++) {
		skb = netdev_alloc_skb(dev, size);
		if (!skb) {
			netif_printk(lp, hw, KERN_DEBUG, dev,
				     "Cannot allocate skb at line: %d!\n",
				     __LINE__);
			goto clean_up;
		}
		packet = skb->data;
		skb_put(skb, size);	/* create space for data */
		lp->tx_skbuff[x] = skb;
		/* descriptor length field holds the two's complement of len */
		lp->tx_ring[x].length = cpu_to_le16(-skb->len);
		lp->tx_ring[x].misc = 0;

		/* put DA and SA into the skb */
		for (i = 0; i < 6; i++)
			*packet++ = dev->dev_addr[i];
		for (i = 0; i < 6; i++)
			*packet++ = dev->dev_addr[i];
		/* type */
		*packet++ = 0x08;
		*packet++ = 0x06;
		/* packet number */
		*packet++ = x;
		/* fill packet with data */
		for (i = 0; i < data_len; i++)
			*packet++ = i;

		lp->tx_dma_addr[x] =
			pci_map_single(lp->pci_dev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
		wmb();	/* Make sure owner changes after all others are visible */
		lp->tx_ring[x].status = cpu_to_le16(status);
	}

	x = a->read_bcr(ioaddr, 32);	/* set internal loopback in BCR32 */
	a->write_bcr(ioaddr, 32, x | 0x0002);

	/* set int loopback in CSR15 */
	x = a->read_csr(ioaddr, CSR15) & 0xfffc;
	lp->a->write_csr(ioaddr, CSR15, x | 0x0044);

	teststatus = cpu_to_le16(0x8000);	/* OWN bit: chip still owns desc */
	lp->a->write_csr(ioaddr, CSR0, CSR0_START);	/* Set STRT bit */

	/* Check status of descriptors: poll each rx descriptor (up to
	 * 200 ms) until the chip releases ownership back to the CPU */
	for (x = 0; x < numbuffs; x++) {
		ticks = 0;
		rmb();
		while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
			spin_unlock_irqrestore(&lp->lock, flags);
			msleep(1);
			spin_lock_irqsave(&lp->lock, flags);
			rmb();
			ticks++;
		}
		if (ticks == 200) {
			netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x);
			break;
		}
	}

	lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);	/* Set STOP bit */
	wmb();
	if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
		netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");

		for (x = 0; x < numbuffs; x++) {
			netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x);
			skb = lp->rx_skbuff[x];
			for (i = 0; i < size; i++)
				pr_cont(" %02x", *(skb->data + i));
			pr_cont("\n");
		}
	}

	/* compare every received frame byte-for-byte against its source */
	x = 0;
	rc = 0;
	while (x < numbuffs && !rc) {
		skb = lp->rx_skbuff[x];
		packet = lp->tx_skbuff[x]->data;
		for (i = 0; i < size; i++) {
			if (*(skb->data + i) != packet[i]) {
				netif_printk(lp, hw, KERN_DEBUG, dev,
					     "Error in compare! %2x - %02x %02x\n",
					     i, *(skb->data + i), packet[i]);
				rc = 1;
				break;
			}
		}
		x++;
	}

clean_up:
	*data1 = rc;
	pcnet32_purge_tx_ring(dev);

	/* undo the loopback configuration */
	x = a->read_csr(ioaddr, CSR15);
	a->write_csr(ioaddr, CSR15, (x & ~0x0044));	/* reset bits 6 and 2 */

	x = a->read_bcr(ioaddr, 32);	/* reset internal loopback */
	a->write_bcr(ioaddr, 32, (x & ~0x0002));

	if (netif_running(dev)) {
		pcnet32_netif_start(dev);
		pcnet32_restart(dev, CSR0_NORMAL);
	} else {
		pcnet32_purge_rx_ring(dev);
		lp->a->write_bcr(ioaddr, 20, 4);	/* return to 16bit mode */
	}
	spin_unlock_irqrestore(&lp->lock, flags);

	return rc;
}				/* end pcnet32_loopback_test */

/*
 * ethtool identify: blink the adapter LEDs by toggling BCR4-BCR7.
 * BCR values are saved on ACTIVE and restored on INACTIVE.
 */
static int pcnet32_set_phys_id(struct net_device *dev,
			       enum ethtool_phys_id_state state)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	const struct pcnet32_access *a = lp->a;
	ulong ioaddr = dev->base_addr;
	unsigned long flags;
	int i;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Save the current value of the bcrs */
		spin_lock_irqsave(&lp->lock, flags);
		for (i = 4; i < 8; i++)
			lp->save_regs[i - 4] = a->read_bcr(ioaddr, i);
		spin_unlock_irqrestore(&lp->lock, flags);
		return 2;	/* cycle on/off twice per second */

	case ETHTOOL_ID_ON:
	case ETHTOOL_ID_OFF:
		/* Blink the led */
		spin_lock_irqsave(&lp->lock, flags);
		for (i = 4; i < 8; i++)
			a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
		spin_unlock_irqrestore(&lp->lock, flags);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the original value of the bcrs */
		spin_lock_irqsave(&lp->lock, flags);
		for (i = 4; i < 8; i++)
			a->write_bcr(ioaddr, i, lp->save_regs[i - 4]);
		spin_unlock_irqrestore(&lp->lock, flags);
	}
	return 0;
}

/*
 * lp->lock must be held.
 */
/*
 * Put the chip into SUSPEND mode (CSR5 SPND bit) so registers can be
 * accessed safely while traffic is paused.  Polls up to ~200 ms for the
 * bit to take, dropping lp->lock around each delay.  Returns 1 if the
 * chip is suspended, 0 if suspend is unsupported (pre-79C970A) or timed
 * out.
 */
static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
			   int can_sleep)
{
	int csr5;
	struct pcnet32_private *lp = netdev_priv(dev);
	const struct pcnet32_access *a = lp->a;
	ulong ioaddr = dev->base_addr;
	int ticks;

	/* really old chips have to be stopped. */
	if (lp->chip_version < PCNET32_79C970A)
		return 0;

	/* set SUSPEND (SPND) - CSR5 bit 0 */
	csr5 = a->read_csr(ioaddr, CSR5);
	a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);

	/* poll waiting for bit to be set */
	ticks = 0;
	while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
		spin_unlock_irqrestore(&lp->lock, *flags);
		if (can_sleep)
			msleep(1);
		else
			mdelay(1);
		spin_lock_irqsave(&lp->lock, *flags);
		ticks++;
		if (ticks > 200) {
			netif_printk(lp, hw, KERN_DEBUG, dev,
				     "Error getting into suspend!\n");
			return 0;
		}
	}
	return 1;
}

/*
 * process one receive descriptor entry
 */
static void pcnet32_rx_entry(struct net_device *dev,
			     struct pcnet32_private *lp,
			     struct pcnet32_rx_head *rxp,
			     int entry)
{
	/* top byte of the descriptor status word, sign-extended */
	int status = (short)le16_to_cpu(rxp->status) >> 8;
	int rx_in_place = 0;
	struct sk_buff *skb;
	short pkt_len;

	if (status != 0x03) {	/* There was an error. */
		/*
		 * There is a tricky error noted by John Murphy,
		 * <murf@perftech.com> to Russ Nelson: Even with full-sized
		 * buffers it's possible for a jabber packet to use two
		 * buffers, with only the last correctly noting the error.
		 */
		if (status & 0x01)	/* Only count a general error at the */
			dev->stats.rx_errors++;	/* end of a packet. */
		if (status & 0x20)
			dev->stats.rx_frame_errors++;
		if (status & 0x10)
			dev->stats.rx_over_errors++;
		if (status & 0x08)
			dev->stats.rx_crc_errors++;
		if (status & 0x04)
			dev->stats.rx_fifo_errors++;
		return;
	}

	/* frame length minus the 4-byte FCS */
	pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4;

	/* Discard oversize frames. */
	if (unlikely(pkt_len > PKT_BUF_SIZE)) {
		netif_err(lp, drv, dev, "Impossible packet size %d!\n",
			  pkt_len);
		dev->stats.rx_errors++;
		return;
	}
	if (pkt_len < 60) {
		netif_err(lp, rx_err, dev, "Runt packet!\n");
		dev->stats.rx_errors++;
		return;
	}

	if (pkt_len > rx_copybreak) {
		/* large frame: swap in a fresh buffer and pass the old skb
		 * up the stack without copying */
		struct sk_buff *newskb;

		newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
		if (newskb) {
			skb_reserve(newskb, NET_IP_ALIGN);
			skb = lp->rx_skbuff[entry];
			pci_unmap_single(lp->pci_dev,
					 lp->rx_dma_addr[entry],
					 PKT_BUF_SIZE,
					 PCI_DMA_FROMDEVICE);
			skb_put(skb, pkt_len);
			lp->rx_skbuff[entry] = newskb;
			lp->rx_dma_addr[entry] =
				    pci_map_single(lp->pci_dev,
						   newskb->data,
						   PKT_BUF_SIZE,
						   PCI_DMA_FROMDEVICE);
			rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
			rx_in_place = 1;
		} else
			skb = NULL;
	} else
		/* small frame: copy into a freshly allocated skb */
		skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);

	if (skb == NULL) {
		netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n");
		dev->stats.rx_dropped++;
		return;
	}
	if (!rx_in_place) {
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, pkt_len);	/* Make room */
		pci_dma_sync_single_for_cpu(lp->pci_dev,
					    lp->rx_dma_addr[entry],
					    pkt_len,
					    PCI_DMA_FROMDEVICE);
		skb_copy_to_linear_data(skb,
					(unsigned char *)(lp->rx_skbuff[entry]->data),
					pkt_len);
		pci_dma_sync_single_for_device(lp->pci_dev,
					       lp->rx_dma_addr[entry],
					       pkt_len,
					       PCI_DMA_FROMDEVICE);
	}
	dev->stats.rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb(skb);
	dev->stats.rx_packets++;
}

/* NAPI rx handler: drain up to 'budget' received frames from the ring */
static int pcnet32_rx(struct net_device *dev, int budget)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int entry = lp->cur_rx & lp->rx_mod_mask;
	struct pcnet32_rx_head *rxp = &lp->rx_ring[entry];
	int npackets = 0;

	/* If we own the next entry, it's a new packet. Send it up. */
	while (npackets < budget && (short)le16_to_cpu(rxp->status) >= 0) {
		pcnet32_rx_entry(dev, lp, rxp, entry);
		npackets += 1;
		/*
		 * The docs say that the buffer length isn't touched, but Andrew
		 * Boyd of QNX reports that some revs of the 79C965 clear it.
		 */
		rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE);
		wmb();	/* Make sure owner changes after others are visible */
		rxp->status = cpu_to_le16(0x8000);	/* give desc back to chip */
		entry = (++lp->cur_rx) & lp->rx_mod_mask;
		rxp = &lp->rx_ring[entry];
	}

	return npackets;
}

/*
 * Reclaim completed transmit descriptors, accounting errors and freeing
 * the transmitted skbs.  Returns non-zero when a Tx FIFO error requires
 * a full chip restart (the Tx unit is switched off by the hardware).
 */
static int pcnet32_tx(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned int dirty_tx = lp->dirty_tx;
	int delta;
	int must_restart = 0;

	while (dirty_tx != lp->cur_tx) {
		int entry = dirty_tx & lp->tx_mod_mask;
		int status = (short)le16_to_cpu(lp->tx_ring[entry].status);

		if (status < 0)
			break;	/* It still hasn't been Txed */

		lp->tx_ring[entry].base = 0;

		if (status & 0x4000) {
			/* There was a major error, log it. */
			int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
			dev->stats.tx_errors++;
			netif_err(lp, tx_err, dev,
				  "Tx error status=%04x err_status=%08x\n",
				  status, err_status);
			if (err_status & 0x04000000)
				dev->stats.tx_aborted_errors++;
			if (err_status & 0x08000000)
				dev->stats.tx_carrier_errors++;
			if (err_status & 0x10000000)
				dev->stats.tx_window_errors++;
#ifndef DO_DXSUFLO
			if (err_status & 0x40000000) {
				dev->stats.tx_fifo_errors++;
				/* Ackk!  On FIFO errors the Tx unit is turned off! */
				/* Remove this verbosity later! */
				netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
				must_restart = 1;
			}
#else
			if (err_status & 0x40000000) {
				dev->stats.tx_fifo_errors++;
				if (!lp->dxsuflo) {	/* If controller doesn't recover ... */
					/* Ackk!  On FIFO errors the Tx unit is turned off! */
					/* Remove this verbosity later! */
					netif_err(lp, tx_err, dev,
						  "Tx FIFO error!\n");
					must_restart = 1;
				}
			}
#endif
		} else {
			if (status & 0x1800)
				dev->stats.collisions++;
			dev->stats.tx_packets++;
		}

		/* We must free the original skb */
		if (lp->tx_skbuff[entry]) {
			pci_unmap_single(lp->pci_dev,
					 lp->tx_dma_addr[entry],
					 lp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb_any(lp->tx_skbuff[entry]);
			lp->tx_skbuff[entry] = NULL;
			lp->tx_dma_addr[entry] = 0;
		}
		dirty_tx++;
	}

	/* sanity-check the producer/consumer distance */
	delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
	if (delta > lp->tx_ring_size) {
		netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
			  dirty_tx, lp->cur_tx, lp->tx_full);
		dirty_tx += lp->tx_ring_size;
		delta -= lp->tx_ring_size;
	}

	if (lp->tx_full &&
	    netif_queue_stopped(dev) &&
	    delta < lp->tx_ring_size - 2) {
		/* The ring is no longer full, clear tbusy. */
		lp->tx_full = 0;
		netif_wake_queue(dev);
	}
	lp->dirty_tx = dirty_tx;

	return must_restart;
}

/* NAPI poll: receive up to 'budget' frames, then reap tx completions */
static int pcnet32_poll(struct napi_struct *napi, int budget)
{
	struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi);
	struct net_device *dev = lp->dev;
	unsigned long ioaddr = dev->base_addr;
	unsigned long flags;
	int work_done;
	u16 val;

	work_done = pcnet32_rx(dev, budget);

	spin_lock_irqsave(&lp->lock, flags);
	if (pcnet32_tx(dev)) {
		/* reset the chip to clear the error condition, then restart */
		lp->a->reset(ioaddr);
		lp->a->write_csr(ioaddr, CSR4, 0x0915);	/* auto tx pad */
		pcnet32_restart(dev, CSR0_START);
		netif_wake_queue(dev);
	}
	spin_unlock_irqrestore(&lp->lock, flags);

	if (work_done < budget) {
		spin_lock_irqsave(&lp->lock, flags);

		__napi_complete(napi);

		/* clear interrupt masks */
		val = lp->a->read_csr(ioaddr, CSR3);
		val &= 0x00ff;
		lp->a->write_csr(ioaddr, CSR3, val);

		/* Set interrupt enable.
		 */
		lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);

		spin_unlock_irqrestore(&lp->lock, flags);
	}
	return work_done;
}

#define PCNET32_REGS_PER_PHY	32
#define PCNET32_MAX_PHYS	32

/* ethtool regs dump size: fixed chip registers plus 32 regs per PHY */
static int pcnet32_get_regs_len(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int j = lp->phycount * PCNET32_REGS_PER_PHY;

	return (PCNET32_NUM_REGS + j) * sizeof(u16);
}

/*
 * Dump address PROM, CSRs, BCRs and all MII PHY registers into *ptr.
 * The chip is suspended (if running) around the register reads so the
 * snapshot is consistent, then resumed.
 */
static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *ptr)
{
	int i, csr0;
	u16 *buff = ptr;
	struct pcnet32_private *lp = netdev_priv(dev);
	const struct pcnet32_access *a = lp->a;
	ulong ioaddr = dev->base_addr;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	csr0 = a->read_csr(ioaddr, CSR0);
	if (!(csr0 & CSR0_STOP))	/* If not stopped */
		pcnet32_suspend(dev, &flags, 1);

	/* read address PROM */
	for (i = 0; i < 16; i += 2)
		*buff++ = inw(ioaddr + i);

	/* read control and status registers */
	for (i = 0; i < 90; i++)
		*buff++ = a->read_csr(ioaddr, i);

	*buff++ = a->read_csr(ioaddr, 112);
	*buff++ = a->read_csr(ioaddr, 114);

	/* read bus configuration registers */
	for (i = 0; i < 30; i++)
		*buff++ = a->read_bcr(ioaddr, i);

	*buff++ = 0;	/* skip bcr30 so as not to hang 79C976 */

	for (i = 31; i < 36; i++)
		*buff++ = a->read_bcr(ioaddr, i);

	/* read mii phy registers */
	if (lp->mii) {
		int j;
		for (j = 0; j < PCNET32_MAX_PHYS; j++) {
			if (lp->phymask & (1 << j)) {
				for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
					/* BCR33 selects (phy << 5)|reg,
					 * BCR34 is the MII data port */
					lp->a->write_bcr(ioaddr, 33,
							 (j << 5) | i);
					*buff++ = lp->a->read_bcr(ioaddr, 34);
				}
			}
		}
	}

	if (!(csr0 & CSR0_STOP)) {	/* If not stopped */
		int csr5;

		/* clear SUSPEND (SPND) - CSR5 bit 0 */
		csr5 = a->read_csr(ioaddr, CSR5);
		a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
	}

	spin_unlock_irqrestore(&lp->lock, flags);
}

static const struct ethtool_ops pcnet32_ethtool_ops = {
	.get_settings		= pcnet32_get_settings,
	.set_settings		= pcnet32_set_settings,
	.get_drvinfo		= pcnet32_get_drvinfo,
	.get_msglevel		= pcnet32_get_msglevel,
	.set_msglevel		= pcnet32_set_msglevel,
	.nway_reset		= pcnet32_nway_reset,
	.get_link		= pcnet32_get_link,
	.get_ringparam		= pcnet32_get_ringparam,
	.set_ringparam		= pcnet32_set_ringparam,
	.get_strings		= pcnet32_get_strings,
	.self_test		= pcnet32_ethtool_test,
	.set_phys_id		= pcnet32_set_phys_id,
	.get_regs_len		= pcnet32_get_regs_len,
	.get_regs		= pcnet32_get_regs,
	.get_sset_count		= pcnet32_get_sset_count,
};

/* only probes for non-PCI devices, the rest are handled by
 * pci_register_driver via pcnet32_probe_pci */
static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
{
	unsigned int *port, ioaddr;

	/* search for PCnet32 VLB cards at known addresses */
	for (port = pcnet32_portlist; (ioaddr = *port); port++) {
		if (request_region(ioaddr, PCNET32_TOTAL_SIZE,
				   "pcnet32_probe_vlbus")) {
			/* check if there is really a pcnet chip on that ioaddr */
			if ((inb(ioaddr + 14) == 0x57) &&
			    (inb(ioaddr + 15) == 0x57)) {
				pcnet32_probe1(ioaddr, 0, NULL);
			} else {
				release_region(ioaddr, PCNET32_TOTAL_SIZE);
			}
		}
	}
}

/* PCI probe: enable the device, claim its I/O region, then hand off to
 * the bus-independent pcnet32_probe1 */
static int __devinit
pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned long ioaddr;
	int err;

	err = pci_enable_device(pdev);
	if (err < 0) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("failed to enable device -- err=%d\n", err);
		return err;
	}
	pci_set_master(pdev);

	ioaddr = pci_resource_start(pdev, 0);
	if (!ioaddr) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("card has no PCI IO resources, aborting\n");
		return -ENODEV;
	}

	if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("architecture does not support 32bit PCI busmaster DMA\n");
		return -ENODEV;
	}
	if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("io address range already allocated\n");
		return -EBUSY;
	}

	err = pcnet32_probe1(ioaddr, 1, pdev);
	if (err < 0)
		pci_disable_device(pdev);

	return err;
}

static const struct net_device_ops pcnet32_netdev_ops = {
	.ndo_open		= pcnet32_open,
	.ndo_stop		= pcnet32_close,
	.ndo_start_xmit		=
pcnet32_start_xmit,
	.ndo_tx_timeout		= pcnet32_tx_timeout,
	.ndo_get_stats		= pcnet32_get_stats,
	.ndo_set_rx_mode	= pcnet32_set_multicast_list,
	.ndo_do_ioctl		= pcnet32_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= pcnet32_poll_controller,
#endif
};

/* pcnet32_probe1
 *  Called from both pcnet32_probe_vlbus and pcnet_probe_pci.
 *  pdev will be NULL when called from pcnet32_probe_vlbus.
 *
 *  Identifies the chip at @ioaddr, allocates the net_device and its
 *  private state, reads the MAC address from CSR12-14/PROM, sets up the
 *  init block, probes/assigns the IRQ, scans for MII PHYs and finally
 *  registers the netdev.  Returns 0 on success or a negative errno;
 *  on any failure the I/O region is released before returning.
 */
static int __devinit pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
{
	struct pcnet32_private *lp;
	int i, media;
	int fdx, mii, fset, dxsuflo;
	int chip_version;
	char *chipname;
	struct net_device *dev;
	const struct pcnet32_access *a = NULL;
	u8 promaddr[6];
	int ret = -ENODEV;

	/* reset the chip */
	pcnet32_wio_reset(ioaddr);

	/* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
	if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
		a = &pcnet32_wio;
	} else {
		/* fall back to 32-bit (dword) register access */
		pcnet32_dwio_reset(ioaddr);
		if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 && pcnet32_dwio_check(ioaddr)) {
			a = &pcnet32_dwio;
		} else {
			if (pcnet32_debug & NETIF_MSG_PROBE)
				pr_err("No access methods\n");
			goto err_release_region;
		}
	}

	/* chip ID lives in CSR88 (low word) and CSR89 (high word) */
	chip_version = a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
	if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
		pr_info(" PCnet chip version is %#x\n", chip_version);
	if ((chip_version & 0xfff) != 0x003) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_info("Unsupported chip version\n");
		goto err_release_region;
	}

	/* initialize variables */
	fdx = mii = fset = dxsuflo = 0;
	chip_version = (chip_version >> 12) & 0xffff;

	/* per-variant capabilities: fdx = full duplex capable,
	 * mii = has MII interface, fset = set FIFO/NOUFLO tweaks below */
	switch (chip_version) {
	case 0x2420:
		chipname = "PCnet/PCI 79C970";	/* PCI */
		break;
	case 0x2430:
		if (shared)
			chipname = "PCnet/PCI 79C970";	/* 970 gives the wrong chip id back */
		else
			chipname = "PCnet/32 79C965";	/* 486/VL bus */
		break;
	case 0x2621:
		chipname = "PCnet/PCI II 79C970A";	/* PCI */
		fdx = 1;
		break;
	case 0x2623:
		chipname = "PCnet/FAST 79C971";	/* PCI */
		fdx = 1;
		mii = 1;
		fset = 1;
		break;
	case 0x2624:
		chipname = "PCnet/FAST+ 79C972";	/* PCI */
		fdx = 1;
		mii = 1;
		fset = 1;
		break;
	case 0x2625:
		chipname = "PCnet/FAST III 79C973";	/* PCI */
		fdx = 1;
		mii = 1;
		break;
	case 0x2626:
		chipname = "PCnet/Home 79C978";	/* PCI */
		fdx = 1;
		/*
		 * This is based on specs published at www.amd.com.  This section
		 * assumes that a card with a 79C978 wants to go into standard
		 * ethernet mode.  The 79C978 can also go into 1Mb HomePNA mode,
		 * and the module option homepna=1 can select this instead.
		 */
		media = a->read_bcr(ioaddr, 49);
		media &= ~3;	/* default to 10Mb ethernet */
		if (cards_found < MAX_UNITS && homepna[cards_found])
			media |= 1;	/* switch to home wiring mode */
		if (pcnet32_debug & NETIF_MSG_PROBE)
			printk(KERN_DEBUG PFX "media set to %sMbit mode\n", (media & 1) ? "1" : "10");
		a->write_bcr(ioaddr, 49, media);
		break;
	case 0x2627:
		chipname = "PCnet/FAST III 79C975";	/* PCI */
		fdx = 1;
		mii = 1;
		break;
	case 0x2628:
		chipname = "PCnet/PRO 79C976";
		fdx = 1;
		mii = 1;
		break;
	default:
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_info("PCnet version %#x, no PCnet32 chip\n", chip_version);
		goto err_release_region;
	}

	/*
	 * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
	 * starting until the packet is loaded. Strike one for reliability, lose
	 * one for latency - although on PCI this isn't a big loss. Older chips
	 * have FIFO's smaller than a packet, so you can't do this.
	 * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
	 */
	if (fset) {
		a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
		a->write_csr(ioaddr, 80, (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
		dxsuflo = 1;
	}

	dev = alloc_etherdev(sizeof(*lp));
	if (!dev) {
		ret = -ENOMEM;
		goto err_release_region;
	}

	if (pdev)
		SET_NETDEV_DEV(dev, &pdev->dev);

	if (pcnet32_debug & NETIF_MSG_PROBE)
		pr_info("%s at %#3lx,", chipname, ioaddr);

	/* In most chips, after a chip reset, the ethernet address is read from the
	 * station address PROM at the base address and programmed into the
	 * "Physical Address Registers" CSR12-14.
	 * As a precautionary measure, we read the PROM values and complain if
	 * they disagree with the CSRs.  If they miscompare, and the PROM addr
	 * is valid, then the PROM addr is used.
	 */
	for (i = 0; i < 3; i++) {
		unsigned int val;
		val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
		/* There may be endianness issues here. */
		dev->dev_addr[2 * i] = val & 0x0ff;
		dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
	}

	/* read PROM address and compare with CSR address */
	for (i = 0; i < 6; i++)
		promaddr[i] = inb(ioaddr + i);

	if (memcmp(promaddr, dev->dev_addr, 6) || !is_valid_ether_addr(dev->dev_addr)) {
		if (is_valid_ether_addr(promaddr)) {
			if (pcnet32_debug & NETIF_MSG_PROBE) {
				pr_cont(" warning: CSR address invalid,\n");
				pr_info(" using instead PROM address of");
			}
			memcpy(dev->dev_addr, promaddr, 6);
		}
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
	if (!is_valid_ether_addr(dev->perm_addr))
		memset(dev->dev_addr, 0, ETH_ALEN);

	if (pcnet32_debug & NETIF_MSG_PROBE) {
		pr_cont(" %pM", dev->dev_addr);

		/* Version 0x2623 and 0x2624 */
		if (((chip_version + 1) & 0xfffe) == 0x2624) {
			i = a->read_csr(ioaddr, 80) & 0x0C00;	/* Check tx_start_pt */
			pr_info(" tx_start_pt(0x%04x):", i);
			switch (i >> 10) {
			case 0:
				pr_cont(" 20 bytes,");
				break;
			case 1:
				pr_cont(" 64 bytes,");
				break;
			case 2:
				pr_cont(" 128 bytes,");
				break;
			case 3:
				pr_cont("~220 bytes,");
				break;
			}
			i = a->read_bcr(ioaddr, 18);	/* Check Burst/Bus control */
			pr_cont(" BCR18(%x):", i & 0xffff);
			if (i & (1 << 5))
				pr_cont("BurstWrEn ");
			if (i & (1 << 6))
				pr_cont("BurstRdEn ");
			if (i & (1 << 7))
				pr_cont("DWordIO ");
			if (i & (1 << 11))
				pr_cont("NoUFlow ");
			i = a->read_bcr(ioaddr, 25);
			pr_info(" SRAMSIZE=0x%04x,", i << 8);
			i = a->read_bcr(ioaddr, 26);
			pr_cont(" SRAM_BND=0x%04x,", i << 8);
			i = a->read_bcr(ioaddr, 27);
			if (i & (1 << 14))
				pr_cont("LowLatRx");
		}
	}

	dev->base_addr = ioaddr;
	lp = netdev_priv(dev);
	/* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
	lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block), &lp->init_dma_addr);
	if (!lp->init_block) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("Consistent memory allocation failed\n");
		ret = -ENOMEM;
		goto err_free_netdev;
	}
	lp->pci_dev = pdev;

	lp->dev = dev;

	spin_lock_init(&lp->lock);

	lp->name = chipname;
	lp->shared_irq = shared;
	lp->tx_ring_size = TX_RING_SIZE;	/* default tx ring size */
	lp->rx_ring_size = RX_RING_SIZE;	/* default rx ring size */
	lp->tx_mod_mask = lp->tx_ring_size - 1;
	lp->rx_mod_mask = lp->rx_ring_size - 1;
	lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
	lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
	lp->mii_if.full_duplex = fdx;
	lp->mii_if.phy_id_mask = 0x1f;
	lp->mii_if.reg_num_mask = 0x1f;
	lp->dxsuflo = dxsuflo;
	lp->mii = mii;
	lp->chip_version = chip_version;
	lp->msg_enable = pcnet32_debug;
	/* NOTE(review): sizeof(options_mapping) is in bytes, not elements.
	 * If options_mapping is an array of int, option values between
	 * ARRAY_SIZE(options_mapping) and sizeof(options_mapping)-1 pass
	 * this check and index out of bounds — consider ARRAY_SIZE().
	 */
	if ((cards_found >= MAX_UNITS) || (options[cards_found] >= sizeof(options_mapping)))
		lp->options = PCNET32_PORT_ASEL;
	else
		lp->options = options_mapping[options[cards_found]];
	lp->mii_if.dev = dev;
	lp->mii_if.mdio_read = mdio_read;
	lp->mii_if.mdio_write = mdio_write;

	/* napi.weight is used in both the napi and non-napi cases */
	lp->napi.weight = lp->rx_ring_size / 2;

	netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2);

	if (fdx && !(lp->options & PCNET32_PORT_ASEL) && ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
		lp->options |= PCNET32_PORT_FD;

	lp->a = a;

	/* prior to register_netdev, dev->name is not yet correct */
	if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
		ret = -ENOMEM;
		goto err_free_ring;
	}
	/* detect special T1/E1 WAN card by checking for MAC address */
	if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 && dev->dev_addr[2] == 0x75)
		lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;

	lp->init_block->mode = cpu_to_le16(0x0003);	/* Disable Rx and Tx. */
	lp->init_block->tlen_rlen = cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
	for (i = 0; i < 6; i++)
		lp->init_block->phys_addr[i] = dev->dev_addr[i];
	lp->init_block->filter[0] = 0x00000000;
	lp->init_block->filter[1] = 0x00000000;
	lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
	lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);

	/* switch pcnet32 to 32bit mode */
	a->write_bcr(ioaddr, 20, 2);

	/* tell the chip where the init block lives (CSR1 = low, CSR2 = high) */
	a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
	a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));

	if (pdev) {		/* use the IRQ provided by PCI */
		dev->irq = pdev->irq;
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_cont(" assigned IRQ %d\n", dev->irq);
	} else {
		unsigned long irq_mask = probe_irq_on();

		/*
		 * To auto-IRQ we enable the initialization-done and DMA error
		 * interrupts. For ISA boards we get a DMA error, but VLB and PCI
		 * boards will work.
		 */
		/* Trigger an initialization just for the interrupt. */
		a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_INIT);
		mdelay(1);

		dev->irq = probe_irq_off(irq_mask);
		if (!dev->irq) {
			if (pcnet32_debug & NETIF_MSG_PROBE)
				pr_cont(", failed to detect IRQ line\n");
			ret = -ENODEV;
			goto err_free_ring;
		}
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_cont(", probed IRQ %d\n", dev->irq);
	}

	/* Set the mii phy_id so that we can query the link state */
	if (lp->mii) {
		/* lp->phycount and lp->phymask are set to 0 by memset above */
		lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
		/* scan for PHYs */
		for (i = 0; i < PCNET32_MAX_PHYS; i++) {
			unsigned short id1, id2;

			id1 = mdio_read(dev, i, MII_PHYSID1);
			if (id1 == 0xffff)
				continue;
			id2 = mdio_read(dev, i, MII_PHYSID2);
			if (id2 == 0xffff)
				continue;
			if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
				continue;	/* 79C971 & 79C972 have phantom phy at id 31 */
			lp->phycount++;
			lp->phymask |= (1 << i);
			lp->mii_if.phy_id = i;
			if (pcnet32_debug & NETIF_MSG_PROBE)
				pr_info("Found PHY %04x:%04x at address %d\n", id1, id2, i);
		}
		lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
		if (lp->phycount > 1)
			lp->options |= PCNET32_PORT_MII;
	}

	init_timer(&lp->watchdog_timer);
	lp->watchdog_timer.data = (unsigned long)dev;
	lp->watchdog_timer.function = (void *)&pcnet32_watchdog;

	/* The PCNET32-specific entries in the device structure. */
	dev->netdev_ops = &pcnet32_netdev_ops;
	dev->ethtool_ops = &pcnet32_ethtool_ops;
	dev->watchdog_timeo = (5 * HZ);

	/* Fill in the generic fields of the device structure. */
	if (register_netdev(dev))
		goto err_free_ring;

	if (pdev) {
		pci_set_drvdata(pdev, dev);
	} else {
		/* VLB devices are kept on a driver-private singly linked list */
		lp->next = pcnet32_dev;
		pcnet32_dev = dev;
	}

	if (pcnet32_debug & NETIF_MSG_PROBE)
		pr_info("%s: registered as %s\n", dev->name, lp->name);
	cards_found++;

	/* enable LED writes */
	a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);

	return 0;

err_free_ring:
	pcnet32_free_ring(dev);
	pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), lp->init_block, lp->init_dma_addr);
err_free_netdev:
	free_netdev(dev);
err_release_region:
	release_region(ioaddr, PCNET32_TOTAL_SIZE);
	return ret;
}

/* if any allocation fails, caller must also call pcnet32_free_ring */
static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
{
	struct pcnet32_private *lp = netdev_priv(dev);

	/* descriptor rings are DMA-coherent; bookkeeping arrays are kcalloc'd */
	lp->tx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, &lp->tx_ring_dma_addr);
	if (lp->tx_ring == NULL) {
		netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
		return -ENOMEM;
	}

	lp->rx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, &lp->rx_ring_dma_addr);
	if (lp->rx_ring == NULL) {
		netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
		return -ENOMEM;
	}

	lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t), GFP_ATOMIC);
	if (!lp->tx_dma_addr) {
		netif_err(lp, drv, dev, "Memory allocation failed\n");
		return -ENOMEM;
	}

	lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t), GFP_ATOMIC);
	if (!lp->rx_dma_addr) {
		netif_err(lp, drv, dev, "Memory allocation failed\n");
		return -ENOMEM;
	}

	lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *), GFP_ATOMIC);
	if (!lp->tx_skbuff) {
		netif_err(lp, drv, dev, "Memory allocation failed\n");
		return -ENOMEM;
	}

	lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *), GFP_ATOMIC);
	if (!lp->rx_skbuff) {
		netif_err(lp, drv, dev, "Memory allocation failed\n");
		return -ENOMEM;
	}

	return 0;
}

static void pcnet32_free_ring(struct
net_device *dev)
{
	/* Release everything pcnet32_alloc_ring allocated.  Safe to call
	 * after a partial allocation failure: kfree(NULL) is a no-op and
	 * the coherent rings are guarded by NULL checks.
	 */
	struct pcnet32_private *lp = netdev_priv(dev);

	kfree(lp->tx_skbuff);
	lp->tx_skbuff = NULL;

	kfree(lp->rx_skbuff);
	lp->rx_skbuff = NULL;

	kfree(lp->tx_dma_addr);
	lp->tx_dma_addr = NULL;

	kfree(lp->rx_dma_addr);
	lp->rx_dma_addr = NULL;

	if (lp->tx_ring) {
		pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, lp->tx_ring, lp->tx_ring_dma_addr);
		lp->tx_ring = NULL;
	}

	if (lp->rx_ring) {
		pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, lp->rx_ring, lp->rx_ring_dma_addr);
		lp->rx_ring = NULL;
	}
}

/*
 * Bring the interface up: request the IRQ, reset the chip, program the
 * port/duplex/PHY configuration, initialize the rings, enable NAPI and
 * start the chip.  Returns 0 on success or a negative errno.
 */
static int pcnet32_open(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	struct pci_dev *pdev = lp->pci_dev;
	unsigned long ioaddr = dev->base_addr;
	u16 val;
	int i;
	int rc;
	unsigned long flags;

	if (request_irq(dev->irq, pcnet32_interrupt, lp->shared_irq ? IRQF_SHARED : 0, dev->name, (void *)dev)) {
		return -EAGAIN;
	}

	spin_lock_irqsave(&lp->lock, flags);
	/* Check for a valid station address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		rc = -EINVAL;
		goto err_free_irq;
	}

	/* Reset the PCNET32 */
	lp->a->reset(ioaddr);

	/* switch pcnet32 to 32bit mode */
	lp->a->write_bcr(ioaddr, 20, 2);

	netif_printk(lp, ifup, KERN_DEBUG, dev, "%s() irq %d tx/rx rings %#x/%#x init %#x\n", __func__, dev->irq, (u32) (lp->tx_ring_dma_addr), (u32) (lp->rx_ring_dma_addr), (u32) (lp->init_dma_addr));

	/* set/reset autoselect bit */
	val = lp->a->read_bcr(ioaddr, 2) & ~2;
	if (lp->options & PCNET32_PORT_ASEL)
		val |= 2;
	lp->a->write_bcr(ioaddr, 2, val);

	/* handle full duplex setting */
	if (lp->mii_if.full_duplex) {
		val = lp->a->read_bcr(ioaddr, 9) & ~3;
		if (lp->options & PCNET32_PORT_FD) {
			val |= 1;
			if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
				val |= 2;
		} else if (lp->options & PCNET32_PORT_ASEL) {
			/* workaround of xSeries250, turn on for 79C975 only */
			if (lp->chip_version == 0x2627)
				val |= 3;
		}
		lp->a->write_bcr(ioaddr, 9, val);
	}

	/* set/reset GPSI bit in test register */
	val = lp->a->read_csr(ioaddr,
124) & ~0x10;
	if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
		val |= 0x10;
	lp->a->write_csr(ioaddr, 124, val);

	/* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
	if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT && (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
		if (lp->options & PCNET32_PORT_ASEL) {
			lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
			netif_printk(lp, link, KERN_DEBUG, dev, "Setting 100Mb-Full Duplex\n");
		}
	}
	if (lp->phycount < 2) {
		/*
		 * 24 Jun 2004 according AMD, in order to change the PHY,
		 * DANAS (or DISPM for 79C976) must be set; then select the speed,
		 * duplex, and/or enable auto negotiation, and clear DANAS
		 */
		if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
			lp->a->write_bcr(ioaddr, 32, lp->a->read_bcr(ioaddr, 32) | 0x0080);
			/* disable Auto Negotiation, set 10Mpbs, HD */
			val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
			if (lp->options & PCNET32_PORT_FD)
				val |= 0x10;
			if (lp->options & PCNET32_PORT_100)
				val |= 0x08;
			lp->a->write_bcr(ioaddr, 32, val);
		} else {
			if (lp->options & PCNET32_PORT_ASEL) {
				lp->a->write_bcr(ioaddr, 32, lp->a->read_bcr(ioaddr, 32) | 0x0080);
				/* enable auto negotiate, setup, disable fd */
				val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
				val |= 0x20;
				lp->a->write_bcr(ioaddr, 32, val);
			}
		}
	} else {
		int first_phy = -1;
		u16 bmcr;
		u32 bcr9;
		struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };

		/*
		 * There is really no good other way to handle multiple PHYs
		 * other than turning off all automatics
		 */
		val = lp->a->read_bcr(ioaddr, 2);
		lp->a->write_bcr(ioaddr, 2, val & ~2);
		val = lp->a->read_bcr(ioaddr, 32);
		lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7));	/* stop MII manager */

		if (!(lp->options & PCNET32_PORT_ASEL)) {
			/* setup ecmd */
			ecmd.port = PORT_MII;
			ecmd.transceiver = XCVR_INTERNAL;
			ecmd.autoneg = AUTONEG_DISABLE;
			ethtool_cmd_speed_set(&ecmd, (lp->options & PCNET32_PORT_100) ?
SPEED_100 : SPEED_10); bcr9 = lp->a->read_bcr(ioaddr, 9); if (lp->options & PCNET32_PORT_FD) { ecmd.duplex = DUPLEX_FULL; bcr9 |= (1 << 0); } else { ecmd.duplex = DUPLEX_HALF; bcr9 |= ~(1 << 0); } lp->a->write_bcr(ioaddr, 9, bcr9); } for (i = 0; i < PCNET32_MAX_PHYS; i++) { if (lp->phymask & (1 << i)) { /* isolate all but the first PHY */ bmcr = mdio_read(dev, i, MII_BMCR); if (first_phy == -1) { first_phy = i; mdio_write(dev, i, MII_BMCR, bmcr & ~BMCR_ISOLATE); } else { mdio_write(dev, i, MII_BMCR, bmcr | BMCR_ISOLATE); } /* use mii_ethtool_sset to setup PHY */ lp->mii_if.phy_id = i; ecmd.phy_address = i; if (lp->options & PCNET32_PORT_ASEL) { mii_ethtool_gset(&lp->mii_if, &ecmd); ecmd.autoneg = AUTONEG_ENABLE; } mii_ethtool_sset(&lp->mii_if, &ecmd); } } lp->mii_if.phy_id = first_phy; netif_info(lp, link, dev, "Using PHY number %d\n", first_phy); } #ifdef DO_DXSUFLO if (lp->dxsuflo) { /* Disable transmit stop on underflow */ val = lp->a->read_csr(ioaddr, CSR3); val |= 0x40; lp->a->write_csr(ioaddr, CSR3, val); } #endif lp->init_block->mode = cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7); pcnet32_load_multicast(dev); if (pcnet32_init_ring(dev)) { rc = -ENOMEM; goto err_free_ring; } napi_enable(&lp->napi); /* Re-initialize the PCNET32, and start it when done. */ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT); netif_start_queue(dev); if (lp->chip_version >= PCNET32_79C970A) { /* Print the link status and start the watchdog */ pcnet32_check_media(dev, 1); mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT); } i = 0; while (i++ < 100) if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON) break; /* * We used to clear the InitDone bit, 0x0100, here but Mark Stockton * reports that doing so triggers a bug in the '974. 
*/ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL); netif_printk(lp, ifup, KERN_DEBUG, dev, "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n", i, (u32) (lp->init_dma_addr), lp->a->read_csr(ioaddr, CSR0)); spin_unlock_irqrestore(&lp->lock, flags); return 0; /* Always succeed */ err_free_ring: /* free any allocated skbuffs */ pcnet32_purge_rx_ring(dev); /* * Switch back to 16bit mode to avoid problems with dumb * DOS packet driver after a warm reboot */ lp->a->write_bcr(ioaddr, 20, 4); err_free_irq: spin_unlock_irqrestore(&lp->lock, flags); free_irq(dev->irq, dev); return rc; } /* * The LANCE has been halted for one reason or another (busmaster memory * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure, * etc.). Modern LANCE variants always reload their ring-buffer * configuration when restarted, so we must reinitialize our ring * context before restarting. As part of this reinitialization, * find all packets still on the Tx ring and pretend that they had been * sent (in effect, drop the packets on the floor) - the higher-level * protocols will time out and retransmit. It'd be better to shuffle * these skbs to a temp list and then actually re-Tx them after * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com */ static void pcnet32_purge_tx_ring(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); int i; for (i = 0; i < lp->tx_ring_size; i++) { lp->tx_ring[i].status = 0; /* CPU owns buffer */ wmb(); /* Make sure adapter sees owner change */ if (lp->tx_skbuff[i]) { pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE); dev_kfree_skb_any(lp->tx_skbuff[i]); } lp->tx_skbuff[i] = NULL; lp->tx_dma_addr[i] = 0; } } /* Initialize the PCNET32 Rx and Tx rings. 
*/ static int pcnet32_init_ring(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); int i; lp->tx_full = 0; lp->cur_rx = lp->cur_tx = 0; lp->dirty_rx = lp->dirty_tx = 0; for (i = 0; i < lp->rx_ring_size; i++) { struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; if (rx_skbuff == NULL) { lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB); rx_skbuff = lp->rx_skbuff[i]; if (!rx_skbuff) { /* there is not much we can do at this point */ netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n", __func__); return -1; } skb_reserve(rx_skbuff, NET_IP_ALIGN); } rmb(); if (lp->rx_dma_addr[i] == 0) lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->data, PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]); lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE); wmb(); /* Make sure owner changes after all others are visible */ lp->rx_ring[i].status = cpu_to_le16(0x8000); } /* The Tx buffer address is filled in as needed, but we do need to clear * the upper ownership bit. */ for (i = 0; i < lp->tx_ring_size; i++) { lp->tx_ring[i].status = 0; /* CPU owns buffer */ wmb(); /* Make sure adapter sees owner change */ lp->tx_ring[i].base = 0; lp->tx_dma_addr[i] = 0; } lp->init_block->tlen_rlen = cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits); for (i = 0; i < 6; i++) lp->init_block->phys_addr[i] = dev->dev_addr[i]; lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr); lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr); wmb(); /* Make sure all changes are visible */ return 0; } /* the pcnet32 has been issued a stop or reset. Wait for the stop bit * then flush the pending transmit operations, re-initialize the ring, * and tell the chip to initialize. 
 */
static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;
	int i;

	/* wait for stop */
	for (i = 0; i < 100; i++)
		if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
			break;

	if (i >= 100)
		netif_err(lp, drv, dev, "%s timed out waiting for stop\n", __func__);

	pcnet32_purge_tx_ring(dev);
	if (pcnet32_init_ring(dev))
		return;

	/* ReInit Ring */
	lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
	i = 0;
	while (i++ < 1000)
		if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
			break;

	/* restart with whatever mode bits the caller asked for */
	lp->a->write_csr(ioaddr, CSR0, csr0_bits);
}

/*
 * Transmit watchdog: the stack saw no Tx completion within
 * dev->watchdog_timeo.  Dump ring state (if enabled), stop and restart
 * the chip, then wake the queue.
 */
static void pcnet32_tx_timeout(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr, flags;

	spin_lock_irqsave(&lp->lock, flags);
	/* Transmitter timeout, serious problems. */
	if (pcnet32_debug & NETIF_MSG_DRV)
		pr_err("%s: transmit timed out, status %4.4x, resetting\n", dev->name, lp->a->read_csr(ioaddr, CSR0));
	lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
	dev->stats.tx_errors++;
	if (netif_msg_tx_err(lp)) {
		int i;
		printk(KERN_DEBUG " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.", lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "", lp->cur_rx);
		for (i = 0; i < lp->rx_ring_size; i++)
			printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", le32_to_cpu(lp->rx_ring[i].base), (-le16_to_cpu(lp->rx_ring[i].buf_length)) & 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length), le16_to_cpu(lp->rx_ring[i].status));
		for (i = 0; i < lp->tx_ring_size; i++)
			printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", le32_to_cpu(lp->tx_ring[i].base), (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff, le32_to_cpu(lp->tx_ring[i].misc), le16_to_cpu(lp->tx_ring[i].status));
		printk("\n");
	}
	pcnet32_restart(dev, CSR0_NORMAL);

	dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_wake_queue(dev);

	spin_unlock_irqrestore(&lp->lock, flags);
}

/*
 * Queue one skb for transmission: fill the next Tx descriptor (length is
 * stored as a two's-complement negative per LANCE convention), map the
 * buffer, hand ownership to the chip last, and trigger a Tx poll.
 */
static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;
	u16 status;
	int entry;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	netif_printk(lp, tx_queued, KERN_DEBUG, dev, "%s() called, csr0 %4.4x\n", __func__, lp->a->read_csr(ioaddr, CSR0));

	/* Default status -- will not enable Successful-TxDone
	 * interrupt when that option is available to us.
	 */
	status = 0x8300;

	/* Fill in a Tx ring entry */

	/* Mask to ring buffer boundary. */
	entry = lp->cur_tx & lp->tx_mod_mask;

	/* Caution: the write order is important here, set the status
	 * with the "ownership" bits last. */

	lp->tx_ring[entry].length = cpu_to_le16(-skb->len);

	lp->tx_ring[entry].misc = 0x00000000;

	lp->tx_skbuff[entry] = skb;
	lp->tx_dma_addr[entry] = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
	wmb();	/* Make sure owner changes after all others are visible */
	lp->tx_ring[entry].status = cpu_to_le16(status);

	lp->cur_tx++;
	dev->stats.tx_bytes += skb->len;

	/* Trigger an immediate send poll. */
	lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);

	/* stop the queue if the next descriptor is still in use */
	if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
		lp->tx_full = 1;
		netif_stop_queue(dev);
	}
	spin_unlock_irqrestore(&lp->lock, flags);
	return NETDEV_TX_OK;
}

/* The PCNET32 interrupt handler.
 */
static irqreturn_t pcnet32_interrupt(int irq, void *dev_id)
{
	/* Acknowledge and log interrupt causes, then defer the actual Rx/Tx
	 * work to NAPI (pcnet32_poll).  boguscnt bounds the loop so a stuck
	 * interrupt source cannot wedge the CPU.
	 */
	struct net_device *dev = dev_id;
	struct pcnet32_private *lp;
	unsigned long ioaddr;
	u16 csr0;
	int boguscnt = max_interrupt_work;

	ioaddr = dev->base_addr;
	lp = netdev_priv(dev);

	spin_lock(&lp->lock);

	csr0 = lp->a->read_csr(ioaddr, CSR0);
	while ((csr0 & 0x8f00) && --boguscnt >= 0) {
		if (csr0 == 0xffff)
			break;	/* PCMCIA remove happened */
		/* Acknowledge all of the current interrupt sources ASAP. */
		lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);

		netif_printk(lp, intr, KERN_DEBUG, dev, "interrupt csr0=%#2.2x new csr=%#2.2x\n", csr0, lp->a->read_csr(ioaddr, CSR0));

		/* Log misc errors. */
		if (csr0 & 0x4000)
			dev->stats.tx_errors++;	/* Tx babble. */
		if (csr0 & 0x1000) {
			/*
			 * This happens when our receive ring is full. This
			 * shouldn't be a problem as we will see normal rx
			 * interrupts for the frames in the receive ring.  But
			 * there are some PCI chipsets (I can reproduce this
			 * on SP3G with Intel saturn chipset) which have
			 * sometimes problems and will fill up the receive
			 * ring with error descriptors.  In this situation we
			 * don't get a rx interrupt, but a missed frame
			 * interrupt sooner or later.
			 */
			dev->stats.rx_errors++;	/* Missed a Rx frame. */
		}
		if (csr0 & 0x0800) {
			netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n", csr0);
			/* unlike for the lance, there is no restart needed */
		}
		if (napi_schedule_prep(&lp->napi)) {
			u16 val;
			/* set interrupt masks */
			val = lp->a->read_csr(ioaddr, CSR3);
			val |= 0x5f00;
			lp->a->write_csr(ioaddr, CSR3, val);

			__napi_schedule(&lp->napi);
			break;
		}
		csr0 = lp->a->read_csr(ioaddr, CSR0);
	}

	netif_printk(lp, intr, KERN_DEBUG, dev, "exiting interrupt, csr0=%#4.4x\n", lp->a->read_csr(ioaddr, CSR0));

	spin_unlock(&lp->lock);

	return IRQ_HANDLED;
}

/*
 * Take the interface down: stop the watchdog, queue and NAPI, record
 * the missed-frame count from CSR112, then stop the chip, release the
 * IRQ and purge both rings.
 */
static int pcnet32_close(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long flags;

	del_timer_sync(&lp->watchdog_timer);

	netif_stop_queue(dev);
	napi_disable(&lp->napi);

	spin_lock_irqsave(&lp->lock, flags);

	dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);

	netif_printk(lp, ifdown, KERN_DEBUG, dev, "Shutting down ethercard, status was %2.2x\n", lp->a->read_csr(ioaddr, CSR0));

	/* We stop the PCNET32 here -- it occasionally polls memory if we don't.
*/ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* * Switch back to 16bit mode to avoid problems with dumb * DOS packet driver after a warm reboot */ lp->a->write_bcr(ioaddr, 20, 4); spin_unlock_irqrestore(&lp->lock, flags); free_irq(dev->irq, dev); spin_lock_irqsave(&lp->lock, flags); pcnet32_purge_rx_ring(dev); pcnet32_purge_tx_ring(dev); spin_unlock_irqrestore(&lp->lock, flags); return 0; } static struct net_device_stats *pcnet32_get_stats(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); unsigned long ioaddr = dev->base_addr; unsigned long flags; spin_lock_irqsave(&lp->lock, flags); dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112); spin_unlock_irqrestore(&lp->lock, flags); return &dev->stats; } /* taken from the sunlance driver, which it took from the depca driver */ static void pcnet32_load_multicast(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); volatile struct pcnet32_init_block *ib = lp->init_block; volatile __le16 *mcast_table = (__le16 *)ib->filter; struct netdev_hw_addr *ha; unsigned long ioaddr = dev->base_addr; int i; u32 crc; /* set all multicast bits */ if (dev->flags & IFF_ALLMULTI) { ib->filter[0] = cpu_to_le32(~0U); ib->filter[1] = cpu_to_le32(~0U); lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff); lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff); lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff); lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff); return; } /* clear the multicast filter */ ib->filter[0] = 0; ib->filter[1] = 0; /* Add addresses */ netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(6, ha->addr); crc = crc >> 26; mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf)); } for (i = 0; i < 4; i++) lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i, le16_to_cpu(mcast_table[i])); } /* * Set or clear the multicast filter for this adaptor. 
 */
static void pcnet32_set_multicast_list(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr, flags;
	struct pcnet32_private *lp = netdev_priv(dev);
	int csr15, suspended;

	spin_lock_irqsave(&lp->lock, flags);
	/* try to suspend the chip so the mode change does not need a full
	 * stop/restart; fall back to restart below if suspend failed */
	suspended = pcnet32_suspend(dev, &flags, 0);
	csr15 = lp->a->read_csr(ioaddr, CSR15);
	if (dev->flags & IFF_PROMISC) {
		/* Log any net taps. */
		netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
		/* 0x8000 = PROM bit in the mode word / CSR15 */
		lp->init_block->mode = cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << 7);
		lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
	} else {
		lp->init_block->mode = cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
		lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
		pcnet32_load_multicast(dev);
	}

	if (suspended) {
		int csr5;
		/* clear SUSPEND (SPND) - CSR5 bit 0 */
		csr5 = lp->a->read_csr(ioaddr, CSR5);
		lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
	} else {
		lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
		pcnet32_restart(dev, CSR0_NORMAL);
		netif_wake_queue(dev);
	}

	spin_unlock_irqrestore(&lp->lock, flags);
}

/* This routine assumes that the lp->lock is held */
static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
	/* MII read via BCR33 (address) / BCR34 (data); returns 0 if the
	 * chip has no MII interface. */
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;
	u16 val_out;

	if (!lp->mii)
		return 0;

	lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
	val_out = lp->a->read_bcr(ioaddr, 34);

	return val_out;
}

/* This routine assumes that the lp->lock is held */
static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
{
	/* MII write via BCR33 (address) / BCR34 (data); no-op without MII. */
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;

	if (!lp->mii)
		return;

	lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
	lp->a->write_bcr(ioaddr, 34, val);
}

/* ndo_do_ioctl: forward SIOC[GS]MII* requests to the generic MII layer;
 * -EOPNOTSUPP when the chip has no MII.
 */
static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	/* SIOC[GS]MIIxxx ioctls */
	if (lp->mii) {
		spin_lock_irqsave(&lp->lock, flags);
		rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
		spin_unlock_irqrestore(&lp->lock, flags);
	} else {
		rc = -EOPNOTSUPP;
	}

	return rc;
}

/* Look for another PHY (than the currently active one) with link up;
 * if found, isolate the old PHY, de-isolate the new one and switch
 * lp->mii_if.phy_id to it.  Returns 1 if a switch happened, 0 otherwise.
 */
static int pcnet32_check_otherphy(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	struct mii_if_info mii = lp->mii_if;	/* local copy so we can probe other ids */
	u16 bmcr;
	int i;

	for (i = 0; i < PCNET32_MAX_PHYS; i++) {
		if (i == lp->mii_if.phy_id)
			continue;	/* skip active phy */
		if (lp->phymask & (1 << i)) {
			mii.phy_id = i;
			if (mii_link_ok(&mii)) {
				/* found PHY with active link */
				netif_info(lp, link, dev, "Using PHY number %d\n", i);

				/* isolate inactive phy */
				bmcr = mdio_read(dev, lp->mii_if.phy_id, MII_BMCR);
				mdio_write(dev, lp->mii_if.phy_id, MII_BMCR, bmcr | BMCR_ISOLATE);

				/* de-isolate new phy */
				bmcr = mdio_read(dev, i, MII_BMCR);
				mdio_write(dev, i, MII_BMCR, bmcr & ~BMCR_ISOLATE);

				/* set new phy address */
				lp->mii_if.phy_id = i;
				return 1;
			}
		}
	}
	return 0;
}

/*
 * Show the status of the media.  Similar to mii_check_media however it
 * correctly shows the link speed for all (tested) pcnet32 variants.
 * Devices with no mii just report link state without speed.
 *
 * Caller is assumed to hold and release the lp->lock.
 */
static void pcnet32_check_media(struct net_device *dev, int verbose)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int curr_link;
	int prev_link = netif_carrier_ok(dev) ?
1 : 0; u32 bcr9; if (lp->mii) { curr_link = mii_link_ok(&lp->mii_if); } else { ulong ioaddr = dev->base_addr; /* card base I/O address */ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0); } if (!curr_link) { if (prev_link || verbose) { netif_carrier_off(dev); netif_info(lp, link, dev, "link down\n"); } if (lp->phycount > 1) { curr_link = pcnet32_check_otherphy(dev); prev_link = 0; } } else if (verbose || !prev_link) { netif_carrier_on(dev); if (lp->mii) { if (netif_msg_link(lp)) { struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET }; mii_ethtool_gset(&lp->mii_if, &ecmd); netdev_info(dev, "link up, %uMbps, %s-duplex\n", ethtool_cmd_speed(&ecmd), (ecmd.duplex == DUPLEX_FULL) ? "full" : "half"); } bcr9 = lp->a->read_bcr(dev->base_addr, 9); if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) { if (lp->mii_if.full_duplex) bcr9 |= (1 << 0); else bcr9 &= ~(1 << 0); lp->a->write_bcr(dev->base_addr, 9, bcr9); } } else { netif_info(lp, link, dev, "link up\n"); } } } /* * Check for loss of link and link establishment. * Can not use mii_check_media because it does nothing if mode is forced. 
*/ static void pcnet32_watchdog(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); unsigned long flags; /* Print the link status if it has changed */ spin_lock_irqsave(&lp->lock, flags); pcnet32_check_media(dev, 0); spin_unlock_irqrestore(&lp->lock, flags); mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT)); } static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *dev = pci_get_drvdata(pdev); if (netif_running(dev)) { netif_device_detach(dev); pcnet32_close(dev); } pci_save_state(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } static int pcnet32_pm_resume(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); if (netif_running(dev)) { pcnet32_open(dev); netif_device_attach(dev); } return 0; } static void __devexit pcnet32_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); if (dev) { struct pcnet32_private *lp = netdev_priv(dev); unregister_netdev(dev); pcnet32_free_ring(dev); release_region(dev->base_addr, PCNET32_TOTAL_SIZE); pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), lp->init_block, lp->init_dma_addr); free_netdev(dev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } } static struct pci_driver pcnet32_driver = { .name = DRV_NAME, .probe = pcnet32_probe_pci, .remove = __devexit_p(pcnet32_remove_one), .id_table = pcnet32_pci_tbl, .suspend = pcnet32_pm_suspend, .resume = pcnet32_pm_resume, }; /* An additional parameter that may be passed in... 
*/
static int debug = -1;		/* -1 means "use PCNET32_MSG_DEFAULT" */
static int tx_start_pt = -1;	/* -1 means "leave tx_start untouched" */
static int pcnet32_have_pci;	/* set once pci_register_driver succeeds */

module_param(debug, int, 0);
MODULE_PARM_DESC(debug, DRV_NAME " debug level");
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work,
		 DRV_NAME " maximum events handled per interrupt");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak,
		 DRV_NAME " copy breakpoint for copy-only-tiny-frames");
module_param(tx_start_pt, int, 0);
MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
module_param(pcnet32vlb, int, 0);
MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)");
module_param_array(options, int, NULL, 0);
MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)");
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */
module_param_array(homepna, int, NULL, 0);
MODULE_PARM_DESC(homepna,
		 DRV_NAME
		 " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet");

MODULE_AUTHOR("Thomas Bogendoerfer");
MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards");
MODULE_LICENSE("GPL");

#define PCNET32_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

/*
 * Module init: probe PCI devices first, then optionally scan for VLB
 * cards.  Succeeds if either probe path found hardware.
 */
static int __init pcnet32_init_module(void)
{
	pr_info("%s", version);

	pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);

	if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
		tx_start = tx_start_pt;

	/* find the PCI devices */
	if (!pci_register_driver(&pcnet32_driver))
		pcnet32_have_pci = 1;

	/* should we find any remaining VLbus devices ? */
	if (pcnet32vlb)
		pcnet32_probe_vlbus(pcnet32_portlist);

	if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
		pr_info("%d cards_found\n", cards_found);

	return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
}

/*
 * Module exit: tear down VLB devices tracked on the pcnet32_dev list,
 * then unregister the PCI driver (which removes PCI devices).
 */
static void __exit pcnet32_cleanup_module(void)
{
	struct net_device *next_dev;

	while (pcnet32_dev) {
		struct pcnet32_private *lp = netdev_priv(pcnet32_dev);
		next_dev = lp->next;
		unregister_netdev(pcnet32_dev);
		pcnet32_free_ring(pcnet32_dev);
		release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
		pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
				    lp->init_block, lp->init_dma_addr);
		free_netdev(pcnet32_dev);
		pcnet32_dev = next_dev;
	}

	if (pcnet32_have_pci)
		pci_unregister_driver(&pcnet32_driver);
}

module_init(pcnet32_init_module);
module_exit(pcnet32_cleanup_module);

/*
 * Local variables:
 *  c-indent-level: 4
 *  tab-width: 8
 * End:
 */
gpl-2.0
kerneldevs/fusX-univa-kernel
arch/cris/arch-v32/kernel/debugport.c
4792
3936
/*
 * Copyright (C) 2003, Axis Communications AB.
 */

#include <linux/console.h>
#include <linux/init.h>
#include <asm/system.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/reg_map.h>
#include <hwregs/ser_defs.h>
#include <hwregs/dma_defs.h>
#include <mach/pinmux.h>

/* Per-port configuration/state for a serial port used for kernel debug
 * output and/or KGDB. */
struct dbg_port {
	unsigned char nbr;	/* port number, 0..4 */
	unsigned long instance;	/* register block base (regi_serN) */
	unsigned int started;	/* non-zero once start_port() has run */
	unsigned long baudrate;
	unsigned char parity;	/* 'N', 'E' or 'O' */
	unsigned int bits;	/* data bits: 7 or 8 */
};

struct dbg_port ports[] = {
	{ 0, regi_ser0, 0, 115200, 'N', 8 },
	{ 1, regi_ser1, 0, 115200, 'N', 8 },
	{ 2, regi_ser2, 0, 115200, 'N', 8 },
	{ 3, regi_ser3, 0, 115200, 'N', 8 },
#if CONFIG_ETRAX_SERIAL_PORTS == 5
	{ 4, regi_ser4, 0, 115200, 'N', 8 },
#endif
};

/* Console/debug port selected at configure time; NULL disables it. */
static struct dbg_port *port =
#if defined(CONFIG_ETRAX_DEBUG_PORT0)
	&ports[0];
#elif defined(CONFIG_ETRAX_DEBUG_PORT1)
	&ports[1];
#elif defined(CONFIG_ETRAX_DEBUG_PORT2)
	&ports[2];
#elif defined(CONFIG_ETRAX_DEBUG_PORT3)
	&ports[3];
#elif defined(CONFIG_ETRAX_DEBUG_PORT4)
	&ports[4];
#else
	NULL;
#endif

#ifdef CONFIG_ETRAX_KGDB
/* KGDB port selected at configure time; may alias the debug port. */
static struct dbg_port *kgdb_port =
#if defined(CONFIG_ETRAX_KGDB_PORT0)
	&ports[0];
#elif defined(CONFIG_ETRAX_KGDB_PORT1)
	&ports[1];
#elif defined(CONFIG_ETRAX_KGDB_PORT2)
	&ports[2];
#elif defined(CONFIG_ETRAX_KGDB_PORT3)
	&ports[3];
#elif defined(CONFIG_ETRAX_KGDB_PORT4)
	&ports[4];
#else
	NULL;
#endif
#endif

/*
 * start_port - bring up one debug serial port.
 * @p: port descriptor, may be NULL (then nothing is done).
 *
 * Allocates the pinmux for the port, programs baud rate, parity and
 * data-bit registers, and enables transmitter and receiver.  Idempotent:
 * a port is only initialized once (p->started guards re-entry).
 */
static void start_port(struct dbg_port *p)
{
	if (!p)
		return;

	if (p->started)
		return;
	p->started = 1;

	/* Port 0 pins are dedicated; the others must be claimed. */
	if (p->nbr == 1)
		crisv32_pinmux_alloc_fixed(pinmux_ser1);
	else if (p->nbr == 2)
		crisv32_pinmux_alloc_fixed(pinmux_ser2);
	else if (p->nbr == 3)
		crisv32_pinmux_alloc_fixed(pinmux_ser3);
#if CONFIG_ETRAX_SERIAL_PORTS == 5
	else if (p->nbr == 4)
		crisv32_pinmux_alloc_fixed(pinmux_ser4);
#endif

	/* Set up serial port registers */
	reg_ser_rw_tr_ctrl tr_ctrl = {0};
	reg_ser_rw_tr_dma_en tr_dma_en = {0};

	reg_ser_rw_rec_ctrl rec_ctrl = {0};
	reg_ser_rw_tr_baud_div tr_baud_div = {0};
	reg_ser_rw_rec_baud_div rec_baud_div = {0};

	tr_ctrl.base_freq = rec_ctrl.base_freq = regk_ser_f29_493;
	tr_dma_en.en = rec_ctrl.dma_mode = regk_ser_no;
	/* 29.4912 MHz base clock, 8x oversampling. */
	tr_baud_div.div = rec_baud_div.div = 29493000 / p->baudrate / 8;
	tr_ctrl.en = rec_ctrl.en = 1;

	if (p->parity == 'O') {
		tr_ctrl.par_en = regk_ser_yes;
		tr_ctrl.par = regk_ser_odd;
		rec_ctrl.par_en = regk_ser_yes;
		rec_ctrl.par = regk_ser_odd;
	} else if (p->parity == 'E') {
		tr_ctrl.par_en = regk_ser_yes;
		tr_ctrl.par = regk_ser_even;
		rec_ctrl.par_en = regk_ser_yes;
		/* Bug fix: receiver was configured with regk_ser_odd here,
		 * mismatching the even-parity transmitter; the receiver must
		 * also check even parity. */
		rec_ctrl.par = regk_ser_even;
	}

	if (p->bits == 7) {
		tr_ctrl.data_bits = regk_ser_bits7;
		rec_ctrl.data_bits = regk_ser_bits7;
	}

	REG_WR(ser, p->instance, rw_tr_baud_div, tr_baud_div);
	REG_WR(ser, p->instance, rw_rec_baud_div, rec_baud_div);
	REG_WR(ser, p->instance, rw_tr_dma_en, tr_dma_en);
	REG_WR(ser, p->instance, rw_tr_ctrl, tr_ctrl);
	REG_WR(ser, p->instance, rw_rec_ctrl, rec_ctrl);
}

#ifdef CONFIG_ETRAX_KGDB
/* Use polling to get a single character from the kernel debug port */
int getDebugChar(void)
{
	reg_ser_rs_stat_din stat;
	reg_ser_rw_ack_intr ack_intr = { 0 };

	/* Busy-wait until the data-available flag is set. */
	do {
		stat = REG_RD(ser, kgdb_port->instance, rs_stat_din);
	} while (!stat.dav);

	/* Ack the data_avail interrupt. */
	ack_intr.dav = 1;
	REG_WR(ser, kgdb_port->instance, rw_ack_intr, ack_intr);

	return stat.data;
}

/* Use polling to put a single character to the kernel debug port */
void putDebugChar(int val)
{
	reg_ser_r_stat_din stat;

	/* Busy-wait until the transmitter is ready for a new byte. */
	do {
		stat = REG_RD(ser, kgdb_port->instance, r_stat_din);
	} while (!stat.tr_rdy);

	REG_WR_INT(ser, kgdb_port->instance, rw_dout, val);
}
#endif /* CONFIG_ETRAX_KGDB */

/* Register console for printk's, etc. */
int __init init_etrax_debug(void)
{
	start_port(port);

#ifdef CONFIG_ETRAX_KGDB
	start_port(kgdb_port);
#endif /* CONFIG_ETRAX_KGDB */
	return 0;
}
gpl-2.0
l0rdg3x/AK-OnePlusOne-CAF
drivers/scsi/lpfc/lpfc_init.c
4792
314030
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2004-2012 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. 
* *******************************************************************/ #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/idr.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/kthread.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/ctype.h> #include <linux/aer.h> #include <linux/slab.h> #include <linux/firmware.h> #include <linux/miscdevice.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_version.h" char *_dump_buf_data; unsigned long _dump_buf_data_order; char *_dump_buf_dif; unsigned long _dump_buf_dif_order; spinlock_t _dump_buf_lock; static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); static int lpfc_post_rcv_buf(struct lpfc_hba *); static int lpfc_sli4_queue_verify(struct lpfc_hba *); static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); static int lpfc_setup_endian_order(struct lpfc_hba *); static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); static void lpfc_free_sgl_list(struct lpfc_hba *); static int lpfc_init_sgl_list(struct lpfc_hba *); static int lpfc_init_active_sgl_array(struct lpfc_hba *); static void lpfc_free_active_sgl(struct lpfc_hba *); static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); static struct scsi_transport_template *lpfc_transport_template = NULL; static struct scsi_transport_template *lpfc_vport_transport_template = NULL; static 
DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			/* Byte-swap the license key to big-endian once, on
			 * first use (init_key is a function-static latch). */
			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less then 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		/* Pull the VPD region in chunks until the firmware reports
		 * a zero word count or the local buffer is full. */
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for driver's configuring asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set internal async event support flag to 1; otherwise, it will
 * set internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	/* Record whether the HBA supports temperature-sensor async events. */
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for dump mailbox command for getting
 * wake up parameters. When this command complete, the response contain
 * Option rom version of the HBA. This function translate the version number
 * into a human readable string and store it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	/* NOTE(review): prg aliases the raw word through struct prog_id to
	 * pick apart its bit-fields; layout is defined elsewhere in lpfc_hw.h. */
	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contain option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 *
 * Return codes
 *	None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	/* Copy the service parameters, then release the DMA buffer that
	 * carried them. */
	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		/* Each IEEE byte becomes two ASCII characters (hex nibbles
		 * mapped to '0'-'9' / 'a'-'f'). */
		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
					lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	/* Delegate to the topology-aware variant with the configured topology. */
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	/* Reject a configured link speed the adapter's link-mode table (lmt)
	 * does not advertise; fall back to auto-negotiation. */
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"1302 Invalid speed for this board:%d "
			"Reset link speed to auto.\n",
			phba->cfg_link_speed);
			phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		/* MBX_BUSY in NOWAIT mode means the completion handler will
		 * free the mailbox; otherwise we own it here. */
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"2522 Adapter failed to issue DOWN_LINK"
		" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	/* When unloading, clean only the physical port; otherwise walk
	 * every active vport. */
	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bring
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		/* Drop the lock while cancelling; the cancel path may sleep
		 * or re-acquire it. */
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bring
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	/* SLI4 builds on the common SLI3 teardown. */
	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
					/* scsl_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Scrub the aborted SCSI buffers before returning them to the
	 * free list. */
	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bring down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	/* Dispatch via the per-SLI-revision function pointer. */
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
**/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do (only if the event
	 * was not already pending, to avoid redundant wake-ups).
	 */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	/* NOTE(review): hba_flag is updated under work_port_lock here,
	 * whereas other sites in this file modify it under hbalock —
	 * confirm which lock is meant to protect hba_flag.
	 */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	/* Heart beat completed: clear the outstanding flag */
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer is necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	/* Re-arm only while the port is online and the driver is loaded */
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
**/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	/* Run the periodic E_D_TOV sequence check on every vport */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	/* If any completion arrived within the last interval, the link is
	 * demonstrably alive: just re-arm the timer and skip the heartbeat.
	 */
	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time +
			LPFC_HB_MBOX_INTERVAL * HZ,
			jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	/* If the ELS buffer count has been static for a full interval,
	 * nothing is consuming them: free the accumulated buffers.
	 */
	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			/* Only issue the heartbeat mailbox when the mailbox
			 * channel is idle; otherwise record a skipped beat.
			 */
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					" updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing:last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
**/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring  *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			/* Register read failed: assume hot-unplug */
			phba->work_hs = UNPLUG_ERR ;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * first write to the host attention register clear the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to the management app
 * @phba: pointer to lpfc hba data structure.
 *
 * Posts an LPFC_EVENT_PORTINTERR vendor event through the FC transport so
 * that a userspace management application is notified of the board error.
 **/
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host  *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		/* Critical over-temperature: report and take port offline */
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
**/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t pci_rd_rc1, pci_rd_rc2;
	int rc;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* TODO: Register for Overtemp async events. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline\n");
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			break;
		}
		/* Decode the firmware's reported reason for the port down */
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3143 Port Down: Firmware Restarted\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3145 Port Down: Provisioning\n");
		/*
		 * On error status condition, driver need to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (!rc) {
			/* need reset: attempt for port recovery */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2887 Reset Needed: Attempting Port "
					"Recovery...\n");
			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			if (lpfc_online(phba) == 0) {
				lpfc_unblock_mgmt_io(phba);
				/* don't report event on forced debug dump */
				if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
				    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
					return;
				else
					break;
			}
			/* fall through for not able to recover */
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3152 Unrecoverable error, bring the port "
				"offline\n");
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}

/**
 * lpfc_handle_eratt -
Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event. On failure at any step, resources acquired so far
 * are released through the goto-cleanup chain below.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		/* Each VPD descriptor is a tag byte followed by a
		 * little-endian 16-bit length, then the payload.
		 */
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			/* ID-string / read-write descriptors: skip payload */
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			/* Read-only descriptor: scan keyword fields */
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					/* On SLI4 with a pending physical
					 * port name query, skip the bytes
					 * instead of storing them.
					 */
					if ((phba->sli_rev == LPFC_SLI_REV4) &&
					    (phba->sli4_hba.pport_name_sta ==
					     LPFC_SLI4_PPNAME_GET)) {
						j++;
						index++;
					} else
						phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				if ((phba->sli_rev != LPFC_SLI_REV4) ||
				    (phba->sli4_hba.pport_name_sta ==
				     LPFC_SLI4_PPNAME_NON))
					phba->Port[j] = 0;
				continue;
			}
			else {
				/* Unknown keyword: skip its payload */
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
			}
			finished = 0;
			break;
		case 0x78:
			/* End tag */
			finished = 1;
			break;
		default:
			index ++;
			break;
		}
	}

	return(1);
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars.
It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	/* Both strings already populated: nothing to derive */
	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	/* Map the PCI device ID to marketing name / bus / function */
	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP7000E", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP9000", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000",  "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79,"%s", m.name);
	/*
	 * oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, %s Initiator, Port %s",
				m.name, m.function,
				phba->Port);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, max_speed, (GE) ?
CMD_QUE_RING_BUF64_CN; icmd->ulpLe = 1; if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == IOCB_ERROR) { lpfc_mbuf_free(phba, mp1->virt, mp1->phys); kfree(mp1); cnt++; if (mp2) { lpfc_mbuf_free(phba, mp2->virt, mp2->phys); kfree(mp2); cnt++; } lpfc_sli_release_iocbq(phba, iocb); pring->missbufcnt = cnt; return cnt; } lpfc_sli_ringpostbuf_put(phba, pring, mp1); if (mp2) lpfc_sli_ringpostbuf_put(phba, pring, mp2); } pring->missbufcnt = 0; return 0; } /** * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring * @phba: pointer to lpfc hba data structure. * * This routine posts initial receive IOCB buffers to the ELS ring. The * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is * set to 64 IOCBs. * * Return codes * 0 - success (currently always success) **/ static int lpfc_post_rcv_buf(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; /* Ring 0, ELS / CT buffers */ lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); /* Ring 2 - FCP no buffers needed */ return 0; } #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) /** * lpfc_sha_init - Set up initial array of hash table entries * @HashResultPointer: pointer to an array as hash table. * * This routine sets up the initial values to the array of hash table entries * for the LC HBAs. **/ static void lpfc_sha_init(uint32_t * HashResultPointer) { HashResultPointer[0] = 0x67452301; HashResultPointer[1] = 0xEFCDAB89; HashResultPointer[2] = 0x98BADCFE; HashResultPointer[3] = 0x10325476; HashResultPointer[4] = 0xC3D2E1F0; } /** * lpfc_sha_iterate - Iterate initial hash table with the working hash table * @HashResultPointer: pointer to an initial/result hash table. * @HashWorkingPointer: pointer to an working hash table. * * This routine iterates an initial hash table pointed by @HashResultPointer * with the values from the working hash table pointeed by @HashWorkingPointer. 
 * The results are put back into the initial hash table, returned through
 * the @HashResultPointer as the result hash table.
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;

	/*
	 * Expand the first 16 words of the working table into the 80-word
	 * message schedule, SHA-1 style:
	 * W[t] = rotl1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]).
	 */
	t = 16;
	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	/*
	 * 80 compression rounds; the four additive constants are the SHA-1
	 * round constants, selecting the round function for t = 0..19,
	 * 20..39, 40..59 and 60..79 respectively.
	 */
	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	/* Fold the working variables back into the running hash state. */
	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;
}

/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred by @HashWorking
 * from the challenge random numbers associated with the host, referred by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	/* XOR the challenge word into the working hash entry in place. */
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}

/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
**/ void lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) { int t; uint32_t *HashWorking; uint32_t *pwwnn = (uint32_t *) phba->wwnn; HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); if (!HashWorking) return; HashWorking[0] = HashWorking[78] = *pwwnn++; HashWorking[1] = HashWorking[79] = *pwwnn; for (t = 0; t < 7; t++) lpfc_challenge_key(phba->RandomData + t, HashWorking + t); lpfc_sha_init(hbainit); lpfc_sha_iterate(hbainit, HashWorking); kfree(HashWorking); } /** * lpfc_cleanup - Performs vport cleanups before deleting a vport * @vport: pointer to a virtual N_Port data structure. * * This routine performs the necessary cleanups before deleting the @vport. * It invokes the discovery state machine to perform necessary state * transitions and to release the ndlps associated with the @vport. Note, * the physical port is treated as @vport 0. **/ void lpfc_cleanup(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp, *next_ndlp; int i = 0; if (phba->link_state > LPFC_LINK_DOWN) lpfc_port_link_failure(vport); list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (!NLP_CHK_NODE_ACT(ndlp)) { ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); if (!ndlp) continue; spin_lock_irq(&phba->ndlp_lock); NLP_SET_FREE_REQ(ndlp); spin_unlock_irq(&phba->ndlp_lock); /* Trigger the release of the ndlp memory */ lpfc_nlp_put(ndlp); continue; } spin_lock_irq(&phba->ndlp_lock); if (NLP_CHK_FREE_REQ(ndlp)) { /* The ndlp should not be in memory free mode already */ spin_unlock_irq(&phba->ndlp_lock); continue; } else /* Indicate request for freeing ndlp memory */ NLP_SET_FREE_REQ(ndlp); spin_unlock_irq(&phba->ndlp_lock); if (vport->port_type != LPFC_PHYSICAL_PORT && ndlp->nlp_DID == Fabric_DID) { /* Just free up ndlp with Fabric_DID for vports */ lpfc_nlp_put(ndlp); continue; } /* take care of nodes in unused state before the state * machine taking action. 
*/ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { lpfc_nlp_put(ndlp); continue; } if (ndlp->nlp_type & NLP_FABRIC) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); } /* At this point, ALL ndlp's should be gone * because of the previous NLP_EVT_DEVICE_RM. * Lets wait for this to happen, if needed. */ while (!list_empty(&vport->fc_nodes)) { if (i++ > 3000) { lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0233 Nodelist not empty\n"); list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_NODE, "0282 did:x%x ndlp:x%p " "usgmap:x%x refcnt:%d\n", ndlp->nlp_DID, (void *)ndlp, ndlp->nlp_usg_map, atomic_read( &ndlp->kref.refcount)); } break; } /* Wait for any activity on ndlps to settle */ msleep(10); } lpfc_cleanup_vports_rrqs(vport, NULL); } /** * lpfc_stop_vport_timers - Stop all the timers associated with a vport * @vport: pointer to a virtual N_Port data structure. * * This routine stops all the timers associated with a @vport. This function * is invoked before disabling or deleting a @vport. Note that the physical * port is treated as @vport 0. **/ void lpfc_stop_vport_timers(struct lpfc_vport *vport) { del_timer_sync(&vport->els_tmofunc); del_timer_sync(&vport->fc_fdmitmo); del_timer_sync(&vport->delayed_disc_tmo); lpfc_can_disctmo(vport); return; } /** * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer * @phba: pointer to lpfc hba data structure. * * This routine stops the SLI4 FCF rediscover wait timer if it's on. The * caller of this routine should already hold the host lock. 
 **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}

/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the host
 * lock held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
 **/
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already fired or stopped */
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* The __ variant assumes hbalock is held; we hold it here. */
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover in progress flags */
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with a HBA. This function is
 * invoked before either putting a HBA offline or unloading the driver.
**/ void lpfc_stop_hba_timers(struct lpfc_hba *phba) { lpfc_stop_vport_timers(phba->pport); del_timer_sync(&phba->sli.mbox_tmo); del_timer_sync(&phba->fabric_block_timer); del_timer_sync(&phba->eratt_poll); del_timer_sync(&phba->hb_tmofunc); if (phba->sli_rev == LPFC_SLI_REV4) { del_timer_sync(&phba->rrq_tmr); phba->hba_flag &= ~HBA_RRQ_ACTIVE; } phba->hb_outstanding = 0; switch (phba->pci_dev_grp) { case LPFC_PCI_DEV_LP: /* Stop any LightPulse device specific driver timers */ del_timer_sync(&phba->fcp_poll_timer); break; case LPFC_PCI_DEV_OC: /* Stop any OneConnect device sepcific driver timers */ lpfc_sli4_stop_fcf_redisc_wait_timer(phba); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0297 Invalid device group (x%x)\n", phba->pci_dev_grp); break; } return; } /** * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked * @phba: pointer to lpfc hba data structure. * * This routine marks a HBA's management interface as blocked. Once the HBA's * management interface is marked as blocked, all the user space access to * the HBA, whether they are from sysfs interface or libdfc interface will * all be blocked. The HBA is set to block the management interface when the * driver prepares the HBA interface for online or offline. **/ static void lpfc_block_mgmt_io(struct lpfc_hba * phba) { unsigned long iflag; uint8_t actcmd = MBX_HEARTBEAT; unsigned long timeout; timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; spin_lock_irqsave(&phba->hbalock, iflag); phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; if (phba->sli.mbox_active) { actcmd = phba->sli.mbox_active->u.mb.mbxCommand; /* Determine how long we might wait for the active mailbox * command to be gracefully completed by firmware. 
*/ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, phba->sli.mbox_active) * 1000) + jiffies; } spin_unlock_irqrestore(&phba->hbalock, iflag); /* Wait for the outstnading mailbox command to complete */ while (phba->sli.mbox_active) { /* Check active mailbox complete status every 2ms */ msleep(2); if (time_after(jiffies, timeout)) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2813 Mgmt IO is Blocked %x " "- mbox cmd %x still active\n", phba->sli.sli_flag, actcmd); break; } } } /** * lpfc_sli4_node_prep - Assign RPIs for active nodes. * @phba: pointer to lpfc hba data structure. * * Allocate RPIs for all active remote nodes. This is needed whenever * an SLI4 adapter is reset and the driver is not unloading. Its purpose * is to fixup the temporary rpi assignments. **/ void lpfc_sli4_node_prep(struct lpfc_hba *phba) { struct lpfc_nodelist *ndlp, *next_ndlp; struct lpfc_vport **vports; int i; if (phba->sli_rev != LPFC_SLI_REV4) return; vports = lpfc_create_vport_work_array(phba); if (vports != NULL) { for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { if (vports[i]->load_flag & FC_UNLOADING) continue; list_for_each_entry_safe(ndlp, next_ndlp, &vports[i]->fc_nodes, nlp_listp) { if (NLP_CHK_NODE_ACT(ndlp)) ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(phba); } } } lpfc_destroy_vport_work_array(phba, vports); } /** * lpfc_online - Initialize and bring a HBA online * @phba: pointer to lpfc hba data structure. * * This routine initializes the HBA and brings a HBA online. During this * process, the management interface is blocked to prevent user space access * to the HBA interfering with the driver initialization. 
* * Return codes * 0 - successful * 1 - failed **/ int lpfc_online(struct lpfc_hba *phba) { struct lpfc_vport *vport; struct lpfc_vport **vports; int i; if (!phba) return 0; vport = phba->pport; if (!(vport->fc_flag & FC_OFFLINE_MODE)) return 0; lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0458 Bring Adapter online\n"); lpfc_block_mgmt_io(phba); if (!lpfc_sli_queue_setup(phba)) { lpfc_unblock_mgmt_io(phba); return 1; } if (phba->sli_rev == LPFC_SLI_REV4) { if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ lpfc_unblock_mgmt_io(phba); return 1; } } else { if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ lpfc_unblock_mgmt_io(phba); return 1; } } vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { struct Scsi_Host *shost; shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); vports[i]->fc_flag &= ~FC_OFFLINE_MODE; if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; if (phba->sli_rev == LPFC_SLI_REV4) vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); lpfc_unblock_mgmt_io(phba); return 0; } /** * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked * @phba: pointer to lpfc hba data structure. * * This routine marks a HBA's management interface as not blocked. Once the * HBA's management interface is marked as not blocked, all the user space * access to the HBA, whether they are from sysfs interface or libdfc * interface will be allowed. The HBA is set to block the management interface * when the driver prepares the HBA interface for online or offline and then * set to unblock the management interface afterwards. 
**/ void lpfc_unblock_mgmt_io(struct lpfc_hba * phba) { unsigned long iflag; spin_lock_irqsave(&phba->hbalock, iflag); phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; spin_unlock_irqrestore(&phba->hbalock, iflag); } /** * lpfc_offline_prep - Prepare a HBA to be brought offline * @phba: pointer to lpfc hba data structure. * * This routine is invoked to prepare a HBA to be brought offline. It performs * unregistration login to all the nodes on all vports and flushes the mailbox * queue to make it ready to be brought offline. **/ void lpfc_offline_prep(struct lpfc_hba * phba) { struct lpfc_vport *vport = phba->pport; struct lpfc_nodelist *ndlp, *next_ndlp; struct lpfc_vport **vports; struct Scsi_Host *shost; int i; if (vport->fc_flag & FC_OFFLINE_MODE) return; lpfc_block_mgmt_io(phba); lpfc_linkdown(phba); /* Issue an unreg_login to all nodes on all vports */ vports = lpfc_create_vport_work_array(phba); if (vports != NULL) { for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { if (vports[i]->load_flag & FC_UNLOADING) continue; shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; vports[i]->fc_flag &= ~FC_VFI_REGISTERED; spin_unlock_irq(shost->host_lock); shost = lpfc_shost_from_vport(vports[i]); list_for_each_entry_safe(ndlp, next_ndlp, &vports[i]->fc_nodes, nlp_listp) { if (!NLP_CHK_NODE_ACT(ndlp)) continue; if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) continue; if (ndlp->nlp_type & NLP_FABRIC) { lpfc_disc_state_machine(vports[i], ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); lpfc_disc_state_machine(vports[i], ndlp, NULL, NLP_EVT_DEVICE_RM); } spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; spin_unlock_irq(shost->host_lock); /* * Whenever an SLI4 port goes offline, free the * RPI. Get a new RPI when the adapter port * comes back online. 
*/ if (phba->sli_rev == LPFC_SLI_REV4) lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); lpfc_unreg_rpi(vports[i], ndlp); } } } lpfc_destroy_vport_work_array(phba, vports); lpfc_sli_mbox_sys_shutdown(phba); } /** * lpfc_offline - Bring a HBA offline * @phba: pointer to lpfc hba data structure. * * This routine actually brings a HBA offline. It stops all the timers * associated with the HBA, brings down the SLI layer, and eventually * marks the HBA as in offline state for the upper layer protocol. **/ void lpfc_offline(struct lpfc_hba *phba) { struct Scsi_Host *shost; struct lpfc_vport **vports; int i; if (phba->pport->fc_flag & FC_OFFLINE_MODE) return; /* stop port and all timers associated with this hba */ lpfc_stop_port(phba); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) lpfc_stop_vport_timers(vports[i]); lpfc_destroy_vport_work_array(phba, vports); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0460 Bring Adapter offline\n"); /* Bring down the SLI Layer and cleanup. The HBA is offline now. */ lpfc_sli_hba_down(phba); spin_lock_irq(&phba->hbalock); phba->work_ha = 0; spin_unlock_irq(&phba->hbalock); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); vports[i]->work_port_events = 0; vports[i]->fc_flag |= FC_OFFLINE_MODE; spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); } /** * lpfc_scsi_buf_update - Update the scsi_buffers that are already allocated. * @phba: pointer to lpfc hba data structure. 
* * This routine goes through all the scsi buffers in the system and updates the * Physical XRIs assigned to the SCSI buffer because these may change after any * firmware reset * * Return codes * 0 - successful (for now, it always returns 0) **/ int lpfc_scsi_buf_update(struct lpfc_hba *phba) { struct lpfc_scsi_buf *sb, *sb_next; spin_lock_irq(&phba->hbalock); spin_lock(&phba->scsi_buf_list_lock); list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { sb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag]; set_bit(sb->cur_iocbq.sli4_lxritag, phba->sli4_hba.xri_bmask); phba->sli4_hba.max_cfg_param.xri_used++; phba->sli4_hba.xri_count++; } spin_unlock(&phba->scsi_buf_list_lock); spin_unlock_irq(&phba->hbalock); return 0; } /** * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists * @phba: pointer to lpfc hba data structure. * * This routine is to free all the SCSI buffers and IOCBs from the driver * list back to kernel. It is called from lpfc_pci_remove_one to free * the internal resources before the device is removed from the system. * * Return codes * 0 - successful (for now, it always returns 0) **/ static int lpfc_scsi_free(struct lpfc_hba *phba) { struct lpfc_scsi_buf *sb, *sb_next; struct lpfc_iocbq *io, *io_next; spin_lock_irq(&phba->hbalock); /* Release all the lpfc_scsi_bufs maintained by this host. */ spin_lock(&phba->scsi_buf_list_lock); list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { list_del(&sb->list); pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, sb->dma_handle); kfree(sb); phba->total_scsi_bufs--; } spin_unlock(&phba->scsi_buf_list_lock); /* Release all the lpfc_iocbq entries maintained by this host. 
*/ list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { list_del(&io->list); kfree(io); phba->total_iocbq_bufs--; } spin_unlock_irq(&phba->hbalock); return 0; } /** * lpfc_create_port - Create an FC port * @phba: pointer to lpfc hba data structure. * @instance: a unique integer ID to this FC port. * @dev: pointer to the device data structure. * * This routine creates a FC port for the upper layer protocol. The FC port * can be created on top of either a physical port or a virtual port provided * by the HBA. This routine also allocates a SCSI host data structure (shost) * and associates the FC port created before adding the shost into the SCSI * layer. * * Return codes * @vport - pointer to the virtual N_Port data structure. * NULL - port create failed. **/ struct lpfc_vport * lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) { struct lpfc_vport *vport; struct Scsi_Host *shost; int error = 0; if (dev != &phba->pcidev->dev) shost = scsi_host_alloc(&lpfc_vport_template, sizeof(struct lpfc_vport)); else shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport)); if (!shost) goto out; vport = (struct lpfc_vport *) shost->hostdata; vport->phba = phba; vport->load_flag |= FC_LOADING; vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; vport->fc_rscn_flush = 0; lpfc_get_vport_cfgparam(vport); shost->unique_id = instance; shost->max_id = LPFC_MAX_TARGET; shost->max_lun = vport->cfg_max_luns; shost->this_id = -1; shost->max_cmd_len = 16; if (phba->sli_rev == LPFC_SLI_REV4) { shost->dma_boundary = phba->sli4_hba.pc_sli4_params.sge_supp_len-1; shost->sg_tablesize = phba->cfg_sg_seg_cnt; } /* * Set initial can_queue value since 0 is no longer supported and * scsi_add_host will fail. This will be adjusted later based on the * max xri value determined in hba setup. 
*/ shost->can_queue = phba->cfg_hba_queue_depth - 10; if (dev != &phba->pcidev->dev) { shost->transportt = lpfc_vport_transport_template; vport->port_type = LPFC_NPIV_PORT; } else { shost->transportt = lpfc_transport_template; vport->port_type = LPFC_PHYSICAL_PORT; } /* Initialize all internally managed lists. */ INIT_LIST_HEAD(&vport->fc_nodes); INIT_LIST_HEAD(&vport->rcv_buffer_list); spin_lock_init(&vport->work_port_lock); init_timer(&vport->fc_disctmo); vport->fc_disctmo.function = lpfc_disc_timeout; vport->fc_disctmo.data = (unsigned long)vport; init_timer(&vport->fc_fdmitmo); vport->fc_fdmitmo.function = lpfc_fdmi_tmo; vport->fc_fdmitmo.data = (unsigned long)vport; init_timer(&vport->els_tmofunc); vport->els_tmofunc.function = lpfc_els_timeout; vport->els_tmofunc.data = (unsigned long)vport; init_timer(&vport->delayed_disc_tmo); vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo; vport->delayed_disc_tmo.data = (unsigned long)vport; error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); if (error) goto out_put_shost; spin_lock_irq(&phba->hbalock); list_add_tail(&vport->listentry, &phba->port_list); spin_unlock_irq(&phba->hbalock); return vport; out_put_shost: scsi_host_put(shost); out: return NULL; } /** * destroy_port - destroy an FC port * @vport: pointer to an lpfc virtual N_Port data structure. * * This routine destroys a FC port from the upper layer protocol. All the * resources associated with the port are released. **/ void destroy_port(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; lpfc_debugfs_terminate(vport); fc_remove_host(shost); scsi_remove_host(shost); spin_lock_irq(&phba->hbalock); list_del_init(&vport->listentry); spin_unlock_irq(&phba->hbalock); lpfc_cleanup(vport); return; } /** * lpfc_get_instance - Get a unique integer ID * * This routine allocates a unique integer ID from lpfc_hba_index pool. It * uses the kernel idr facility to perform the task. 
 * Return codes:
 * instance - a unique integer ID allocated as the new instance.
 * -1 - lpfc get instance failed.
 **/
int
lpfc_get_instance(void)
{
	int instance = 0;

	/* Assign an unused number */
	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
		return -1;
	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
		return -1;
	return instance;
}

/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the scan host is finished.
 *
 * Note: there is no scan_start function as adapter initialization will have
 * asynchronously kicked off the link initialization.
 *
 * Return codes
 * 0 - SCSI host scan is not over yet.
 * 1 - SCSI host scan is over.
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	/* Driver unloading: report the scan as done immediately. */
	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	/* Hard cap: give up waiting after 30 seconds of scanning. */
	if (time >= 30 * HZ) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	/* Shorter cap when the link never came up at all. */
	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	/* Keep scanning while discovery is still settling. */
	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}

/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
 * @shost: pointer to SCSI host data structure.
* * This routine initializes a given SCSI host attributes on a FC port. The * SCSI host can be either on top of a physical port or a virtual port. **/ void lpfc_host_attrib_init(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; /* * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). */ fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); fc_host_supported_classes(shost) = FC_COS_CLASS3; memset(fc_host_supported_fc4s(shost), 0, sizeof(fc_host_supported_fc4s(shost))); fc_host_supported_fc4s(shost)[2] = 1; fc_host_supported_fc4s(shost)[7] = 1; lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), sizeof fc_host_symbolic_name(shost)); fc_host_supported_speeds(shost) = 0; if (phba->lmt & LMT_16Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; if (phba->lmt & LMT_10Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; if (phba->lmt & LMT_8Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; if (phba->lmt & LMT_4Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; if (phba->lmt & LMT_2Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; if (phba->lmt & LMT_1Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; fc_host_maxframe_size(shost) = (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; /* This value is also unchanging */ memset(fc_host_active_fc4s(shost), 0, sizeof(fc_host_active_fc4s(shost))); fc_host_active_fc4s(shost)[2] = 1; fc_host_active_fc4s(shost)[7] = 1; fc_host_max_npiv_vports(shost) = phba->max_vpi; spin_lock_irq(shost->host_lock); vport->load_flag &= ~FC_LOADING; spin_unlock_irq(shost->host_lock); } /** * lpfc_stop_port_s3 - Stop SLI3 device port * @phba: pointer to lpfc hba data structure. 
 *
 * This routine is invoked to stop an SLI3 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush posted write to the adapter */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI4 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}

/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
 * the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	/* Dispatch to the SLI3 or SLI4 variant via the jump table. */
	phba->lpfc_stop_port(phba);
}

/**
 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
 * @phba: Pointer to hba for which this call is being executed.
 *
 * This routine starts the timer waiting for the FCF rediscovery to complete.
**/ void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) { unsigned long fcf_redisc_wait_tmo = (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); /* Start fcf rediscovery wait period timer */ mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); spin_lock_irq(&phba->hbalock); /* Allow action to new fcf asynchronous event */ phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); /* Mark the FCF rediscovery pending state */ phba->fcf.fcf_flag |= FCF_REDISC_PEND; spin_unlock_irq(&phba->hbalock); } /** * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout * @ptr: Map to lpfc_hba data structure pointer. * * This routine is invoked when waiting for FCF table rediscover has been * timed out. If new FCF record(s) has (have) been discovered during the * wait period, a new FCF event shall be added to the FCOE async event * list, and then worker thread shall be waked up for processing from the * worker thread context. **/ void lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) { struct lpfc_hba *phba = (struct lpfc_hba *)ptr; /* Don't send FCF rediscovery event if timer cancelled */ spin_lock_irq(&phba->hbalock); if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { spin_unlock_irq(&phba->hbalock); return; } /* Clear FCF rediscovery timer pending flag */ phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; /* FCF rediscovery event to worker thread */ phba->fcf.fcf_flag |= FCF_REDISC_EVT; spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2776 FCF rediscover quiescent timer expired\n"); /* wake up worker thread */ lpfc_worker_wake_up(phba); } /** * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code * @phba: pointer to lpfc hba data structure. * @acqe_link: pointer to the async link completion queue entry. * * This routine is to parse the SLI4 link-attention link fault code and * translate it into the base driver's read link attention mailbox command * status. 
 * Return: Link-attention status in terms of base driver's coding.
 **/
static uint16_t
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	uint16_t latt_fault;

	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
		/* All known fault codes map to a successful mailbox status */
		latt_fault = 0;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0398 Invalid link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		latt_fault = MBXERR_ERROR;
		break;
	}
	return latt_fault;
}

/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.
 **/
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}

/**
 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link speed and translate
 * it into the base driver's link-attention link speed coding.
* * Return: Link-attention link speed in terms of base driver's coding. **/ static uint8_t lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, struct lpfc_acqe_link *acqe_link) { uint8_t link_speed; switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { case LPFC_ASYNC_LINK_SPEED_ZERO: case LPFC_ASYNC_LINK_SPEED_10MBPS: case LPFC_ASYNC_LINK_SPEED_100MBPS: link_speed = LPFC_LINK_SPEED_UNKNOWN; break; case LPFC_ASYNC_LINK_SPEED_1GBPS: link_speed = LPFC_LINK_SPEED_1GHZ; break; case LPFC_ASYNC_LINK_SPEED_10GBPS: link_speed = LPFC_LINK_SPEED_10GHZ; break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0483 Invalid link-attention link speed: x%x\n", bf_get(lpfc_acqe_link_speed, acqe_link)); link_speed = LPFC_LINK_SPEED_UNKNOWN; break; } return link_speed; } /** * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event * @phba: pointer to lpfc hba data structure. * @acqe_link: pointer to the async link completion queue entry. * * This routine is to handle the SLI4 asynchronous FCoE link event. 
**/ static void lpfc_sli4_async_link_evt(struct lpfc_hba *phba, struct lpfc_acqe_link *acqe_link) { struct lpfc_dmabuf *mp; LPFC_MBOXQ_t *pmb; MAILBOX_t *mb; struct lpfc_mbx_read_top *la; uint8_t att_type; int rc; att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) return; phba->fcoe_eventtag = acqe_link->event_tag; pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0395 The mboxq allocation failed\n"); return; } mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!mp) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0396 The lpfc_dmabuf allocation failed\n"); goto out_free_pmb; } mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); if (!mp->virt) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0397 The mbuf allocation failed\n"); goto out_free_dmabuf; } /* Cleanup any outstanding ELS commands */ lpfc_els_flush_all_cmd(phba); /* Block ELS IOCBs until we have done process link event */ phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; /* Update link event statistics */ phba->sli.slistat.link_event++; /* Create lpfc_handle_latt mailbox command from link ACQE */ lpfc_read_topology(phba, pmb, mp); pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; pmb->vport = phba->pport; /* Keep the link status for extra SLI4 state machine reference */ phba->sli4_hba.link_state.speed = bf_get(lpfc_acqe_link_speed, acqe_link); phba->sli4_hba.link_state.duplex = bf_get(lpfc_acqe_link_duplex, acqe_link); phba->sli4_hba.link_state.status = bf_get(lpfc_acqe_link_status, acqe_link); phba->sli4_hba.link_state.type = bf_get(lpfc_acqe_link_type, acqe_link); phba->sli4_hba.link_state.number = bf_get(lpfc_acqe_link_number, acqe_link); phba->sli4_hba.link_state.fault = bf_get(lpfc_acqe_link_fault, acqe_link); phba->sli4_hba.link_state.logical_speed = bf_get(lpfc_acqe_logical_link_speed, acqe_link); lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2900 Async 
FC/FCoE Link event - Speed:%dGBit " "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " "Logical speed:%dMbps Fault:%d\n", phba->sli4_hba.link_state.speed, phba->sli4_hba.link_state.topology, phba->sli4_hba.link_state.status, phba->sli4_hba.link_state.type, phba->sli4_hba.link_state.number, phba->sli4_hba.link_state.logical_speed * 10, phba->sli4_hba.link_state.fault); /* * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch * topology info. Note: Optional for non FC-AL ports. */ if (!(phba->hba_flag & HBA_FCOE_MODE)) { rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) goto out_free_dmabuf; return; } /* * For FCoE Mode: fill in all the topology information we need and call * the READ_TOPOLOGY completion routine to continue without actually * sending the READ_TOPOLOGY mailbox command to the port. */ /* Parse and translate status field */ mb = &pmb->u.mb; mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); /* Parse and translate link attention fields */ la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; la->eventTag = acqe_link->event_tag; bf_set(lpfc_mbx_read_top_att_type, la, att_type); bf_set(lpfc_mbx_read_top_link_spd, la, lpfc_sli4_parse_latt_link_speed(phba, acqe_link)); /* Fake the the following irrelvant fields */ bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); bf_set(lpfc_mbx_read_top_il, la, 0); bf_set(lpfc_mbx_read_top_pb, la, 0); bf_set(lpfc_mbx_read_top_fa, la, 0); bf_set(lpfc_mbx_read_top_mm, la, 0); /* Invoke the lpfc_handle_latt mailbox command callback function */ lpfc_mbx_cmpl_read_topology(phba, pmb); return; out_free_dmabuf: kfree(mp); out_free_pmb: mempool_free(pmb, phba->mbox_mem_pool); } /** * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event * @phba: pointer to lpfc hba data structure. * @acqe_fc: pointer to the async fc completion queue entry. * * This routine is to handle the SLI4 asynchronous FC event. 
It will simply log * that the event was received and then issue a read_topology mailbox command so * that the rest of the driver will treat it the same as SLI3. **/ static void lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) { struct lpfc_dmabuf *mp; LPFC_MBOXQ_t *pmb; int rc; if (bf_get(lpfc_trailer_type, acqe_fc) != LPFC_FC_LA_EVENT_TYPE_FC_LINK) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2895 Non FC link Event detected.(%d)\n", bf_get(lpfc_trailer_type, acqe_fc)); return; } /* Keep the link status for extra SLI4 state machine reference */ phba->sli4_hba.link_state.speed = bf_get(lpfc_acqe_fc_la_speed, acqe_fc); phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; phba->sli4_hba.link_state.topology = bf_get(lpfc_acqe_fc_la_topology, acqe_fc); phba->sli4_hba.link_state.status = bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); phba->sli4_hba.link_state.type = bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); phba->sli4_hba.link_state.number = bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); phba->sli4_hba.link_state.fault = bf_get(lpfc_acqe_link_fault, acqe_fc); phba->sli4_hba.link_state.logical_speed = bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc); lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2896 Async FC event - Speed:%dGBaud Topology:x%x " "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" "%dMbps Fault:%d\n", phba->sli4_hba.link_state.speed, phba->sli4_hba.link_state.topology, phba->sli4_hba.link_state.status, phba->sli4_hba.link_state.type, phba->sli4_hba.link_state.number, phba->sli4_hba.link_state.logical_speed * 10, phba->sli4_hba.link_state.fault); pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2897 The mboxq allocation failed\n"); return; } mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!mp) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2898 The lpfc_dmabuf allocation failed\n"); goto out_free_pmb; } mp->virt = lpfc_mbuf_alloc(phba, 0, 
&mp->phys); if (!mp->virt) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2899 The mbuf allocation failed\n"); goto out_free_dmabuf; } /* Cleanup any outstanding ELS commands */ lpfc_els_flush_all_cmd(phba); /* Block ELS IOCBs until we have done process link event */ phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; /* Update link event statistics */ phba->sli.slistat.link_event++; /* Create lpfc_handle_latt mailbox command from link ACQE */ lpfc_read_topology(phba, pmb, mp); pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; pmb->vport = phba->pport; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) goto out_free_dmabuf; return; out_free_dmabuf: kfree(mp); out_free_pmb: mempool_free(pmb, phba->mbox_mem_pool); } /** * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event * @phba: pointer to lpfc hba data structure. * @acqe_fc: pointer to the async SLI completion queue entry. * * This routine is to handle the SLI4 asynchronous SLI events. **/ static void lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2901 Async SLI event - Event Data1:x%08x Event Data2:" "x%08x SLI Event Type:%d", acqe_sli->event_data1, acqe_sli->event_data2, bf_get(lpfc_trailer_type, acqe_sli)); return; } /** * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport * @vport: pointer to vport data structure. * * This routine is to perform Clear Virtual Link (CVL) on a vport in * response to a CVL event. * * Return the pointer to the ndlp with the vport if successful, otherwise * return NULL. 
**/ static struct lpfc_nodelist * lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp; struct Scsi_Host *shost; struct lpfc_hba *phba; if (!vport) return NULL; phba = vport->phba; if (!phba) return NULL; ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp) { /* Cannot find existing Fabric ndlp, so allocate a new one */ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); if (!ndlp) return 0; lpfc_nlp_init(vport, ndlp, Fabric_DID); /* Set the node type */ ndlp->nlp_type |= NLP_FABRIC; /* Put ndlp onto node list */ lpfc_enqueue_node(vport, ndlp); } else if (!NLP_CHK_NODE_ACT(ndlp)) { /* re-setup ndlp without removing from node list */ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); if (!ndlp) return 0; } if ((phba->pport->port_state < LPFC_FLOGI) && (phba->pport->port_state != LPFC_VPORT_FAILED)) return NULL; /* If virtual link is not yet instantiated ignore CVL */ if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) && (vport->port_state != LPFC_VPORT_FAILED)) return NULL; shost = lpfc_shost_from_vport(vport); if (!shost) return NULL; lpfc_linkdown_port(vport); lpfc_cleanup_pending_mbox(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_CVL_RCVD; spin_unlock_irq(shost->host_lock); return ndlp; } /** * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports * @vport: pointer to lpfc hba data structure. * * This routine is to perform Clear Virtual Link (CVL) on all vports in * response to a FCF dead event. **/ static void lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) { struct lpfc_vport **vports; int i; vports = lpfc_create_vport_work_array(phba); if (vports) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) lpfc_sli4_perform_vport_cvl(vports[i]); lpfc_destroy_vport_work_array(phba, vports); } /** * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event * @phba: pointer to lpfc hba data structure. 
 * @acqe_fip: pointer to the async fcoe completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous fcoe event. It dispatches
 * on the FIP trailer type: new/modified FCF records trigger an FCF table
 * scan, FCF-dead and CVL events trigger fast FCF failover or virtual link
 * re-instantiation. Runs in worker-thread context (takes hbalock with
 * spin_lock_irq).
 **/
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			/* NOTE(review): rc from this read is not checked */
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If the FCF has been in discovered state, do nothing. */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2548 FCF Table full count 0x%x tag 0x%x\n",
			bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
			acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
			"2549 FCF (x%x) disconnected from network, "
			"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
				"\n", acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			/* Note: "mabilbox" typo is in the shipped log text */
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2772 Issue FCF rediscover mabilbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL to all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
			"2718 Clear Virtual Link Received for VPI 0x%x"
			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba, acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			/* Any other vport still up keeps the VLink active */
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		if (active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request port to rediscover
			 * the entire FCF table for a fast recovery
			 * from possible case that the current FCF
			 * is no longer valid if we are not already
			 * in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				/* "mabilbox" typo is in the shipped text */
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_DISCOVERY,
						"2774 Issue FCF rediscover "
						"mabilbox command failed, "
						"through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be re-try on the
				 * the current registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0288 Unknown FCoE event type 0x%x event tag "
			"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}

/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.
**/ static void lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, struct lpfc_acqe_dcbx *acqe_dcbx) { phba->fc_eventTag = acqe_dcbx->event_tag; lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0290 The SLI4 DCBX asynchronous event is not " "handled yet\n"); } /** * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event * @phba: pointer to lpfc hba data structure. * @acqe_link: pointer to the async grp5 completion queue entry. * * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event * is an asynchronous notified of a logical link speed change. The Port * reports the logical link speed in units of 10Mbps. **/ static void lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, struct lpfc_acqe_grp5 *acqe_grp5) { uint16_t prev_ll_spd; phba->fc_eventTag = acqe_grp5->event_tag; phba->fcoe_eventtag = acqe_grp5->event_tag; prev_ll_spd = phba->sli4_hba.link_state.logical_speed; phba->sli4_hba.link_state.logical_speed = (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)); lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2789 GRP5 Async Event: Updating logical link speed " "from %dMbps to %dMbps\n", (prev_ll_spd * 10), (phba->sli4_hba.link_state.logical_speed*10)); } /** * lpfc_sli4_async_event_proc - Process all the pending asynchronous event * @phba: pointer to lpfc hba data structure. * * This routine is invoked by the worker thread to process all the pending * SLI4 asynchronous events. 
**/
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the async event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the async events */
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Process the asynchronous event: dispatch on trailer code */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			/* Note: "asynchrous" typo is in the shipped text */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"1804 Invalid asynchrous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}
		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process FCF table
 * rediscovery pending completion event.
**/
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
	int ret;

	/* Under hbalock: retire the timeout event, drop any stale fast
	 * failover record, and flag that fast failover is in effect.
	 */
	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
			"2777 Start post-quiescent FCF table scan\n");
	ret = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (ret)
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2747 Issue FCF scan read FCF mailbox "
				"command failed 0x%x\n", ret);
}

/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine is invoked to set up the per HBA PCI-Device group function
 * API jump table entries.
 *
 * Return: 0 if success, otherwise -ENODEV
 **/
int
lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	/* Set up lpfc PCI-device group */
	phba->pci_dev_grp = dev_grp;

	/* The LPFC_PCI_DEV_OC uses SLI4 */
	if (dev_grp == LPFC_PCI_DEV_OC)
		phba->sli_rev = LPFC_SLI_REV4;

	/* Populate each API jump table in turn; any failure aborts setup */
	if (lpfc_init_api_table_setup(phba, dev_grp))
		return -ENODEV;
	if (lpfc_scsi_api_table_setup(phba, dev_grp))
		return -ENODEV;
	if (lpfc_sli_api_table_setup(phba, dev_grp))
		return -ENODEV;
	if (lpfc_mbox_api_table_setup(phba, dev_grp))
		return -ENODEV;

	return 0;
}

/**
 * lpfc_log_intr_mode - Log the active interrupt mode
 * @phba: pointer to lpfc hba data structure.
 * @intr_mode: active interrupt mode adopted.
 *
 * This routine is invoked to log the currently used active interrupt mode
 * to the device.
 **/
static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
{
	/* 0 = INTx, 1 = MSI, 2 = MSI-X; anything else is illegal */
	switch (intr_mode) {
	case 0:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0470 Enable INTx interrupt mode.\n");
		break;
	case 1:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0481 Enabled MSI interrupt mode.\n");
		break;
	case 2:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0480 Enabled MSI-X interrupt mode.\n");
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0482 Illegal interrupt mode.\n");
		break;
	}
	return;
}

/**
 * lpfc_enable_pci_dev - Enable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the PCI device that is common to all
 * PCI devices.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	int bars = 0;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Select PCI BARs */
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
		pdev->needs_freset = 1;

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1401 Failed to enable pci device, bars:x%x\n", bars);
	return -ENODEV;
}

/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
**/ static void lpfc_disable_pci_dev(struct lpfc_hba *phba) { struct pci_dev *pdev; int bars; /* Obtain PCI device reference */ if (!phba->pcidev) return; else pdev = phba->pcidev; /* Select PCI BARs */ bars = pci_select_bars(pdev, IORESOURCE_MEM); /* Release PCI resource and disable PCI device */ pci_release_selected_regions(pdev, bars); pci_disable_device(pdev); /* Null out PCI private reference to driver */ pci_set_drvdata(pdev, NULL); return; } /** * lpfc_reset_hba - Reset a hba * @phba: pointer to lpfc hba data structure. * * This routine is invoked to reset a hba device. It brings the HBA * offline, performs a board restart, and then brings the board back * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up * on outstanding mailbox commands. **/ void lpfc_reset_hba(struct lpfc_hba *phba) { /* If resets are disabled then set error state and return. */ if (!phba->cfg_enable_hba_reset) { phba->link_state = LPFC_HBA_ERROR; return; } lpfc_offline_prep(phba); lpfc_offline(phba); lpfc_sli_brdrestart(phba); lpfc_online(phba); lpfc_unblock_mgmt_io(phba); } /** * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions * @phba: pointer to lpfc hba data structure. * * This function enables the PCI SR-IOV virtual functions to a physical * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to * enable the number of virtual functions to the physical function. As * not all devices support SR-IOV, the return code from the pci_enable_sriov() * API call does not considered as an error condition for most of the device. 
**/ uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) { struct pci_dev *pdev = phba->pcidev; uint16_t nr_virtfn; int pos; pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); if (pos == 0) return 0; pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); return nr_virtfn; } /** * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions * @phba: pointer to lpfc hba data structure. * @nr_vfn: number of virtual functions to be enabled. * * This function enables the PCI SR-IOV virtual functions to a physical * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to * enable the number of virtual functions to the physical function. As * not all devices support SR-IOV, the return code from the pci_enable_sriov() * API call does not considered as an error condition for most of the device. **/ int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) { struct pci_dev *pdev = phba->pcidev; uint16_t max_nr_vfn; int rc; max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); if (nr_vfn > max_nr_vfn) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3057 Requested vfs (%d) greater than " "supported vfs (%d)", nr_vfn, max_nr_vfn); return -EINVAL; } rc = pci_enable_sriov(pdev, nr_vfn); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "2806 Failed to enable sriov on this device " "with vfn number nr_vf:%d, rc:%d\n", nr_vfn, rc); } else lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "2807 Successful enable sriov on this device " "with vfn number nr_vf:%d\n", nr_vfn); return rc; } /** * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to set up the driver internal resources specific to * support the SLI-3 HBA device it attached to. 
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	int rc;

	/*
	 * Initialize timers used by driver
	 */

	/* Heartbeat timer */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;

	psli = &phba->sli;
	/* MBOX heartbeat timer */
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	/* FCP polling mode timer */
	init_timer(&phba->fcp_poll_timer);
	phba->fcp_poll_timer.function = lpfc_poll_timeout;
	phba->fcp_poll_timer.data = (unsigned long) phba;
	/* Fabric block timer */
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
	/* EA polling mode timer */
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long) phba;

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	/* Hornet (Menlo) adapters require a minimum scatter-gather count */
	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	/*
	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 * 2 segments are added since the IOCB needs a command and response bde.
	 */
	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
		sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));

	/* BlockGuard needs room for the maximum protection-data BDEs too */
	if (phba->cfg_enable_bg) {
		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
		phba->cfg_sg_dma_buf_size +=
			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
	}

	/* Also reinitialize the host templates with new values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						 phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			/* SR-IOV failure is non-fatal: fall back to no VFs */
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;
}

/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it attached to.
 **/
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free device driver memory allocated */
	lpfc_mem_free_all(phba);

	return;
}

/**
 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-4 HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *mboxq;
	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
	struct lpfc_mqe *mqe;
	int longs, sli_family;
	int sges_per_segment;

	/* Before proceed, wait for POST done and device ready */
	rc = lpfc_sli4_post_status_check(phba);
	if (rc)
		return -ENODEV;

	/*
	 * Initialize timers used by driver
	 */

	/* Heartbeat timer */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;
	init_timer(&phba->rrq_tmr);
	phba->rrq_tmr.function = lpfc_rrq_timeout;
	phba->rrq_tmr.data = (unsigned long)phba;

	psli = &phba->sli;
	/* MBOX heartbeat timer */
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	/* Fabric block timer */
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
	/* EA polling mode timer */
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long) phba;
	/* FCF rediscover timer */
	init_timer(&phba->fcf.redisc_wait);
	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
	phba->fcf.redisc_wait.data = (unsigned long)phba;

	/*
	 * Control structure for handling external multi-buffer mailbox
	 * command pass-through.
	 */
	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
		sizeof(struct lpfc_mbox_ext_buf_ctx));
	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	/*
	 * We need to do a READ_CONFIG mailbox command here before
	 * calling lpfc_get_cfgparam. For VFs this will report the
	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
	 * All of the resources allocated
	 * for this Port are tied to these values.
	 */
	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	phba->max_vpi = LPFC_MAX_VPI;

	/* This will be set to correct value after the read_config mbox */
	phba->max_vports = 0;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	/* With BlockGuard we can have multiple SGEs per Data Segment */
	sges_per_segment = 1;
	if (phba->cfg_enable_bg)
		sges_per_segment = 2;

	/*
	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 * 2 segments are added since the IOCB needs a command and response bde.
	 * To insure that the scsi sgl does not cross a 4k page boundary only
	 * sgl sizes of must be a power of 2.
	 */
	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
		    (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) *
		    sizeof(struct sli4_sge)));

	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
	max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
	switch (sli_family) {
	case LPFC_SLI_INTF_FAMILY_BE2:
	case LPFC_SLI_INTF_FAMILY_BE3:
		/* There is a single hint for BE - 2 pages per BPL. */
		if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_SLI_HINT1_1)
			max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
		break;
	case LPFC_SLI_INTF_FAMILY_LNCR_A0:
	case LPFC_SLI_INTF_FAMILY_LNCR_B0:
	default:
		break;
	}
	/* Round the DMA buffer size up to the next power of 2 >= buf_size,
	 * capped at max_buf_size; the empty loop body is intentional. */
	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
	     dma_buf_size = dma_buf_size << 1)
		;
	/* If capped, shrink the SG segment count to what actually fits. */
	if (dma_buf_size == max_buf_size)
		phba->cfg_sg_seg_cnt = (dma_buf_size -
			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
			(2 * sizeof(struct sli4_sge))) /
				sizeof(struct sli4_sge);
	phba->cfg_sg_dma_buf_size = dma_buf_size;

	/* Initialize buffer queue management fields */
	hbq_count = lpfc_sli_hbq_count();
	for (i = 0; i < hbq_count; ++i)
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
	INIT_LIST_HEAD(&phba->rb_pend_list);
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;

	/*
	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
	 */

	/* Initialize the Abort scsi buffer list used by driver */
	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
	/* This abort list used by worker thread */
	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);

	/*
	 * Initialize driver internal slow-path work queues
	 */

	/* Driver internal slow-path CQ Event pool */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
	/* Response IOCB work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
	/* Asynchronous event CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
	/* Fast-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
	/* Slow-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
	/* Receive queue CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);

	/* Initialize extent block lists. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);

	/* Initialize the driver internal SLI layer lists. */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	/* Allocate device driver memory */
	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
	if (rc)
		return -ENOMEM;

	/* IF Type 2 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc))
			return -ENODEV;
	}

	/* Create the bootstrap mailbox command */
	rc = lpfc_create_bootstrap_mbox(phba);
	if (unlikely(rc))
		goto out_free_mem;

	/* Set up the host's endian order with the device. */
	rc = lpfc_setup_endian_order(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* Set up the hba's configuration parameters. */
	rc = lpfc_sli4_read_config(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* IF Type 0 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc))
			goto out_free_bsmbx;
	}

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto out_free_bsmbx;
	}

	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
	lpfc_supported_pages(mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (!rc) {
		mqe = &mboxq->u.mqe;
		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
		       LPFC_MAX_SUPPORTED_PAGES);
		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
			switch (pn_page[i]) {
			case LPFC_SLI4_PARAMETERS:
				phba->sli4_hba.pc_sli4_params.supported = 1;
				break;
			default:
				break;
			}
		}
		/* Read the port's SLI4 Parameters capabilities if supported. */
		if (phba->sli4_hba.pc_sli4_params.supported)
			rc = lpfc_pc_sli4_params_get(phba, mboxq);
		if (rc) {
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
	}
	/*
	 * Get sli4 parameters that override parameters from Port capabilities.
	 * If this call fails, it isn't critical unless the SLI4 parameters come
	 * back in conflict.
	 */
	rc = lpfc_get_sli4_parameters(phba, mboxq);
	if (rc) {
		if (phba->sli4_hba.extents_in_use &&
		    phba->sli4_hba.rpi_hdrs_in_use) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2999 Unsupported SLI4 Parameters "
				"Extents and RPI headers enabled.\n");
			goto out_free_bsmbx;
		}
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	/* Verify all the SLI4 queues */
	rc = lpfc_sli4_queue_verify(phba);
	if (rc)
		goto out_free_bsmbx;

	/* Create driver internal CQE event pool */
	rc = lpfc_sli4_cq_event_pool_create(phba);
	if (rc)
		goto out_free_bsmbx;

	/* Initialize and populate the iocb list per host */
	rc = lpfc_init_sgl_list(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1400 Failed to initialize sgl list.\n");
		goto out_destroy_cq_event_pool;
	}
	rc = lpfc_init_active_sgl_array(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1430 Failed to initialize sgl list.\n");
		goto out_free_sgl_list;
	}
	rc = lpfc_sli4_init_rpi_hdrs(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1432 Failed to initialize rpi headers.\n");
		goto out_free_active_sgl;
	}

	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
					 GFP_KERNEL);
	if (!phba->fcf.fcf_rr_bmask) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2759 Failed allocate memory for FCF round "
				"robin failover bmask\n");
		rc = -ENOMEM;
		goto out_remove_rpi_hdrs;
	}

	/*
	 * The cfg_fcp_eq_count can be zero whenever there is exactly one
	 * interrupt vector.  This is not an error
	 */
	if (phba->cfg_fcp_eq_count) {
		phba->sli4_hba.fcp_eq_hdl =
				kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
				    phba->cfg_fcp_eq_count), GFP_KERNEL);
		if (!phba->sli4_hba.fcp_eq_hdl) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2572 Failed allocate memory for "
					"fast-path per-EQ handle array\n");
			rc = -ENOMEM;
			goto out_free_fcf_rr_bmask;
		}
	}

	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
	if (!phba->sli4_hba.msix_entries) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2573 Failed allocate memory for msi-x "
				"interrupt vector entries\n");
		rc = -ENOMEM;
		goto out_free_fcp_eq_hdl;
	}

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						 phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3020 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

/* Error unwind: labels release resources in reverse order of acquisition. */
out_free_fcp_eq_hdl:
	kfree(phba->sli4_hba.fcp_eq_hdl);
out_free_fcf_rr_bmask:
	kfree(phba->fcf.fcf_rr_bmask);
out_remove_rpi_hdrs:
	lpfc_sli4_remove_rpi_hdrs(phba);
out_free_active_sgl:
	lpfc_free_active_sgl(phba);
out_free_sgl_list:
	lpfc_free_sgl_list(phba);
out_destroy_cq_event_pool:
	lpfc_sli4_cq_event_pool_destroy(phba);
out_free_bsmbx:
	lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
	lpfc_mem_free(phba);
	return rc;
}

/**
 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-4 HBA device it attached to.
 **/
static void
lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	/* Free memory allocated for msi-x interrupt vector entries */
	kfree(phba->sli4_hba.msix_entries);

	/* Free memory allocated for fast-path work queue handles */
	kfree(phba->sli4_hba.fcp_eq_hdl);

	/* Free the allocated rpi headers. */
	lpfc_sli4_remove_rpi_hdrs(phba);
	lpfc_sli4_remove_rpis(phba);

	/* Free eligible FCF index bmask */
	kfree(phba->fcf.fcf_rr_bmask);

	/* Free the ELS sgl list */
	lpfc_free_active_sgl(phba);
	lpfc_free_sgl_list(phba);

	/* Free the SCSI sgl management array */
	kfree(phba->sli4_hba.lpfc_scsi_psb_array);

	/* Free the completion queue EQ event pool */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Release resource identifiers. */
	lpfc_sli4_dealloc_resource_identifiers(phba);

	/* Free the bsmbx region. */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI Layer memory with SLI4 HBAs */
	lpfc_mem_free_all(phba);

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	return;
}

/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	/* Link-control entry points are common to SLI-3 and SLI-4. */
	phba->lpfc_hba_init_link = lpfc_hba_init_link;
	phba->lpfc_hba_down_link = lpfc_hba_down_link;
	phba->lpfc_selective_reset = lpfc_selective_reset;
	/* Dispatch the device-group-specific (_s3 vs _s4) entry points. */
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		phba->lpfc_stop_port = lpfc_stop_port_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		phba->lpfc_stop_port = lpfc_stop_port_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1431 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
		break;	/* unreachable after the return above */
	}
	return 0;
}

/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources before the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize ndlp management spinlock */
	spin_lock_init(&phba->ndlp_lock);

	INIT_LIST_HEAD(&phba->port_list);
	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	/* Initialize the scsi buffer list used by driver for scsi IO */
	spin_lock_init(&phba->scsi_buf_list_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection rec list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	return 0;
}

/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup
 * driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources after the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
{
	int error;

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
				       "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		return error;
	}

	return 0;
}

/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device it
 * attached to.
 **/
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
	/* Stop kernel worker thread */
	kthread_stop(phba->worker_thread);
}

/**
 * lpfc_free_iocb_list - Free iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/
static void
lpfc_free_iocb_list(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;

	/* Walk-and-free under hbalock; the safe iterator allows list_del. */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
				 &phba->lpfc_iocb_list, list) {
		list_del(&iocbq_entry->list);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}

/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
* * Return codes * 0 - successful * other values - error **/ static int lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) { struct lpfc_iocbq *iocbq_entry = NULL; uint16_t iotag; int i; /* Initialize and populate the iocb list per host. */ INIT_LIST_HEAD(&phba->lpfc_iocb_list); for (i = 0; i < iocb_count; i++) { iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); if (iocbq_entry == NULL) { printk(KERN_ERR "%s: only allocated %d iocbs of " "expected %d count. Unloading driver.\n", __func__, i, LPFC_IOCB_LIST_CNT); goto out_free_iocbq; } iotag = lpfc_sli_next_iotag(phba, iocbq_entry); if (iotag == 0) { kfree(iocbq_entry); printk(KERN_ERR "%s: failed to allocate IOTAG. " "Unloading driver.\n", __func__); goto out_free_iocbq; } iocbq_entry->sli4_lxritag = NO_XRI; iocbq_entry->sli4_xritag = NO_XRI; spin_lock_irq(&phba->hbalock); list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); phba->total_iocbq_bufs++; spin_unlock_irq(&phba->hbalock); } return 0; out_free_iocbq: lpfc_free_iocb_list(phba); return -ENOMEM; } /** * lpfc_free_sgl_list - Free sgl list. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to free the driver's sgl list and memory. **/ static void lpfc_free_sgl_list(struct lpfc_hba *phba) { struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; LIST_HEAD(sglq_list); spin_lock_irq(&phba->hbalock); list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); spin_unlock_irq(&phba->hbalock); list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { list_del(&sglq_entry->list); lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); kfree(sglq_entry); phba->sli4_hba.total_sglq_bufs--; } kfree(phba->sli4_hba.lpfc_els_sgl_array); } /** * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to allocate the driver's active sgl memory. * This array will hold the sglq_entry's for active IOs. 
 **/
static int
lpfc_init_active_sgl_array(struct lpfc_hba *phba)
{
	int size;

	/* One slot per possible XRI on this port. */
	size = sizeof(struct lpfc_sglq *);
	size *= phba->sli4_hba.max_cfg_param.max_xri;

	phba->sli4_hba.lpfc_sglq_active_list =
		kzalloc(size, GFP_KERNEL);
	if (!phba->sli4_hba.lpfc_sglq_active_list)
		return -ENOMEM;
	return 0;
}

/**
 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to walk through the array of active sglq entries
 * and free all of the resources.
 * This is just a place holder for now.
 **/
static void
lpfc_free_active_sgl(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.lpfc_sglq_active_list);
}

/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's sgl
 * list and set up the sgl xritag tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL;
	int i;
	int els_xri_cnt;

	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2400 ELS XRI count %d.\n",
				els_xri_cnt);
	/* Initialize and populate the sglq list per host/VF. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	/* Sanity check on XRI management */
	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2562 No room left for SCSI XRI allocation: "
				"max_xri=%d, els_xri=%d\n",
				phba->sli4_hba.max_cfg_param.max_xri,
				els_xri_cnt);
		return -ENOMEM;
	}

	/* Allocate memory for the ELS XRI management array */
	phba->sli4_hba.lpfc_els_sgl_array =
			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
			GFP_KERNEL);

	if (!phba->sli4_hba.lpfc_els_sgl_array) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2401 Failed to allocate memory for ELS "
				"XRI management array of size %d.\n",
				els_xri_cnt);
		return -ENOMEM;
	}

	/* Keep the SCSI XRI into the XRI management array */
	/* SCSI gets whatever XRIs are left after the ELS reservation. */
	phba->sli4_hba.scsi_xri_max =
			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	phba->sli4_hba.scsi_xri_cnt = 0;

	phba->sli4_hba.lpfc_scsi_psb_array =
			kzalloc((sizeof(struct lpfc_scsi_buf *) *
			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);

	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2563 Failed to allocate memory for SCSI "
				"XRI management array of size %d.\n",
				phba->sli4_hba.scsi_xri_max);
		kfree(phba->sli4_hba.lpfc_els_sgl_array);
		return -ENOMEM;
	}

	for (i = 0; i < els_xri_cnt; i++) {
		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
		if (sglq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d sgls of "
				"expected %d count. Unloading driver.\n",
				__func__, i, els_xri_cnt);
			goto out_free_mem;
		}

		sglq_entry->buff_type = GEN_BUFF_TYPE;
		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
		if (sglq_entry->virt == NULL) {
			kfree(sglq_entry);
			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
				"Unloading driver.\n", __func__);
			goto out_free_mem;
		}
		sglq_entry->sgl = sglq_entry->virt;
		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);

		/* The list order is used by later block SGL registraton */
		spin_lock_irq(&phba->hbalock);
		sglq_entry->state = SGL_FREED;
		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
		phba->sli4_hba.total_sglq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}
	return 0;

out_free_mem:
	/* lpfc_free_sgl_list() drains the partial list and frees the
	 * els_sgl_array; the psb_array is freed here. */
	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
	lpfc_free_sgl_list(phba);
	return -ENOMEM;
}

/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * port for those SLI4 ports that do not support extents.  This routine
 * posts a PAGE_SIZE memory region to the port to hold up to
 * PAGE_SIZE modulo 64 rpi context headers.  This is an initialization routine
 * and should be called only when interrupts are disabled.
 *
 * Return codes
 * 	0 - successful
 *	-ERROR - otherwise.
 **/
int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
	int rc = 0;
	struct lpfc_rpi_hdr *rpi_hdr;

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
	/* rpi headers are only needed when the port manages rpis itself */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	/* extents and rpi headers are mutually exclusive */
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
	if (!rpi_hdr) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0391 Error during rpi post operation\n");
		lpfc_sli4_remove_rpis(phba);
		rc = -ENODEV;
	}

	return rc;
}

/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate a single 4KB memory region to
 * support rpis and stores them in the phba.  This single region
 * provides support for up to 64 rpis.  The region is used globally
 * by the device.
 *
 * Returns:
 *   A valid rpi hdr on success.
 *   A NULL pointer on any failure.
 **/
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;
	uint32_t rpi_count;

	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required.  Set the expected maximum count and let the actual value
	 * get set when extents are fully allocated.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return NULL;
	if (phba->sli4_hba.extents_in_use)
		return NULL;

	/* The limit on the logical index is just the max_rpi count. */
	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
	phba->sli4_hba.max_cfg_param.max_rpi - 1;

	spin_lock_irq(&phba->hbalock);
	/*
	 * Establish the starting RPI in this header block.  The starting
	 * rpi is normalized to a zero base because the physical rpi is
	 * port based.
	 */
	curr_rpi_range = phba->sli4_hba.next_rpi;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * The port has a limited number of rpis. The increment here
	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
	 * and to allow the full max_rpi range per port.
	 */
	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
		rpi_count = rpi_limit - curr_rpi_range;
	else
		rpi_count = LPFC_RPI_HDR_COUNT;

	if (!rpi_count)
		return NULL;
	/*
	 * First allocate the protocol header region for the port.  The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  LPFC_HDR_TEMPLATE_SIZE,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
	/* The port requires natural 4K alignment of the region. */
	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;
	spin_lock_irq(&phba->hbalock);

	/* The rpi_hdr stores the logical index only. */
	rpi_hdr->start_rpi = curr_rpi_range;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

	/*
	 * The next_rpi stores the next logical module-64 rpi value used
	 * to post physical rpis in subsequent rpi postings.
	 */
	phba->sli4_hba.next_rpi += rpi_count;
	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

 err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
 err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}

/**
 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove all memory resources allocated
 * to support rpis for SLI4 ports not supporting extents. This routine
 * presumes the caller has released all rpis consumed by fabric or port
 * logins and is prepared to have the header pages removed.
 **/
void
lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;

	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;
	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		list_del(&rpi_hdr->list);
		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
		kfree(rpi_hdr->dmabuf);
		kfree(rpi_hdr);
	}
 exit:
	/* There are no rpis available to the port now. */
	phba->sli4_hba.next_rpi = 0;
}

/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
 * @pdev: pointer to pci device data structure.
 *
 * This routine is invoked to allocate the driver hba data structure for an
 * HBA device. If the allocation is successful, the phba reference to the
 * PCI device data structure is set.
 *
 * Return codes
 *      pointer to @phba - successful
 *      NULL - error
 **/
static struct lpfc_hba *
lpfc_hba_alloc(struct pci_dev *pdev)
{
	struct lpfc_hba *phba;

	/* Allocate memory for HBA structure */
	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
	if (!phba) {
		dev_err(&pdev->dev, "failed to allocate hba struct\n");
		return NULL;
	}

	/* Set reference to PCI device in HBA structure */
	phba->pcidev = pdev;

	/* Assign an unused board number */
	phba->brd_no = lpfc_get_instance();
	if (phba->brd_no < 0) {
		kfree(phba);
		return NULL;
	}

	spin_lock_init(&phba->ct_ev_lock);
	INIT_LIST_HEAD(&phba->ct_ev_waiters);

	return phba;
}

/**
 * lpfc_hba_free - Free driver hba data structure with a device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver hba data structure with an
 * HBA device.
 **/
static void
lpfc_hba_free(struct lpfc_hba *phba)
{
	/* Release the driver assigned board number */
	idr_remove(&lpfc_hba_index, phba->brd_no);

	kfree(phba);
	return;
}

/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create HBA physical port and associate a SCSI
 * host with it.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_create_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost;

	/* Initialize HBA FC structure */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	atomic_set(&phba->sdev_cnt, 0);
	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		return -ENODEV;

	shost = lpfc_shost_from_vport(vport);
	phba->pport = vport;
	lpfc_debugfs_initialize(vport);
	/* Put reference to SCSI host to driver's device private data */
	pci_set_drvdata(phba->pcidev, shost);

	return 0;
}

/**
 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to destroy HBA physical port and the associated
 * SCSI host.
 **/
static void
lpfc_destroy_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	/* Destroy physical port that associated with the SCSI host */
	destroy_port(vport);

	return;
}

/**
 * lpfc_setup_bg - Setup Block guard structures and debug areas.
 * @phba: pointer to lpfc hba data structure.
 * @shost: the shost to be used to detect Block guard settings.
 *
 * This routine sets up the local Block guard protocol settings for @shost.
 * This routine also allocates memory for debugging bg buffers.
 **/
static void
lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
{
	int pagecnt = 10;
	if (lpfc_prot_mask && lpfc_prot_guard) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1478 Registering BlockGuard with the "
				"SCSI layer\n");
		scsi_host_set_prot(shost, lpfc_prot_mask);
		scsi_host_set_guard(shost, lpfc_prot_guard);
	}

	/* Allocate the data hexdump buffer, halving the order on each
	 * failure until an allocation succeeds or pagecnt reaches 0. */
	if (!_dump_buf_data) {
		while (pagecnt) {
			spin_lock_init(&_dump_buf_lock);
			_dump_buf_data =
				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
			if (_dump_buf_data) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9043 BLKGRD: allocated %d pages for "
				       "_dump_buf_data at 0x%p\n",
				       (1 << pagecnt), _dump_buf_data);
				_dump_buf_data_order = pagecnt;
				memset(_dump_buf_data, 0,
				       ((1 << PAGE_SHIFT) << pagecnt));
				break;
			} else
				--pagecnt;
		}
		if (!_dump_buf_data_order)
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9044 BLKGRD: ERROR unable to allocate "
			       "memory for hexdump\n");
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
		       "\n", _dump_buf_data);

	/* Same strategy for the DIF (protection data) hexdump buffer.
	 * Note pagecnt carries over from the loop above. */
	if (!_dump_buf_dif) {
		while (pagecnt) {
			_dump_buf_dif =
				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
			if (_dump_buf_dif) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9046 BLKGRD: allocated %d pages for "
				       "_dump_buf_dif at 0x%p\n",
				       (1 << pagecnt), _dump_buf_dif);
				_dump_buf_dif_order = pagecnt;
				memset(_dump_buf_dif, 0,
				       ((1 << PAGE_SHIFT) << pagecnt));
				break;
			} else
				--pagecnt;
		}
		if (!_dump_buf_dif_order)
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9047 BLKGRD: ERROR unable to allocate "
			       "memory for hexdump\n");
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
		       _dump_buf_dif);
}

/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to perform all the necessary post initialization
 * setup for the device.
 **/
static void
lpfc_post_init_setup(struct lpfc_hba *phba)
{
	struct Scsi_Host  *shost;
	struct lpfc_adapter_event_header adapter_event;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue.
	 */
	shost = pci_get_drvdata(phba->pcidev);
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
		lpfc_setup_bg(phba, shost);

	lpfc_host_attrib_init(shost);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(shost->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
		       "0428 Perform SCSI scan\n");
	/* Send board arrival event to upper layer */
	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(adapter_event),
				  (char *) &adapter_event,
				  LPFC_NL_VENDOR_ID);
	return;
}

/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-3 interface spec.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error = -ENODEV;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size: prefer 64-bit, fall back to 32-bit */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
					       SLI2_SLIM_SIZE,
					       &phba->slim2p.phys,
					       GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->mbox_ext = (phba->slim2p.virt +
		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	/* Carve the single hbqslimp region into per-HBQ slices. */
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	INIT_LIST_HEAD(&phba->rb_pend_list);

	/* Derive the register addresses from the mapped BARs. */
	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}

/**
 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-3 interface spec.
 **/
static void
lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */
	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* I/O memory unmap */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	return;
}

/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.
 *
 * Return 0 if successful, otherwise -ENODEV.
**/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds for the SLI Port POST done and ready:
	 * 3000 iterations x 10ms sleep.
	 */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr_reg.word0) ||
			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}

	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid.  Just exit.
	 */
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1408 Port Failed POST - portsmphr=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
			portsmphr_reg.word0,
			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2534 Device Info: SLIFamily=0x%x, "
				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
				"SLIHint_2=0x%x, FT=0x%x\n",
				bf_get(lpfc_sli_intf_sli_family,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_slirev,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_if_type,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint1,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint2,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_func_type,
				       &phba->sli4_hba.sli_intf));
		/*
		 * Check for other Port errors during the initialization
		 * process.  Fail the load if the port did not come up
		 * correctly.
		 */
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		switch (if_type) {
		case LPFC_SLI_INTF_IF_TYPE_0:
			/* if_type 0 reports unrecoverable errors through the
			 * UERR low/high registers, filtered by the UE masks:
			 * a 0 mask bit means the corresponding error is valid.
			 */
			phba->sli4_hba.ue_mask_lo =
			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
			phba->sli4_hba.ue_mask_hi =
			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
			uerrlo_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
			uerrhi_reg.word0 =
				readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"1422 Unrecoverable Error "
						"Detected during POST "
						"uerr_lo_reg=0x%x, "
						"uerr_hi_reg=0x%x, "
						"ue_mask_lo_reg=0x%x, "
						"ue_mask_hi_reg=0x%x\n",
						uerrlo_reg.word0,
						uerrhi_reg.word0,
						phba->sli4_hba.ue_mask_lo,
						phba->sli4_hba.ue_mask_hi);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_2:
			/* Final checks.  The port status should be clean.
			 * NOTE(review): the "rn" bit appears to mean the
			 * error is recoverable via reset and is therefore
			 * not fatal here — confirm against the SLI-4 spec.
			 */
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				&reg_data.word0) ||
				(bf_get(lpfc_sliport_status_err, &reg_data) &&
				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
				phba->work_status[0] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR1regaddr);
				phba->work_status[1] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2888 Unrecoverable port error "
					"following POST: port status reg "
					"0x%x, port_smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
					phba->work_status[0],
					phba->work_status[1]);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_1:
		default:
			break;
		}
	}
	return port_error;
}

/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
**/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	/* All addresses are offsets into the BAR0 configuration-space
	 * mapping; which registers exist depends on the interface type.
	 */
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.u.if_type0.UERRLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
		phba->sli4_hba.u.if_type0.UERRHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		/* if_type 2 exposes control/status/doorbell registers and the
		 * bootstrap mailbox directly through BAR0.
		 */
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.RQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
		phba->sli4_hba.EQCQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
		phba->sli4_hba.MQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		/* if_type 1 (and unknown types) are not supported by this
		 * driver; leave the register map unset.
		 */
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}

/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
* * This routine is invoked to set up SLI4 BAR1 control status register (CSR) * memory map. **/ static void lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba) { phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + LPFC_SLIPORT_IF0_SMPHR; phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + LPFC_HST_ISR0; phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + LPFC_HST_IMR0; phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + LPFC_HST_ISCR0; } /** * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. * @phba: pointer to lpfc hba data structure. * @vf: virtual function number * * This routine is invoked to set up SLI4 BAR2 doorbell register memory map * based on the given viftual function number, @vf. * * Return 0 if successful, otherwise -ENODEV. **/ static int lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) { if (vf > LPFC_VIR_FUNC_MAX) return -ENODEV; phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL); phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL); phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); return 0; } /** * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox * @phba: pointer to lpfc hba data structure. * * This routine is invoked to create the bootstrap mailbox * region consistent with the SLI-4 interface spec. This * routine allocates all memory necessary to communicate * mailbox commands to the port and sets up all alignment * needs. No locks are expected to be held when calling * this routine. * * Return codes * 0 - successful * -ENOMEM - could not allocated memory. 
**/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 * Over-allocate by 15 bytes so an aligned sub-region always fits.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  bmbx_size,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}
	memset(dmabuf->virt, 0, bmbx_size);

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later.  The mailbox dma address is required
	 * to be 16-byte aligned.  Also align the virtual memory as each
	 * maibox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 * avirt/aphys are rounded up by the same amount, so they stay in
	 * step with each other inside the over-allocated buffer.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now.  The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses.  The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 * NOTE(review): low word carries bits [33:4], high word bits [63:34]
	 * of the 16-byte-aligned physical address, each shifted left 2 and
	 * tagged with the hi/lo marker bit — confirm against the SLI-4
	 * bootstrap mailbox register layout.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}

/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
* * This routine is invoked to teardown the bootstrap mailbox * region and release all host resources. This routine requires * the caller to ensure all mailbox commands recovered, no * additional mailbox comands are sent, and interrupts are disabled * before calling this routine. * **/ static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) { dma_free_coherent(&phba->pcidev->dev, phba->sli4_hba.bmbx.bmbx_size, phba->sli4_hba.bmbx.dmabuf->virt, phba->sli4_hba.bmbx.dmabuf->phys); kfree(phba->sli4_hba.bmbx.dmabuf); memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); } /** * lpfc_sli4_read_config - Get the config parameters. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to read the configuration parameters from the HBA. * The configuration parameters are used to set the base and maximum values * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource * allocation for the port. * * Return codes * 0 - successful * -ENOMEM - No available memory * -EIO - The mailbox failed to complete successfully. 
**/
int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_mbx_get_func_cfg *get_func_cfg;
	struct lpfc_rsrc_desc_fcfcoe *desc;
	uint32_t desc_count;
	int length, i, rc = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	lpfc_read_config(phba, pmb);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2012 Mailbox failed , mbxCmd x%x "
			"READ_CONFIG, mbxStatus x%x\n",
			bf_get(lpfc_mqe_command, &pmb->u.mqe),
			bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		/* Extract every resource base/count from the mailbox reply */
		rd_config = &pmb->u.mqe.un.rd_config;
		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
			/* ldv set means link type/number fields are valid */
			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
			phba->sli4_hba.lnk_info.lnk_tp =
				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
			phba->sli4_hba.lnk_info.lnk_no =
				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"3081 lnk_type:%d, lnk_numb:%d\n",
					phba->sli4_hba.lnk_info.lnk_tp,
					phba->sli4_hba.lnk_info.lnk_no);
		} else
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3082 Mailbox (x%x) returned ldv:x0\n",
					bf_get(lpfc_mqe_command, &pmb->u.mqe));
		phba->sli4_hba.extents_in_use =
			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		/* NOTE(review): one VPI appears to be held back (max_vpi - 1)
		 * — presumably for the physical port itself; confirm.
		 */
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params Extents? %d "
				"XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI(Count:%d)\n",
				phba->sli4_hba.extents_in_use,
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.max_fcfi);
	}

	/* rc still holds -EIO from a failed READ_CONFIG at this point */
	if (rc)
		goto read_cfg_out;

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	if (phba->cfg_hba_queue_depth >
		(phba->sli4_hba.max_cfg_param.max_xri -
			lpfc_sli4_get_els_iocb_cnt(phba)))
		phba->cfg_hba_queue_depth =
			phba->sli4_hba.max_cfg_param.max_xri -
				lpfc_sli4_get_els_iocb_cnt(phba);

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2)
		goto read_cfg_out;

	/* get the pf# and vf# for SLI4 if_type 2 port */
	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc || shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3026 Mailbox failed , mbxCmd x%x "
				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
		goto read_cfg_out;
	}

	/* search for fc_fcoe resrouce descriptor */
	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
	desc_count = get_func_cfg->func_cfg.rsrc_desc_count;

	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		desc = (struct lpfc_rsrc_desc_fcfcoe *)
			&get_func_cfg->func_cfg.desc[i];
		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
		    bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
			phba->sli4_hba.iov.pf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}

	/* i < MAX_NUM means the loop above broke out on a match */
	if (i < LPFC_RSRC_DESC_MAX_NUM)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
				"vf_number:%d\n",
				phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resrouce Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);
		rc = -EIO;
	}

read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to setup the port-side endian order when
 * the port if_type is 0.  This routine has no function for other
 * if_types.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_setup_endian_order(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t if_type, rc = 0;
	/* Magic words the port uses to detect host byte order */
	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
				      HOST_ENDIAN_HIGH_WORD1};

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0492 Unable to allocate memory for "
					"issuing SLI_CONFIG_SPECIAL mailbox "
					"command\n");
			return -ENOMEM;
		}

		/*
		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
		 * two words to contain special data values and no other data.
		 */
		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0493 SLI_CONFIG_SPECIAL mailbox "
					"failed with status x%x\n",
					rc);
			rc = -EIO;
		}
		mempool_free(mboxq, phba->mbox_mem_pool);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	return rc;
}

/**
 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to check the user settable queue counts for EQs and
 * CQs. after this routine is called the counts will be set to valid values that
 * adhere to the constraints of the system's interrupt vectors and the port's
 * queue resources.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
static int
lpfc_sli4_queue_verify(struct lpfc_hba *phba)
{
	int cfg_fcp_wq_count;
	int cfg_fcp_eq_count;

	/*
	 * Sanity check for confiugred queue parameters against the run-time
	 * device parameters
	 */

	/* Sanity check on FCP fast-path WQ parameters: clamp the requested
	 * count to what the port can provide after reserving the slow-path
	 * WQs (LPFC_SP_WQN_DEF).
	 */
	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
	if (cfg_fcp_wq_count >
	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
				   LPFC_SP_WQN_DEF;
		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2581 Not enough WQs (%d) from "
					"the pci function for supporting "
					"FCP WQs (%d)\n",
					phba->sli4_hba.max_cfg_param.max_wq,
					phba->cfg_fcp_wq_count);
			goto out_error;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2582 Not enough WQs (%d) from the pci "
				"function for supporting the requested "
				"FCP WQs (%d), the actual FCP WQs can "
				"be supported: %d\n",
				phba->sli4_hba.max_cfg_param.max_wq,
				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
	}
	/* The actual number of FCP work queues adopted */
	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;

	/* Sanity check on FCP fast-path EQ parameters: same clamping as
	 * above, with LPFC_SP_EQN_DEF EQs reserved for the slow path.
	 */
	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
	if (cfg_fcp_eq_count >
	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
				   LPFC_SP_EQN_DEF;
		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2574 Not enough EQs (%d) from the "
					"pci function for supporting FCP "
					"EQs (%d)\n",
					phba->sli4_hba.max_cfg_param.max_eq,
					phba->cfg_fcp_eq_count);
			goto out_error;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2575 Not enough EQs (%d) from the pci "
				"function for supporting the requested "
				"FCP EQs (%d), the actual FCP EQs can "
				"be supported: %d\n",
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
	}
	/* It does not make sense to have more EQs than WQs */
	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2593 The FCP EQ count(%d) cannot be greater "
				"than the FCP WQ count(%d), limiting the "
				"FCP EQ count to %d\n", cfg_fcp_eq_count,
				phba->cfg_fcp_wq_count,
				phba->cfg_fcp_wq_count);
		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
	}
	/* The actual number of FCP event queues adopted */
	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
	/* The overall number of event queues used */
	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;

	/* Get EQ depth from module parameter, fake the default for now */
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;

	/* Get CQ depth from module parameter, fake the default for now */
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
	return 0;
out_error:
	return -ENOMEM;
}

/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter.
For now,
 * we just use some constant number as place holder.
 *
 * Return codes
 *      0 - sucessful
 *      -ENOMEM - No availble memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	int fcp_eqidx, fcp_cqidx, fcp_wqidx;

	/*
	 * Create Event Queues (EQs)
	 */

	/* Create slow path event queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
				      phba->sli4_hba.eq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0496 Failed allocate slow-path EQ\n");
		goto out_error;
	}
	phba->sli4_hba.sp_eq = qdesc;

	/*
	 * Create fast-path FCP Event Queue(s).  The cfg_fcp_eq_count can be
	 * zero whenever there is exactly one interrupt vector.  This is not
	 * an error.
	 */
	if (phba->cfg_fcp_eq_count) {
		phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
				       phba->cfg_fcp_eq_count), GFP_KERNEL);
		if (!phba->sli4_hba.fp_eq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2576 Failed allocate memory for "
					"fast-path EQ record array\n");
			goto out_free_sp_eq;
		}
	}
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
					      phba->sli4_hba.eq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0497 Failed allocate fast-path EQ\n");
			goto out_free_fp_eq;
		}
		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
	}

	/*
	 * Create Complete Queues (CQs)
	 */

	/* Create slow-path Mailbox Command Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_free_fp_eq;
	}
	phba->sli4_hba.mbx_cq = qdesc;

	/* Create slow-path ELS Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_free_mbx_cq;
	}
	phba->sli4_hba.els_cq = qdesc;

	/*
	 * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs.
	 * If there are no FCP EQs then create exactly one FCP CQ.
	 */
	if (phba->cfg_fcp_eq_count)
		phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
						 phba->cfg_fcp_eq_count),
						GFP_KERNEL);
	else
		phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *),
						GFP_KERNEL);
	if (!phba->sli4_hba.fcp_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2577 Failed allocate memory for fast-path "
				"CQ record array\n");
		goto out_free_els_cq;
	}
	/* do/while (not for) so that at least one CQ is created even when
	 * cfg_fcp_eq_count is zero (single interrupt vector case).
	 */
	fcp_cqidx = 0;
	do {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0499 Failed allocate fast-path FCP "
					"CQ (%d)\n", fcp_cqidx);
			goto out_free_fcp_cq;
		}
		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
	} while (++fcp_cqidx < phba->cfg_fcp_eq_count);

	/* Create Mailbox Command Queue */
	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;

	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
				      phba->sli4_hba.mq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_free_fcp_cq;
	}
	phba->sli4_hba.mbx_wq = qdesc;

	/*
	 * Create all the Work Queues (WQs)
	 */
	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;

	/* Create slow-path ELS Work Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
				      phba->sli4_hba.wq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_free_mbx_wq;
	}
	phba->sli4_hba.els_wq = qdesc;

	/* Create fast-path FCP Work Queue(s) */
	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_wq_count), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2578 Failed allocate memory for fast-path "
				"WQ record array\n");
		goto out_free_els_wq;
	}
	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0503 Failed allocate fast-path FCP "
					"WQ (%d)\n", fcp_wqidx);
			goto out_free_fcp_wq;
		}
		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
	}

	/*
	 * Create Receive Queue (RQ)
	 */
	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;

	/* Create Receive Queue for header */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0506 Failed allocate receive HRQ\n");
		goto out_free_fcp_wq;
	}
	phba->sli4_hba.hdr_rq = qdesc;

	/* Create Receive Queue for data */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0507 Failed allocate receive DRQ\n");
		goto out_free_hdr_rq;
	}
	phba->sli4_hba.dat_rq = qdesc;

	return 0;

	/* Unwind: labels are ordered so each failure point frees exactly
	 * what was allocated before it, in reverse order.
	 */
out_free_hdr_rq:
	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
	phba->sli4_hba.hdr_rq = NULL;
out_free_fcp_wq:
	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
	}
	kfree(phba->sli4_hba.fcp_wq);
	phba->sli4_hba.fcp_wq = NULL;
out_free_els_wq:
	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
	phba->sli4_hba.els_wq = NULL;
out_free_mbx_wq:
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
	phba->sli4_hba.mbx_wq = NULL;
out_free_fcp_cq:
	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
	}
	kfree(phba->sli4_hba.fcp_cq);
	phba->sli4_hba.fcp_cq = NULL;
out_free_els_cq:
	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
	phba->sli4_hba.els_cq = NULL;
out_free_mbx_cq:
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
	phba->sli4_hba.mbx_cq = NULL;
out_free_fp_eq:
	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
	}
	kfree(phba->sli4_hba.fp_eq);
	phba->sli4_hba.fp_eq = NULL;
out_free_sp_eq:
	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
	phba->sli4_hba.sp_eq = NULL;
out_error:
	return -ENOMEM;
}

/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
 * operation.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	int fcp_qidx;

	/* Release mailbox command work queue */
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
	phba->sli4_hba.mbx_wq = NULL;

	/* Release ELS work queue */
	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
	phba->sli4_hba.els_wq = NULL;

	/* Release FCP work queue */
	if (phba->sli4_hba.fcp_wq != NULL)
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
		     fcp_qidx++)
			lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
	kfree(phba->sli4_hba.fcp_wq);
	phba->sli4_hba.fcp_wq = NULL;

	/* Release unsolicited receive queue */
	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
	phba->sli4_hba.hdr_rq = NULL;
	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
	phba->sli4_hba.dat_rq = NULL;

	/* Release ELS complete queue */
	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
	phba->sli4_hba.els_cq = NULL;

	/* Release mailbox command complete queue */
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
	phba->sli4_hba.mbx_cq = NULL;

	/* Release FCP response complete queue.  A do/while mirrors the
	 * create path, which allocates one CQ even when cfg_fcp_eq_count
	 * is zero.
	 */
	fcp_qidx = 0;
	if (phba->sli4_hba.fcp_cq != NULL)
		do
			lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
		while (++fcp_qidx < phba->cfg_fcp_eq_count);
	kfree(phba->sli4_hba.fcp_cq);
	phba->sli4_hba.fcp_cq = NULL;

	/* Release fast-path event queue */
	if (phba->sli4_hba.fp_eq != NULL)
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
		     fcp_qidx++)
			lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
	kfree(phba->sli4_hba.fp_eq);
	phba->sli4_hba.fp_eq = NULL;

	/* Release slow-path event queue */
	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
	phba->sli4_hba.sp_eq = NULL;

	return;
}

/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
 * operation.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
	int rc = -ENOMEM;
	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
	int fcp_cq_index = 0;

	/*
	 * Set up Event Queues (EQs)
	 */

	/* Set up slow-path event queue */
	if (!phba->sli4_hba.sp_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0520 Slow-path EQ not allocated\n");
		goto out_error;
	}
	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
			    LPFC_SP_DEF_IMAX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0521 Failed setup of slow-path EQ: "
				"rc = 0x%x\n", rc);
		goto out_error;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2583 Slow-path EQ setup: queue-id=%d\n",
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up fast-path event queue */
	if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3147 Fast-path EQs not allocated\n");
		rc = -ENOMEM;
		goto out_destroy_sp_eq;
	}
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0522 Fast-path EQ (%d) not "
					"allocated\n", fcp_eqidx);
			rc = -ENOMEM;
			goto out_destroy_fp_eq;
		}
		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
				    phba->cfg_fcp_imax);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0523 Failed setup of fast-path EQ "
					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
			goto out_destroy_fp_eq;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2584 Fast-path EQ setup: "
				"queue[%d]-id=%d\n", fcp_eqidx,
				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
	}

	/*
	 * Set up Complete Queues (CQs)
	 */

	/* Set up slow-path MBOX Complete Queue as the first
CQ */ if (!phba->sli4_hba.mbx_cq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0528 Mailbox CQ not allocated\n"); rc = -ENOMEM; goto out_destroy_fp_eq; } rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, LPFC_MCQ, LPFC_MBOX); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0529 Failed setup of slow-path mailbox CQ: " "rc = 0x%x\n", rc); goto out_destroy_fp_eq; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", phba->sli4_hba.mbx_cq->queue_id, phba->sli4_hba.sp_eq->queue_id); /* Set up slow-path ELS Complete Queue */ if (!phba->sli4_hba.els_cq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0530 ELS CQ not allocated\n"); rc = -ENOMEM; goto out_destroy_mbx_cq; } rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, LPFC_WCQ, LPFC_ELS); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0531 Failed setup of slow-path ELS CQ: " "rc = 0x%x\n", rc); goto out_destroy_mbx_cq; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", phba->sli4_hba.els_cq->queue_id, phba->sli4_hba.sp_eq->queue_id); /* Set up fast-path FCP Response Complete Queue */ if (!phba->sli4_hba.fcp_cq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3148 Fast-path FCP CQ array not " "allocated\n"); rc = -ENOMEM; goto out_destroy_els_cq; } fcp_cqidx = 0; do { if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0526 Fast-path FCP CQ (%d) not " "allocated\n", fcp_cqidx); rc = -ENOMEM; goto out_destroy_fcp_cq; } if (phba->cfg_fcp_eq_count) rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], phba->sli4_hba.fp_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP); else rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], phba->sli4_hba.sp_eq, LPFC_WCQ, LPFC_FCP); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0527 Failed setup of fast-path FCP " "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); goto out_destroy_fcp_cq; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 
"2588 FCP CQ setup: cq[%d]-id=%d, " "parent %seq[%d]-id=%d\n", fcp_cqidx, phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, (phba->cfg_fcp_eq_count) ? "" : "sp_", fcp_cqidx, (phba->cfg_fcp_eq_count) ? phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id : phba->sli4_hba.sp_eq->queue_id); } while (++fcp_cqidx < phba->cfg_fcp_eq_count); /* * Set up all the Work Queues (WQs) */ /* Set up Mailbox Command Queue */ if (!phba->sli4_hba.mbx_wq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0538 Slow-path MQ not allocated\n"); rc = -ENOMEM; goto out_destroy_fcp_cq; } rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, phba->sli4_hba.mbx_cq, LPFC_MBOX); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0539 Failed setup of slow-path MQ: " "rc = 0x%x\n", rc); goto out_destroy_fcp_cq; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", phba->sli4_hba.mbx_wq->queue_id, phba->sli4_hba.mbx_cq->queue_id); /* Set up slow-path ELS Work Queue */ if (!phba->sli4_hba.els_wq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0536 Slow-path ELS WQ not allocated\n"); rc = -ENOMEM; goto out_destroy_mbx_wq; } rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, phba->sli4_hba.els_cq, LPFC_ELS); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0537 Failed setup of slow-path ELS WQ: " "rc = 0x%x\n", rc); goto out_destroy_mbx_wq; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", phba->sli4_hba.els_wq->queue_id, phba->sli4_hba.els_cq->queue_id); /* Set up fast-path FCP Work Queue */ if (!phba->sli4_hba.fcp_wq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3149 Fast-path FCP WQ array not " "allocated\n"); rc = -ENOMEM; goto out_destroy_els_wq; } for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0534 Fast-path FCP WQ (%d) not " "allocated\n", fcp_wqidx); rc = -ENOMEM; goto out_destroy_fcp_wq; } rc = lpfc_wq_create(phba, 
phba->sli4_hba.fcp_wq[fcp_wqidx], phba->sli4_hba.fcp_cq[fcp_cq_index], LPFC_FCP); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0535 Failed setup of fast-path FCP " "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); goto out_destroy_fcp_wq; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2591 FCP WQ setup: wq[%d]-id=%d, " "parent cq[%d]-id=%d\n", fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, fcp_cq_index, phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); /* Round robin FCP Work Queue's Completion Queue assignment */ if (phba->cfg_fcp_eq_count) fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); } /* * Create Receive Queue (RQ) */ if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0540 Receive Queue not allocated\n"); rc = -ENOMEM; goto out_destroy_fcp_wq; } lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ); rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, phba->sli4_hba.els_cq, LPFC_USOL); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0541 Failed setup of Receive Queue: " "rc = 0x%x\n", rc); goto out_destroy_fcp_wq; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " "parent cq-id=%d\n", phba->sli4_hba.hdr_rq->queue_id, phba->sli4_hba.dat_rq->queue_id, phba->sli4_hba.els_cq->queue_id); return 0; out_destroy_fcp_wq: for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); out_destroy_els_wq: lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); out_destroy_mbx_wq: lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); out_destroy_fcp_cq: for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); out_destroy_els_cq: lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); out_destroy_mbx_cq: lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); out_destroy_fp_eq: for (--fcp_eqidx; fcp_eqidx >= 0; 
fcp_eqidx--) lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); out_destroy_sp_eq: lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); out_error: return rc; } /** * lpfc_sli4_queue_unset - Unset all the SLI4 queues * @phba: pointer to lpfc hba data structure. * * This routine is invoked to unset all the SLI4 queues with the FCoE HBA * operation. * * Return codes * 0 - successful * -ENOMEM - No available memory * -EIO - The mailbox failed to complete successfully. **/ void lpfc_sli4_queue_unset(struct lpfc_hba *phba) { int fcp_qidx; /* Unset mailbox command work queue */ lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); /* Unset ELS work queue */ lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); /* Unset unsolicited receive queue */ lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); /* Unset FCP work queue */ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); /* Unset mailbox command complete queue */ lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); /* Unset ELS complete queue */ lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); /* Unset FCP response complete queue */ if (phba->sli4_hba.fcp_cq) { fcp_qidx = 0; do { lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); } while (++fcp_qidx < phba->cfg_fcp_eq_count); } /* Unset fast-path event queue */ if (phba->sli4_hba.fp_eq) { for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); } /* Unset slow-path event queue */ lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); } /** * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool * @phba: pointer to lpfc hba data structure. * * This routine is invoked to allocate and set up a pool of completion queue * events. The body of the completion queue event is a completion queue entry * CQE. 
For now, this pool is used for the interrupt service routine to queue * the following HBA completion queue events for the worker thread to process: * - Mailbox asynchronous events * - Receive queue completion unsolicited events * Later, this can be used for all the slow-path events. * * Return codes * 0 - successful * -ENOMEM - No available memory **/ static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) { struct lpfc_cq_event *cq_event; int i; for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); if (!cq_event) goto out_pool_create_fail; list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); } return 0; out_pool_create_fail: lpfc_sli4_cq_event_pool_destroy(phba); return -ENOMEM; } /** * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool * @phba: pointer to lpfc hba data structure. * * This routine is invoked to free the pool of completion queue events at * driver unload time. Note that, it is the responsibility of the driver * cleanup routine to free all the outstanding completion-queue events * allocated from this pool back into the pool before invoking this routine * to destroy the pool. **/ static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) { struct lpfc_cq_event *cq_event, *next_cq_event; list_for_each_entry_safe(cq_event, next_cq_event, &phba->sli4_hba.sp_cqe_event_pool, list) { list_del(&cq_event->list); kfree(cq_event); } } /** * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool * @phba: pointer to lpfc hba data structure. * * This routine is the lock free version of the API invoked to allocate a * completion-queue event from the free pool. * * Return: Pointer to the newly allocated completion-queue event if successful * NULL otherwise. 
**/ struct lpfc_cq_event * __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) { struct lpfc_cq_event *cq_event = NULL; list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, struct lpfc_cq_event, list); return cq_event; } /** * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool * @phba: pointer to lpfc hba data structure. * * This routine is the lock version of the API invoked to allocate a * completion-queue event from the free pool. * * Return: Pointer to the newly allocated completion-queue event if successful * NULL otherwise. **/ struct lpfc_cq_event * lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) { struct lpfc_cq_event *cq_event; unsigned long iflags; spin_lock_irqsave(&phba->hbalock, iflags); cq_event = __lpfc_sli4_cq_event_alloc(phba); spin_unlock_irqrestore(&phba->hbalock, iflags); return cq_event; } /** * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool * @phba: pointer to lpfc hba data structure. * @cq_event: pointer to the completion queue event to be freed. * * This routine is the lock free version of the API invoked to release a * completion-queue event back into the free pool. **/ void __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, struct lpfc_cq_event *cq_event) { list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); } /** * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool * @phba: pointer to lpfc hba data structure. * @cq_event: pointer to the completion queue event to be freed. * * This routine is the lock version of the API invoked to release a * completion-queue event back into the free pool. 
**/ void lpfc_sli4_cq_event_release(struct lpfc_hba *phba, struct lpfc_cq_event *cq_event) { unsigned long iflags; spin_lock_irqsave(&phba->hbalock, iflags); __lpfc_sli4_cq_event_release(phba, cq_event); spin_unlock_irqrestore(&phba->hbalock, iflags); } /** * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool * @phba: pointer to lpfc hba data structure. * * This routine is to free all the pending completion-queue events to the * back into the free pool for device reset. **/ static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) { LIST_HEAD(cqelist); struct lpfc_cq_event *cqe; unsigned long iflags; /* Retrieve all the pending WCQEs from pending WCQE lists */ spin_lock_irqsave(&phba->hbalock, iflags); /* Pending FCP XRI abort events */ list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, &cqelist); /* Pending ELS XRI abort events */ list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, &cqelist); /* Pending asynnc events */ list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, &cqelist); spin_unlock_irqrestore(&phba->hbalock, iflags); while (!list_empty(&cqelist)) { list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); lpfc_sli4_cq_event_release(phba, cqe); } } /** * lpfc_pci_function_reset - Reset pci function. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to request a PCI function reset. It will destroys * all resources assigned to the PCI function which originates this request. * * Return codes * 0 - successful * -ENOMEM - No available memory * -EIO - The mailbox failed to complete successfully. 
 **/
int
lpfc_pci_function_reset(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t rc = 0, if_type;
	uint32_t shdr_status, shdr_add_status;
	uint32_t rdy_chk, num_resets = 0, reset_again = 0;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_register reg_data;
	uint16_t devid;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		/* if_type 0: reset via the FUNCTION_RESET mailbox command */
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0494 Unable to allocate memory for "
					"issuing SLI_FUNCTION_RESET mailbox "
					"command\n");
			return -ENOMEM;
		}

		/* Setup PCI function reset mailbox-ioctl command */
		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
				 LPFC_SLI4_MBX_EMBED);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		/*
		 * On MBX_TIMEOUT the firmware may still own the mailbox
		 * memory, so it is deliberately not returned to the pool.
		 */
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0495 SLI_FUNCTION_RESET mailbox "
					"failed with status x%x add_status x%x,"
					" mbx status x%x\n",
					shdr_status, shdr_add_status, rc);
			rc = -ENXIO;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		/* if_type 2: reset via the SLIPORT control register */
		for (num_resets = 0;
		     num_resets < MAX_IF_TYPE_2_RESETS;
		     num_resets++) {
			reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
			       CTRLregaddr);
			/* flush -- the config read forces the posted write
			 * out to the device; devid itself is unused */
			pci_read_config_word(phba->pcidev,
					     PCI_DEVICE_ID, &devid);
			/*
			 * Poll the Port Status Register and wait for RDY for
			 * up to 10 seconds (1000 iterations x 10 ms). If the
			 * port doesn't respond, treat it as an error. If the
			 * port responds with RN, start the loop again.
			 */
			for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
				msleep(10);
				if (lpfc_readl(phba->sli4_hba.u.if_type2.
					       STATUSregaddr,
					       &reg_data.word0)) {
					rc = -ENODEV;
					goto out;
				}
				if (bf_get(lpfc_sliport_status_rn, &reg_data))
					reset_again++;
				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
					break;
			}

			/*
			 * If the port responds to the init request with
			 * reset needed, delay for a bit and restart the loop.
			 */
			if (reset_again && (rdy_chk < 1000)) {
				msleep(10);
				reset_again = 0;
				continue;
			}

			/* Detect any port errors. */
			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
			    (rdy_chk >= 1000)) {
				phba->work_status[0] = readl(
					phba->sli4_hba.u.if_type2.ERR1regaddr);
				phba->work_status[1] = readl(
					phba->sli4_hba.u.if_type2.ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2890 Port error detected during port "
					"reset(%d): port status reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					num_resets, reg_data.word0,
					phba->work_status[0],
					phba->work_status[1]);
				rc = -ENODEV;
			}

			/*
			 * Terminate the outer loop provided the Port indicated
			 * ready within 10 seconds.
			 */
			if (rdy_chk < 1000)
				break;
		}
		/* delay driver action following IF_TYPE_2 function reset */
		msleep(100);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}

out:
	/* Catch the not-ready port failure after a port reset. */
	if (num_resets >= MAX_IF_TYPE_2_RESETS)
		rc = -ENODEV;

	return rc;
}

/**
 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
 * @phba: pointer to lpfc hba data structure.
 * @cnt: number of nop mailbox commands to send.
 *
 * This routine is invoked to send a number @cnt of NOP mailbox command and
 * wait for each command to complete.
 *
 * Return: the number of NOP mailbox command completed.
**/ static int lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) { LPFC_MBOXQ_t *mboxq; int length, cmdsent; uint32_t mbox_tmo; uint32_t rc = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; if (cnt == 0) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "2518 Requested to send 0 NOP mailbox cmd\n"); return cnt; } mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2519 Unable to allocate memory for issuing " "NOP mailbox command\n"); return 0; } /* Set up NOP SLI4_CONFIG mailbox-ioctl command */ length = (sizeof(struct lpfc_mbx_nop) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); for (cmdsent = 0; cmdsent < cnt; cmdsent++) { if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); else { mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); } if (rc == MBX_TIMEOUT) break; /* Check return status */ shdr = (union lpfc_sli4_cfg_shdr *) &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "2520 NOP mailbox command failed " "status x%x add_status x%x mbx " "status x%x\n", shdr_status, shdr_add_status, rc); break; } } if (rc != MBX_TIMEOUT) mempool_free(mboxq, phba->mbox_mem_pool); return cmdsent; } /** * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to set up the PCI device memory space for device * with SLI-4 interface spec. 
*
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar1map_len, bar2map_len;
	int error = -ENODEV;
	uint32_t if_type;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size: try 64-bit, fall back to 32-bit */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}

	/*
	 * The BARs and register set definitions and offset locations are
	 * dependent on the if_type.
	 */
	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
				  &phba->sli4_hba.sli_intf.word0)) {
		return error;
	}

	/* There is no SLI3 failback for SLI4 devices. */
	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_VALID) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2894 SLI_INTF reg contents invalid "
				"sli_intf reg 0x%x\n",
				phba->sli4_hba.sli_intf.word0);
		return error;
	}

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	/*
	 * Get the bus address of SLI4 device Bar regions and the
	 * number of bytes required by each mapping. The mapping of the
	 * particular PCI BARs regions is dependent on the type of
	 * SLI4 device.
	 */
	if (pci_resource_start(pdev, 0)) {
		phba->pci_bar0_map = pci_resource_start(pdev, 0);
		bar0map_len = pci_resource_len(pdev, 0);

		/*
		 * Map SLI4 PCI Config Space Register base to a kernel virtual
		 * addr
		 */
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			goto out;
		}
		/* Set up BAR0 PCI config space register memory map */
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	} else {
		/* No BAR0: fall back to BAR1 (not valid for if_type 2) */
		phba->pci_bar0_map = pci_resource_start(pdev, 1);
		bar0map_len = pci_resource_len(pdev, 1);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
			dev_printk(KERN_ERR, &pdev->dev,
			   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
			goto out;
		}
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				"ioremap failed for SLI4 PCI config "
				"registers.\n");
			goto out;
		}
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	}

	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
	    (pci_resource_start(pdev, 2))) {
		/*
		 * Map SLI4 if type 0 HBA Control Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar1_map = pci_resource_start(pdev, 2);
		bar1map_len = pci_resource_len(pdev, 2);
		phba->sli4_hba.ctrl_regs_memmap_p =
			ioremap(phba->pci_bar1_map, bar1map_len);
		if (!phba->sli4_hba.ctrl_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLI4 HBA control registers.\n");
			goto out_iounmap_conf;
		}
		lpfc_sli4_bar1_register_memmap(phba);
	}

	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
	    (pci_resource_start(pdev, 4))) {
		/*
		 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar2_map = pci_resource_start(pdev, 4);
		bar2map_len = pci_resource_len(pdev, 4);
		phba->sli4_hba.drbl_regs_memmap_p =
			ioremap(phba->pci_bar2_map, bar2map_len);
		if (!phba->sli4_hba.drbl_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLI4 HBA doorbell registers.\n");
			goto out_iounmap_ctrl;
		}
		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
		if (error)
			goto out_iounmap_all;
	}

	return 0;

	/* Unwind the ioremap()s in reverse order of mapping. */
out_iounmap_all:
	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
	iounmap(phba->sli4_hba.conf_regs_memmap_p);
out:
	return error;
}

/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-4 interface spec.
 **/
static void
lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
{
	uint32_t if_type;
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);

	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		/* if_type 0 mapped all three regions in mem_setup */
		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		/* if_type 2 only maps the config register region */
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}

/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
 * invoked, enables either all or nothing, depending on the current
 * availability of PCI vector resources.
The device driver is responsible
 * for calling the individual request_irq() to register each MSI-X vector
 * with a interrupt handler, which is done in this function. Note that
 * later when device is unloading, the driver should always call free_irq()
 * on all MSI-X vectors it has done request_irq() on before calling
 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
 * will be left with MSI-X enabled and leaks its vectors.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
static int
lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
	int rc, i;
	LPFC_MBOXQ_t *pmb;

	/* Set up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		phba->msix_entries[i].entry = i;

	/* Configure MSI-X capability structure */
	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
			     ARRAY_SIZE(phba->msix_entries));
	if (rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0420 PCI enable MSI-X failed (%d)\n", rc);
		goto msi_fail_out;
	}
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0477 MSI-X entry[%d]: vector=x%x "
				"message=%d\n", i,
				phba->msix_entries[i].vector,
				phba->msix_entries[i].entry);
	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* vector-0 is associated to slow-path handler */
	rc = request_irq(phba->msix_entries[0].vector,
			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0421 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* vector-1 is associated to fast-path handler */
	rc = request_irq(phba->msix_entries[1].vector,
			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0429 MSI-X fast-path request_irq failed "
				"(%d)\n", rc);
		goto irq_fail_out;
	}

	/*
	 * Configure HBA MSI-X attention conditions to messages
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = -ENOMEM;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0474 Unable to allocate memory for issuing "
				"MBOX_CONFIG_MSI command\n");
		goto mem_fail_out;
	}
	rc = lpfc_config_msi(phba, pmb);
	if (rc)
		goto mbx_fail_out;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0351 Config MSI mailbox command failed, "
				"mbxCmd x%x, mbxStatus x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
		goto mbx_fail_out;
	}

	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;

	/*
	 * Error labels intentionally fall through, undoing the setup
	 * steps in reverse order (mailbox, irq 1, irq 0, MSI-X).
	 */
mbx_fail_out:
	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested */
	free_irq(phba->msix_entries[1].vector, phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(phba->msix_entries[0].vector, phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_disable_msix(phba->pcidev);
	return rc;
}

/**
 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release the MSI-X vectors and then disable the
 * MSI-X interrupt mode to device with SLI-3 interface spec.
 **/
static void
lpfc_sli_disable_msix(struct lpfc_hba *phba)
{
	int i;

	/* Free up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		free_irq(phba->msix_entries[i].vector, phba);
	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);

	return;
}

/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling the
 * request_irq() to register MSI vector with a interrupt the handler, which
 * is done in this function.
* * Return codes * 0 - successful * other values - error */ static int lpfc_sli_enable_msi(struct lpfc_hba *phba) { int rc; rc = pci_enable_msi(phba->pcidev); if (!rc) lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0462 PCI enable MSI mode success.\n"); else { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0471 PCI enable MSI mode failed (%d)\n", rc); return rc; } rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, IRQF_SHARED, LPFC_DRIVER_NAME, phba); if (rc) { pci_disable_msi(phba->pcidev); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0478 MSI request_irq failed (%d)\n", rc); } return rc; } /** * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to disable the MSI interrupt mode to device with * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has * done request_irq() on before calling pci_disable_msi(). Failure to do so * results in a BUG_ON() and a device will be left with MSI enabled and leaks * its vector. */ static void lpfc_sli_disable_msi(struct lpfc_hba *phba) { free_irq(phba->pcidev->irq, phba); pci_disable_msi(phba->pcidev); return; } /** * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable device interrupt and associate driver's * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface * spec. Depends on the interrupt mode configured to the driver, the driver * will try to fallback from the configured interrupt mode to an interrupt * mode which is supported by the platform, kernel, and device in the order * of: * MSI-X -> MSI -> IRQ. 
* * Return codes * 0 - successful * other values - error **/ static uint32_t lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) { uint32_t intr_mode = LPFC_INTR_ERROR; int retval; if (cfg_mode == 2) { /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); if (!retval) { /* Now, try to enable MSI-X interrupt mode */ retval = lpfc_sli_enable_msix(phba); if (!retval) { /* Indicate initialization to MSI-X mode */ phba->intr_type = MSIX; intr_mode = 2; } } } /* Fallback to MSI if MSI-X initialization failed */ if (cfg_mode >= 1 && phba->intr_type == NONE) { retval = lpfc_sli_enable_msi(phba); if (!retval) { /* Indicate initialization to MSI mode */ phba->intr_type = MSI; intr_mode = 1; } } /* Fallback to INTx if both MSI-X/MSI initalization failed */ if (phba->intr_type == NONE) { retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, IRQF_SHARED, LPFC_DRIVER_NAME, phba); if (!retval) { /* Indicate initialization to INTx mode */ phba->intr_type = INTx; intr_mode = 0; } } return intr_mode; } /** * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to disable device interrupt and disassociate the * driver's interrupt handler(s) from interrupt vector(s) to device with * SLI-3 interface spec. Depending on the interrupt mode, the driver will * release the interrupt vector(s) for the message signaled interrupt. 
**/ static void lpfc_sli_disable_intr(struct lpfc_hba *phba) { /* Disable the currently initialized interrupt mode */ if (phba->intr_type == MSIX) lpfc_sli_disable_msix(phba); else if (phba->intr_type == MSI) lpfc_sli_disable_msi(phba); else if (phba->intr_type == INTx) free_irq(phba->pcidev->irq, phba); /* Reset interrupt management states */ phba->intr_type = NONE; phba->sli.slistat.sli_intr = 0; return; } /** * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable the MSI-X interrupt vectors to device * with SLI-4 interface spec. The kernel function pci_enable_msix() is called * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked, * enables either all or nothing, depending on the current availability of * PCI vector resources. The device driver is responsible for calling the * individual request_irq() to register each MSI-X vector with a interrupt * handler, which is done in this function. Note that later when device is * unloading, the driver should always call free_irq() on all MSI-X vectors * it has done request_irq() on before calling pci_disable_msix(). Failure * to do so results in a BUG_ON() and a device will be left with MSI-X * enabled and leaks its vectors. 
* * Return codes * 0 - successful * other values - error **/ static int lpfc_sli4_enable_msix(struct lpfc_hba *phba) { int vectors, rc, index; /* Set up MSI-X multi-message vectors */ for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) phba->sli4_hba.msix_entries[index].entry = index; /* Configure MSI-X capability structure */ vectors = phba->sli4_hba.cfg_eqn; enable_msix_vectors: rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, vectors); if (rc > 1) { vectors = rc; goto enable_msix_vectors; } else if (rc) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0484 PCI enable MSI-X failed (%d)\n", rc); goto msi_fail_out; } /* Log MSI-X vector assignment */ for (index = 0; index < vectors; index++) lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0489 MSI-X entry[%d]: vector=x%x " "message=%d\n", index, phba->sli4_hba.msix_entries[index].vector, phba->sli4_hba.msix_entries[index].entry); /* * Assign MSI-X vectors to interrupt handlers */ if (vectors > 1) rc = request_irq(phba->sli4_hba.msix_entries[0].vector, &lpfc_sli4_sp_intr_handler, IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba); else /* All Interrupts need to be handled by one EQ */ rc = request_irq(phba->sli4_hba.msix_entries[0].vector, &lpfc_sli4_intr_handler, IRQF_SHARED, LPFC_DRIVER_NAME, phba); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0485 MSI-X slow-path request_irq failed " "(%d)\n", rc); goto msi_fail_out; } /* The rest of the vector(s) are associated to fast-path handler(s) */ for (index = 1; index < vectors; index++) { phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba; rc = request_irq(phba->sli4_hba.msix_entries[index].vector, &lpfc_sli4_fp_intr_handler, IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, &phba->sli4_hba.fcp_eq_hdl[index - 1]); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0486 MSI-X fast-path (%d) " "request_irq failed (%d)\n", index, rc); goto cfg_fail_out; } } phba->sli4_hba.msix_vec_nr = vectors; return 
rc; cfg_fail_out: /* free the irq already requested */ for (--index; index >= 1; index--) free_irq(phba->sli4_hba.msix_entries[index - 1].vector, &phba->sli4_hba.fcp_eq_hdl[index - 1]); /* free the irq already requested */ free_irq(phba->sli4_hba.msix_entries[0].vector, phba); msi_fail_out: /* Unconfigure MSI-X capability structure */ pci_disable_msix(phba->pcidev); return rc; } /** * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device * @phba: pointer to lpfc hba data structure. * * This routine is invoked to release the MSI-X vectors and then disable the * MSI-X interrupt mode to device with SLI-4 interface spec. **/ static void lpfc_sli4_disable_msix(struct lpfc_hba *phba) { int index; /* Free up MSI-X multi-message vectors */ free_irq(phba->sli4_hba.msix_entries[0].vector, phba); for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++) free_irq(phba->sli4_hba.msix_entries[index].vector, &phba->sli4_hba.fcp_eq_hdl[index - 1]); /* Disable MSI-X */ pci_disable_msix(phba->pcidev); return; } /** * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable the MSI interrupt mode to device with * SLI-4 interface spec. The kernel function pci_enable_msi() is called * to enable the MSI vector. The device driver is responsible for calling * the request_irq() to register MSI vector with a interrupt the handler, * which is done in this function. 
* * Return codes * 0 - successful * other values - error **/ static int lpfc_sli4_enable_msi(struct lpfc_hba *phba) { int rc, index; rc = pci_enable_msi(phba->pcidev); if (!rc) lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0487 PCI enable MSI mode success.\n"); else { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0488 PCI enable MSI mode failed (%d)\n", rc); return rc; } rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, IRQF_SHARED, LPFC_DRIVER_NAME, phba); if (rc) { pci_disable_msi(phba->pcidev); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0490 MSI request_irq failed (%d)\n", rc); return rc; } for (index = 0; index < phba->cfg_fcp_eq_count; index++) { phba->sli4_hba.fcp_eq_hdl[index].idx = index; phba->sli4_hba.fcp_eq_hdl[index].phba = phba; } return 0; } /** * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device * @phba: pointer to lpfc hba data structure. * * This routine is invoked to disable the MSI interrupt mode to device with * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has * done request_irq() on before calling pci_disable_msi(). Failure to do so * results in a BUG_ON() and a device will be left with MSI enabled and leaks * its vector. **/ static void lpfc_sli4_disable_msi(struct lpfc_hba *phba) { free_irq(phba->pcidev->irq, phba); pci_disable_msi(phba->pcidev); return; } /** * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable device interrupt and associate driver's * interrupt handler(s) to interrupt vector(s) to device with SLI-4 * interface spec. Depends on the interrupt mode configured to the driver, * the driver will try to fallback from the configured interrupt mode to an * interrupt mode which is supported by the platform, kernel, and device in * the order of: * MSI-X -> MSI -> IRQ. 
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval, index;

	if (cfg_mode == 2) {
		/* Preparation before conf_msi mbox cmd */
		/* NOTE(review): retval is unconditionally 0 here, so the
		 * if (!retval) guard always takes the branch; this looks
		 * like a placeholder left from a removed pre-config step.
		 */
		retval = 0;
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli4_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli4_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
			/* In INTx mode all fast-path EQ handles share the
			 * one interrupt, so point them all at this HBA.
			 */
			for (index = 0; index < phba->cfg_fcp_eq_count;
			     index++) {
				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
			}
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate
 * the driver's interrupt handler(s) from interrupt vector(s) to device
 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
 * will release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX)
		lpfc_sli4_disable_msix(phba);
	else if (phba->intr_type == MSI)
		lpfc_sli4_disable_msi(phba);
	else if (phba->intr_type == INTx)
		free_irq(phba->pcidev->irq, phba);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;

	return;
}

/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-3 interface spec.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	/* Mark the port unloading before tearing anything down */
	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);

	return;
}

/**
 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-4 interface spec.
 **/
static void
lpfc_sli4_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	/* Mark the port unloading before tearing anything down */
	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	phba->pport->work_port_events = 0;

	/* Stop the SLI4 device port */
	lpfc_stop_port(phba);

	lpfc_sli4_disable_intr(phba);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);
	lpfc_sli4_queue_destroy(phba);

	return;
}

/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
 * @phba: Pointer to HBA context object.
* * This function is called in the SLI4 code path to wait for completion * of device's XRIs exchange busy. It will check the XRI exchange busy * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after * that, it will check the XRI exchange busy on outstanding FCP and ELS * I/Os every 30 seconds, log error message, and wait forever. Only when * all XRI exchange busy complete, the driver unload shall proceed with * invoking the function reset ioctl mailbox command to the CNA and the * the rest of the driver unload resource release. **/ static void lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) { int wait_time = 0; int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); while (!fcp_xri_cmpl || !els_xri_cmpl) { if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { if (!fcp_xri_cmpl) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2877 FCP XRI exchange busy " "wait time: %d seconds.\n", wait_time/1000); if (!els_xri_cmpl) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2878 ELS XRI exchange busy " "wait time: %d seconds.\n", wait_time/1000); msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; } else { msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; } fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); } } /** * lpfc_sli4_hba_unset - Unset the fcoe hba * @phba: Pointer to HBA context object. * * This function is called in the SLI4 code path to reset the HBA's FCoE * function. The caller is not required to hold any lock. This routine * issues PCI function reset mailbox command to reset the FCoE function. * At the end of the function, it calls lpfc_hba_down_post function to * free any pending commands. 
**/ static void lpfc_sli4_hba_unset(struct lpfc_hba *phba) { int wait_cnt = 0; LPFC_MBOXQ_t *mboxq; struct pci_dev *pdev = phba->pcidev; lpfc_stop_hba_timers(phba); phba->sli4_hba.intr_enable = 0; /* * Gracefully wait out the potential current outstanding asynchronous * mailbox command. */ /* First, block any pending async mailbox command from posted */ spin_lock_irq(&phba->hbalock); phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; spin_unlock_irq(&phba->hbalock); /* Now, trying to wait it out if we can */ while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { msleep(10); if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) break; } /* Forcefully release the outstanding mailbox command if timed out */ if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { spin_lock_irq(&phba->hbalock); mboxq = phba->sli.mbox_active; mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; __lpfc_mbox_cmpl_put(phba, mboxq); phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; phba->sli.mbox_active = NULL; spin_unlock_irq(&phba->hbalock); } /* Abort all iocbs associated with the hba */ lpfc_sli_hba_iocb_abort(phba); /* Wait for completion of device XRI exchange busy */ lpfc_sli4_xri_exchange_busy_wait(phba); /* Disable PCI subsystem interrupt */ lpfc_sli4_disable_intr(phba); /* Disable SR-IOV if enabled */ if (phba->cfg_sriov_nr_virtfn) pci_disable_sriov(pdev); /* Stop kthread signal shall trigger work_done one more time */ kthread_stop(phba->worker_thread); /* Reset SLI4 HBA FCoE function */ lpfc_pci_function_reset(phba); lpfc_sli4_queue_destroy(phba); /* Stop the SLI4 device port */ phba->pport->work_port_events = 0; } /** * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities. * @phba: Pointer to HBA context object. * @mboxq: Pointer to the mailboxq memory for the mailbox command response. * * This function is called in the SLI4 code path to read the port's * sli4 capabilities. * * This function may be be called from any context that can block-wait * for the completion. 
 The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;

	rc = 0;
	mqe = &mboxq->u.mqe;

	/* Read the port's SLI4 Parameters port capabilities */
	lpfc_pc_sli4_params(mboxq);
	/* Poll when interrupts are not yet enabled; otherwise block-wait */
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}

	if (unlikely(rc))
		return 1;

	/* Unpack the mailbox response bit-fields into pc_sli4_params */
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
					     &mqe->un.sli4_params);
	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
					     &mqe->un.sli4_params);
	sli4_params->proto_types = mqe->un.sli4_params.word3;
	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	return rc;
}

/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe = &mboxq->u.mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;
	int length;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;

	/*
	 * By default, the driver assumes the SLI4 port requires RPI
	 * header postings.  The SLI4_PARAM response will correct this
	 * assumption.
	 */
	phba->sli4_hba.rpi_hdrs_in_use = 1;

	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
	/* Poll when interrupts are not yet enabled; otherwise block-wait */
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}
	if (unlikely(rc))
		return rc;
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
					     mbx_sli4_parameters);
	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
					     mbx_sli4_parameters);
	if (bf_get(cfg_phwq, mbx_sli4_parameters))
		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
					    mbx_sli4_parameters);
	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
					   mbx_sli4_parameters);
	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	return 0;
}

/**
 * lpfc_pci_probe_one_s3 - PCI probe func
 to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver state that it can
 * support this kind of device. If the match is successful, the driver core
 * invokes this routine. If this routine determines it can claim the HBA, it
 * does all the initialization that it needs to do to handle the HBA properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host  *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1403 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	/* Retry loop: step down through interrupt modes (MSI-X -> MSI ->
	 * INTx) until one passes the active-interrupt test below.
	 */
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to disattach a device with SLI-3 interface
 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba   *phba = vport->phba;
	int i;
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	/* Mark the physical port unloading first */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		/* index 0 is the physical port itself; start at 1 */
		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
			fc_vport_terminate(vports[i]->fc_vport);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disable all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	pci_set_drvdata(pdev, NULL);
	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);
}

/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off device's interrupt and DMA,
 * and bring the device offline. Note that as the driver implements the
 * minimum PM requirements to a power-aware driver's PM support for the
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver will
 * fully reinitialize its device during resume() method call, the driver will
 * set device to PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
* * Return code * 0 - driver suspended the device * Error otherwise **/ static int lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0473 PCI device Power Management suspend.\n"); /* Bring down the device */ lpfc_offline_prep(phba); lpfc_offline(phba); kthread_stop(phba->worker_thread); /* Disable interrupt from device */ lpfc_sli_disable_intr(phba); /* Save device state to PCI config space */ pci_save_state(pdev); pci_set_power_state(pdev, PCI_D3hot); return 0; } /** * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt * @pdev: pointer to PCI device * * This routine is to be called from the kernel's PCI subsystem to support * system Power Management (PM) to device with SLI-3 interface spec. When PM * invokes this method, it restores the device's PCI config space state and * fully reinitializes the device and brings it online. Note that as the * driver implements the minimum PM requirements to a power-aware driver's * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, * FREEZE) to the suspend() method call will be treated as SUSPEND and the * driver will fully reinitialize its device during resume() method call, * the device will be set to PCI_D0 directly in PCI config space before * restoring the state. 
* * Return code * 0 - driver suspended the device * Error otherwise **/ static int lpfc_pci_resume_one_s3(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; uint32_t intr_mode; int error; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0452 PCI device Power Management resume.\n"); /* Restore device state from PCI config space */ pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); /* * As the new kernel behavior of pci_restore_state() API call clears * device saved_state flag, need to save the restored state again. */ pci_save_state(pdev); if (pdev->is_busmaster) pci_set_master(pdev); /* Startup the kernel thread for this host adapter. */ phba->worker_thread = kthread_run(lpfc_do_work, phba, "lpfc_worker_%d", phba->brd_no); if (IS_ERR(phba->worker_thread)) { error = PTR_ERR(phba->worker_thread); lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0434 PM resume failed to start worker " "thread: error=x%x.\n", error); return error; } /* Configure and enable interrupt */ intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0430 PM resume Failed to enable interrupt\n"); return -EIO; } else phba->intr_mode = intr_mode; /* Restart HBA and bring it online */ lpfc_sli_brdrestart(phba); lpfc_online(phba); /* Log the current active interrupt mode */ lpfc_log_intr_mode(phba, phba->intr_mode); return 0; } /** * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover * @phba: pointer to lpfc hba data structure. * * This routine is called to prepare the SLI3 device for PCI slot recover. It * aborts all the outstanding SCSI I/Os to the pci device. 
**/ static void lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2723 PCI channel I/O abort preparing for recovery\n"); /* * There may be errored I/Os through HBA, abort all I/Os on txcmplq * and let the SCSI mid-layer to retry them to recover. */ pring = &psli->ring[psli->fcp_ring]; lpfc_sli_abort_iocb_ring(phba, pring); } /** * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset * @phba: pointer to lpfc hba data structure. * * This routine is called to prepare the SLI3 device for PCI slot reset. It * disables the device interrupt and pci device, and aborts the internal FCP * pending I/Os. **/ static void lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2710 PCI channel disable preparing for reset\n"); /* Block any management I/Os to the device */ lpfc_block_mgmt_io(phba); /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); /* stop all timers */ lpfc_stop_hba_timers(phba); /* Disable interrupt and pci device */ lpfc_sli_disable_intr(phba); pci_disable_device(phba->pcidev); /* Flush all driver's outstanding SCSI I/Os as we are to reset */ lpfc_sli_flush_fcp_rings(phba); } /** * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable * @phba: pointer to lpfc hba data structure. * * This routine is called to prepare the SLI3 device for PCI slot permanently * disabling. It blocks the SCSI transport layer traffic and flushes the FCP * pending I/Os. 
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2711 PCI channel permanent disable for failure\n");
	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}

/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}

/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * device with SLI-3 interface spec. This is called after PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device,
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Mark SLI inactive before re-enabling; hba_setup will re-arm it */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 */
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}

/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * returns the number of ELS/CT IOCBs to reserve
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	/* Reservation scales in steps with the configured XRI count;
	 * SLI revisions other than 4 reserve nothing here.
	 */
	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else
			return 150;
	} else
		return 0;
}

/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @phba: pointer to lpfc hba data structure.
 * @fw: pointer to firmware image returned from request_firmware.
 *
 * returns the number of bytes written if write is successful.
 * returns a negative error value if there were errors.
 * returns 0 if firmware matches currently active firmware on port.
 **/
int
lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
{
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;

	INIT_LIST_HEAD(&dma_buffer_list);
	/* Validate the image header before touching the hardware */
	if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
	    (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
	     LPFC_FILE_TYPE_GROUP) ||
	    (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
	    (be32_to_cpu(image->size) != fw->size)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3022 Invalid FW image found. "
				"Magic:%x Type:%x ID:%x\n",
				be32_to_cpu(image->magic_number),
				bf_get_be32(lpfc_grp_hdr_file_type, image),
				bf_get_be32(lpfc_grp_hdr_id, image));
		return -EINVAL;
	}
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	/* Only flash when the image revision differs from what is running.
	 * NOTE(review): the 16 here presumably matches the size of
	 * image->revision -- confirm against struct lpfc_grp_hdr.
	 */
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3023 Updating Firmware. Current Version:%s "
				"New Version:%s\n",
				fwrev, image->revision);
		/* Stage a fixed-size pool of DMA pages for the write object
		 * mailbox command; on any allocation failure, unwind via the
		 * common cleanup at out:.
		 */
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		/* Copy the image into the DMA pages a chunk at a time and
		 * hand each chunk to the port; lpfc_wr_object advances
		 * 'offset' by the amount the port accepted.
		 */
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
				    (fw->size - offset), &offset);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3024 Firmware update failed. "
						"%d\n", rc);
				goto out;
			}
		}
		rc = offset;
	}
out:
	/* Release every staged DMA buffer on both success and error paths */
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	return rc;
}

/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem to device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver state that it
 * can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the HBA,
 * it does all the initialization that it needs to do to handle the HBA
 * properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host  *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;
	int mcnt;
	int adjusted_fcp_eq_count;
	const struct firmware *fw;
	/* NOTE(review): 16 bytes may truncate "<ModelName>.grp" for long
	 * model names -- confirm against the maximum ModelName length.
	 */
	uint8_t file_name[16];

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1411 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Set up SLI-4 Specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Initialize and populate the iocb list per host */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2821 initialize iocb list %d.\n",
			phba->cfg_iocb_cnt*1024);
	error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1413 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s4;
	}

	INIT_LIST_HEAD(&phba->active_rrq_list);
	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	/* Retry loop: on each pass, bring the port up with the requested
	 * interrupt mode; if the active-interrupt test fails, tear the port
	 * down and fall back to the next lower interrupt mode.
	 */
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0426 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* Default to single EQ for non-MSI-X */
		if (phba->intr_type != MSIX)
			adjusted_fcp_eq_count = 0;
		else if (phba->sli4_hba.msix_vec_nr <
					phba->cfg_fcp_eq_count + 1)
			adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
		else
			adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
		phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
		/* Set up SLI-4 HBA */
		if (lpfc_sli4_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1421 Failed to set up hba\n");
			error = -ENODEV;
			goto out_disable_intr;
		}

		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
		if (intr_mode != 0)
			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
							    LPFC_ACT_INTR_CNT);

		/* Check active interrupts received only for MSI/MSI-X */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0451 Configure interrupt mode (%d) "
				"failed active interrupt test.\n",
				intr_mode);
		/* Unset the previous SLI-4 HBA setup. */
		/*
		 * TODO: Is this operation compatible with IF TYPE 2
		 * devices? All port state is deleted and cleared.
		 */
		lpfc_sli4_unset_hba(phba);
		/* Try next level of interrupt mode */
		cfg_mode = --intr_mode;
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* check for firmware upgrade or downgrade (if_type 2 only) */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) {
		snprintf(file_name, 16, "%s.grp", phba->ModelName);
		error = request_firmware(&fw, file_name, &phba->pcidev->dev);
		/* Firmware update is best-effort: probe succeeds even if
		 * the image is absent or the write fails.
		 */
		if (!error) {
			lpfc_write_firmware(phba, fw);
			release_firmware(fw);
		}
	}

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);
	return 0;

out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
**/
static void __devexit
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		/* index 0 is the physical port itself; start at 1 */
		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
			fc_vport_terminate(vports[i]->fc_vport);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform cleanup on the physical port */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);
	lpfc_sli4_hba_unset(phba);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);

	return;
}

/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec.
When PM invokes
 * this method, it quiesces the device by stopping the driver's worker
 * thread for the device, turning off device's interrupt and DMA, and bring
 * the device offline. Note that as the driver implements the minimum PM
 * requirements to a power-aware driver's PM support for suspend/resume -- all
 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
 * method call will be treated as SUSPEND and the driver will fully
 * reinitialize its device during resume() method call, the driver will set
 * device to PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it restores the device's PCI config space state and fully
 * reinitializes the device and brings it online.
Note that as the driver
 * implements the minimum PM requirements to a power-aware driver's PM for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver
 * will fully reinitialize its device during resume() method call, the device
 * will be set to PCI_D0 directly in PCI config space before restoring the
 * state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer to retry them to recover.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);
}

/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
**/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);
}

/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}

/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. This function is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
 * for the PCI subsystem to perform proper recovery as desired.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Map each PCI channel state to the matching SLI-4 prepare routine
	 * and the recovery result expected by the PCI error-recovery core.
	 */
	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}

/**
 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called after PCI bus has been reset to
 * restart the PCI card from scratch, as if from a cold-boot. During the
 * PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device, enable
 * the interrupt, but it will just put the HBA to offline state without
 * passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Mark SLI inactive while the port is being brought back up */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
**/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}

/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
 * at PCI device-specific information of the device and driver to see if the
 * driver state that it can support this kind of device. If the match is
 * successful, the driver core invokes this routine. This routine dispatches
 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
 * do all the initialization that it needs to do to handle the HBA device
 * properly.
* * Return code * 0 - driver can claim the device * negative value - driver can not claim the device **/ static int __devinit lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) { int rc; struct lpfc_sli_intf intf; if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) return -ENODEV; if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) rc = lpfc_pci_probe_one_s4(pdev, pid); else rc = lpfc_pci_probe_one_s3(pdev, pid); return rc; } /** * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem * @pdev: pointer to PCI device * * This routine is to be registered to the kernel's PCI subsystem. When an * Emulex HBA is removed from PCI bus, the driver core invokes this routine. * This routine dispatches the action to the proper SLI-3 or SLI-4 device * remove routine, which will perform all the necessary cleanup for the * device to be removed from the PCI subsystem properly. **/ static void __devexit lpfc_pci_remove_one(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; switch (phba->pci_dev_grp) { case LPFC_PCI_DEV_LP: lpfc_pci_remove_one_s3(pdev); break; case LPFC_PCI_DEV_OC: lpfc_pci_remove_one_s4(pdev); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1424 Invalid PCI device group: 0x%x\n", phba->pci_dev_grp); break; } return; } /** * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management * @pdev: pointer to PCI device * @msg: power management message * * This routine is to be registered to the kernel's PCI subsystem to support * system Power Management (PM). When PM invokes this method, it dispatches * the action to the proper SLI-3 or SLI-4 device suspend routine, which will * suspend the device. 
* * Return code * 0 - driver suspended the device * Error otherwise **/ static int lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; int rc = -ENODEV; switch (phba->pci_dev_grp) { case LPFC_PCI_DEV_LP: rc = lpfc_pci_suspend_one_s3(pdev, msg); break; case LPFC_PCI_DEV_OC: rc = lpfc_pci_suspend_one_s4(pdev, msg); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1425 Invalid PCI device group: 0x%x\n", phba->pci_dev_grp); break; } return rc; } /** * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management * @pdev: pointer to PCI device * * This routine is to be registered to the kernel's PCI subsystem to support * system Power Management (PM). When PM invokes this method, it dispatches * the action to the proper SLI-3 or SLI-4 device resume routine, which will * resume the device. * * Return code * 0 - driver suspended the device * Error otherwise **/ static int lpfc_pci_resume_one(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; int rc = -ENODEV; switch (phba->pci_dev_grp) { case LPFC_PCI_DEV_LP: rc = lpfc_pci_resume_one_s3(pdev); break; case LPFC_PCI_DEV_OC: rc = lpfc_pci_resume_one_s4(pdev); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1426 Invalid PCI device group: 0x%x\n", phba->pci_dev_grp); break; } return rc; } /** * lpfc_io_error_detected - lpfc method for handling PCI I/O error * @pdev: pointer to PCI device. * @state: the current PCI connection state. * * This routine is registered to the PCI subsystem for error handling. This * function is called by the PCI subsystem after a PCI bus error affecting * this device has been detected. 
When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;

	/* Dispatch on the device group recorded at probe time; an unknown
	 * group leaves the default DISCONNECT result in place.
	 */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_LP)
		result = lpfc_io_error_detected_s3(pdev, state);
	else if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		result = lpfc_io_error_detected_s4(pdev, state);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
	return result;
}

/**
 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after PCI bus has been reset to restart the PCI card
 * from scratch, as if from a cold-boot. When this routine is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
 * routine, which will perform the proper device reset.
* * Return codes * PCI_ERS_RESULT_RECOVERED - the device has been recovered * PCI_ERS_RESULT_DISCONNECT - device could not be recovered **/ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; switch (phba->pci_dev_grp) { case LPFC_PCI_DEV_LP: rc = lpfc_io_slot_reset_s3(pdev); break; case LPFC_PCI_DEV_OC: rc = lpfc_io_slot_reset_s4(pdev); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1428 Invalid PCI device group: 0x%x\n", phba->pci_dev_grp); break; } return rc; } /** * lpfc_io_resume - lpfc method for resuming PCI I/O operation * @pdev: pointer to PCI device * * This routine is registered to the PCI subsystem for error handling. It * is called when kernel error recovery tells the lpfc driver that it is * OK to resume normal PCI operation after PCI bus error recovery. When * this routine is invoked, it dispatches the action to the proper SLI-3 * or SLI-4 device io_resume routine, which will resume the device operation. 
**/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Dispatch on the device group recorded at probe time */
	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_mgmt_open - method called when 'lpfcmgmt' is opened from userspace
 * @inode: pointer to the inode representing the lpfcmgmt device
 * @filep: pointer to the file representing the open lpfcmgmt device
 *
 * This routine puts a reference count on the lpfc module whenever the
 * character device is opened
 **/
static int
lpfc_mgmt_open(struct inode *inode, struct file *filep)
{
	/* NOTE(review): try_module_get() return is ignored -- open succeeds
	 * even if the module is being unloaded; confirm this is intended.
	 */
	try_module_get(THIS_MODULE);
	return 0;
}

/**
 * lpfc_mgmt_release - method called when 'lpfcmgmt' is closed in userspace
 * @inode: pointer to the inode representing the lpfcmgmt device
 * @filep: pointer to the file representing the open lpfcmgmt device
 *
 * This routine removes a reference count from the lpfc module when the
 * character device is closed
 **/
static int
lpfc_mgmt_release(struct inode *inode, struct file *filep)
{
	module_put(THIS_MODULE);
	return 0;
}

/* PCI IDs of every Emulex/ServerEngines adapter this driver claims */
static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

/* PCI error-recovery callbacks registered with the PCI core */
static struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

/* Top-level PCI driver registration */
static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler    = &lpfc_err_handler,
};

/* File operations for the 'lpfcmgmt' character device */
static const struct file_operations lpfc_mgmt_fop = {
	.open = lpfc_mgmt_open,
	.release = lpfc_mgmt_release,
};

/* Misc device node used by management applications */
static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
* * Return codes * 0 - successful * -ENOMEM - FC attach transport failed * all others - failed */ static int __init lpfc_init(void) { int error = 0; printk(LPFC_MODULE_DESC "\n"); printk(LPFC_COPYRIGHT "\n"); error = misc_register(&lpfc_mgmt_dev); if (error) printk(KERN_ERR "Could not register lpfcmgmt device, " "misc_register returned with status %d", error); if (lpfc_enable_npiv) { lpfc_transport_functions.vport_create = lpfc_vport_create; lpfc_transport_functions.vport_delete = lpfc_vport_delete; } lpfc_transport_template = fc_attach_transport(&lpfc_transport_functions); if (lpfc_transport_template == NULL) return -ENOMEM; if (lpfc_enable_npiv) { lpfc_vport_transport_template = fc_attach_transport(&lpfc_vport_transport_functions); if (lpfc_vport_transport_template == NULL) { fc_release_transport(lpfc_transport_template); return -ENOMEM; } } error = pci_register_driver(&lpfc_driver); if (error) { fc_release_transport(lpfc_transport_template); if (lpfc_enable_npiv) fc_release_transport(lpfc_vport_transport_template); } return error; } /** * lpfc_exit - lpfc module removal routine * * This routine is invoked when the lpfc module is removed from the kernel. * The special kernel macro module_exit() is used to indicate the role of * this routine to the kernel as lpfc module exit point. 
*/ static void __exit lpfc_exit(void) { misc_deregister(&lpfc_mgmt_dev); pci_unregister_driver(&lpfc_driver); fc_release_transport(lpfc_transport_template); if (lpfc_enable_npiv) fc_release_transport(lpfc_vport_transport_template); if (_dump_buf_data) { printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " "_dump_buf_data at 0x%p\n", (1L << _dump_buf_data_order), _dump_buf_data); free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); } if (_dump_buf_dif) { printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " "_dump_buf_dif at 0x%p\n", (1L << _dump_buf_dif_order), _dump_buf_dif); free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); } } module_init(lpfc_init); module_exit(lpfc_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(LPFC_MODULE_DESC); MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
gpl-2.0
stedman420/android_kernel_zte_hera
arch/sh/drivers/pci/pci-sh5.c
8376
5947
/* * Copyright (C) 2001 David J. Mckay (david.mckay@st.com) * Copyright (C) 2003, 2004 Paul Mundt * Copyright (C) 2004 Richard Curnow * * May be copied or modified under the terms of the GNU General Public * License. See linux/COPYING for more information. * * Support functions for the SH5 PCI hardware. */ #include <linux/kernel.h> #include <linux/rwsem.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/irq.h> #include <cpu/irq.h> #include <asm/pci.h> #include <asm/io.h> #include "pci-sh5.h" unsigned long pcicr_virt; unsigned long PCI_IO_AREA; /* Rounds a number UP to the nearest power of two. Used for * sizing the PCI window. */ static u32 __init r2p2(u32 num) { int i = 31; u32 tmp = num; if (num == 0) return 0; do { if (tmp & (1 << 31)) break; i--; tmp <<= 1; } while (i >= 0); tmp = 1 << i; /* If the original number isn't a power of 2, round it up */ if (tmp != num) tmp <<= 1; return tmp; } static irqreturn_t pcish5_err_irq(int irq, void *dev_id) { struct pt_regs *regs = get_irq_regs(); unsigned pci_int, pci_air, pci_cir, pci_aint; pci_int = SH5PCI_READ(INT); pci_cir = SH5PCI_READ(CIR); pci_air = SH5PCI_READ(AIR); if (pci_int) { printk("PCI INTERRUPT (at %08llx)!\n", regs->pc); printk("PCI INT -> 0x%x\n", pci_int & 0xffff); printk("PCI AIR -> 0x%x\n", pci_air); printk("PCI CIR -> 0x%x\n", pci_cir); SH5PCI_WRITE(INT, ~0); } pci_aint = SH5PCI_READ(AINT); if (pci_aint) { printk("PCI ARB INTERRUPT!\n"); printk("PCI AINT -> 0x%x\n", pci_aint); printk("PCI AIR -> 0x%x\n", pci_air); printk("PCI CIR -> 0x%x\n", pci_cir); SH5PCI_WRITE(AINT, ~0); } return IRQ_HANDLED; } static irqreturn_t pcish5_serr_irq(int irq, void *dev_id) { printk("SERR IRQ\n"); return IRQ_NONE; } static struct resource sh5_pci_resources[2]; static struct pci_channel sh5pci_controller = { .pci_ops = &sh5_pci_ops, .resources = sh5_pci_resources, .nr_resources = 
ARRAY_SIZE(sh5_pci_resources), .mem_offset = 0x00000000, .io_offset = 0x00000000, }; static int __init sh5pci_init(void) { unsigned long memStart = __pa(memory_start); unsigned long memSize = __pa(memory_end) - memStart; u32 lsr0; u32 uval; if (request_irq(IRQ_ERR, pcish5_err_irq, 0, "PCI Error",NULL) < 0) { printk(KERN_ERR "PCISH5: Cannot hook PCI_PERR interrupt\n"); return -EINVAL; } if (request_irq(IRQ_SERR, pcish5_serr_irq, 0, "PCI SERR interrupt", NULL) < 0) { printk(KERN_ERR "PCISH5: Cannot hook PCI_SERR interrupt\n"); return -EINVAL; } pcicr_virt = (unsigned long)ioremap_nocache(SH5PCI_ICR_BASE, 1024); if (!pcicr_virt) { panic("Unable to remap PCICR\n"); } PCI_IO_AREA = (unsigned long)ioremap_nocache(SH5PCI_IO_BASE, 0x10000); if (!PCI_IO_AREA) { panic("Unable to remap PCIIO\n"); } /* Clear snoop registers */ SH5PCI_WRITE(CSCR0, 0); SH5PCI_WRITE(CSCR1, 0); /* Switch off interrupts */ SH5PCI_WRITE(INTM, 0); SH5PCI_WRITE(AINTM, 0); SH5PCI_WRITE(PINTM, 0); /* Set bus active, take it out of reset */ uval = SH5PCI_READ(CR); /* Set command Register */ SH5PCI_WRITE(CR, uval | CR_LOCK_MASK | CR_CFINT| CR_FTO | CR_PFE | CR_PFCS | CR_BMAM); uval=SH5PCI_READ(CR); /* Allow it to be a master */ /* NB - WE DISABLE I/O ACCESS to stop overlap */ /* set WAIT bit to enable stepping, an attempt to improve stability */ SH5PCI_WRITE_SHORT(CSR_CMD, PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_WAIT); /* ** Set translation mapping memory in order to convert the address ** used for the main bus, to the PCI internal address. */ SH5PCI_WRITE(MBR,0x40000000); /* Always set the max size 512M */ SH5PCI_WRITE(MBMR, PCISH5_MEM_SIZCONV(512*1024*1024)); /* ** I/O addresses are mapped at internal PCI specific address ** as is described into the configuration bridge table. ** These are changed to 0, to allow cards that have legacy ** io such as vga to function correctly. 
We set the SH5 IOBAR to ** 256K, which is a bit big as we can only have 64K of address space */ SH5PCI_WRITE(IOBR,0x0); /* Set up a 256K window. Totally pointless waste of address space */ SH5PCI_WRITE(IOBMR,0); /* The SH5 has a HUGE 256K I/O region, which breaks the PCI spec. * Ideally, we would want to map the I/O region somewhere, but it * is so big this is not that easy! */ SH5PCI_WRITE(CSR_IBAR0,~0); /* Set memory size value */ memSize = memory_end - memory_start; /* Now we set up the mbars so the PCI bus can see the memory of * the machine */ if (memSize < (1024 * 1024)) { printk(KERN_ERR "PCISH5: Ridiculous memory size of 0x%lx?\n", memSize); return -EINVAL; } /* Set LSR 0 */ lsr0 = (memSize > (512 * 1024 * 1024)) ? 0x1ff00001 : ((r2p2(memSize) - 0x100000) | 0x1); SH5PCI_WRITE(LSR0, lsr0); /* Set MBAR 0 */ SH5PCI_WRITE(CSR_MBAR0, memory_start); SH5PCI_WRITE(LAR0, memory_start); SH5PCI_WRITE(CSR_MBAR1,0); SH5PCI_WRITE(LAR1,0); SH5PCI_WRITE(LSR1,0); /* Enable the PCI interrupts on the device */ SH5PCI_WRITE(INTM, ~0); SH5PCI_WRITE(AINTM, ~0); SH5PCI_WRITE(PINTM, ~0); sh5_pci_resources[0].start = PCI_IO_AREA; sh5_pci_resources[0].end = PCI_IO_AREA + 0x10000; sh5_pci_resources[1].start = memStart; sh5_pci_resources[1].end = memStart + memSize; return register_pci_controller(&sh5pci_controller); } arch_initcall(sh5pci_init);
gpl-2.0
Bi-Turbo/android_kernel_lge_msm8610
net/unix/sysctl_net_unix.c
9144
1330
/* * NET4: Sysctl interface to net af_unix subsystem. * * Authors: Mike Shaver. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/mm.h> #include <linux/slab.h> #include <linux/sysctl.h> #include <net/af_unix.h> static ctl_table unix_table[] = { { .procname = "max_dgram_qlen", .data = &init_net.unx.sysctl_max_dgram_qlen, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { } }; static struct ctl_path unix_path[] = { { .procname = "net", }, { .procname = "unix", }, { }, }; int __net_init unix_sysctl_register(struct net *net) { struct ctl_table *table; table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL); if (table == NULL) goto err_alloc; table[0].data = &net->unx.sysctl_max_dgram_qlen; net->unx.ctl = register_net_sysctl_table(net, unix_path, table); if (net->unx.ctl == NULL) goto err_reg; return 0; err_reg: kfree(table); err_alloc: return -ENOMEM; } void unix_sysctl_unregister(struct net *net) { struct ctl_table *table; table = net->unx.ctl->ctl_table_arg; unregister_sysctl_table(net->unx.ctl); kfree(table); }
gpl-2.0
1119553797/sprd-kernel-common
Documentation/spi/spidev_fdx.c
11960
2758
#include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <fcntl.h> #include <string.h> #include <sys/ioctl.h> #include <sys/types.h> #include <sys/stat.h> #include <linux/types.h> #include <linux/spi/spidev.h> static int verbose; static void do_read(int fd, int len) { unsigned char buf[32], *bp; int status; /* read at least 2 bytes, no more than 32 */ if (len < 2) len = 2; else if (len > sizeof(buf)) len = sizeof(buf); memset(buf, 0, sizeof buf); status = read(fd, buf, len); if (status < 0) { perror("read"); return; } if (status != len) { fprintf(stderr, "short read\n"); return; } printf("read(%2d, %2d): %02x %02x,", len, status, buf[0], buf[1]); status -= 2; bp = buf + 2; while (status-- > 0) printf(" %02x", *bp++); printf("\n"); } static void do_msg(int fd, int len) { struct spi_ioc_transfer xfer[2]; unsigned char buf[32], *bp; int status; memset(xfer, 0, sizeof xfer); memset(buf, 0, sizeof buf); if (len > sizeof buf) len = sizeof buf; buf[0] = 0xaa; xfer[0].tx_buf = (unsigned long)buf; xfer[0].len = 1; xfer[1].rx_buf = (unsigned long) buf; xfer[1].len = len; status = ioctl(fd, SPI_IOC_MESSAGE(2), xfer); if (status < 0) { perror("SPI_IOC_MESSAGE"); return; } printf("response(%2d, %2d): ", len, status); for (bp = buf; len; len--) printf(" %02x", *bp++); printf("\n"); } static void dumpstat(const char *name, int fd) { __u8 mode, lsb, bits; __u32 speed; if (ioctl(fd, SPI_IOC_RD_MODE, &mode) < 0) { perror("SPI rd_mode"); return; } if (ioctl(fd, SPI_IOC_RD_LSB_FIRST, &lsb) < 0) { perror("SPI rd_lsb_fist"); return; } if (ioctl(fd, SPI_IOC_RD_BITS_PER_WORD, &bits) < 0) { perror("SPI bits_per_word"); return; } if (ioctl(fd, SPI_IOC_RD_MAX_SPEED_HZ, &speed) < 0) { perror("SPI max_speed_hz"); return; } printf("%s: spi mode %d, %d bits %sper word, %d Hz max\n", name, mode, bits, lsb ? 
"(lsb first) " : "", speed); } int main(int argc, char **argv) { int c; int readcount = 0; int msglen = 0; int fd; const char *name; while ((c = getopt(argc, argv, "hm:r:v")) != EOF) { switch (c) { case 'm': msglen = atoi(optarg); if (msglen < 0) goto usage; continue; case 'r': readcount = atoi(optarg); if (readcount < 0) goto usage; continue; case 'v': verbose++; continue; case 'h': case '?': usage: fprintf(stderr, "usage: %s [-h] [-m N] [-r N] /dev/spidevB.D\n", argv[0]); return 1; } } if ((optind + 1) != argc) goto usage; name = argv[optind]; fd = open(name, O_RDWR); if (fd < 0) { perror("open"); return 1; } dumpstat(name, fd); if (msglen) do_msg(fd, msglen); if (readcount) do_read(fd, readcount); close(fd); return 0; }
gpl-2.0
CyanogenMod/android_kernel_htc_msm8960
arch/cris/arch-v10/mm/tlb.c
12984
4706
/* * linux/arch/cris/arch-v10/mm/tlb.c * * Low level TLB handling * * * Copyright (C) 2000-2007 Axis Communications AB * * Authors: Bjorn Wesen (bjornw@axis.com) * */ #include <asm/tlb.h> #include <asm/mmu_context.h> #include <arch/svinto.h> #define D(x) /* The TLB can host up to 64 different mm contexts at the same time. * The running context is R_MMU_CONTEXT, and each TLB entry contains a * page_id that has to match to give a hit. In page_id_map, we keep track * of which mm's we have assigned which page_id's, so that we know when * to invalidate TLB entries. * * The last page_id is never running - it is used as an invalid page_id * so we can make TLB entries that will never match. * * Notice that we need to make the flushes atomic, otherwise an interrupt * handler that uses vmalloced memory might cause a TLB load in the middle * of a flush causing. */ /* invalidate all TLB entries */ void flush_tlb_all(void) { int i; unsigned long flags; /* the vpn of i & 0xf is so we dont write similar TLB entries * in the same 4-way entry group. details... */ local_irq_save(flags); for(i = 0; i < NUM_TLB_ENTRIES; i++) { *R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) ); *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) | IO_FIELD(R_TLB_HI, vpn, i & 0xf ) ); *R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) | IO_STATE(R_TLB_LO, valid, no ) | IO_STATE(R_TLB_LO, kernel,no ) | IO_STATE(R_TLB_LO, we, no ) | IO_FIELD(R_TLB_LO, pfn, 0 ) ); } local_irq_restore(flags); D(printk("tlb: flushed all\n")); } /* invalidate the selected mm context only */ void flush_tlb_mm(struct mm_struct *mm) { int i; int page_id = mm->context.page_id; unsigned long flags; D(printk("tlb: flush mm context %d (%p)\n", page_id, mm)); if(page_id == NO_CONTEXT) return; /* mark the TLB entries that match the page_id as invalid. * here we could also check the _PAGE_GLOBAL bit and NOT flush * global pages. is it worth the extra I/O ? 
*/ local_irq_save(flags); for(i = 0; i < NUM_TLB_ENTRIES; i++) { *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i); if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) { *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) | IO_FIELD(R_TLB_HI, vpn, i & 0xf ) ); *R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) | IO_STATE(R_TLB_LO, valid, no ) | IO_STATE(R_TLB_LO, kernel,no ) | IO_STATE(R_TLB_LO, we, no ) | IO_FIELD(R_TLB_LO, pfn, 0 ) ); } } local_irq_restore(flags); } /* invalidate a single page */ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { struct mm_struct *mm = vma->vm_mm; int page_id = mm->context.page_id; int i; unsigned long flags; D(printk("tlb: flush page %p in context %d (%p)\n", addr, page_id, mm)); if(page_id == NO_CONTEXT) return; addr &= PAGE_MASK; /* perhaps not necessary */ /* invalidate those TLB entries that match both the mm context * and the virtual address requested */ local_irq_save(flags); for(i = 0; i < NUM_TLB_ENTRIES; i++) { unsigned long tlb_hi; *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i); tlb_hi = *R_TLB_HI; if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id && (tlb_hi & PAGE_MASK) == addr) { *R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) | addr; /* same addr as before works. */ *R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) | IO_STATE(R_TLB_LO, valid, no ) | IO_STATE(R_TLB_LO, kernel,no ) | IO_STATE(R_TLB_LO, we, no ) | IO_FIELD(R_TLB_LO, pfn, 0 ) ); } } local_irq_restore(flags); } /* * Initialize the context related info for a new mm_struct * instance. 
*/ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { mm->context.page_id = NO_CONTEXT; return 0; } /* called in schedule() just before actually doing the switch_to */ void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { if (prev != next) { /* make sure we have a context */ get_mmu_context(next); /* remember the pgd for the fault handlers * this is similar to the pgd register in some other CPU's. * we need our own copy of it because current and active_mm * might be invalid at points where we still need to derefer * the pgd. */ per_cpu(current_pgd, smp_processor_id()) = next->pgd; /* switch context in the MMU */ D(printk(KERN_DEBUG "switching mmu_context to %d (%p)\n", next->context, next)); *R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT, page_id, next->context.page_id); } }
gpl-2.0
sakuraba001/android_kernel_samsung_js01lte
drivers/battery/bq24260_charger.c
185
28841
/* * bq24260_charger.c * Samsung bq24260 Charger Driver * * Copyright (C) 2012 Samsung Electronics * * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define DEBUG #include <linux/battery/sec_charger.h> extern unsigned int system_rev; static int bq24260_i2c_write(struct i2c_client *client, int reg, u8 *buf) { int ret; ret = i2c_smbus_write_i2c_block_data(client, reg, 1, buf); if (ret < 0) dev_err(&client->dev, "%s: Error(%d)\n", __func__, ret); return ret; } static int bq24260_i2c_read(struct i2c_client *client, int reg, u8 *buf) { int ret; ret = i2c_smbus_read_i2c_block_data(client, reg, 1, buf); if (ret < 0) dev_err(&client->dev, "%s: Error(%d)\n", __func__, ret); return ret; } #if 0 static void bq24260_i2c_write_array(struct i2c_client *client, u8 *buf, int size) { int i; for (i = 0; i < size; i += 3) bq24260_i2c_write(client, (u8) (*(buf + i)), (buf + i) + 1); } #endif static void bq24260_set_command(struct i2c_client *client, int reg, int datum) { int val; u8 data = 0; val = bq24260_i2c_read(client, reg, &data); if (val >= 0) { dev_dbg(&client->dev, "%s : reg(0x%02x): 0x%02x(0x%02x)", __func__, reg, data, datum); if (data != datum) { data = datum; if (bq24260_i2c_write(client, reg, &data) < 0) dev_err(&client->dev, "%s : error!\n", __func__); val = bq24260_i2c_read(client, reg, &data); if (val >= 0) dev_dbg(&client->dev, " => 0x%02x\n", data); } } } static void bq24260_test_read(struct i2c_client *client) { u8 data = 0; u32 addr = 0; for (addr = 0; addr <= 0x06; addr++) { bq24260_i2c_read(client, addr, &data); dev_dbg(&client->dev, "bq24260 addr : 0x%02x data : 
0x%02x\n", addr, data); } } static void bq24260_read_regs(struct i2c_client *client, char *str) { u8 data = 0; u32 addr = 0; for (addr = 0; addr <= 0x06; addr++) { bq24260_i2c_read(client, addr, &data); sprintf(str+strlen(str), "0x%x, ", data); } } static int bq24260_get_charging_status(struct i2c_client *client) { int status = POWER_SUPPLY_STATUS_UNKNOWN; u8 data = 0; bq24260_i2c_read(client, BQ24260_STATUS, &data); dev_info(&client->dev, "%s : charger status(0x%02x)\n", __func__, data); data = (data & 0x30); switch (data) { case 0x00: status = POWER_SUPPLY_STATUS_DISCHARGING; break; case 0x10: status = POWER_SUPPLY_STATUS_CHARGING; break; case 0x20: status = POWER_SUPPLY_STATUS_FULL; break; case 0x30: status = POWER_SUPPLY_STATUS_NOT_CHARGING; break; } return (int)status; } static int bq24260_get_charging_health(struct i2c_client *client) { int health = POWER_SUPPLY_HEALTH_GOOD; u8 data = 0; bq24260_i2c_read(client, BQ24260_STATUS, &data); dev_info(&client->dev, "%s : charger status(0x%02x)\n", __func__, data); if ((data & 0x30) == 0x30) { /* check for fault */ data = (data & 0x07); switch (data) { case 0x01: health = POWER_SUPPLY_HEALTH_OVERVOLTAGE; break; case 0x02: health = POWER_SUPPLY_HEALTH_UNDERVOLTAGE; break; } } return (int)health; } static u8 bq24260_get_float_voltage_data( int float_voltage) { u8 data; if (float_voltage < 3500) float_voltage = 3500; data = (float_voltage - 3500) / 20; return data << 2; } static u8 bq24260_get_input_current_limit_data( int input_current) { u8 data = 0x00; if (system_rev >= 0x01) if (input_current <= 100) data = 0x00; else if (input_current <= 150) data = 0x01; else if (input_current <= 500) data = 0x02; else if (input_current <= 900) data = 0x03;/*v2: 1000mA*/ else if (input_current <= 1500) data = 0x04;/*v2: 1300mA*/ else if (input_current <= 2000) data = 0x05;/*1950mA, v2: 1800mA*/ else if (input_current <= 2500) data = 0x06;/*v2: 2200mA*/ else/*1950mA, v2: 1800mA*/ data = 0x07; else { if (input_current <= 100) data = 
0x00; else if (input_current <= 150) data = 0x01; else if (input_current <= 500) data = 0x02; else if (input_current <= 900) data = 0x03; else if (input_current <= 1000) data = 0x04; else if (input_current <= 2000) data = 0x06;/*1950mA*/ else data = 0x07; } return data << 4; } static u8 bq24260_get_termination_current_limit_data( int termination_current) { u8 data; /* default offset 50mA, max 300mA */ data = (termination_current - 50) / 50; return data; } static u8 bq24260_get_fast_charging_current_data( int fast_charging_current) { u8 data; /* default offset 500mA */ if (fast_charging_current < 500) fast_charging_current = 500; data = (fast_charging_current - 500) / 100; return data << 3; } static void bq24260_charger_function_conrol( struct i2c_client *client) { struct sec_charger_info *charger = i2c_get_clientdata(client); union power_supply_propval val; int full_check_type; u8 data; if (charger->charging_current < 0) { dev_dbg(&client->dev, "%s : OTG is activated. Ignore command!\n", __func__); return; } if (charger->cable_type == POWER_SUPPLY_TYPE_BATTERY) { data = 0x00; bq24260_i2c_read(client, BQ24260_CONTROL, &data); data |= 0x2; data &= 0x7f; /* Prevent register reset */ bq24260_set_command(client, BQ24260_CONTROL, data); } else { data = 0x00; bq24260_i2c_read(client, BQ24260_CONTROL, &data); /* Enable charging */ data &= 0x7d; /*default enabled*/ psy_do_property("battery", get, POWER_SUPPLY_PROP_CHARGE_NOW, val); if (val.intval == SEC_BATTERY_CHARGING_1ST) full_check_type = charger->pdata->full_check_type; else full_check_type = charger->pdata->full_check_type_2nd; /* Termination setting */ switch (full_check_type) { case SEC_BATTERY_FULLCHARGED_CHGGPIO: case SEC_BATTERY_FULLCHARGED_CHGINT: case SEC_BATTERY_FULLCHARGED_CHGPSY: /* Enable Current Termination */ data |= 0x04; break; default: data &= 0x7b; break; } /* Input current limit */ if (charger->pdata->cable_source_type & SEC_BATTERY_CABLE_SOURCE_EXTENDED) { dev_dbg(&client->dev, "%s : chg max 
(%dmA)\n", __func__, charger->charging_current_max); data &= 0x0F; data |= bq24260_get_input_current_limit_data( charger->charging_current_max); } else { dev_dbg(&client->dev, "%s : input current (%dmA)\n", __func__, charger->pdata->charging_current [charger->cable_type].input_current_limit); data &= 0x0F; data |= bq24260_get_input_current_limit_data( charger->pdata->charging_current [charger->cable_type].input_current_limit); } bq24260_set_command(client, BQ24260_CONTROL, data); data = 0x00; /* Float voltage */ dev_dbg(&client->dev, "%s : float voltage (%dmV)\n", __func__, charger->pdata->chg_float_voltage); data |= bq24260_get_float_voltage_data( charger->pdata->chg_float_voltage); bq24260_set_command(client, BQ24260_VOLTAGE, data); data = 0x00; /* Fast charge and Termination current */ dev_dbg(&client->dev, "%s : fast charging current (%dmA)\n", __func__, charger->charging_current); data |= bq24260_get_fast_charging_current_data( charger->charging_current); dev_dbg(&client->dev, "%s : termination current (%dmA)\n", __func__, charger->pdata->charging_current[ charger->cable_type].full_check_current_1st >= 300 ? 
300 : charger->pdata->charging_current[ charger->cable_type].full_check_current_1st); data |= bq24260_get_termination_current_limit_data( charger->pdata->charging_current[ charger->cable_type].full_check_current_1st); bq24260_set_command(client, BQ24260_CURRENT, data); /* Special Charger Voltage * Normal charge current */ bq24260_i2c_read(client, BQ24260_SPECIAL, &data); data &= 0xd8; data |= 0x4; bq24260_set_command(client, BQ24260_SPECIAL, data); } } static void bq24260_charger_otg_conrol( struct i2c_client *client) { struct sec_charger_info *charger = i2c_get_clientdata(client); u8 data; bq24260_i2c_read(client, BQ24260_SAFETY, &data); data &= ~(0x1 << 4); bq24260_set_command(client, BQ24260_SAFETY, data); data = 0x00; if (charger->cable_type != POWER_SUPPLY_TYPE_OTG) { dev_info(&client->dev, "%s : turn off OTG\n", __func__); /* turn off OTG */ bq24260_i2c_read(client, BQ24260_STATUS, &data); data &= 0xbf; bq24260_set_command(client, BQ24260_STATUS, data); } else { dev_info(&client->dev, "%s : turn on OTG\n", __func__); /* turn on OTG */ bq24260_i2c_read(client, BQ24260_STATUS, &data); data |= 0x40; bq24260_set_command(client, BQ24260_STATUS, data); } } static void bq24260_set_input_current( struct i2c_client *client, int input_current) { u8 data = 0x00; bq24260_i2c_read(client, BQ24260_CONTROL, &data); data &= 0x0F; data |= bq24260_get_input_current_limit_data(input_current); bq24260_set_command(client, BQ24260_CONTROL, data); } static void bq24260_set_charging_current( struct i2c_client *client, int charging_current) { u8 data = 0x00; bq24260_i2c_read(client, BQ24260_CURRENT, &data); data |= bq24260_get_fast_charging_current_data(charging_current); bq24260_set_command(client, BQ24260_CURRENT, data); } #if 0 static int bq24260_get_charge_type(struct i2c_client *client) { int ret; u8 data; bq24260_i2c_read(client, BQ24260_STATUS, &data); data = (data & 0x30)>>4; switch (data) { case 0x01: ret = POWER_SUPPLY_CHARGE_TYPE_FAST; break; default: ret = 
POWER_SUPPLY_CHARGE_TYPE_NONE; break; } return ret; } #endif bool bq24260_hal_chg_init(struct i2c_client *client) { bq24260_test_read(client); return true; } bool bq24260_hal_chg_suspend(struct i2c_client *client) { return true; } bool bq24260_hal_chg_resume(struct i2c_client *client) { return true; } bool bq24260_hal_chg_shutdown(struct i2c_client *client) { u8 data = 1; bq24260_i2c_write(client, BQ24260_CONTROL, &data); return true; } bool bq24260_hal_chg_get_property(struct i2c_client *client, enum power_supply_property psp, union power_supply_propval *val) { struct sec_charger_info *charger = i2c_get_clientdata(client); u8 data; switch (psp) { case POWER_SUPPLY_PROP_STATUS: val->intval = bq24260_get_charging_status(client); break; case POWER_SUPPLY_PROP_CHARGE_TYPE: /*val->intval = bq24260_get_charge_type(client);*/ if (charger->is_charging) val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST; else val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE; break; case POWER_SUPPLY_PROP_HEALTH: val->intval = bq24260_get_charging_health(client); break; /* calculated input current limit value */ case POWER_SUPPLY_PROP_CURRENT_NOW: case POWER_SUPPLY_PROP_CURRENT_AVG: /* charging current */ if (charger->charging_current) { /* Rsns 0.068 Ohm */ bq24260_i2c_read(client, BQ24260_CURRENT, &data); val->intval = (data >> 3) * 100 + 500; } else val->intval = 0; dev_dbg(&client->dev, "%s : set-current(%dmA), current now(%dmA)\n", __func__, charger->charging_current, val->intval); break; default: return false; } return true; } bool bq24260_hal_chg_set_property(struct i2c_client *client, enum power_supply_property psp, const union power_supply_propval *val) { struct sec_charger_info *charger = i2c_get_clientdata(client); switch (psp) { /* val->intval : type */ case POWER_SUPPLY_PROP_ONLINE: if (charger->pdata->chg_gpio_en) { if (gpio_request(charger->pdata->chg_gpio_en, "CHG_EN") < 0) { dev_err(&client->dev, "failed to request vbus_in gpio\n"); break; } if (charger->cable_type == 
POWER_SUPPLY_TYPE_BATTERY) gpio_set_value_cansleep( charger->pdata->chg_gpio_en, charger->pdata->chg_polarity_en ? 0 : 1); else gpio_set_value_cansleep( charger->pdata->chg_gpio_en, charger->pdata->chg_polarity_en ? 1 : 0); gpio_free(charger->pdata->chg_gpio_en); } if (charger->charging_current >= 0) bq24260_charger_function_conrol(client); bq24260_charger_otg_conrol(client); bq24260_test_read(client); break; case POWER_SUPPLY_PROP_CURRENT_MAX: /* input current limit set */ /* calculated input current limit value */ case POWER_SUPPLY_PROP_CURRENT_NOW: bq24260_set_input_current(client, val->intval); break; /* val->intval : charging current */ case POWER_SUPPLY_PROP_CURRENT_AVG: bq24260_set_charging_current(client, val->intval); break; default: return false; } return true; } ssize_t bq24260_hal_chg_show_attrs(struct device *dev, const ptrdiff_t offset, char *buf) { struct power_supply *psy = dev_get_drvdata(dev); struct sec_charger_info *chg = container_of(psy, struct sec_charger_info, psy_chg); int i = 0; char *str = NULL; switch (offset) { case CHG_REG: i += scnprintf(buf + i, PAGE_SIZE - i, "%x\n", chg->reg_addr); break; case CHG_DATA: i += scnprintf(buf + i, PAGE_SIZE - i, "%x\n", chg->reg_data); break; case CHG_REGS: str = kzalloc(sizeof(char)*1024, GFP_KERNEL); if (!str) return -ENOMEM; bq24260_read_regs(chg->client, str); i += scnprintf(buf + i, PAGE_SIZE - i, "%s\n", str); kfree(str); break; default: i = -EINVAL; break; } return i; } ssize_t bq24260_hal_chg_store_attrs(struct device *dev, const ptrdiff_t offset, const char *buf, size_t count) { struct power_supply *psy = dev_get_drvdata(dev); struct sec_charger_info *chg = container_of(psy, struct sec_charger_info, psy_chg); int ret = 0; int x = 0; u8 data = 0; switch (offset) { case CHG_REG: if (sscanf(buf, "%x\n", &x) == 1) { chg->reg_addr = x; bq24260_i2c_read(chg->client, chg->reg_addr, &data); chg->reg_data = data; dev_dbg(dev, "%s: (read) addr = 0x%x, data = 0x%x\n", __func__, chg->reg_addr, 
chg->reg_data); ret = count; } break; case CHG_DATA: if (sscanf(buf, "%x\n", &x) == 1) { data = (u8)x; dev_dbg(dev, "%s: (write) addr = 0x%x, data = 0x%x\n", __func__, chg->reg_addr, data); bq24260_i2c_write(chg->client, chg->reg_addr, &data); ret = count; } break; default: ret = -EINVAL; break; } return ret; } static struct device_attribute bq24260_charger_attrs[] = { BQ24260_CHARGER_ATTR(reg), BQ24260_CHARGER_ATTR(data), BQ24260_CHARGER_ATTR(regs), }; static enum power_supply_property bq24260_charger_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_CURRENT_MAX, POWER_SUPPLY_PROP_CURRENT_AVG, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, }; static int bq24260_chg_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct sec_charger_info *charger = container_of(psy, struct sec_charger_info, psy_chg); switch (psp) { case POWER_SUPPLY_PROP_CURRENT_MAX: /* input current limit set */ val->intval = charger->charging_current_max; break; case POWER_SUPPLY_PROP_ONLINE: case POWER_SUPPLY_PROP_STATUS: case POWER_SUPPLY_PROP_CHARGE_TYPE: case POWER_SUPPLY_PROP_HEALTH: case POWER_SUPPLY_PROP_CURRENT_AVG: /* charging current */ /* calculated input current limit value */ case POWER_SUPPLY_PROP_CURRENT_NOW: if (!bq24260_hal_chg_get_property(charger->client, psp, val)) return -EINVAL; break; default: return -EINVAL; } return 0; } static int bq24260_chg_set_property(struct power_supply *psy, enum power_supply_property psp, const union power_supply_propval *val) { struct sec_charger_info *charger = container_of(psy, struct sec_charger_info, psy_chg); union power_supply_propval input_value; switch (psp) { case POWER_SUPPLY_PROP_STATUS: charger->status = val->intval; break; /* val->intval : type */ case POWER_SUPPLY_PROP_ONLINE: charger->cable_type = val->intval; if (val->intval == POWER_SUPPLY_TYPE_BATTERY) 
charger->is_charging = false; else charger->is_charging = true; /* current setting */ if (!(charger->pdata->cable_source_type & SEC_BATTERY_CABLE_SOURCE_EXTENDED)) { charger->charging_current_max = charger->pdata->charging_current[ val->intval].input_current_limit; charger->charging_current = charger->pdata->charging_current[ val->intval].fast_charging_current; } if (!bq24260_hal_chg_set_property(charger->client, psp, val)) return -EINVAL; break; /* val->intval : input current limit set */ case POWER_SUPPLY_PROP_CURRENT_MAX: charger->charging_current_max = val->intval; /* to control charging current, * use input current limit and set charging current as much as possible * so we only control input current limit to control charge current */ case POWER_SUPPLY_PROP_CURRENT_NOW: if (!bq24260_hal_chg_set_property(charger->client, psp, val)) return -EINVAL; break; /* val->intval : charging current */ case POWER_SUPPLY_PROP_CURRENT_AVG: charger->charging_current = val->intval; if (!bq24260_hal_chg_set_property(charger->client, psp, val)) return -EINVAL; break; /* val->intval : SIOP level (%) * SIOP charging current setting */ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: /* change val as charging current by SIOP level * do NOT change initial charging current setting */ input_value.intval = charger->charging_current * val->intval / 100; /* charging current should be over than USB charging current */ if (charger->pdata->chg_functions_setting & SEC_CHARGER_MINIMUM_SIOP_CHARGING_CURRENT) { if (input_value.intval > 0 && input_value.intval < charger->pdata->charging_current[ POWER_SUPPLY_TYPE_USB].fast_charging_current) input_value.intval = charger->pdata->charging_current[ POWER_SUPPLY_TYPE_USB].fast_charging_current; } /* set charging current as new value */ if (!bq24260_hal_chg_set_property(charger->client, POWER_SUPPLY_PROP_CURRENT_AVG, &input_value)) return -EINVAL; break; default: return -EINVAL; } return 0; } static void bq24260_chg_isr_work(struct work_struct *work) { 
struct sec_charger_info *charger = container_of(work, struct sec_charger_info, isr_work.work); union power_supply_propval val; int full_check_type; dev_info(&charger->client->dev, "%s: Charger Interrupt\n", __func__); psy_do_property("battery", get, POWER_SUPPLY_PROP_CHARGE_NOW, val); if (val.intval == SEC_BATTERY_CHARGING_1ST) full_check_type = charger->pdata->full_check_type; else full_check_type = charger->pdata->full_check_type_2nd; if (full_check_type == SEC_BATTERY_FULLCHARGED_CHGINT) { if (!bq24260_hal_chg_get_property(charger->client, POWER_SUPPLY_PROP_STATUS, &val)) return; switch (val.intval) { case POWER_SUPPLY_STATUS_DISCHARGING: dev_err(&charger->client->dev, "%s: Interrupted but Discharging\n", __func__); break; case POWER_SUPPLY_STATUS_NOT_CHARGING: dev_err(&charger->client->dev, "%s: Interrupted but NOT Charging\n", __func__); break; case POWER_SUPPLY_STATUS_FULL: dev_info(&charger->client->dev, "%s: Interrupted by Full\n", __func__); psy_do_property("battery", set, POWER_SUPPLY_PROP_STATUS, val); break; case POWER_SUPPLY_STATUS_CHARGING: dev_err(&charger->client->dev, "%s: Interrupted but Charging\n", __func__); break; case POWER_SUPPLY_STATUS_UNKNOWN: default: dev_err(&charger->client->dev, "%s: Invalid Charger Status\n", __func__); break; } } if (charger->pdata->ovp_uvlo_check_type == SEC_BATTERY_OVP_UVLO_CHGINT) { if (!bq24260_hal_chg_get_property(charger->client, POWER_SUPPLY_PROP_HEALTH, &val)) return; switch (val.intval) { case POWER_SUPPLY_HEALTH_OVERHEAT: case POWER_SUPPLY_HEALTH_COLD: dev_err(&charger->client->dev, "%s: Interrupted but Hot/Cold\n", __func__); break; case POWER_SUPPLY_HEALTH_DEAD: dev_err(&charger->client->dev, "%s: Interrupted but Dead\n", __func__); break; case POWER_SUPPLY_HEALTH_OVERVOLTAGE: case POWER_SUPPLY_HEALTH_UNDERVOLTAGE: dev_info(&charger->client->dev, "%s: Interrupted by OVP/UVLO\n", __func__); psy_do_property("battery", set, POWER_SUPPLY_PROP_HEALTH, val); break; case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE: 
dev_err(&charger->client->dev, "%s: Interrupted but Unspec\n", __func__); break; case POWER_SUPPLY_HEALTH_GOOD: dev_err(&charger->client->dev, "%s: Interrupted but Good\n", __func__); break; case POWER_SUPPLY_HEALTH_UNKNOWN: default: dev_err(&charger->client->dev, "%s: Invalid Charger Health\n", __func__); break; } } if (charger->pdata->cable_check_type & SEC_BATTERY_CABLE_CHECK_CHGINT) { if (!bq24260_hal_chg_get_property(charger->client, POWER_SUPPLY_PROP_ONLINE, &val)) return; /* use SEC_BATTERY_CABLE_SOURCE_EXTERNAL for cable_source_type * charger would call battery driver to set ONLINE property * check battery driver loaded or not */ if (get_power_supply_by_name("battery")) { psy_do_property("battery", set, POWER_SUPPLY_PROP_ONLINE, val); } else charger->pdata->check_cable_result_callback(val.intval); } } static irqreturn_t bq24260_chg_irq_thread(int irq, void *irq_data) { struct sec_charger_info *charger = irq_data; schedule_delayed_work(&charger->isr_work, 0); return IRQ_HANDLED; } static int bq24260_chg_create_attrs(struct device *dev) { int i, rc; for (i = 0; i < ARRAY_SIZE(bq24260_charger_attrs); i++) { rc = device_create_file(dev, &bq24260_charger_attrs[i]); if (rc) goto create_attrs_failed; } goto create_attrs_succeed; create_attrs_failed: dev_err(dev, "%s: failed (%d)\n", __func__, rc); while (i--) device_remove_file(dev, &bq24260_charger_attrs[i]); create_attrs_succeed: return rc; } ssize_t bq24260_chg_show_attrs(struct device *dev, struct device_attribute *attr, char *buf) { const ptrdiff_t offset = attr - bq24260_charger_attrs; int i = 0; switch (offset) { case CHG_REG: case CHG_DATA: case CHG_REGS: i = bq24260_hal_chg_show_attrs(dev, offset, buf); break; default: i = -EINVAL; break; } return i; } ssize_t bq24260_chg_store_attrs(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { const ptrdiff_t offset = attr - bq24260_charger_attrs; int ret = 0; switch (offset) { case CHG_REG: case CHG_DATA: ret = 
bq24260_hal_chg_store_attrs(dev, offset, buf, count); break; default: ret = -EINVAL; break; } return ret; } #ifdef CONFIG_OF static int bq24260_charger_read_u32_index_dt(const struct device_node *np, const char *propname, u32 index, u32 *out_value) { struct property *prop = of_find_property(np, propname, NULL); u32 len = (index + 1) * sizeof(*out_value); if (!prop) return (-EINVAL); if (!prop->value) return (-ENODATA); if (len > prop->length) return (-EOVERFLOW); *out_value = be32_to_cpup(((__be32 *)prop->value) + index); return 0; } static int bq24260_charger_parse_dt(struct sec_charger_info *charger) { struct device_node *np = of_find_node_by_name(NULL, "charger"); sec_battery_platform_data_t *pdata = charger->pdata; int ret = 0; int i, len; const u32 *p; if (np == NULL) { pr_err("%s np NULL\n", __func__); } else { ret = of_property_read_u32(np, "battery,chg_float_voltage", &pdata->chg_float_voltage); ret = of_property_read_u32(np, "battery,ovp_uvlo_check_type", &pdata->ovp_uvlo_check_type); ret = of_property_read_u32(np, "battery,full_check_type", &pdata->full_check_type); p = of_get_property(np, "battery,input_current_limit", &len); len = len / sizeof(u32); pdata->charging_current = kzalloc(sizeof(sec_charging_current_t) * len, GFP_KERNEL); for(i = 0; i < len; i++) { ret = bq24260_charger_read_u32_index_dt(np, "battery,input_current_limit", i, &pdata->charging_current[i].input_current_limit); ret = bq24260_charger_read_u32_index_dt(np, "battery,fast_charging_current", i, &pdata->charging_current[i].fast_charging_current); ret = bq24260_charger_read_u32_index_dt(np, "battery,full_check_current_1st", i, &pdata->charging_current[i].full_check_current_1st); ret = bq24260_charger_read_u32_index_dt(np, "battery,full_check_current_2nd", i, &pdata->charging_current[i].full_check_current_2nd); } } return ret; } #endif static int __devinit bq24260_charger_probe( struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = 
to_i2c_adapter(client->dev.parent); struct sec_charger_info *charger; int ret = 0; dev_info(&client->dev, "%s: BQ24260 Charger Driver Loading\n", __func__); if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) return -EIO; charger = kzalloc(sizeof(*charger), GFP_KERNEL); if (!charger) return -ENOMEM; charger->client = client; if (client->dev.of_node) { void * pdata = kzalloc(sizeof(sec_battery_platform_data_t), GFP_KERNEL); if (!pdata) goto err_free1; charger->pdata = pdata; if (bq24260_charger_parse_dt(charger)) dev_err(&client->dev, "%s : Failed to get charger dt\n", __func__); } else charger->pdata = client->dev.platform_data; i2c_set_clientdata(client, charger); charger->psy_chg.name = "bq24260"; charger->psy_chg.type = POWER_SUPPLY_TYPE_UNKNOWN; charger->psy_chg.get_property = bq24260_chg_get_property; charger->psy_chg.set_property = bq24260_chg_set_property; charger->psy_chg.properties = bq24260_charger_props; charger->psy_chg.num_properties = ARRAY_SIZE(bq24260_charger_props); if (!bq24260_hal_chg_init(charger->client)) { dev_err(&client->dev, "%s: Failed to Initialize Charger\n", __func__); goto err_free; } ret = power_supply_register(&client->dev, &charger->psy_chg); if (ret) { dev_err(&client->dev, "%s: Failed to Register psy_chg\n", __func__); goto err_free; } if (charger->pdata->chg_irq) { INIT_DELAYED_WORK_DEFERRABLE( &charger->isr_work, bq24260_chg_isr_work); ret = request_threaded_irq(charger->pdata->chg_irq, NULL, bq24260_chg_irq_thread, charger->pdata->chg_irq_attr, "charger-irq", charger); if (ret) { dev_err(&client->dev, "%s: Failed to Reqeust IRQ\n", __func__); goto err_supply_unreg; } ret = enable_irq_wake(charger->pdata->chg_irq); if (ret < 0) dev_err(&client->dev, "%s: Failed to Enable Wakeup Source(%d)\n", __func__, ret); } ret = bq24260_chg_create_attrs(charger->psy_chg.dev); if (ret) { dev_err(&client->dev, "%s : Failed to create_attrs\n", __func__); goto err_req_irq; } dev_dbg(&client->dev, "%s: BQ24260 Charger Driver Loaded\n", 
__func__); return 0; err_req_irq: if (charger->pdata->chg_irq) free_irq(charger->pdata->chg_irq, charger); err_supply_unreg: power_supply_unregister(&charger->psy_chg); err_free: kfree(charger->pdata); err_free1: kfree(charger); return ret; } static int __devexit bq24260_charger_remove( struct i2c_client *client) { return 0; } static int bq24260_charger_suspend(struct i2c_client *client, pm_message_t state) { if (!bq24260_hal_chg_suspend(client)) dev_err(&client->dev, "%s: Failed to Suspend Charger\n", __func__); return 0; } static int bq24260_charger_resume(struct i2c_client *client) { if (!bq24260_hal_chg_resume(client)) dev_err(&client->dev, "%s: Failed to Resume Charger\n", __func__); return 0; } static void bq24260_charger_shutdown(struct i2c_client *client) { #if defined(CONFIG_CHARGER_BQ24260) bq24260_hal_chg_shutdown(client); #endif } static const struct i2c_device_id bq24260_charger_id[] = { {"bq24260", 0}, {} }; MODULE_DEVICE_TABLE(i2c, bq24260_charger_id); static struct of_device_id bq24260_i2c_match_table[] = { { .compatible = "bq24260,i2c", }, { }, }; MODULE_DEVICE_TABLE(i2c, bq24260_i2c_match_table); static struct i2c_driver bq24260_charger_driver = { .driver = { .name = "bq24260", .owner = THIS_MODULE, .of_match_table = bq24260_i2c_match_table, }, .probe = bq24260_charger_probe, .remove = __devexit_p(bq24260_charger_remove), .suspend = bq24260_charger_suspend, .resume = bq24260_charger_resume, .shutdown = bq24260_charger_shutdown, .id_table = bq24260_charger_id, }; static int __init bq24260_charger_init(void) { return i2c_add_driver(&bq24260_charger_driver); } static void __exit bq24260_charger_exit(void) { i2c_del_driver(&bq24260_charger_driver); } module_init(bq24260_charger_init); module_exit(bq24260_charger_exit); MODULE_DESCRIPTION("Samsung bq24260 Charger Driver"); MODULE_AUTHOR("Samsung Electronics"); MODULE_LICENSE("GPL");
gpl-2.0
oskarpearson/linux
drivers/iommu/irq_remapping.c
185
4228
/*
 * Generic x86 interrupt-remapping front end.
 *
 * Selects a vendor-specific back end (Intel VT-d or AMD IOMMU) at boot and
 * forwards all operations through the remap_ops vtable.  Also parses the
 * "nointremap"/"intremap=" kernel command-line options.
 */
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/irqdomain.h>

#include <asm/hw_irq.h>
#include <asm/irq_remapping.h>
#include <asm/processor.h>
#include <asm/x86_init.h>
#include <asm/apic.h>
#include <asm/hpet.h>

#include "irq_remapping.h"

/* Global state consulted by arch code and the vendor back ends. */
int irq_remapping_enabled;
int irq_remap_broken;
int disable_sourceid_checking;
int no_x2apic_optout;

/* Posted interrupts disabled by default; cleared by back ends that support it. */
int disable_irq_post = 1;

static int disable_irq_remap;
/* Vendor back end chosen in irq_remapping_prepare(); NULL until then. */
static struct irq_remap_ops *remap_ops;

/* Replacement for x86_io_apic_ops.disable while remapping is active. */
static void irq_remapping_disable_io_apic(void)
{
	/*
	 * With interrupt-remapping, for now we will use virtual wire A
	 * mode, as virtual wire B is little complex (need to configure
	 * both IOAPIC RTE as well as interrupt-remapping table entry).
	 * As this gets called during crash dump, keep this simple for
	 * now.
	 */
	if (cpu_has_apic || apic_from_smp_config())
		disconnect_bsp_APIC(0);
}

/* Hook our IO-APIC disable path into the x86 init vtable. */
static void __init irq_remapping_modify_x86_ops(void)
{
	x86_io_apic_ops.disable = irq_remapping_disable_io_apic;
}

/* "nointremap" boot parameter: disable interrupt remapping entirely. */
static __init int setup_nointremap(char *str)
{
	disable_irq_remap = 1;
	return 0;
}
early_param("nointremap", setup_nointremap);

/*
 * "intremap=" boot parameter: comma-separated list of
 * on | off | nosid | no_x2apic_optout.
 */
static __init int setup_irqremap(char *str)
{
	if (!str)
		return -EINVAL;

	while (*str) {
		if (!strncmp(str, "on", 2))
			disable_irq_remap = 0;
		else if (!strncmp(str, "off", 3))
			disable_irq_remap = 1;
		else if (!strncmp(str, "nosid", 5))
			disable_sourceid_checking = 1;
		else if (!strncmp(str, "no_x2apic_optout", 16))
			no_x2apic_optout = 1;

		/* Skip to the next comma-separated token. */
		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}

	return 0;
}
early_param("intremap", setup_irqremap);

/* Called by quirk code for chipsets with broken interrupt remapping. */
void set_irq_remapping_broken(void)
{
	irq_remap_broken = 1;
}

/*
 * Query an optional capability bit of the selected back end.
 * Returns false when no back end is selected or posted interrupts
 * are disabled.
 */
bool irq_remapping_cap(enum irq_remap_cap cap)
{
	if (!remap_ops || disable_irq_post)
		return false;

	return (remap_ops->capability & (1 << cap));
}
EXPORT_SYMBOL_GPL(irq_remapping_cap);

/*
 * Probe the vendor back ends in priority order (Intel first, then AMD)
 * and latch the first one whose prepare() succeeds.
 * Returns 0 on success, -ENOSYS when remapping is disabled or no back
 * end is usable.
 */
int __init irq_remapping_prepare(void)
{
	if (disable_irq_remap)
		return -ENOSYS;

	if (intel_irq_remap_ops.prepare() == 0)
		remap_ops = &intel_irq_remap_ops;
	else if (IS_ENABLED(CONFIG_AMD_IOMMU) &&
		 amd_iommu_irq_ops.prepare() == 0)
		remap_ops = &amd_iommu_irq_ops;
	else
		return -ENOSYS;

	return 0;
}

/*
 * Enable the selected back end.  On success the back end is expected to
 * have set irq_remapping_enabled, which triggers the x86_ops override.
 */
int __init irq_remapping_enable(void)
{
	int ret;

	if (!remap_ops->enable)
		return -ENODEV;

	ret = remap_ops->enable();

	if (irq_remapping_enabled)
		irq_remapping_modify_x86_ops();

	return ret;
}

void irq_remapping_disable(void)
{
	if (irq_remapping_enabled && remap_ops->disable)
		remap_ops->disable();
}

/* Re-enable after suspend/resume; @mode is passed through to the back end. */
int irq_remapping_reenable(int mode)
{
	if (irq_remapping_enabled && remap_ops->reenable)
		return remap_ops->reenable(mode);

	return 0;
}

/* Arm IOMMU fault reporting; a no-op when remapping is not enabled. */
int __init irq_remap_enable_fault_handling(void)
{
	if (!irq_remapping_enabled)
		return 0;

	if (!remap_ops->enable_faulting)
		return -ENODEV;

	return remap_ops->enable_faulting();
}

/* Panic with @msg if the caller's configuration cannot coexist with remapping. */
void panic_if_irq_remap(const char *msg)
{
	if (irq_remapping_enabled)
		panic(msg);
}

/* Ack helper shared by remapped edge-triggered APIC interrupts. */
void ir_ack_apic_edge(struct irq_data *data)
{
	ack_APIC_irq();
}

/**
 * irq_remapping_get_ir_irq_domain - Get the irqdomain associated with the IOMMU
 *				     device serving request @info
 * @info: interrupt allocation information, used to identify the IOMMU device
 *
 * It's used to get parent irqdomain for HPET and IOAPIC irqdomains.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *
irq_remapping_get_ir_irq_domain(struct irq_alloc_info *info)
{
	if (!remap_ops || !remap_ops->get_ir_irq_domain)
		return NULL;

	return remap_ops->get_ir_irq_domain(info);
}

/**
 * irq_remapping_get_irq_domain - Get the irqdomain serving the request @info
 * @info: interrupt allocation information, used to identify the IOMMU device
 *
 * There will be one PCI MSI/MSIX irqdomain associated with each interrupt
 * remapping device, so this interface is used to retrieve the PCI MSI/MSIX
 * irqdomain serving request @info.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *
irq_remapping_get_irq_domain(struct irq_alloc_info *info)
{
	if (!remap_ops || !remap_ops->get_irq_domain)
		return NULL;

	return remap_ops->get_irq_domain(info);
}
gpl-2.0
droidzone/Supernova-Kernel
drivers/drivers/media/IR/keymaps/rc-videomate-tv-pvr.c
953
1928
/* videomate-tv-pvr.h - Keytable for videomate_tv_pvr Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>

/*
 * Scancode -> Linux input keycode table for the Compro VideoMate TV PVR
 * remote.  Each entry maps one raw IR scancode to a KEY_* event code.
 */
static struct ir_scancode videomate_tv_pvr[] = {
	{ 0x14, KEY_MUTE },
	{ 0x24, KEY_ZOOM },
	{ 0x01, KEY_DVD },
	{ 0x23, KEY_RADIO },
	{ 0x00, KEY_TV },

	{ 0x0a, KEY_REWIND },
	{ 0x08, KEY_PLAYPAUSE },
	{ 0x0f, KEY_FORWARD },

	{ 0x02, KEY_PREVIOUS },
	{ 0x07, KEY_STOP },
	{ 0x06, KEY_NEXT },

	{ 0x0c, KEY_UP },
	{ 0x0e, KEY_DOWN },
	{ 0x0b, KEY_LEFT },
	{ 0x0d, KEY_RIGHT },
	{ 0x11, KEY_OK },

	{ 0x03, KEY_MENU },
	{ 0x09, KEY_SETUP },
	{ 0x05, KEY_VIDEO },
	{ 0x22, KEY_CHANNEL },

	{ 0x12, KEY_VOLUMEUP },
	{ 0x15, KEY_VOLUMEDOWN },
	{ 0x10, KEY_CHANNELUP },
	{ 0x13, KEY_CHANNELDOWN },
	{ 0x04, KEY_RECORD },

	{ 0x16, KEY_1 },
	{ 0x17, KEY_2 },
	{ 0x18, KEY_3 },
	{ 0x19, KEY_4 },
	{ 0x1a, KEY_5 },
	{ 0x1b, KEY_6 },
	{ 0x1c, KEY_7 },
	{ 0x1d, KEY_8 },
	{ 0x1e, KEY_9 },
	{ 0x1f, KEY_0 },

	{ 0x20, KEY_LANGUAGE },
	{ 0x21, KEY_SLEEP },
};

/* rc-core registration wrapper around the table above. */
static struct rc_keymap videomate_tv_pvr_map = {
	.map = {
		.scan    = videomate_tv_pvr,
		.size    = ARRAY_SIZE(videomate_tv_pvr),
		.ir_type = IR_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_VIDEOMATE_TV_PVR,
	}
};

/* Register the keymap with the IR core at module load. */
static int __init init_rc_map_videomate_tv_pvr(void)
{
	return ir_register_map(&videomate_tv_pvr_map);
}

/* Unregister the keymap at module unload. */
static void __exit exit_rc_map_videomate_tv_pvr(void)
{
	ir_unregister_map(&videomate_tv_pvr_map);
}

module_init(init_rc_map_videomate_tv_pvr)
module_exit(exit_rc_map_videomate_tv_pvr)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
xingrz/android_kernel_pifoundation_bcm2710
drivers/misc/mic/scif/scif_rb.c
953
7286
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 * Single-producer/single-consumer circular ring buffer shared with a
 * remote peer over memory-mapped I/O.  Local read/write offsets are
 * cached in struct scif_rb and only published to the shared read_ptr/
 * write_ptr with explicit memory barriers.
 */
#include <linux/circ_buf.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/errno.h>

#include "scif_rb.h"

/* Thin wrappers over the kernel's circular-buffer helpers. */
#define scif_rb_ring_cnt(head, tail, size) CIRC_CNT(head, tail, size)
#define scif_rb_ring_space(head, tail, size) CIRC_SPACE(head, tail, size)

/**
 * scif_rb_init - Initializes the ring buffer
 * @rb: ring buffer
 * @read_ptr: A pointer to the read offset
 * @write_ptr: A pointer to the write offset
 * @rb_base: A pointer to the base of the ring buffer
 * @size: The size of the ring buffer in powers of two
 *
 * Seeds the cached offsets from the shared pointers so a reconnect
 * resumes where the peer left off.
 */
void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
		  void *rb_base, u8 size)
{
	rb->rb_base = rb_base;
	rb->size = (1 << size);
	rb->read_ptr = read_ptr;
	rb->write_ptr = write_ptr;
	rb->current_read_offset = *read_ptr;
	rb->current_write_offset = *write_ptr;
}

/* Copies a message to the ring buffer -- handles the wrap around case */
static void memcpy_torb(struct scif_rb *rb, void *header,
			void *msg, u32 size)
{
	u32 size1, size2;

	if (header + size >= rb->rb_base + rb->size) {
		/* Need to call two copies if it wraps around */
		size1 = (u32)(rb->rb_base + rb->size - header);
		size2 = size - size1;
		memcpy_toio((void __iomem __force *)header, msg, size1);
		memcpy_toio((void __iomem __force *)rb->rb_base,
			    msg + size1, size2);
	} else {
		memcpy_toio((void __iomem __force *)header, msg, size);
	}
}

/* Copies a message from the ring buffer -- handles the wrap around case */
static void memcpy_fromrb(struct scif_rb *rb, void *header,
			  void *msg, u32 size)
{
	u32 size1, size2;

	if (header + size >= rb->rb_base + rb->size) {
		/* Need to call two copies if it wraps around */
		size1 = (u32)(rb->rb_base + rb->size - header);
		size2 = size - size1;
		memcpy_fromio(msg, (void __iomem __force *)header, size1);
		memcpy_fromio(msg + size1,
			      (void __iomem __force *)rb->rb_base, size2);
	} else {
		memcpy_fromio(msg, (void __iomem __force *)header, size);
	}
}

/**
 * scif_rb_space - Query space available for writing to the RB
 * @rb: ring buffer
 *
 * Refreshes the cached read offset from the peer before computing.
 *
 * Return: size available for writing to RB in bytes.
 */
u32 scif_rb_space(struct scif_rb *rb)
{
	rb->current_read_offset = *rb->read_ptr;
	/*
	 * Update from the HW read pointer only once the peer has exposed the
	 * new empty slot. This barrier is paired with the memory barrier
	 * scif_rb_update_read_ptr()
	 */
	mb();
	return scif_rb_ring_space(rb->current_write_offset,
				  rb->current_read_offset, rb->size);
}

/**
 * scif_rb_write - Write a message to the RB
 * @rb: ring buffer
 * @msg: buffer to send the message.  Must be at least size bytes long
 * @size: the size (in bytes) to be copied to the RB
 *
 * This API does not block if there isn't enough space in the RB.
 * Only the cached write offset advances here; the peer cannot see the
 * message until scif_rb_commit() publishes it.
 *
 * Returns: 0 on success or -ENOMEM on failure
 */
int scif_rb_write(struct scif_rb *rb, void *msg, u32 size)
{
	void *header;

	if (scif_rb_space(rb) < size)
		return -ENOMEM;
	header = rb->rb_base + rb->current_write_offset;
	memcpy_torb(rb, header, msg, size);
	/*
	 * Wait until scif_rb_commit(). Update the local ring
	 * buffer data, not the shared data until commit.
	 */
	rb->current_write_offset =
		(rb->current_write_offset + size) & (rb->size - 1);
	return 0;
}

/**
 * scif_rb_commit - To submit the message to let the peer fetch it
 * @rb: ring buffer
 */
void scif_rb_commit(struct scif_rb *rb)
{
	/*
	 * We must ensure ordering between the all the data committed
	 * previously before we expose the new message to the peer by
	 * updating the write_ptr. This write barrier is paired with
	 * the read barrier in scif_rb_count(..)
	 */
	wmb();
	ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
#ifdef CONFIG_INTEL_MIC_CARD
	/*
	 * X100 Si bug: For the case where a Core is performing an EXT_WR
	 * followed by a Doorbell Write, the Core must perform two EXT_WR to the
	 * same address with the same data before it does the Doorbell Write.
	 * This way, if ordering is violated for the Interrupt Message, it will
	 * fall just behind the first Posted associated with the first EXT_WR.
	 */
	ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
#endif
}

/**
 * scif_rb_get - To get next message from the ring buffer
 * @rb: ring buffer
 * @size: Number of bytes to be read
 *
 * Return: NULL if no bytes to be read from the ring buffer, otherwise the
 *	pointer to the next byte
 */
static void *scif_rb_get(struct scif_rb *rb, u32 size)
{
	void *header = NULL;

	if (scif_rb_count(rb, size) >= size)
		header = rb->rb_base + rb->current_read_offset;
	return header;
}

/*
 * scif_rb_get_next - Read from ring buffer.
 * @rb: ring buffer
 * @msg: buffer to hold the message.  Must be at least size bytes long
 * @size: Number of bytes to be read
 *
 * The cached read offset advances, but the peer only regains the slot
 * after scif_rb_update_read_ptr() publishes it.
 *
 * Return: number of bytes read if available bytes are >= size, otherwise
 * returns zero.
 */
u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size)
{
	void *header = NULL;
	int read_size = 0;

	header = scif_rb_get(rb, size);
	if (header) {
		u32 next_cmd_offset =
			(rb->current_read_offset + size) & (rb->size - 1);

		read_size = size;
		rb->current_read_offset = next_cmd_offset;
		memcpy_fromrb(rb, header, msg, size);
	}
	return read_size;
}

/**
 * scif_rb_update_read_ptr
 * @rb: ring buffer
 *
 * Publish the cached read offset so the peer can reuse the consumed slots.
 */
void scif_rb_update_read_ptr(struct scif_rb *rb)
{
	u32 new_offset;

	new_offset = rb->current_read_offset;
	/*
	 * We must ensure ordering between the all the data committed or read
	 * previously before we expose the empty slot to the peer by updating
	 * the read_ptr. This barrier is paired with the memory barrier in
	 * scif_rb_space(..)
	 */
	mb();
	ACCESS_ONCE(*rb->read_ptr) = new_offset;
#ifdef CONFIG_INTEL_MIC_CARD
	/*
	 * X100 Si Bug: For the case where a Core is performing an EXT_WR
	 * followed by a Doorbell Write, the Core must perform two EXT_WR to the
	 * same address with the same data before it does the Doorbell Write.
	 * This way, if ordering is violated for the Interrupt Message, it will
	 * fall just behind the first Posted associated with the first EXT_WR.
	 */
	ACCESS_ONCE(*rb->read_ptr) = new_offset;
#endif
}

/**
 * scif_rb_count
 * @rb: ring buffer
 * @size: Number of bytes expected to be read
 *
 * Only re-reads the shared write pointer when the cached view looks too
 * small, avoiding an MMIO read on the fast path.
 *
 * Return: number of bytes that can be read from the RB
 */
u32 scif_rb_count(struct scif_rb *rb, u32 size)
{
	if (scif_rb_ring_cnt(rb->current_write_offset,
			     rb->current_read_offset,
			     rb->size) < size) {
		rb->current_write_offset = *rb->write_ptr;
		/*
		 * Update from the HW write pointer if empty only once the peer
		 * has exposed the new message. This read barrier is paired
		 * with the write barrier in scif_rb_commit(..)
		 */
		smp_rmb();
	}
	return scif_rb_ring_cnt(rb->current_write_offset,
				rb->current_read_offset,
				rb->size);
}
gpl-2.0
shianyow/kernel-android-skyrocket
drivers/media/IR/keymaps/rc-pinnacle-pctv-hd.c
953
1751
/* pinnacle-pctv-hd.h - Keytable for pinnacle_pctv_hd Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>

/* Pinnacle PCTV HD 800i mini remote */
/* Scancode -> Linux input keycode table; one entry per remote button. */
static struct ir_scancode pinnacle_pctv_hd[] = {
	/* Digit keys */
	{ 0x0f, KEY_1 },
	{ 0x15, KEY_2 },
	{ 0x10, KEY_3 },
	{ 0x18, KEY_4 },
	{ 0x1b, KEY_5 },
	{ 0x1e, KEY_6 },
	{ 0x11, KEY_7 },
	{ 0x21, KEY_8 },
	{ 0x12, KEY_9 },
	{ 0x27, KEY_0 },

	{ 0x24, KEY_ZOOM },
	{ 0x2a, KEY_SUBTITLE },

	{ 0x00, KEY_MUTE },
	{ 0x01, KEY_ENTER },	/* Pinnacle Logo */
	{ 0x39, KEY_POWER },

	/* Volume / channel rockers */
	{ 0x03, KEY_VOLUMEUP },
	{ 0x09, KEY_VOLUMEDOWN },
	{ 0x06, KEY_CHANNELUP },
	{ 0x0c, KEY_CHANNELDOWN },

	/* Transport controls */
	{ 0x2d, KEY_REWIND },
	{ 0x30, KEY_PLAYPAUSE },
	{ 0x33, KEY_FASTFORWARD },
	{ 0x3c, KEY_STOP },
	{ 0x36, KEY_RECORD },
	{ 0x3f, KEY_EPG },	/* Labeled "?" */
};

/* rc-core registration wrapper around the table above. */
static struct rc_keymap pinnacle_pctv_hd_map = {
	.map = {
		.scan    = pinnacle_pctv_hd,
		.size    = ARRAY_SIZE(pinnacle_pctv_hd),
		.ir_type = IR_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_PINNACLE_PCTV_HD,
	}
};

/* Register the keymap with the IR core at module load. */
static int __init init_rc_map_pinnacle_pctv_hd(void)
{
	return ir_register_map(&pinnacle_pctv_hd_map);
}

/* Unregister the keymap at module unload. */
static void __exit exit_rc_map_pinnacle_pctv_hd(void)
{
	ir_unregister_map(&pinnacle_pctv_hd_map);
}

module_init(init_rc_map_pinnacle_pctv_hd)
module_exit(exit_rc_map_pinnacle_pctv_hd)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
sivasankariit/linux-rl
lib/digsig.c
2233
5611
/*
 * Copyright (C) 2011 Nokia Corporation
 * Copyright (C) 2011 Intel Corporation
 *
 * Author:
 * Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 *                 <dmitry.kasatkin@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2 of the License.
 *
 * File: sign.c
 *	implements signature (RSA) verification
 *	pkcs decoding is based on LibTomCrypt code
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/key.h>
#include <linux/crypto.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <keys/user-type.h>
#include <linux/mpi.h>
#include <linux/digsig.h>

/* SHA-1 transform allocated once at module init, shared by all callers. */
static struct crypto_shash *shash;

/*
 * Decode an EMSA-PKCS1-v1_5 encoded message:
 *   EM = 0x00 || 0x01 || PS (0xFF padding) || 0x00 || M
 * On success returns a pointer to M inside @msg and stores its length in
 * *@outlen; returns NULL if the encoding is malformed.
 */
static const char *pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
					   unsigned long  msglen,
					   unsigned long  modulus_bitlen,
					   unsigned long *outlen)
{
	unsigned long modulus_len, ps_len, i;

	modulus_len = (modulus_bitlen >> 3) + (modulus_bitlen & 7 ? 1 : 0);

	/* test message size */
	if ((msglen > modulus_len) || (modulus_len < 11))
		return NULL;

	/* separate encoded message */
	if (msg[0] != 0x00 || msg[1] != 0x01)
		return NULL;

	/* Scan past the 0xFF padding string PS. */
	for (i = 2; i < modulus_len - 1; i++)
		if (msg[i] != 0xFF)
			break;

	/* separator check */
	if (msg[i] != 0)
		/* There was no octet with hexadecimal value 0x00
		   to separate ps from m. */
		return NULL;

	ps_len = i - 2;

	*outlen = (msglen - (2 + ps_len + 1));

	return msg + 2 + ps_len + 1;
}

/*
 * RSA Signature verification with public key
 *
 * @key:    user key whose payload is a struct pubkey_hdr followed by the
 *          two RSA MPIs (modulus, exponent)
 * @sig:    raw signature MPI data
 * @siglen: length of @sig in bytes
 * @h:      expected hash value
 * @hlen:   length of @h in bytes
 *
 * Computes sig^e mod n, strips the PKCS#1 v1.5 padding and compares the
 * recovered digest with @h.  Returns 0 on match, negative errno otherwise.
 * Takes key->sem for reading around payload access.
 */
static int digsig_verify_rsa(struct key *key,
			     const char *sig, int siglen,
			     const char *h, int hlen)
{
	int err = -EINVAL;
	unsigned long len;
	unsigned long mlen, mblen;
	unsigned nret, l;
	int head, i;
	unsigned char *out1 = NULL;
	const char *m;
	MPI in = NULL, res = NULL, pkey[2];
	uint8_t *p, *datap, *endp;
	struct user_key_payload *ukp;
	struct pubkey_hdr *pkh;

	down_read(&key->sem);
	ukp = key->payload.data;

	if (ukp->datalen < sizeof(*pkh))
		goto err1;

	pkh = (struct pubkey_hdr *)ukp->data;

	/* Only version-1 RSA keys with exactly two MPIs are supported. */
	if (pkh->version != 1)
		goto err1;

	if (pkh->algo != PUBKEY_ALGO_RSA)
		goto err1;

	if (pkh->nmpi != 2)
		goto err1;

	datap = pkh->mpi;
	endp = ukp->data + ukp->datalen;

	err = -ENOMEM;

	/* Parse modulus (pkey[0]) and public exponent (pkey[1]). */
	for (i = 0; i < pkh->nmpi; i++) {
		unsigned int remaining = endp - datap;
		pkey[i] = mpi_read_from_buffer(datap, &remaining);
		if (!pkey[i])
			goto err;
		/* remaining is updated to the number of bytes consumed. */
		datap += remaining;
	}

	mblen = mpi_get_nbits(pkey[0]);
	mlen = DIV_ROUND_UP(mblen, 8);

	if (mlen == 0)
		goto err;

	out1 = kzalloc(mlen, GFP_KERNEL);
	if (!out1)
		goto err;

	nret = siglen;
	in = mpi_read_from_buffer(sig, &nret);
	if (!in)
		goto err;

	res = mpi_alloc(mpi_get_nlimbs(in) * 2);
	if (!res)
		goto err;

	/* res = sig^e mod n */
	err = mpi_powm(res, in, pkey[1], pkey[0]);
	if (err)
		goto err;

	if (mpi_get_nlimbs(res) * BYTES_PER_MPI_LIMB > mlen) {
		err = -EINVAL;
		goto err;
	}

	p = mpi_get_buffer(res, &l, NULL);
	if (!p) {
		err = -EINVAL;
		goto err;
	}

	/* Left-pad the result with zeros to the full modulus length. */
	len = mlen;
	head = len - l;
	memset(out1, 0, head);
	memcpy(out1 + head, p, l);

	kfree(p);

	m = pkcs_1_v1_5_decode_emsa(out1, len, mblen, &len);

	if (!m || len != hlen || memcmp(m, h, hlen))
		err = -EINVAL;

err:
	mpi_free(in);
	mpi_free(res);
	kfree(out1);
	while (--i >= 0)
		mpi_free(pkey[i]);
err1:
	up_read(&key->sem);

	return err;
}

/**
 * digsig_verify() - digital signature verification with public key
 * @keyring:	keyring to search key in
 * @sig:	digital signature
 * @sigen:	length of the signature
 * @data:	data
 * @datalen:	length of the data
 * @return:	0 on success, -EINVAL otherwise
 *
 * Verifies data integrity against digital signature.
 * Currently only RSA is supported.
 * Normally hash of the content is used as a data for this function.
 *
 */
int digsig_verify(struct key *keyring, const char *sig, int siglen,
		  const char *data, int datalen)
{
	int err = -ENOMEM;
	struct signature_hdr *sh = (struct signature_hdr *)sig;
	struct shash_desc *desc = NULL;
	unsigned char hash[SHA1_DIGEST_SIZE];
	struct key *key;
	char name[20];

	if (siglen < sizeof(*sh) + 2)
		return -EINVAL;

	if (sh->algo != PUBKEY_ALGO_RSA)
		return -ENOTSUPP;

	/* Look the key up by the hex key id embedded in the signature. */
	sprintf(name, "%llX", __be64_to_cpup((uint64_t *)sh->keyid));

	if (keyring) {
		/* search in specific keyring */
		key_ref_t kref;
		kref = keyring_search(make_key_ref(keyring, 1UL),
				      &key_type_user,
				      name);
		if (IS_ERR(kref))
			key = ERR_PTR(PTR_ERR(kref));
		else
			key = key_ref_to_ptr(kref);
	} else {
		key = request_key(&key_type_user, name, NULL);
	}
	if (IS_ERR(key)) {
		pr_err("key not found, id: %s\n", name);
		return PTR_ERR(key);
	}

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash),
		       GFP_KERNEL);
	if (!desc)
		goto err;

	desc->tfm = shash;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	/* SHA-1 over the data followed by the signature header. */
	crypto_shash_init(desc);
	crypto_shash_update(desc, data, datalen);
	crypto_shash_update(desc, sig, sizeof(*sh));
	crypto_shash_final(desc, hash);

	kfree(desc);

	/* pass signature mpis address */
	err = digsig_verify_rsa(key, sig + sizeof(*sh), siglen - sizeof(*sh),
				hash, sizeof(hash));

err:
	key_put(key);

	return err ? -EINVAL : 0;
}
EXPORT_SYMBOL_GPL(digsig_verify);

/* Allocate the shared SHA-1 transform at module load. */
static int __init digsig_init(void)
{
	shash = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(shash)) {
		pr_err("shash allocation failed\n");
		return PTR_ERR(shash);
	}

	return 0;
}

static void __exit digsig_cleanup(void)
{
	crypto_free_shash(shash);
}

module_init(digsig_init);
module_exit(digsig_cleanup);

MODULE_LICENSE("GPL");
gpl-2.0
dmeadows013/furry-hipster
kernel/time/clockevents.c
2745
8661
/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysdev.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
/* Devices released by their users, pending re-notification */
static LIST_HEAD(clockevents_released);

/* Notification for clock events */
static RAW_NOTIFIER_HEAD(clockevents_chain);

/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:	value to convert
 * @evt:	pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked).
 * The result is clamped to [1000, KTIME_MAX] ns.
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	u64 clc = (u64) latch << evt->shift;

	/* a mult of 0 would make do_div() divide by zero; patch it up */
	if (unlikely(!evt->mult)) {
		evt->mult = 1;
		WARN_ON(1);
	}

	do_div(clc, evt->mult);
	if (clc < 1000)
		clc = 1000;
	if (clc > KTIME_MAX)
		clc = KTIME_MAX;

	return clc;
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);

/**
 * clockevents_set_mode - set the operating mode of a clock event device
 * @dev:	device to modify
 * @mode:	new mode
 *
 * Must be called with interrupts disabled !
 */
void clockevents_set_mode(struct clock_event_device *dev,
				 enum clock_event_mode mode)
{
	if (dev->mode != mode) {
		dev->set_mode(mode, dev);
		dev->mode = mode;

		/*
		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
		 * on it, so fix it up and emit a warning:
		 */
		if (mode == CLOCK_EVT_MODE_ONESHOT) {
			if (unlikely(!dev->mult)) {
				dev->mult = 1;
				WARN_ON(1);
			}
		}
	}
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:	device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
	dev->next_event.tv64 = KTIME_MAX;
}

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:	device to program
 * @expires:	absolute expiry time (monotonic clock)
 * @now:	current time, used to compute the programming delta
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      ktime_t now)
{
	unsigned long long clc;
	int64_t delta;

	if (unlikely(expires.tv64 < 0)) {
		WARN_ON_ONCE(1);
		return -ETIME;
	}

	delta = ktime_to_ns(ktime_sub(expires, now));

	if (delta <= 0)
		return -ETIME;

	/* record the target even if the device is shut down */
	dev->next_event = expires;

	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
		return 0;

	/* clamp the delta to the device's programmable range */
	if (delta > dev->max_delta_ns)
		delta = dev->max_delta_ns;
	if (delta < dev->min_delta_ns)
		delta = dev->min_delta_ns;

	/* ns -> device ticks via the precomputed mult/shift pair */
	clc = delta * dev->mult;
	clc >>= dev->shift;

	return dev->set_next_event((unsigned long) clc, dev);
}

/**
 * clockevents_register_notifier - register a clock events change listener
 */
int clockevents_register_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&clockevents_lock, flags);
	ret = raw_notifier_chain_register(&clockevents_chain, nb);
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);

	return ret;
}

/*
 * Notify about a clock event change. Called with clockevents_lock
 * held.
 */
static void clockevents_do_notify(unsigned long reason, void *dev)
{
	raw_notifier_call_chain(&clockevents_chain, reason, dev);
}

/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *dev;

	while (!list_empty(&clockevents_released)) {
		dev = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		/* move back to the active list and re-announce it */
		list_del(&dev->list);
		list_add(&dev->list, &clockevent_devices);
		clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
	}
}

/**
 * clockevents_register_device - register a clock event device
 * @dev:	device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
	if (!dev->cpumask) {
		/* only tolerable on UP; default to the current CPU */
		WARN_ON(num_possible_cpus() > 1);
		dev->cpumask = cpumask_of(smp_processor_id());
	}

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

/*
 * clockevents_config - compute mult/shift and the min/max programmable
 * deltas (in ns) for a device, given its tick frequency.  No-op for
 * devices without oneshot support.
 */
static void clockevents_config(struct clock_event_device *dev,
			       u32 freq)
{
	u64 sec;

	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return;

	/*
	 * Calculate the maximum number of seconds we can sleep. Limit
	 * to 10 minutes for hardware which can program more than
	 * 32bit ticks so we still get reasonable conversion values.
	 */
	sec = dev->max_delta_ticks;
	do_div(sec, freq);
	if (!sec)
		sec = 1;
	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
		sec = 600;

	clockevents_calc_mult_shift(dev, freq, sec);
	dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
	dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
}

/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev:	device to register
 * @freq:	The clock frequency
 * @min_delta:	The minimum clock ticks to program in oneshot mode
 * @max_delta:	The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
				     u32 freq, unsigned long min_delta,
				     unsigned long max_delta)
{
	dev->min_delta_ticks = min_delta;
	dev->max_delta_ticks = max_delta;
	clockevents_config(dev, freq);
	clockevents_register_device(dev);
}

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:	device to modify
 * @freq:	new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events with interrupts disabled!  Returns 0 on success,
 * -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	clockevents_config(dev, freq);

	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return 0;

	/* re-arm the pending event with the new mult/shift */
	return clockevents_program_event(dev, dev->next_event, ktime_get());
}

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:	device to release (can be NULL)
 * @new:	device to request (can be NULL)
 *
 * Called from the notifier chain. clockevents_lock is held already
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	unsigned long flags;

	local_irq_save(flags);
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
		list_del(&old->list);
		list_add(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
		clockevents_shutdown(new);
	}
	local_irq_restore(flags);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
 * clockevents_notify - notification about relevant events
 */
void clockevents_notify(unsigned long reason, void *arg)
{
	struct clock_event_device *dev, *tmp;
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&clockevents_lock, flags);
	clockevents_do_notify(reason, arg);

	switch (reason) {
	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		/*
		 * Unregister the clock event devices which were
		 * released from the users in the notify chain.
		 */
		list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
			list_del(&dev->list);
		/*
		 * Now check whether the CPU has left unused per cpu devices
		 */
		cpu = *((int *)arg);
		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
			if (cpumask_test_cpu(cpu, dev->cpumask) &&
			    cpumask_weight(dev->cpumask) == 1 &&
			    !tick_is_broadcast_device(dev)) {
				BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
				list_del(&dev->list);
			}
		}
		break;
	default:
		break;
	}
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_notify);
#endif
gpl-2.0
OldDroid/android_kernel_samsung_tblte
drivers/scsi/arm/eesox.c
3513
16185
/* * linux/drivers/acorn/scsi/eesox.c * * Copyright (C) 1997-2005 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This driver is based on experimentation. Hence, it may have made * assumptions about the particular card that I have available, and * may not be reliable! * * Changelog: * 01-10-1997 RMK Created, READONLY version * 15-02-1998 RMK READ/WRITE version * added DMA support and hardware definitions * 14-03-1998 RMK Updated DMA support * Added terminator control * 15-04-1998 RMK Only do PIO if FAS216 will allow it. * 27-06-1998 RMK Changed asm/delay.h to linux/delay.h * 02-04-2000 RMK 0.0.3 Fixed NO_IRQ/NO_DMA problem, updated for new * error handling code. */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/proc_fs.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/dma-mapping.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/ecard.h> #include <asm/pgtable.h> #include "../scsi.h" #include <scsi/scsi_host.h> #include "fas216.h" #include "scsi.h" #include <scsi/scsicam.h> #define EESOX_FAS216_OFFSET 0x3000 #define EESOX_FAS216_SHIFT 5 #define EESOX_DMASTAT 0x2800 #define EESOX_STAT_INTR 0x01 #define EESOX_STAT_DMA 0x02 #define EESOX_CONTROL 0x2800 #define EESOX_INTR_ENABLE 0x04 #define EESOX_TERM_ENABLE 0x02 #define EESOX_RESET 0x01 #define EESOX_DMADATA 0x3800 #define VERSION "1.10 (17/01/2003 2.5.59)" /* * Use term=0,1,0,0,0 to turn terminators on/off */ static int term[MAX_ECARDS] = { 1, 1, 1, 1, 1, 1, 1, 1 }; #define NR_SG 256 struct eesoxscsi_info { FAS216_Info info; struct expansion_card *ec; void __iomem *base; void __iomem *ctl_port; unsigned int control; struct scatterlist sg[NR_SG]; /* Scatter DMA list */ }; /* Prototype: void eesoxscsi_irqenable(ec, 
irqnr) * Purpose : Enable interrupts on EESOX SCSI card * Params : ec - expansion card structure * : irqnr - interrupt number */ static void eesoxscsi_irqenable(struct expansion_card *ec, int irqnr) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)ec->irq_data; info->control |= EESOX_INTR_ENABLE; writeb(info->control, info->ctl_port); } /* Prototype: void eesoxscsi_irqdisable(ec, irqnr) * Purpose : Disable interrupts on EESOX SCSI card * Params : ec - expansion card structure * : irqnr - interrupt number */ static void eesoxscsi_irqdisable(struct expansion_card *ec, int irqnr) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)ec->irq_data; info->control &= ~EESOX_INTR_ENABLE; writeb(info->control, info->ctl_port); } static const expansioncard_ops_t eesoxscsi_ops = { .irqenable = eesoxscsi_irqenable, .irqdisable = eesoxscsi_irqdisable, }; /* Prototype: void eesoxscsi_terminator_ctl(*host, on_off) * Purpose : Turn the EESOX SCSI terminators on or off * Params : host - card to turn on/off * : on_off - !0 to turn on, 0 to turn off */ static void eesoxscsi_terminator_ctl(struct Scsi_Host *host, int on_off) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; unsigned long flags; spin_lock_irqsave(host->host_lock, flags); if (on_off) info->control |= EESOX_TERM_ENABLE; else info->control &= ~EESOX_TERM_ENABLE; writeb(info->control, info->ctl_port); spin_unlock_irqrestore(host->host_lock, flags); } /* Prototype: void eesoxscsi_intr(irq, *dev_id, *regs) * Purpose : handle interrupts from EESOX SCSI card * Params : irq - interrupt number * dev_id - user-defined (Scsi_Host structure) */ static irqreturn_t eesoxscsi_intr(int irq, void *dev_id) { struct eesoxscsi_info *info = dev_id; return fas216_intr(&info->info); } /* Prototype: fasdmatype_t eesoxscsi_dma_setup(host, SCpnt, direction, min_type) * Purpose : initialises DMA/PIO * Params : host - host * SCpnt - command * direction - DMA on to/off of card * min_type - minimum DMA support that 
we must have for this transfer * Returns : type of transfer to be performed */ static fasdmatype_t eesoxscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp, fasdmadir_t direction, fasdmatype_t min_type) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; struct device *dev = scsi_get_device(host); int dmach = info->info.scsi.dma; if (dmach != NO_DMA && (min_type == fasdma_real_all || SCp->this_residual >= 512)) { int bufs, map_dir, dma_dir; bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG); if (direction == DMA_OUT) map_dir = DMA_TO_DEVICE, dma_dir = DMA_MODE_WRITE; else map_dir = DMA_FROM_DEVICE, dma_dir = DMA_MODE_READ; dma_map_sg(dev, info->sg, bufs, map_dir); disable_dma(dmach); set_dma_sg(dmach, info->sg, bufs); set_dma_mode(dmach, dma_dir); enable_dma(dmach); return fasdma_real_all; } /* * We don't do DMA, we only do slow PIO * * Some day, we will do Pseudo DMA */ return fasdma_pseudo; } static void eesoxscsi_buffer_in(void *buf, int length, void __iomem *base) { const void __iomem *reg_fas = base + EESOX_FAS216_OFFSET; const void __iomem *reg_dmastat = base + EESOX_DMASTAT; const void __iomem *reg_dmadata = base + EESOX_DMADATA; register const unsigned long mask = 0xffff; do { unsigned int status; /* * Interrupt request? */ status = readb(reg_fas + (REG_STAT << EESOX_FAS216_SHIFT)); if (status & STAT_INT) break; /* * DMA request active? */ status = readb(reg_dmastat); if (!(status & EESOX_STAT_DMA)) continue; /* * Get number of bytes in FIFO */ status = readb(reg_fas + (REG_CFIS << EESOX_FAS216_SHIFT)) & CFIS_CF; if (status > 16) status = 16; if (status > length) status = length; /* * Align buffer. 
*/ if (((u32)buf) & 2 && status >= 2) { *(u16 *)buf = readl(reg_dmadata); buf += 2; status -= 2; length -= 2; } if (status >= 8) { unsigned long l1, l2; l1 = readl(reg_dmadata) & mask; l1 |= readl(reg_dmadata) << 16; l2 = readl(reg_dmadata) & mask; l2 |= readl(reg_dmadata) << 16; *(u32 *)buf = l1; buf += 4; *(u32 *)buf = l2; buf += 4; length -= 8; continue; } if (status >= 4) { unsigned long l1; l1 = readl(reg_dmadata) & mask; l1 |= readl(reg_dmadata) << 16; *(u32 *)buf = l1; buf += 4; length -= 4; continue; } if (status >= 2) { *(u16 *)buf = readl(reg_dmadata); buf += 2; length -= 2; } } while (length); } static void eesoxscsi_buffer_out(void *buf, int length, void __iomem *base) { const void __iomem *reg_fas = base + EESOX_FAS216_OFFSET; const void __iomem *reg_dmastat = base + EESOX_DMASTAT; void __iomem *reg_dmadata = base + EESOX_DMADATA; do { unsigned int status; /* * Interrupt request? */ status = readb(reg_fas + (REG_STAT << EESOX_FAS216_SHIFT)); if (status & STAT_INT) break; /* * DMA request active? */ status = readb(reg_dmastat); if (!(status & EESOX_STAT_DMA)) continue; /* * Get number of bytes in FIFO */ status = readb(reg_fas + (REG_CFIS << EESOX_FAS216_SHIFT)) & CFIS_CF; if (status > 16) status = 16; status = 16 - status; if (status > length) status = length; status &= ~1; /* * Align buffer. 
*/ if (((u32)buf) & 2 && status >= 2) { writel(*(u16 *)buf << 16, reg_dmadata); buf += 2; status -= 2; length -= 2; } if (status >= 8) { unsigned long l1, l2; l1 = *(u32 *)buf; buf += 4; l2 = *(u32 *)buf; buf += 4; writel(l1 << 16, reg_dmadata); writel(l1, reg_dmadata); writel(l2 << 16, reg_dmadata); writel(l2, reg_dmadata); length -= 8; continue; } if (status >= 4) { unsigned long l1; l1 = *(u32 *)buf; buf += 4; writel(l1 << 16, reg_dmadata); writel(l1, reg_dmadata); length -= 4; continue; } if (status >= 2) { writel(*(u16 *)buf << 16, reg_dmadata); buf += 2; length -= 2; } } while (length); } static void eesoxscsi_dma_pseudo(struct Scsi_Host *host, struct scsi_pointer *SCp, fasdmadir_t dir, int transfer_size) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; if (dir == DMA_IN) { eesoxscsi_buffer_in(SCp->ptr, SCp->this_residual, info->base); } else { eesoxscsi_buffer_out(SCp->ptr, SCp->this_residual, info->base); } } /* Prototype: int eesoxscsi_dma_stop(host, SCpnt) * Purpose : stops DMA/PIO * Params : host - host * SCpnt - command */ static void eesoxscsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; if (info->info.scsi.dma != NO_DMA) disable_dma(info->info.scsi.dma); } /* Prototype: const char *eesoxscsi_info(struct Scsi_Host * host) * Purpose : returns a descriptive string about this interface, * Params : host - driver host structure to return info for. * Returns : pointer to a static buffer containing null terminated string. */ const char *eesoxscsi_info(struct Scsi_Host *host) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; static char string[150]; sprintf(string, "%s (%s) in slot %d v%s terminators o%s", host->hostt->name, info->info.scsi.type, info->ec->slot_no, VERSION, info->control & EESOX_TERM_ENABLE ? 
"n" : "ff"); return string; } /* Prototype: int eesoxscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) * Purpose : Set a driver specific function * Params : host - host to setup * : buffer - buffer containing string describing operation * : length - length of string * Returns : -EINVAL, or 0 */ static int eesoxscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) { int ret = length; if (length >= 9 && strncmp(buffer, "EESOXSCSI", 9) == 0) { buffer += 9; length -= 9; if (length >= 5 && strncmp(buffer, "term=", 5) == 0) { if (buffer[5] == '1') eesoxscsi_terminator_ctl(host, 1); else if (buffer[5] == '0') eesoxscsi_terminator_ctl(host, 0); else ret = -EINVAL; } else ret = -EINVAL; } else ret = -EINVAL; return ret; } static int eesoxscsi_show_info(struct seq_file *m, struct Scsi_Host *host) { struct eesoxscsi_info *info; info = (struct eesoxscsi_info *)host->hostdata; seq_printf(m, "EESOX SCSI driver v%s\n", VERSION); fas216_print_host(&info->info, m); seq_printf(m, "Term : o%s\n", info->control & EESOX_TERM_ENABLE ? "n" : "ff"); fas216_print_stats(&info->info, m); fas216_print_devices(&info->info, m); return 0; } static ssize_t eesoxscsi_show_term(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); struct Scsi_Host *host = ecard_get_drvdata(ec); struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; return sprintf(buf, "%d\n", info->control & EESOX_TERM_ENABLE ? 
1 : 0); } static ssize_t eesoxscsi_store_term(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct expansion_card *ec = ECARD_DEV(dev); struct Scsi_Host *host = ecard_get_drvdata(ec); struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; unsigned long flags; if (len > 1) { spin_lock_irqsave(host->host_lock, flags); if (buf[0] != '0') { info->control |= EESOX_TERM_ENABLE; } else { info->control &= ~EESOX_TERM_ENABLE; } writeb(info->control, info->ctl_port); spin_unlock_irqrestore(host->host_lock, flags); } return len; } static DEVICE_ATTR(bus_term, S_IRUGO | S_IWUSR, eesoxscsi_show_term, eesoxscsi_store_term); static struct scsi_host_template eesox_template = { .module = THIS_MODULE, .show_info = eesoxscsi_show_info, .write_info = eesoxscsi_set_proc_info, .name = "EESOX SCSI", .info = eesoxscsi_info, .queuecommand = fas216_queue_command, .eh_host_reset_handler = fas216_eh_host_reset, .eh_bus_reset_handler = fas216_eh_bus_reset, .eh_device_reset_handler = fas216_eh_device_reset, .eh_abort_handler = fas216_eh_abort, .can_queue = 1, .this_id = 7, .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, .dma_boundary = IOMD_DMA_BOUNDARY, .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, .proc_name = "eesox", }; static int eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id) { struct Scsi_Host *host; struct eesoxscsi_info *info; void __iomem *base; int ret; ret = ecard_request_resources(ec); if (ret) goto out; base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); if (!base) { ret = -ENOMEM; goto out_region; } host = scsi_host_alloc(&eesox_template, sizeof(struct eesoxscsi_info)); if (!host) { ret = -ENOMEM; goto out_region; } ecard_set_drvdata(ec, host); info = (struct eesoxscsi_info *)host->hostdata; info->ec = ec; info->base = base; info->ctl_port = base + EESOX_CONTROL; info->control = term[ec->slot_no] ? 
EESOX_TERM_ENABLE : 0; writeb(info->control, info->ctl_port); info->info.scsi.io_base = base + EESOX_FAS216_OFFSET; info->info.scsi.io_shift = EESOX_FAS216_SHIFT; info->info.scsi.irq = ec->irq; info->info.scsi.dma = ec->dma; info->info.ifcfg.clockrate = 40; /* MHz */ info->info.ifcfg.select_timeout = 255; info->info.ifcfg.asyncperiod = 200; /* ns */ info->info.ifcfg.sync_max_depth = 7; info->info.ifcfg.cntl3 = CNTL3_FASTSCSI | CNTL3_FASTCLK; info->info.ifcfg.disconnect_ok = 1; info->info.ifcfg.wide_max_size = 0; info->info.ifcfg.capabilities = FASCAP_PSEUDODMA; info->info.dma.setup = eesoxscsi_dma_setup; info->info.dma.pseudo = eesoxscsi_dma_pseudo; info->info.dma.stop = eesoxscsi_dma_stop; ec->irqaddr = base + EESOX_DMASTAT; ec->irqmask = EESOX_STAT_INTR; ecard_setirq(ec, &eesoxscsi_ops, info); device_create_file(&ec->dev, &dev_attr_bus_term); ret = fas216_init(host); if (ret) goto out_free; ret = request_irq(ec->irq, eesoxscsi_intr, 0, "eesoxscsi", info); if (ret) { printk("scsi%d: IRQ%d not free: %d\n", host->host_no, ec->irq, ret); goto out_remove; } if (info->info.scsi.dma != NO_DMA) { if (request_dma(info->info.scsi.dma, "eesox")) { printk("scsi%d: DMA%d not free, DMA disabled\n", host->host_no, info->info.scsi.dma); info->info.scsi.dma = NO_DMA; } else { set_dma_speed(info->info.scsi.dma, 180); info->info.ifcfg.capabilities |= FASCAP_DMA; info->info.ifcfg.cntl3 |= CNTL3_BS8; } } ret = fas216_add(host, &ec->dev); if (ret == 0) goto out; if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); free_irq(ec->irq, host); out_remove: fas216_remove(host); out_free: device_remove_file(&ec->dev, &dev_attr_bus_term); scsi_host_put(host); out_region: ecard_release_resources(ec); out: return ret; } static void eesoxscsi_remove(struct expansion_card *ec) { struct Scsi_Host *host = ecard_get_drvdata(ec); struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; ecard_set_drvdata(ec, NULL); fas216_remove(host); if (info->info.scsi.dma != NO_DMA) 
free_dma(info->info.scsi.dma); free_irq(ec->irq, info); device_remove_file(&ec->dev, &dev_attr_bus_term); fas216_release(host); scsi_host_put(host); ecard_release_resources(ec); } static const struct ecard_id eesoxscsi_cids[] = { { MANU_EESOX, PROD_EESOX_SCSI2 }, { 0xffff, 0xffff }, }; static struct ecard_driver eesoxscsi_driver = { .probe = eesoxscsi_probe, .remove = eesoxscsi_remove, .id_table = eesoxscsi_cids, .drv = { .name = "eesoxscsi", }, }; static int __init eesox_init(void) { return ecard_register_driver(&eesoxscsi_driver); } static void __exit eesox_exit(void) { ecard_remove_driver(&eesoxscsi_driver); } module_init(eesox_init); module_exit(eesox_exit); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("EESOX 'Fast' SCSI driver for Acorn machines"); module_param_array(term, int, NULL, 0); MODULE_PARM_DESC(term, "SCSI bus termination"); MODULE_LICENSE("GPL");
gpl-2.0
furiousanger/FuriousKernel
drivers/video/backlight/tdo24m.c
4281
11304
/* * tdo24m - SPI-based drivers for Toppoly TDO24M series LCD panels * * Copyright (C) 2008 Marvell International Ltd. * Eric Miao <eric.miao@marvell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * publishhed by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/device.h> #include <linux/spi/spi.h> #include <linux/spi/tdo24m.h> #include <linux/fb.h> #include <linux/lcd.h> #include <linux/slab.h> #define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL) #define TDO24M_SPI_BUFF_SIZE (4) #define MODE_QVGA 0 #define MODE_VGA 1 struct tdo24m { struct spi_device *spi_dev; struct lcd_device *lcd_dev; struct spi_message msg; struct spi_transfer xfer; uint8_t *buf; int (*adj_mode)(struct tdo24m *lcd, int mode); int color_invert; int power; int mode; }; /* use bit 30, 31 as the indicator of command parameter number */ #define CMD0(x) ((0 << 30) | (x)) #define CMD1(x, x1) ((1 << 30) | ((x) << 9) | 0x100 | (x1)) #define CMD2(x, x1, x2) ((2 << 30) | ((x) << 18) | 0x20000 |\ ((x1) << 9) | 0x100 | (x2)) #define CMD_NULL (-1) static uint32_t lcd_panel_reset[] = { CMD0(0x1), /* reset */ CMD0(0x0), /* nop */ CMD0(0x0), /* nop */ CMD0(0x0), /* nop */ CMD_NULL, }; static uint32_t lcd_panel_on[] = { CMD0(0x29), /* Display ON */ CMD2(0xB8, 0xFF, 0xF9), /* Output Control */ CMD0(0x11), /* Sleep out */ CMD1(0xB0, 0x16), /* Wake */ CMD_NULL, }; static uint32_t lcd_panel_off[] = { CMD0(0x28), /* Display OFF */ CMD2(0xB8, 0x80, 0x02), /* Output Control */ CMD0(0x10), /* Sleep in */ CMD1(0xB0, 0x00), /* Deep stand by in */ CMD_NULL, }; static uint32_t lcd_vga_pass_through_tdo24m[] = { CMD1(0xB0, 0x16), CMD1(0xBC, 0x80), CMD1(0xE1, 0x00), CMD1(0x36, 0x50), CMD1(0x3B, 0x00), CMD_NULL, }; static uint32_t lcd_qvga_pass_through_tdo24m[] = { CMD1(0xB0, 0x16), CMD1(0xBC, 0x81), CMD1(0xE1, 0x00), CMD1(0x36, 0x50), CMD1(0x3B, 0x22), 
CMD_NULL, }; static uint32_t lcd_vga_transfer_tdo24m[] = { CMD1(0xcf, 0x02), /* Blanking period control (1) */ CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */ CMD1(0xd1, 0x01), /* CKV timing control on/off */ CMD2(0xd2, 0x14, 0x00), /* CKV 1,2 timing control */ CMD2(0xd3, 0x1a, 0x0f), /* OEV timing control */ CMD2(0xd4, 0x1f, 0xaf), /* ASW timing control (1) */ CMD1(0xd5, 0x14), /* ASW timing control (2) */ CMD0(0x21), /* Invert for normally black display */ CMD0(0x29), /* Display on */ CMD_NULL, }; static uint32_t lcd_qvga_transfer[] = { CMD1(0xd6, 0x02), /* Blanking period control (1) */ CMD2(0xd7, 0x08, 0x04), /* Blanking period control (2) */ CMD1(0xd8, 0x01), /* CKV timing control on/off */ CMD2(0xd9, 0x00, 0x08), /* CKV 1,2 timing control */ CMD2(0xde, 0x05, 0x0a), /* OEV timing control */ CMD2(0xdf, 0x0a, 0x19), /* ASW timing control (1) */ CMD1(0xe0, 0x0a), /* ASW timing control (2) */ CMD0(0x21), /* Invert for normally black display */ CMD0(0x29), /* Display on */ CMD_NULL, }; static uint32_t lcd_vga_pass_through_tdo35s[] = { CMD1(0xB0, 0x16), CMD1(0xBC, 0x80), CMD1(0xE1, 0x00), CMD1(0x3B, 0x00), CMD_NULL, }; static uint32_t lcd_qvga_pass_through_tdo35s[] = { CMD1(0xB0, 0x16), CMD1(0xBC, 0x81), CMD1(0xE1, 0x00), CMD1(0x3B, 0x22), CMD_NULL, }; static uint32_t lcd_vga_transfer_tdo35s[] = { CMD1(0xcf, 0x02), /* Blanking period control (1) */ CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */ CMD1(0xd1, 0x01), /* CKV timing control on/off */ CMD2(0xd2, 0x00, 0x1e), /* CKV 1,2 timing control */ CMD2(0xd3, 0x14, 0x28), /* OEV timing control */ CMD2(0xd4, 0x28, 0x64), /* ASW timing control (1) */ CMD1(0xd5, 0x28), /* ASW timing control (2) */ CMD0(0x21), /* Invert for normally black display */ CMD0(0x29), /* Display on */ CMD_NULL, }; static uint32_t lcd_panel_config[] = { CMD2(0xb8, 0xff, 0xf9), /* Output control */ CMD0(0x11), /* sleep out */ CMD1(0xba, 0x01), /* Display mode (1) */ CMD1(0xbb, 0x00), /* Display mode (2) */ CMD1(0x3a, 0x60), /* 
Display mode 18-bit RGB */ CMD1(0xbf, 0x10), /* Drive system change control */ CMD1(0xb1, 0x56), /* Booster operation setup */ CMD1(0xb2, 0x33), /* Booster mode setup */ CMD1(0xb3, 0x11), /* Booster frequency setup */ CMD1(0xb4, 0x02), /* Op amp/system clock */ CMD1(0xb5, 0x35), /* VCS voltage */ CMD1(0xb6, 0x40), /* VCOM voltage */ CMD1(0xb7, 0x03), /* External display signal */ CMD1(0xbd, 0x00), /* ASW slew rate */ CMD1(0xbe, 0x00), /* Dummy data for QuadData operation */ CMD1(0xc0, 0x11), /* Sleep out FR count (A) */ CMD1(0xc1, 0x11), /* Sleep out FR count (B) */ CMD1(0xc2, 0x11), /* Sleep out FR count (C) */ CMD2(0xc3, 0x20, 0x40), /* Sleep out FR count (D) */ CMD2(0xc4, 0x60, 0xc0), /* Sleep out FR count (E) */ CMD2(0xc5, 0x10, 0x20), /* Sleep out FR count (F) */ CMD1(0xc6, 0xc0), /* Sleep out FR count (G) */ CMD2(0xc7, 0x33, 0x43), /* Gamma 1 fine tuning (1) */ CMD1(0xc8, 0x44), /* Gamma 1 fine tuning (2) */ CMD1(0xc9, 0x33), /* Gamma 1 inclination adjustment */ CMD1(0xca, 0x00), /* Gamma 1 blue offset adjustment */ CMD2(0xec, 0x01, 0xf0), /* Horizontal clock cycles */ CMD_NULL, }; static int tdo24m_writes(struct tdo24m *lcd, uint32_t *array) { struct spi_transfer *x = &lcd->xfer; uint32_t data, *p = array; int nparams, err = 0; for (; *p != CMD_NULL; p++) { if (!lcd->color_invert && *p == CMD0(0x21)) continue; nparams = (*p >> 30) & 0x3; data = *p << (7 - nparams); switch (nparams) { case 0: lcd->buf[0] = (data >> 8) & 0xff; lcd->buf[1] = data & 0xff; break; case 1: lcd->buf[0] = (data >> 16) & 0xff; lcd->buf[1] = (data >> 8) & 0xff; lcd->buf[2] = data & 0xff; break; case 2: lcd->buf[0] = (data >> 24) & 0xff; lcd->buf[1] = (data >> 16) & 0xff; lcd->buf[2] = (data >> 8) & 0xff; lcd->buf[3] = data & 0xff; break; default: continue; } x->len = nparams + 2; err = spi_sync(lcd->spi_dev, &lcd->msg); if (err) break; } return err; } static int tdo24m_adj_mode(struct tdo24m *lcd, int mode) { switch (mode) { case MODE_VGA: tdo24m_writes(lcd, 
lcd_vga_pass_through_tdo24m); tdo24m_writes(lcd, lcd_panel_config); tdo24m_writes(lcd, lcd_vga_transfer_tdo24m); break; case MODE_QVGA: tdo24m_writes(lcd, lcd_qvga_pass_through_tdo24m); tdo24m_writes(lcd, lcd_panel_config); tdo24m_writes(lcd, lcd_qvga_transfer); break; default: return -EINVAL; } lcd->mode = mode; return 0; } static int tdo35s_adj_mode(struct tdo24m *lcd, int mode) { switch (mode) { case MODE_VGA: tdo24m_writes(lcd, lcd_vga_pass_through_tdo35s); tdo24m_writes(lcd, lcd_panel_config); tdo24m_writes(lcd, lcd_vga_transfer_tdo35s); break; case MODE_QVGA: tdo24m_writes(lcd, lcd_qvga_pass_through_tdo35s); tdo24m_writes(lcd, lcd_panel_config); tdo24m_writes(lcd, lcd_qvga_transfer); break; default: return -EINVAL; } lcd->mode = mode; return 0; } static int tdo24m_power_on(struct tdo24m *lcd) { int err; err = tdo24m_writes(lcd, lcd_panel_on); if (err) goto out; err = tdo24m_writes(lcd, lcd_panel_reset); if (err) goto out; err = lcd->adj_mode(lcd, lcd->mode); out: return err; } static int tdo24m_power_off(struct tdo24m *lcd) { return tdo24m_writes(lcd, lcd_panel_off); } static int tdo24m_power(struct tdo24m *lcd, int power) { int ret = 0; if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power)) ret = tdo24m_power_on(lcd); else if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->power)) ret = tdo24m_power_off(lcd); if (!ret) lcd->power = power; return ret; } static int tdo24m_set_power(struct lcd_device *ld, int power) { struct tdo24m *lcd = lcd_get_data(ld); return tdo24m_power(lcd, power); } static int tdo24m_get_power(struct lcd_device *ld) { struct tdo24m *lcd = lcd_get_data(ld); return lcd->power; } static int tdo24m_set_mode(struct lcd_device *ld, struct fb_videomode *m) { struct tdo24m *lcd = lcd_get_data(ld); int mode = MODE_QVGA; if (m->xres == 640 || m->xres == 480) mode = MODE_VGA; if (lcd->mode == mode) return 0; return lcd->adj_mode(lcd, mode); } static struct lcd_ops tdo24m_ops = { .get_power = tdo24m_get_power, .set_power = tdo24m_set_power, .set_mode = 
tdo24m_set_mode, }; static int __devinit tdo24m_probe(struct spi_device *spi) { struct tdo24m *lcd; struct spi_message *m; struct spi_transfer *x; struct tdo24m_platform_data *pdata; enum tdo24m_model model; int err; pdata = spi->dev.platform_data; if (pdata) model = pdata->model; else model = TDO24M; spi->bits_per_word = 8; spi->mode = SPI_MODE_3; err = spi_setup(spi); if (err) return err; lcd = kzalloc(sizeof(struct tdo24m), GFP_KERNEL); if (!lcd) return -ENOMEM; lcd->spi_dev = spi; lcd->power = FB_BLANK_POWERDOWN; lcd->mode = MODE_VGA; /* default to VGA */ lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, GFP_KERNEL); if (lcd->buf == NULL) { kfree(lcd); return -ENOMEM; } m = &lcd->msg; x = &lcd->xfer; spi_message_init(m); x->cs_change = 1; x->tx_buf = &lcd->buf[0]; spi_message_add_tail(x, m); switch (model) { case TDO24M: lcd->color_invert = 1; lcd->adj_mode = tdo24m_adj_mode; break; case TDO35S: lcd->adj_mode = tdo35s_adj_mode; lcd->color_invert = 0; break; default: dev_err(&spi->dev, "Unsupported model"); goto out_free; } lcd->lcd_dev = lcd_device_register("tdo24m", &spi->dev, lcd, &tdo24m_ops); if (IS_ERR(lcd->lcd_dev)) { err = PTR_ERR(lcd->lcd_dev); goto out_free; } dev_set_drvdata(&spi->dev, lcd); err = tdo24m_power(lcd, FB_BLANK_UNBLANK); if (err) goto out_unregister; return 0; out_unregister: lcd_device_unregister(lcd->lcd_dev); out_free: kfree(lcd->buf); kfree(lcd); return err; } static int __devexit tdo24m_remove(struct spi_device *spi) { struct tdo24m *lcd = dev_get_drvdata(&spi->dev); tdo24m_power(lcd, FB_BLANK_POWERDOWN); lcd_device_unregister(lcd->lcd_dev); kfree(lcd->buf); kfree(lcd); return 0; } #ifdef CONFIG_PM static int tdo24m_suspend(struct spi_device *spi, pm_message_t state) { struct tdo24m *lcd = dev_get_drvdata(&spi->dev); return tdo24m_power(lcd, FB_BLANK_POWERDOWN); } static int tdo24m_resume(struct spi_device *spi) { struct tdo24m *lcd = dev_get_drvdata(&spi->dev); return tdo24m_power(lcd, FB_BLANK_UNBLANK); } #else #define tdo24m_suspend NULL 
#define tdo24m_resume NULL #endif /* Power down all displays on reboot, poweroff or halt */ static void tdo24m_shutdown(struct spi_device *spi) { struct tdo24m *lcd = dev_get_drvdata(&spi->dev); tdo24m_power(lcd, FB_BLANK_POWERDOWN); } static struct spi_driver tdo24m_driver = { .driver = { .name = "tdo24m", .owner = THIS_MODULE, }, .probe = tdo24m_probe, .remove = __devexit_p(tdo24m_remove), .shutdown = tdo24m_shutdown, .suspend = tdo24m_suspend, .resume = tdo24m_resume, }; static int __init tdo24m_init(void) { return spi_register_driver(&tdo24m_driver); } module_init(tdo24m_init); static void __exit tdo24m_exit(void) { spi_unregister_driver(&tdo24m_driver); } module_exit(tdo24m_exit); MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"); MODULE_DESCRIPTION("Driver for Toppoly TDO24M LCD Panel"); MODULE_LICENSE("GPL"); MODULE_ALIAS("spi:tdo24m");
gpl-2.0
ikotpk/android_kernel_samsung_vastoskt
drivers/base/driver.c
4281
6998
/* * driver.c - centralized device driver management * * Copyright (c) 2002-3 Patrick Mochel * Copyright (c) 2002-3 Open Source Development Labs * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de> * Copyright (c) 2007 Novell Inc. * * This file is released under the GPLv2 * */ #include <linux/device.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/string.h> #include "base.h" static struct device *next_device(struct klist_iter *i) { struct klist_node *n = klist_next(i); struct device *dev = NULL; struct device_private *dev_prv; if (n) { dev_prv = to_device_private_driver(n); dev = dev_prv->device; } return dev; } /** * driver_for_each_device - Iterator for devices bound to a driver. * @drv: Driver we're iterating. * @start: Device to begin with * @data: Data to pass to the callback. * @fn: Function to call for each device. * * Iterate over the @drv's list of devices calling @fn for each one. */ int driver_for_each_device(struct device_driver *drv, struct device *start, void *data, int (*fn)(struct device *, void *)) { struct klist_iter i; struct device *dev; int error = 0; if (!drv) return -EINVAL; klist_iter_init_node(&drv->p->klist_devices, &i, start ? &start->p->knode_driver : NULL); while ((dev = next_device(&i)) && !error) error = fn(dev, data); klist_iter_exit(&i); return error; } EXPORT_SYMBOL_GPL(driver_for_each_device); /** * driver_find_device - device iterator for locating a particular device. * @drv: The device's driver * @start: Device to begin with * @data: Data to pass to match function * @match: Callback function to check device * * This is similar to the driver_for_each_device() function above, but * it returns a reference to a device that is 'found' for later use, as * determined by the @match callback. * * The callback should return 0 if the device doesn't match and non-zero * if it does. 
If the callback returns non-zero, this function will * return to the caller and not iterate over any more devices. */ struct device *driver_find_device(struct device_driver *drv, struct device *start, void *data, int (*match)(struct device *dev, void *data)) { struct klist_iter i; struct device *dev; if (!drv) return NULL; klist_iter_init_node(&drv->p->klist_devices, &i, (start ? &start->p->knode_driver : NULL)); while ((dev = next_device(&i))) if (match(dev, data) && get_device(dev)) break; klist_iter_exit(&i); return dev; } EXPORT_SYMBOL_GPL(driver_find_device); /** * driver_create_file - create sysfs file for driver. * @drv: driver. * @attr: driver attribute descriptor. */ int driver_create_file(struct device_driver *drv, const struct driver_attribute *attr) { int error; if (drv) error = sysfs_create_file(&drv->p->kobj, &attr->attr); else error = -EINVAL; return error; } EXPORT_SYMBOL_GPL(driver_create_file); /** * driver_remove_file - remove sysfs file for driver. * @drv: driver. * @attr: driver attribute descriptor. */ void driver_remove_file(struct device_driver *drv, const struct driver_attribute *attr) { if (drv) sysfs_remove_file(&drv->p->kobj, &attr->attr); } EXPORT_SYMBOL_GPL(driver_remove_file); /** * driver_add_kobj - add a kobject below the specified driver * @drv: requesting device driver * @kobj: kobject to add below this driver * @fmt: format string that names the kobject * * You really don't want to do this, this is only here due to one looney * iseries driver, go poke those developers if you are annoyed about * this... */ int driver_add_kobj(struct device_driver *drv, struct kobject *kobj, const char *fmt, ...) { va_list args; char *name; int ret; va_start(args, fmt); name = kvasprintf(GFP_KERNEL, fmt, args); va_end(args); if (!name) return -ENOMEM; ret = kobject_add(kobj, &drv->p->kobj, "%s", name); kfree(name); return ret; } EXPORT_SYMBOL_GPL(driver_add_kobj); /** * get_driver - increment driver reference count. * @drv: driver. 
*/ struct device_driver *get_driver(struct device_driver *drv) { if (drv) { struct driver_private *priv; struct kobject *kobj; kobj = kobject_get(&drv->p->kobj); priv = to_driver(kobj); return priv->driver; } return NULL; } EXPORT_SYMBOL_GPL(get_driver); /** * put_driver - decrement driver's refcount. * @drv: driver. */ void put_driver(struct device_driver *drv) { kobject_put(&drv->p->kobj); } EXPORT_SYMBOL_GPL(put_driver); static int driver_add_groups(struct device_driver *drv, const struct attribute_group **groups) { int error = 0; int i; if (groups) { for (i = 0; groups[i]; i++) { error = sysfs_create_group(&drv->p->kobj, groups[i]); if (error) { while (--i >= 0) sysfs_remove_group(&drv->p->kobj, groups[i]); break; } } } return error; } static void driver_remove_groups(struct device_driver *drv, const struct attribute_group **groups) { int i; if (groups) for (i = 0; groups[i]; i++) sysfs_remove_group(&drv->p->kobj, groups[i]); } /** * driver_register - register driver with bus * @drv: driver to register * * We pass off most of the work to the bus_add_driver() call, * since most of the things we have to do deal with the bus * structures. */ int driver_register(struct device_driver *drv) { int ret; struct device_driver *other; BUG_ON(!drv->bus->p); if ((drv->bus->probe && drv->probe) || (drv->bus->remove && drv->remove) || (drv->bus->shutdown && drv->shutdown)) printk(KERN_WARNING "Driver '%s' needs updating - please use " "bus_type methods\n", drv->name); other = driver_find(drv->name, drv->bus); if (other) { put_driver(other); printk(KERN_ERR "Error: Driver '%s' is already registered, " "aborting...\n", drv->name); return -EBUSY; } ret = bus_add_driver(drv); if (ret) return ret; ret = driver_add_groups(drv, drv->groups); if (ret) bus_remove_driver(drv); return ret; } EXPORT_SYMBOL_GPL(driver_register); /** * driver_unregister - remove driver from system. * @drv: driver. * * Again, we pass off most of the work to the bus-level call. 
*/ void driver_unregister(struct device_driver *drv) { if (!drv || !drv->p) { WARN(1, "Unexpected driver unregister!\n"); return; } driver_remove_groups(drv, drv->groups); bus_remove_driver(drv); } EXPORT_SYMBOL_GPL(driver_unregister); /** * driver_find - locate driver on a bus by its name. * @name: name of the driver. * @bus: bus to scan for the driver. * * Call kset_find_obj() to iterate over list of drivers on * a bus to find driver by name. Return driver if found. * * Note that kset_find_obj increments driver's reference count. */ struct device_driver *driver_find(const char *name, struct bus_type *bus) { struct kobject *k = kset_find_obj(bus->p->drivers_kset, name); struct driver_private *priv; if (k) { priv = to_driver(k); return priv->driver; } return NULL; } EXPORT_SYMBOL_GPL(driver_find);
gpl-2.0
tako0910/m7GPE
arch/powerpc/platforms/85xx/p1023_rds.c
4537
2915
/* * Copyright 2010-2011 Freescale Semiconductor, Inc. * * Author: Roy Zang <tie-fei.zang@freescale.com> * * Description: * P1023 RDS Board Setup * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/fsl_devices.h> #include <linux/of_platform.h> #include <linux/of_device.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <mm/mmu_decl.h> #include <asm/prom.h> #include <asm/udbg.h> #include <asm/mpic.h> #include "smp.h" #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc85xx.h" /* ************************************************************************ * * Setup the architecture * */ static void __init mpc85xx_rds_setup_arch(void) { struct device_node *np; if (ppc_md.progress) ppc_md.progress("p1023_rds_setup_arch()", 0); /* Map BCSR area */ np = of_find_node_by_name(NULL, "bcsr"); if (np != NULL) { static u8 __iomem *bcsr_regs; bcsr_regs = of_iomap(np, 0); of_node_put(np); if (!bcsr_regs) { printk(KERN_ERR "BCSR: Failed to map bcsr register space\n"); return; } else { #define BCSR15_I2C_BUS0_SEG_CLR 0x07 #define BCSR15_I2C_BUS0_SEG2 0x02 /* * Note: Accessing exclusively i2c devices. * * The i2c controller selects initially ID EEPROM in the u-boot; * but if menu configuration selects RTC support in the kernel, * the i2c controller switches to select RTC chip in the kernel. 
*/ #ifdef CONFIG_RTC_CLASS /* Enable RTC chip on the segment #2 of i2c */ clrbits8(&bcsr_regs[15], BCSR15_I2C_BUS0_SEG_CLR); setbits8(&bcsr_regs[15], BCSR15_I2C_BUS0_SEG2); #endif iounmap(bcsr_regs); } } #ifdef CONFIG_PCI for_each_compatible_node(np, "pci", "fsl,p1023-pcie") fsl_add_bridge(np, 0); #endif mpc85xx_smp_init(); } machine_device_initcall(p1023_rds, mpc85xx_common_publish_devices); static void __init mpc85xx_rds_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); } static int __init p1023_rds_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "fsl,P1023RDS"); } define_machine(p1023_rds) { .name = "P1023 RDS", .probe = p1023_rds_probe, .setup_arch = mpc85xx_rds_setup_arch, .init_IRQ = mpc85xx_rds_pic_init, .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif };
gpl-2.0
papjul/android_kernel_xiaomi_aries
arch/arm/mach-s3c64xx/pm.c
4793
8783
/* linux/arch/arm/plat-s3c64xx/pm.c * * Copyright 2008 Openmoko, Inc. * Copyright 2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * S3C64XX CPU PM support. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/suspend.h> #include <linux/serial_core.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/pm_domain.h> #include <mach/map.h> #include <mach/irqs.h> #include <plat/devs.h> #include <plat/pm.h> #include <plat/wakeup-mask.h> #include <mach/regs-sys.h> #include <mach/regs-gpio.h> #include <mach/regs-clock.h> #include <mach/regs-syscon-power.h> #include <mach/regs-gpio-memport.h> #include <mach/regs-modem.h> struct s3c64xx_pm_domain { char *const name; u32 ena; u32 pwr_stat; struct generic_pm_domain pd; }; static int s3c64xx_pd_off(struct generic_pm_domain *domain) { struct s3c64xx_pm_domain *pd; u32 val; pd = container_of(domain, struct s3c64xx_pm_domain, pd); val = __raw_readl(S3C64XX_NORMAL_CFG); val &= ~(pd->ena); __raw_writel(val, S3C64XX_NORMAL_CFG); return 0; } static int s3c64xx_pd_on(struct generic_pm_domain *domain) { struct s3c64xx_pm_domain *pd; u32 val; long retry = 1000000L; pd = container_of(domain, struct s3c64xx_pm_domain, pd); val = __raw_readl(S3C64XX_NORMAL_CFG); val |= pd->ena; __raw_writel(val, S3C64XX_NORMAL_CFG); /* Not all domains provide power status readback */ if (pd->pwr_stat) { do { cpu_relax(); if (__raw_readl(S3C64XX_BLK_PWR_STAT) & pd->pwr_stat) break; } while (retry--); if (!retry) { pr_err("Failed to start domain %s\n", pd->name); return -EBUSY; } } return 0; } static struct s3c64xx_pm_domain s3c64xx_pm_irom = { .name = "IROM", .ena = S3C64XX_NORMALCFG_IROM_ON, .pd = { .power_off = s3c64xx_pd_off, .power_on = s3c64xx_pd_on, }, }; static struct s3c64xx_pm_domain s3c64xx_pm_etm = { .name = "ETM", .ena = 
S3C64XX_NORMALCFG_DOMAIN_ETM_ON, .pwr_stat = S3C64XX_BLKPWRSTAT_ETM, .pd = { .power_off = s3c64xx_pd_off, .power_on = s3c64xx_pd_on, }, }; static struct s3c64xx_pm_domain s3c64xx_pm_s = { .name = "S", .ena = S3C64XX_NORMALCFG_DOMAIN_S_ON, .pwr_stat = S3C64XX_BLKPWRSTAT_S, .pd = { .power_off = s3c64xx_pd_off, .power_on = s3c64xx_pd_on, }, }; static struct s3c64xx_pm_domain s3c64xx_pm_f = { .name = "F", .ena = S3C64XX_NORMALCFG_DOMAIN_F_ON, .pwr_stat = S3C64XX_BLKPWRSTAT_F, .pd = { .power_off = s3c64xx_pd_off, .power_on = s3c64xx_pd_on, }, }; static struct s3c64xx_pm_domain s3c64xx_pm_p = { .name = "P", .ena = S3C64XX_NORMALCFG_DOMAIN_P_ON, .pwr_stat = S3C64XX_BLKPWRSTAT_P, .pd = { .power_off = s3c64xx_pd_off, .power_on = s3c64xx_pd_on, }, }; static struct s3c64xx_pm_domain s3c64xx_pm_i = { .name = "I", .ena = S3C64XX_NORMALCFG_DOMAIN_I_ON, .pwr_stat = S3C64XX_BLKPWRSTAT_I, .pd = { .power_off = s3c64xx_pd_off, .power_on = s3c64xx_pd_on, }, }; static struct s3c64xx_pm_domain s3c64xx_pm_g = { .name = "G", .ena = S3C64XX_NORMALCFG_DOMAIN_G_ON, .pd = { .power_off = s3c64xx_pd_off, .power_on = s3c64xx_pd_on, }, }; static struct s3c64xx_pm_domain s3c64xx_pm_v = { .name = "V", .ena = S3C64XX_NORMALCFG_DOMAIN_V_ON, .pwr_stat = S3C64XX_BLKPWRSTAT_V, .pd = { .power_off = s3c64xx_pd_off, .power_on = s3c64xx_pd_on, }, }; static struct s3c64xx_pm_domain *s3c64xx_always_on_pm_domains[] = { &s3c64xx_pm_irom, }; static struct s3c64xx_pm_domain *s3c64xx_pm_domains[] = { &s3c64xx_pm_etm, &s3c64xx_pm_g, &s3c64xx_pm_v, &s3c64xx_pm_i, &s3c64xx_pm_p, &s3c64xx_pm_s, &s3c64xx_pm_f, }; #ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK void s3c_pm_debug_smdkled(u32 set, u32 clear) { unsigned long flags; int i; local_irq_save(flags); for (i = 0; i < 4; i++) { if (clear & (1 << i)) gpio_set_value(S3C64XX_GPN(12 + i), 0); if (set & (1 << i)) gpio_set_value(S3C64XX_GPN(12 + i), 1); } local_irq_restore(flags); } #endif static struct sleep_save core_save[] = { SAVE_ITEM(S3C_APLL_LOCK), SAVE_ITEM(S3C_MPLL_LOCK), 
SAVE_ITEM(S3C_EPLL_LOCK), SAVE_ITEM(S3C_CLK_SRC), SAVE_ITEM(S3C_CLK_DIV0), SAVE_ITEM(S3C_CLK_DIV1), SAVE_ITEM(S3C_CLK_DIV2), SAVE_ITEM(S3C_CLK_OUT), SAVE_ITEM(S3C_HCLK_GATE), SAVE_ITEM(S3C_PCLK_GATE), SAVE_ITEM(S3C_SCLK_GATE), SAVE_ITEM(S3C_MEM0_GATE), SAVE_ITEM(S3C_EPLL_CON1), SAVE_ITEM(S3C_EPLL_CON0), SAVE_ITEM(S3C64XX_MEM0DRVCON), SAVE_ITEM(S3C64XX_MEM1DRVCON), #ifndef CONFIG_CPU_FREQ SAVE_ITEM(S3C_APLL_CON), SAVE_ITEM(S3C_MPLL_CON), #endif }; static struct sleep_save misc_save[] = { SAVE_ITEM(S3C64XX_AHB_CON0), SAVE_ITEM(S3C64XX_AHB_CON1), SAVE_ITEM(S3C64XX_AHB_CON2), SAVE_ITEM(S3C64XX_SPCON), SAVE_ITEM(S3C64XX_MEM0CONSTOP), SAVE_ITEM(S3C64XX_MEM1CONSTOP), SAVE_ITEM(S3C64XX_MEM0CONSLP0), SAVE_ITEM(S3C64XX_MEM0CONSLP1), SAVE_ITEM(S3C64XX_MEM1CONSLP), SAVE_ITEM(S3C64XX_SDMA_SEL), SAVE_ITEM(S3C64XX_MODEM_MIFPCON), SAVE_ITEM(S3C64XX_NORMAL_CFG), }; void s3c_pm_configure_extint(void) { __raw_writel(s3c_irqwake_eintmask, S3C64XX_EINT_MASK); } void s3c_pm_restore_core(void) { __raw_writel(0, S3C64XX_EINT_MASK); s3c_pm_debug_smdkled(1 << 2, 0); s3c_pm_do_restore_core(core_save, ARRAY_SIZE(core_save)); s3c_pm_do_restore(misc_save, ARRAY_SIZE(misc_save)); } void s3c_pm_save_core(void) { s3c_pm_do_save(misc_save, ARRAY_SIZE(misc_save)); s3c_pm_do_save(core_save, ARRAY_SIZE(core_save)); } /* since both s3c6400 and s3c6410 share the same sleep pm calls, we * put the per-cpu code in here until any new cpu comes along and changes * this. */ static int s3c64xx_cpu_suspend(unsigned long arg) { unsigned long tmp; /* set our standby method to sleep */ tmp = __raw_readl(S3C64XX_PWR_CFG); tmp &= ~S3C64XX_PWRCFG_CFG_WFI_MASK; tmp |= S3C64XX_PWRCFG_CFG_WFI_SLEEP; __raw_writel(tmp, S3C64XX_PWR_CFG); /* clear any old wakeup */ __raw_writel(__raw_readl(S3C64XX_WAKEUP_STAT), S3C64XX_WAKEUP_STAT); /* set the LED state to 0110 over sleep */ s3c_pm_debug_smdkled(3 << 1, 0xf); /* issue the standby signal into the pm unit. 
Note, we * issue a write-buffer drain just in case */ tmp = 0; asm("b 1f\n\t" ".align 5\n\t" "1:\n\t" "mcr p15, 0, %0, c7, c10, 5\n\t" "mcr p15, 0, %0, c7, c10, 4\n\t" "mcr p15, 0, %0, c7, c0, 4" :: "r" (tmp)); /* we should never get past here */ panic("sleep resumed to originator?"); } /* mapping of interrupts to parts of the wakeup mask */ static struct samsung_wakeup_mask wake_irqs[] = { { .irq = IRQ_RTC_ALARM, .bit = S3C64XX_PWRCFG_RTC_ALARM_DISABLE, }, { .irq = IRQ_RTC_TIC, .bit = S3C64XX_PWRCFG_RTC_TICK_DISABLE, }, { .irq = IRQ_PENDN, .bit = S3C64XX_PWRCFG_TS_DISABLE, }, { .irq = IRQ_HSMMC0, .bit = S3C64XX_PWRCFG_MMC0_DISABLE, }, { .irq = IRQ_HSMMC1, .bit = S3C64XX_PWRCFG_MMC1_DISABLE, }, { .irq = IRQ_HSMMC2, .bit = S3C64XX_PWRCFG_MMC2_DISABLE, }, { .irq = NO_WAKEUP_IRQ, .bit = S3C64XX_PWRCFG_BATF_DISABLE}, { .irq = NO_WAKEUP_IRQ, .bit = S3C64XX_PWRCFG_MSM_DISABLE }, { .irq = NO_WAKEUP_IRQ, .bit = S3C64XX_PWRCFG_HSI_DISABLE }, { .irq = NO_WAKEUP_IRQ, .bit = S3C64XX_PWRCFG_MSM_DISABLE }, }; static void s3c64xx_pm_prepare(void) { samsung_sync_wakemask(S3C64XX_PWR_CFG, wake_irqs, ARRAY_SIZE(wake_irqs)); /* store address of resume. 
*/ __raw_writel(virt_to_phys(s3c_cpu_resume), S3C64XX_INFORM0); /* ensure previous wakeup state is cleared before sleeping */ __raw_writel(__raw_readl(S3C64XX_WAKEUP_STAT), S3C64XX_WAKEUP_STAT); } int __init s3c64xx_pm_init(void) { int i; s3c_pm_init(); for (i = 0; i < ARRAY_SIZE(s3c64xx_always_on_pm_domains); i++) pm_genpd_init(&s3c64xx_always_on_pm_domains[i]->pd, &pm_domain_always_on_gov, false); for (i = 0; i < ARRAY_SIZE(s3c64xx_pm_domains); i++) pm_genpd_init(&s3c64xx_pm_domains[i]->pd, NULL, false); if (dev_get_platdata(&s3c_device_fb.dev)) pm_genpd_add_device(&s3c64xx_pm_f.pd, &s3c_device_fb.dev); return 0; } static __init int s3c64xx_pm_initcall(void) { pm_cpu_prep = s3c64xx_pm_prepare; pm_cpu_sleep = s3c64xx_cpu_suspend; pm_uart_udivslot = 1; #ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK gpio_request(S3C64XX_GPN(12), "DEBUG_LED0"); gpio_request(S3C64XX_GPN(13), "DEBUG_LED1"); gpio_request(S3C64XX_GPN(14), "DEBUG_LED2"); gpio_request(S3C64XX_GPN(15), "DEBUG_LED3"); gpio_direction_output(S3C64XX_GPN(12), 0); gpio_direction_output(S3C64XX_GPN(13), 0); gpio_direction_output(S3C64XX_GPN(14), 0); gpio_direction_output(S3C64XX_GPN(15), 0); #endif return 0; } arch_initcall(s3c64xx_pm_initcall); static __init int s3c64xx_pm_late_initcall(void) { pm_genpd_poweroff_unused(); return 0; } late_initcall(s3c64xx_pm_late_initcall);
gpl-2.0
Philippe12/linux-sunxi
drivers/media/dvb/dvb-usb/pctv452e.c
5049
25260
/* * PCTV 452e DVB driver * * Copyright (c) 2006-2008 Dominik Kuhlen <dkuhlen@gmx.net> * * TT connect S2-3650-CI Common Interface support, MAC readout * Copyright (C) 2008 Michael H. Schimek <mschimek@gmx.at> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. */ /* dvb usb framework */ #define DVB_USB_LOG_PREFIX "pctv452e" #include "dvb-usb.h" /* Demodulator */ #include "stb0899_drv.h" #include "stb0899_reg.h" #include "stb0899_cfg.h" /* Tuner */ #include "stb6100.h" #include "stb6100_cfg.h" /* FE Power */ #include "lnbp22.h" #include "dvb_ca_en50221.h" #include "ttpci-eeprom.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); #define ISOC_INTERFACE_ALTERNATIVE 3 #define SYNC_BYTE_OUT 0xaa #define SYNC_BYTE_IN 0x55 /* guessed: (copied from ttusb-budget) */ #define PCTV_CMD_RESET 0x15 /* command to poll IR receiver */ #define PCTV_CMD_IR 0x1b /* command to send I2C */ #define PCTV_CMD_I2C 0x31 #define I2C_ADDR_STB0899 (0xd0 >> 1) #define I2C_ADDR_STB6100 (0xc0 >> 1) #define I2C_ADDR_LNBP22 (0x10 >> 1) #define I2C_ADDR_24C16 (0xa0 >> 1) #define I2C_ADDR_24C64 (0xa2 >> 1) /* pctv452e sends us this amount of data for each issued usb-command */ #define PCTV_ANSWER_LEN 64 /* Wait up to 1000ms for device */ #define PCTV_TIMEOUT 1000 #define PCTV_LED_GPIO STB0899_GPIO01 #define PCTV_LED_GREEN 0x82 #define PCTV_LED_ORANGE 0x02 #define ci_dbg(format, arg...) 
\ do { \ if (0) \ printk(KERN_DEBUG DVB_USB_LOG_PREFIX \ ": " format "\n" , ## arg); \ } while (0) enum { TT3650_CMD_CI_TEST = 0x40, TT3650_CMD_CI_RD_CTRL, TT3650_CMD_CI_WR_CTRL, TT3650_CMD_CI_RD_ATTR, TT3650_CMD_CI_WR_ATTR, TT3650_CMD_CI_RESET, TT3650_CMD_CI_SET_VIDEO_PORT }; static struct stb0899_postproc pctv45e_postproc[] = { { PCTV_LED_GPIO, STB0899_GPIOPULLUP }, { 0, 0 } }; /* * stores all private variables for communication with the PCTV452e DVB-S2 */ struct pctv452e_state { struct dvb_ca_en50221 ca; struct mutex ca_mutex; u8 c; /* transaction counter, wraps around... */ u8 initialized; /* set to 1 if 0x15 has been sent */ u16 last_rc_key; }; static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data, unsigned int write_len, unsigned int read_len) { struct pctv452e_state *state = (struct pctv452e_state *)d->priv; u8 buf[64]; u8 id; unsigned int rlen; int ret; BUG_ON(NULL == data && 0 != (write_len | read_len)); BUG_ON(write_len > 64 - 4); BUG_ON(read_len > 64 - 4); id = state->c++; buf[0] = SYNC_BYTE_OUT; buf[1] = id; buf[2] = cmd; buf[3] = write_len; memcpy(buf + 4, data, write_len); rlen = (read_len > 0) ? 
64 : 0; ret = dvb_usb_generic_rw(d, buf, 4 + write_len, buf, rlen, /* delay_ms */ 0); if (0 != ret) goto failed; ret = -EIO; if (SYNC_BYTE_IN != buf[0] || id != buf[1]) goto failed; memcpy(data, buf + 4, read_len); return 0; failed: err("CI error %d; %02X %02X %02X -> %02X %02X %02X.", ret, SYNC_BYTE_OUT, id, cmd, buf[0], buf[1], buf[2]); return ret; } static int tt3650_ci_msg_locked(struct dvb_ca_en50221 *ca, u8 cmd, u8 *data, unsigned int write_len, unsigned int read_len) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; struct pctv452e_state *state = (struct pctv452e_state *)d->priv; int ret; mutex_lock(&state->ca_mutex); ret = tt3650_ci_msg(d, cmd, data, write_len, read_len); mutex_unlock(&state->ca_mutex); return ret; } static int tt3650_ci_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address) { u8 buf[3]; int ret; if (0 != slot) return -EINVAL; buf[0] = (address >> 8) & 0x0F; buf[1] = address; ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_RD_ATTR, buf, 2, 3); ci_dbg("%s %04x -> %d 0x%02x", __func__, address, ret, buf[2]); if (ret < 0) return ret; return buf[2]; } static int tt3650_ci_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value) { u8 buf[3]; ci_dbg("%s %d 0x%04x 0x%02x", __func__, slot, address, value); if (0 != slot) return -EINVAL; buf[0] = (address >> 8) & 0x0F; buf[1] = address; buf[2] = value; return tt3650_ci_msg_locked(ca, TT3650_CMD_CI_WR_ATTR, buf, 3, 3); } static int tt3650_ci_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address) { u8 buf[2]; int ret; if (0 != slot) return -EINVAL; buf[0] = address & 3; ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_RD_CTRL, buf, 1, 2); ci_dbg("%s 0x%02x -> %d 0x%02x", __func__, address, ret, buf[1]); if (ret < 0) return ret; return buf[1]; } static int tt3650_ci_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value) { u8 buf[2]; ci_dbg("%s %d 0x%02x 0x%02x", __func__, slot, address, value); if (0 != slot) return -EINVAL; 
buf[0] = address; buf[1] = value; return tt3650_ci_msg_locked(ca, TT3650_CMD_CI_WR_CTRL, buf, 2, 2); } static int tt3650_ci_set_video_port(struct dvb_ca_en50221 *ca, int slot, int enable) { u8 buf[1]; int ret; ci_dbg("%s %d %d", __func__, slot, enable); if (0 != slot) return -EINVAL; enable = !!enable; buf[0] = enable; ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_SET_VIDEO_PORT, buf, 1, 1); if (ret < 0) return ret; if (enable != buf[0]) { err("CI not %sabled.", enable ? "en" : "dis"); return -EIO; } return 0; } static int tt3650_ci_slot_shutdown(struct dvb_ca_en50221 *ca, int slot) { return tt3650_ci_set_video_port(ca, slot, /* enable */ 0); } static int tt3650_ci_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot) { return tt3650_ci_set_video_port(ca, slot, /* enable */ 1); } static int tt3650_ci_slot_reset(struct dvb_ca_en50221 *ca, int slot) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; struct pctv452e_state *state = (struct pctv452e_state *)d->priv; u8 buf[1]; int ret; ci_dbg("%s %d", __func__, slot); if (0 != slot) return -EINVAL; buf[0] = 0; mutex_lock(&state->ca_mutex); ret = tt3650_ci_msg(d, TT3650_CMD_CI_RESET, buf, 1, 1); if (0 != ret) goto failed; msleep(500); buf[0] = 1; ret = tt3650_ci_msg(d, TT3650_CMD_CI_RESET, buf, 1, 1); if (0 != ret) goto failed; msleep(500); buf[0] = 0; /* FTA */ ret = tt3650_ci_msg(d, TT3650_CMD_CI_SET_VIDEO_PORT, buf, 1, 1); failed: mutex_unlock(&state->ca_mutex); return ret; } static int tt3650_ci_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open) { u8 buf[1]; int ret; if (0 != slot) return -EINVAL; ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_TEST, buf, 0, 1); if (0 != ret) return ret; if (1 == buf[0]) return DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY; return 0; } static void tt3650_ci_uninit(struct dvb_usb_device *d) { struct pctv452e_state *state; ci_dbg("%s", __func__); if (NULL == d) return; state = (struct pctv452e_state *)d->priv; if (NULL == state) return; if (NULL == 
state->ca.data) return; /* Error ignored. */ tt3650_ci_set_video_port(&state->ca, /* slot */ 0, /* enable */ 0); dvb_ca_en50221_release(&state->ca); memset(&state->ca, 0, sizeof(state->ca)); } static int tt3650_ci_init(struct dvb_usb_adapter *a) { struct dvb_usb_device *d = a->dev; struct pctv452e_state *state = (struct pctv452e_state *)d->priv; int ret; ci_dbg("%s", __func__); mutex_init(&state->ca_mutex); state->ca.owner = THIS_MODULE; state->ca.read_attribute_mem = tt3650_ci_read_attribute_mem; state->ca.write_attribute_mem = tt3650_ci_write_attribute_mem; state->ca.read_cam_control = tt3650_ci_read_cam_control; state->ca.write_cam_control = tt3650_ci_write_cam_control; state->ca.slot_reset = tt3650_ci_slot_reset; state->ca.slot_shutdown = tt3650_ci_slot_shutdown; state->ca.slot_ts_enable = tt3650_ci_slot_ts_enable; state->ca.poll_slot_status = tt3650_ci_poll_slot_status; state->ca.data = d; ret = dvb_ca_en50221_init(&a->dvb_adap, &state->ca, /* flags */ 0, /* n_slots */ 1); if (0 != ret) { err("Cannot initialize CI: Error %d.", ret); memset(&state->ca, 0, sizeof(state->ca)); return ret; } info("CI initialized."); return 0; } #define CMD_BUFFER_SIZE 0x28 static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr, const u8 *snd_buf, u8 snd_len, u8 *rcv_buf, u8 rcv_len) { struct pctv452e_state *state = (struct pctv452e_state *)d->priv; u8 buf[64]; u8 id; int ret; id = state->c++; ret = -EINVAL; if (snd_len > 64 - 7 || rcv_len > 64 - 7) goto failed; buf[0] = SYNC_BYTE_OUT; buf[1] = id; buf[2] = PCTV_CMD_I2C; buf[3] = snd_len + 3; buf[4] = addr << 1; buf[5] = snd_len; buf[6] = rcv_len; memcpy(buf + 7, snd_buf, snd_len); ret = dvb_usb_generic_rw(d, buf, 7 + snd_len, buf, /* rcv_len */ 64, /* delay_ms */ 0); if (ret < 0) goto failed; /* TT USB protocol error. */ ret = -EIO; if (SYNC_BYTE_IN != buf[0] || id != buf[1]) goto failed; /* I2C device didn't respond as expected. 
*/ ret = -EREMOTEIO; if (buf[5] < snd_len || buf[6] < rcv_len) goto failed; memcpy(rcv_buf, buf + 7, rcv_len); return rcv_len; failed: err("I2C error %d; %02X %02X %02X %02X %02X -> " "%02X %02X %02X %02X %02X.", ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len, buf[0], buf[1], buf[4], buf[5], buf[6]); return ret; } static int pctv452e_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msg, int num) { struct dvb_usb_device *d = i2c_get_adapdata(adapter); int i; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (i = 0; i < num; i++) { u8 addr, snd_len, rcv_len, *snd_buf, *rcv_buf; int ret; if (msg[i].flags & I2C_M_RD) { addr = msg[i].addr; snd_buf = NULL; snd_len = 0; rcv_buf = msg[i].buf; rcv_len = msg[i].len; } else { addr = msg[i].addr; snd_buf = msg[i].buf; snd_len = msg[i].len; rcv_buf = NULL; rcv_len = 0; } ret = pctv452e_i2c_msg(d, addr, snd_buf, snd_len, rcv_buf, rcv_len); if (ret < rcv_len) break; } mutex_unlock(&d->i2c_mutex); return i; } static u32 pctv452e_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i) { struct pctv452e_state *state = (struct pctv452e_state *)d->priv; u8 b0[] = { 0xaa, 0, PCTV_CMD_RESET, 1, 0 }; u8 rx[PCTV_ANSWER_LEN]; int ret; info("%s: %d\n", __func__, i); if (!i) return 0; if (state->initialized) return 0; /* hmm where shoud this should go? */ ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE); if (ret != 0) info("%s: Warning set interface returned: %d\n", __func__, ret); /* this is a one-time initialization, dont know where to put */ b0[1] = state->c++; /* reset board */ ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0); if (ret) return ret; b0[1] = state->c++; b0[4] = 1; /* reset board (again?) 
*/ ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0); if (ret) return ret; state->initialized = 1; return 0; } static int pctv452e_rc_query(struct dvb_usb_device *d) { struct pctv452e_state *state = (struct pctv452e_state *)d->priv; u8 b[CMD_BUFFER_SIZE]; u8 rx[PCTV_ANSWER_LEN]; int ret, i; u8 id = state->c++; /* prepare command header */ b[0] = SYNC_BYTE_OUT; b[1] = id; b[2] = PCTV_CMD_IR; b[3] = 0; /* send ir request */ ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0); if (ret != 0) return ret; if (debug > 3) { info("%s: read: %2d: %02x %02x %02x: ", __func__, ret, rx[0], rx[1], rx[2]); for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++) info(" %02x", rx[i+3]); info("\n"); } if ((rx[3] == 9) && (rx[12] & 0x01)) { /* got a "press" event */ state->last_rc_key = (rx[7] << 8) | rx[6]; if (debug > 2) info("%s: cmd=0x%02x sys=0x%02x\n", __func__, rx[6], rx[7]); rc_keydown(d->rc_dev, state->last_rc_key, 0); } else if (state->last_rc_key) { rc_keyup(d->rc_dev); state->last_rc_key = 0; } return 0; } static int pctv452e_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) { const u8 mem_addr[] = { 0x1f, 0xcc }; u8 encoded_mac[20]; int ret; ret = -EAGAIN; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) goto failed; ret = pctv452e_i2c_msg(d, I2C_ADDR_24C16, mem_addr + 1, /* snd_len */ 1, encoded_mac, /* rcv_len */ 20); if (-EREMOTEIO == ret) /* Caution! A 24C16 interprets 0xA2 0x1F 0xCC as a byte write if /WC is low. 
*/ ret = pctv452e_i2c_msg(d, I2C_ADDR_24C64, mem_addr, 2, encoded_mac, 20); mutex_unlock(&d->i2c_mutex); if (20 != ret) goto failed; ret = ttpci_eeprom_decode_mac(mac, encoded_mac); if (0 != ret) goto failed; return 0; failed: memset(mac, 0, 6); return ret; } static const struct stb0899_s1_reg pctv452e_init_dev[] = { { STB0899_DISCNTRL1, 0x26 }, { STB0899_DISCNTRL2, 0x80 }, { STB0899_DISRX_ST0, 0x04 }, { STB0899_DISRX_ST1, 0x20 }, { STB0899_DISPARITY, 0x00 }, { STB0899_DISFIFO, 0x00 }, { STB0899_DISF22, 0x99 }, { STB0899_DISF22RX, 0x85 }, /* 0xa8 */ { STB0899_ACRPRESC, 0x11 }, { STB0899_ACRDIV1, 0x0a }, { STB0899_ACRDIV2, 0x05 }, { STB0899_DACR1 , 0x00 }, { STB0899_DACR2 , 0x00 }, { STB0899_OUTCFG, 0x00 }, { STB0899_MODECFG, 0x00 }, /* Inversion */ { STB0899_IRQMSK_3, 0xf3 }, { STB0899_IRQMSK_2, 0xfc }, { STB0899_IRQMSK_1, 0xff }, { STB0899_IRQMSK_0, 0xff }, { STB0899_I2CCFG, 0x88 }, { STB0899_I2CRPT, 0x58 }, { STB0899_GPIO00CFG, 0x82 }, { STB0899_GPIO01CFG, 0x82 }, /* LED: 0x02 green, 0x82 orange */ { STB0899_GPIO02CFG, 0x82 }, { STB0899_GPIO03CFG, 0x82 }, { STB0899_GPIO04CFG, 0x82 }, { STB0899_GPIO05CFG, 0x82 }, { STB0899_GPIO06CFG, 0x82 }, { STB0899_GPIO07CFG, 0x82 }, { STB0899_GPIO08CFG, 0x82 }, { STB0899_GPIO09CFG, 0x82 }, { STB0899_GPIO10CFG, 0x82 }, { STB0899_GPIO11CFG, 0x82 }, { STB0899_GPIO12CFG, 0x82 }, { STB0899_GPIO13CFG, 0x82 }, { STB0899_GPIO14CFG, 0x82 }, { STB0899_GPIO15CFG, 0x82 }, { STB0899_GPIO16CFG, 0x82 }, { STB0899_GPIO17CFG, 0x82 }, { STB0899_GPIO18CFG, 0x82 }, { STB0899_GPIO19CFG, 0x82 }, { STB0899_GPIO20CFG, 0x82 }, { STB0899_SDATCFG, 0xb8 }, { STB0899_SCLTCFG, 0xba }, { STB0899_AGCRFCFG, 0x1c }, /* 0x11 DVB-S; 0x1c DVB-S2 (1c, rjkm) */ { STB0899_GPIO22, 0x82 }, { STB0899_GPIO21, 0x91 }, { STB0899_DIRCLKCFG, 0x82 }, { STB0899_CLKOUT27CFG, 0x7e }, { STB0899_STDBYCFG, 0x82 }, { STB0899_CS0CFG, 0x82 }, { STB0899_CS1CFG, 0x82 }, { STB0899_DISEQCOCFG, 0x20 }, { STB0899_NCOARSE, 0x15 }, /* 0x15 27Mhz, F/3 198MHz, F/6 108MHz */ { STB0899_SYNTCTRL, 
0x00 }, /* 0x00 CLKI, 0x02 XTALI */ { STB0899_FILTCTRL, 0x00 }, { STB0899_SYSCTRL, 0x00 }, { STB0899_STOPCLK1, 0x20 }, /* orig: 0x00 budget-ci: 0x20 */ { STB0899_STOPCLK2, 0x00 }, { STB0899_INTBUFCTRL, 0x0a }, { STB0899_AGC2I1, 0x00 }, { STB0899_AGC2I2, 0x00 }, { STB0899_AGCIQIN, 0x00 }, { STB0899_TSTRES, 0x40 }, /* rjkm */ { 0xffff, 0xff }, }; static const struct stb0899_s1_reg pctv452e_init_s1_demod[] = { { STB0899_DEMOD, 0x00 }, { STB0899_RCOMPC, 0xc9 }, { STB0899_AGC1CN, 0x01 }, { STB0899_AGC1REF, 0x10 }, { STB0899_RTC, 0x23 }, { STB0899_TMGCFG, 0x4e }, { STB0899_AGC2REF, 0x34 }, { STB0899_TLSR, 0x84 }, { STB0899_CFD, 0xf7 }, { STB0899_ACLC, 0x87 }, { STB0899_BCLC, 0x94 }, { STB0899_EQON, 0x41 }, { STB0899_LDT, 0xf1 }, { STB0899_LDT2, 0xe3 }, { STB0899_EQUALREF, 0xb4 }, { STB0899_TMGRAMP, 0x10 }, { STB0899_TMGTHD, 0x30 }, { STB0899_IDCCOMP, 0xfd }, { STB0899_QDCCOMP, 0xff }, { STB0899_POWERI, 0x0c }, { STB0899_POWERQ, 0x0f }, { STB0899_RCOMP, 0x6c }, { STB0899_AGCIQIN, 0x80 }, { STB0899_AGC2I1, 0x06 }, { STB0899_AGC2I2, 0x00 }, { STB0899_TLIR, 0x30 }, { STB0899_RTF, 0x7f }, { STB0899_DSTATUS, 0x00 }, { STB0899_LDI, 0xbc }, { STB0899_CFRM, 0xea }, { STB0899_CFRL, 0x31 }, { STB0899_NIRM, 0x2b }, { STB0899_NIRL, 0x80 }, { STB0899_ISYMB, 0x1d }, { STB0899_QSYMB, 0xa6 }, { STB0899_SFRH, 0x2f }, { STB0899_SFRM, 0x68 }, { STB0899_SFRL, 0x40 }, { STB0899_SFRUPH, 0x2f }, { STB0899_SFRUPM, 0x68 }, { STB0899_SFRUPL, 0x40 }, { STB0899_EQUAI1, 0x02 }, { STB0899_EQUAQ1, 0xff }, { STB0899_EQUAI2, 0x04 }, { STB0899_EQUAQ2, 0x05 }, { STB0899_EQUAI3, 0x02 }, { STB0899_EQUAQ3, 0xfd }, { STB0899_EQUAI4, 0x03 }, { STB0899_EQUAQ4, 0x07 }, { STB0899_EQUAI5, 0x08 }, { STB0899_EQUAQ5, 0xf5 }, { STB0899_DSTATUS2, 0x00 }, { STB0899_VSTATUS, 0x00 }, { STB0899_VERROR, 0x86 }, { STB0899_IQSWAP, 0x2a }, { STB0899_ECNT1M, 0x00 }, { STB0899_ECNT1L, 0x00 }, { STB0899_ECNT2M, 0x00 }, { STB0899_ECNT2L, 0x00 }, { STB0899_ECNT3M, 0x0a }, { STB0899_ECNT3L, 0xad }, { STB0899_FECAUTO1, 0x06 }, { 
STB0899_FECM, 0x01 }, { STB0899_VTH12, 0xb0 }, { STB0899_VTH23, 0x7a }, { STB0899_VTH34, 0x58 }, { STB0899_VTH56, 0x38 }, { STB0899_VTH67, 0x34 }, { STB0899_VTH78, 0x24 }, { STB0899_PRVIT, 0xff }, { STB0899_VITSYNC, 0x19 }, { STB0899_RSULC, 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */ { STB0899_TSULC, 0x42 }, { STB0899_RSLLC, 0x41 }, { STB0899_TSLPL, 0x12 }, { STB0899_TSCFGH, 0x0c }, { STB0899_TSCFGM, 0x00 }, { STB0899_TSCFGL, 0x00 }, { STB0899_TSOUT, 0x69 }, /* 0x0d for CAM */ { STB0899_RSSYNCDEL, 0x00 }, { STB0899_TSINHDELH, 0x02 }, { STB0899_TSINHDELM, 0x00 }, { STB0899_TSINHDELL, 0x00 }, { STB0899_TSLLSTKM, 0x1b }, { STB0899_TSLLSTKL, 0xb3 }, { STB0899_TSULSTKM, 0x00 }, { STB0899_TSULSTKL, 0x00 }, { STB0899_PCKLENUL, 0xbc }, { STB0899_PCKLENLL, 0xcc }, { STB0899_RSPCKLEN, 0xbd }, { STB0899_TSSTATUS, 0x90 }, { STB0899_ERRCTRL1, 0xb6 }, { STB0899_ERRCTRL2, 0x95 }, { STB0899_ERRCTRL3, 0x8d }, { STB0899_DMONMSK1, 0x27 }, { STB0899_DMONMSK0, 0x03 }, { STB0899_DEMAPVIT, 0x5c }, { STB0899_PLPARM, 0x19 }, { STB0899_PDELCTRL, 0x48 }, { STB0899_PDELCTRL2, 0x00 }, { STB0899_BBHCTRL1, 0x00 }, { STB0899_BBHCTRL2, 0x00 }, { STB0899_HYSTTHRESH, 0x77 }, { STB0899_MATCSTM, 0x00 }, { STB0899_MATCSTL, 0x00 }, { STB0899_UPLCSTM, 0x00 }, { STB0899_UPLCSTL, 0x00 }, { STB0899_DFLCSTM, 0x00 }, { STB0899_DFLCSTL, 0x00 }, { STB0899_SYNCCST, 0x00 }, { STB0899_SYNCDCSTM, 0x00 }, { STB0899_SYNCDCSTL, 0x00 }, { STB0899_ISI_ENTRY, 0x00 }, { STB0899_ISI_BIT_EN, 0x00 }, { STB0899_MATSTRM, 0xf0 }, { STB0899_MATSTRL, 0x02 }, { STB0899_UPLSTRM, 0x45 }, { STB0899_UPLSTRL, 0x60 }, { STB0899_DFLSTRM, 0xe3 }, { STB0899_DFLSTRL, 0x00 }, { STB0899_SYNCSTR, 0x47 }, { STB0899_SYNCDSTRM, 0x05 }, { STB0899_SYNCDSTRL, 0x18 }, { STB0899_CFGPDELSTATUS1, 0x19 }, { STB0899_CFGPDELSTATUS2, 0x2b }, { STB0899_BBFERRORM, 0x00 }, { STB0899_BBFERRORL, 0x01 }, { STB0899_UPKTERRORM, 0x00 }, { STB0899_UPKTERRORL, 0x00 }, { 0xffff, 0xff }, }; static struct stb0899_config stb0899_config = { .init_dev = pctv452e_init_dev, 
.init_s2_demod = stb0899_s2_init_2, .init_s1_demod = pctv452e_init_s1_demod, .init_s2_fec = stb0899_s2_init_4, .init_tst = stb0899_s1_init_5, .demod_address = I2C_ADDR_STB0899, /* I2C Address */ .block_sync_mode = STB0899_SYNC_FORCED, /* ? */ .xtal_freq = 27000000, /* Assume Hz ? */ .inversion = IQ_SWAP_ON, /* ? */ .lo_clk = 76500000, .hi_clk = 99000000, .ts_output_mode = 0, /* Use parallel mode */ .clock_polarity = 0, .data_clk_parity = 0, .fec_mode = 0, .esno_ave = STB0899_DVBS2_ESNO_AVE, .esno_quant = STB0899_DVBS2_ESNO_QUANT, .avframes_coarse = STB0899_DVBS2_AVFRAMES_COARSE, .avframes_fine = STB0899_DVBS2_AVFRAMES_FINE, .miss_threshold = STB0899_DVBS2_MISS_THRESHOLD, .uwp_threshold_acq = STB0899_DVBS2_UWP_THRESHOLD_ACQ, .uwp_threshold_track = STB0899_DVBS2_UWP_THRESHOLD_TRACK, .uwp_threshold_sof = STB0899_DVBS2_UWP_THRESHOLD_SOF, .sof_search_timeout = STB0899_DVBS2_SOF_SEARCH_TIMEOUT, .btr_nco_bits = STB0899_DVBS2_BTR_NCO_BITS, .btr_gain_shift_offset = STB0899_DVBS2_BTR_GAIN_SHIFT_OFFSET, .crl_nco_bits = STB0899_DVBS2_CRL_NCO_BITS, .ldpc_max_iter = STB0899_DVBS2_LDPC_MAX_ITER, .tuner_get_frequency = stb6100_get_frequency, .tuner_set_frequency = stb6100_set_frequency, .tuner_set_bandwidth = stb6100_set_bandwidth, .tuner_get_bandwidth = stb6100_get_bandwidth, .tuner_set_rfsiggain = NULL, /* helper for switching LED green/orange */ .postproc = pctv45e_postproc }; static struct stb6100_config stb6100_config = { .tuner_address = I2C_ADDR_STB6100, .refclock = 27000000 }; static struct i2c_algorithm pctv452e_i2c_algo = { .master_xfer = pctv452e_i2c_xfer, .functionality = pctv452e_i2c_func }; static int pctv452e_frontend_attach(struct dvb_usb_adapter *a) { struct usb_device_id *id; a->fe_adap[0].fe = dvb_attach(stb0899_attach, &stb0899_config, &a->dev->i2c_adap); if (!a->fe_adap[0].fe) return -ENODEV; if ((dvb_attach(lnbp22_attach, a->fe_adap[0].fe, &a->dev->i2c_adap)) == 0) err("Cannot attach lnbp22\n"); id = a->dev->desc->warm_ids[0]; if (USB_VID_TECHNOTREND == 
id->idVendor && USB_PID_TECHNOTREND_CONNECT_S2_3650_CI == id->idProduct) /* Error ignored. */ tt3650_ci_init(a); return 0; } static int pctv452e_tuner_attach(struct dvb_usb_adapter *a) { if (!a->fe_adap[0].fe) return -ENODEV; if (dvb_attach(stb6100_attach, a->fe_adap[0].fe, &stb6100_config, &a->dev->i2c_adap) == 0) { err("%s failed\n", __func__); return -ENODEV; } return 0; } static struct usb_device_id pctv452e_usb_table[] = { {USB_DEVICE(USB_VID_PINNACLE, USB_PID_PCTV_452E)}, {USB_DEVICE(USB_VID_TECHNOTREND, USB_PID_TECHNOTREND_CONNECT_S2_3600)}, {USB_DEVICE(USB_VID_TECHNOTREND, USB_PID_TECHNOTREND_CONNECT_S2_3650_CI)}, {} }; MODULE_DEVICE_TABLE(usb, pctv452e_usb_table); static struct dvb_usb_device_properties pctv452e_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, /* more ? */ .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = sizeof(struct pctv452e_state), .power_ctrl = pctv452e_power_ctrl, .rc.core = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .allowed_protos = RC_TYPE_UNKNOWN, .rc_query = pctv452e_rc_query, .rc_interval = 100, }, .num_adapters = 1, .adapter = {{ .num_frontends = 1, .fe = {{ .frontend_attach = pctv452e_frontend_attach, .tuner_attach = pctv452e_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_ISOC, .count = 4, .endpoint = 0x02, .u = { .isoc = { .framesperurb = 4, .framesize = 940, .interval = 1 } } }, } }, } }, .i2c_algo = &pctv452e_i2c_algo, .generic_bulk_ctrl_endpoint = 1, /* allow generice rw function */ .num_device_descs = 1, .devices = { { .name = "PCTV HDTV USB", .cold_ids = { NULL, NULL }, /* this is a warm only device */ .warm_ids = { &pctv452e_usb_table[0], NULL } }, { 0 }, } }; static struct dvb_usb_device_properties tt_connect_s2_3600_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, /* more ? 
*/ .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = sizeof(struct pctv452e_state), .power_ctrl = pctv452e_power_ctrl, .read_mac_address = pctv452e_read_mac_address, .rc.core = { .rc_codes = RC_MAP_TT_1500, .allowed_protos = RC_TYPE_UNKNOWN, .rc_query = pctv452e_rc_query, .rc_interval = 100, }, .num_adapters = 1, .adapter = {{ .num_frontends = 1, .fe = {{ .frontend_attach = pctv452e_frontend_attach, .tuner_attach = pctv452e_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_ISOC, .count = 7, .endpoint = 0x02, .u = { .isoc = { .framesperurb = 4, .framesize = 940, .interval = 1 } } }, } }, } }, .i2c_algo = &pctv452e_i2c_algo, .generic_bulk_ctrl_endpoint = 1, /* allow generic rw function*/ .num_device_descs = 2, .devices = { { .name = "Technotrend TT Connect S2-3600", .cold_ids = { NULL, NULL }, /* this is a warm only device */ .warm_ids = { &pctv452e_usb_table[1], NULL } }, { .name = "Technotrend TT Connect S2-3650-CI", .cold_ids = { NULL, NULL }, .warm_ids = { &pctv452e_usb_table[2], NULL } }, { 0 }, } }; static void pctv452e_usb_disconnect(struct usb_interface *intf) { struct dvb_usb_device *d = usb_get_intfdata(intf); tt3650_ci_uninit(d); dvb_usb_device_exit(intf); } static int pctv452e_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { if (0 == dvb_usb_device_init(intf, &pctv452e_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &tt_connect_s2_3600_properties, THIS_MODULE, NULL, adapter_nr)) return 0; return -ENODEV; } static struct usb_driver pctv452e_usb_driver = { .name = "pctv452e", .probe = pctv452e_usb_probe, .disconnect = pctv452e_usb_disconnect, .id_table = pctv452e_usb_table, }; module_usb_driver(pctv452e_usb_driver); MODULE_AUTHOR("Dominik Kuhlen <dkuhlen@gmx.net>"); MODULE_AUTHOR("Andre Weidemann <Andre.Weidemann@web.de>"); MODULE_AUTHOR("Michael H. Schimek <mschimek@gmx.at>"); MODULE_DESCRIPTION("Pinnacle PCTV HDTV USB DVB / TT connect S2-3600 Driver"); MODULE_LICENSE("GPL");
gpl-2.0
pantech-msm8960/android_kernel_pantech_msm8960
drivers/media/dvb/dvb-usb/pctv452e.c
5049
25260
/* * PCTV 452e DVB driver * * Copyright (c) 2006-2008 Dominik Kuhlen <dkuhlen@gmx.net> * * TT connect S2-3650-CI Common Interface support, MAC readout * Copyright (C) 2008 Michael H. Schimek <mschimek@gmx.at> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. */ /* dvb usb framework */ #define DVB_USB_LOG_PREFIX "pctv452e" #include "dvb-usb.h" /* Demodulator */ #include "stb0899_drv.h" #include "stb0899_reg.h" #include "stb0899_cfg.h" /* Tuner */ #include "stb6100.h" #include "stb6100_cfg.h" /* FE Power */ #include "lnbp22.h" #include "dvb_ca_en50221.h" #include "ttpci-eeprom.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); #define ISOC_INTERFACE_ALTERNATIVE 3 #define SYNC_BYTE_OUT 0xaa #define SYNC_BYTE_IN 0x55 /* guessed: (copied from ttusb-budget) */ #define PCTV_CMD_RESET 0x15 /* command to poll IR receiver */ #define PCTV_CMD_IR 0x1b /* command to send I2C */ #define PCTV_CMD_I2C 0x31 #define I2C_ADDR_STB0899 (0xd0 >> 1) #define I2C_ADDR_STB6100 (0xc0 >> 1) #define I2C_ADDR_LNBP22 (0x10 >> 1) #define I2C_ADDR_24C16 (0xa0 >> 1) #define I2C_ADDR_24C64 (0xa2 >> 1) /* pctv452e sends us this amount of data for each issued usb-command */ #define PCTV_ANSWER_LEN 64 /* Wait up to 1000ms for device */ #define PCTV_TIMEOUT 1000 #define PCTV_LED_GPIO STB0899_GPIO01 #define PCTV_LED_GREEN 0x82 #define PCTV_LED_ORANGE 0x02 #define ci_dbg(format, arg...) 
\ do { \ if (0) \ printk(KERN_DEBUG DVB_USB_LOG_PREFIX \ ": " format "\n" , ## arg); \ } while (0) enum { TT3650_CMD_CI_TEST = 0x40, TT3650_CMD_CI_RD_CTRL, TT3650_CMD_CI_WR_CTRL, TT3650_CMD_CI_RD_ATTR, TT3650_CMD_CI_WR_ATTR, TT3650_CMD_CI_RESET, TT3650_CMD_CI_SET_VIDEO_PORT }; static struct stb0899_postproc pctv45e_postproc[] = { { PCTV_LED_GPIO, STB0899_GPIOPULLUP }, { 0, 0 } }; /* * stores all private variables for communication with the PCTV452e DVB-S2 */ struct pctv452e_state { struct dvb_ca_en50221 ca; struct mutex ca_mutex; u8 c; /* transaction counter, wraps around... */ u8 initialized; /* set to 1 if 0x15 has been sent */ u16 last_rc_key; }; static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data, unsigned int write_len, unsigned int read_len) { struct pctv452e_state *state = (struct pctv452e_state *)d->priv; u8 buf[64]; u8 id; unsigned int rlen; int ret; BUG_ON(NULL == data && 0 != (write_len | read_len)); BUG_ON(write_len > 64 - 4); BUG_ON(read_len > 64 - 4); id = state->c++; buf[0] = SYNC_BYTE_OUT; buf[1] = id; buf[2] = cmd; buf[3] = write_len; memcpy(buf + 4, data, write_len); rlen = (read_len > 0) ? 
64 : 0; ret = dvb_usb_generic_rw(d, buf, 4 + write_len, buf, rlen, /* delay_ms */ 0); if (0 != ret) goto failed; ret = -EIO; if (SYNC_BYTE_IN != buf[0] || id != buf[1]) goto failed; memcpy(data, buf + 4, read_len); return 0; failed: err("CI error %d; %02X %02X %02X -> %02X %02X %02X.", ret, SYNC_BYTE_OUT, id, cmd, buf[0], buf[1], buf[2]); return ret; } static int tt3650_ci_msg_locked(struct dvb_ca_en50221 *ca, u8 cmd, u8 *data, unsigned int write_len, unsigned int read_len) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; struct pctv452e_state *state = (struct pctv452e_state *)d->priv; int ret; mutex_lock(&state->ca_mutex); ret = tt3650_ci_msg(d, cmd, data, write_len, read_len); mutex_unlock(&state->ca_mutex); return ret; } static int tt3650_ci_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address) { u8 buf[3]; int ret; if (0 != slot) return -EINVAL; buf[0] = (address >> 8) & 0x0F; buf[1] = address; ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_RD_ATTR, buf, 2, 3); ci_dbg("%s %04x -> %d 0x%02x", __func__, address, ret, buf[2]); if (ret < 0) return ret; return buf[2]; } static int tt3650_ci_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value) { u8 buf[3]; ci_dbg("%s %d 0x%04x 0x%02x", __func__, slot, address, value); if (0 != slot) return -EINVAL; buf[0] = (address >> 8) & 0x0F; buf[1] = address; buf[2] = value; return tt3650_ci_msg_locked(ca, TT3650_CMD_CI_WR_ATTR, buf, 3, 3); } static int tt3650_ci_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address) { u8 buf[2]; int ret; if (0 != slot) return -EINVAL; buf[0] = address & 3; ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_RD_CTRL, buf, 1, 2); ci_dbg("%s 0x%02x -> %d 0x%02x", __func__, address, ret, buf[1]); if (ret < 0) return ret; return buf[1]; } static int tt3650_ci_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value) { u8 buf[2]; ci_dbg("%s %d 0x%02x 0x%02x", __func__, slot, address, value); if (0 != slot) return -EINVAL; 
buf[0] = address; buf[1] = value; return tt3650_ci_msg_locked(ca, TT3650_CMD_CI_WR_CTRL, buf, 2, 2); } static int tt3650_ci_set_video_port(struct dvb_ca_en50221 *ca, int slot, int enable) { u8 buf[1]; int ret; ci_dbg("%s %d %d", __func__, slot, enable); if (0 != slot) return -EINVAL; enable = !!enable; buf[0] = enable; ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_SET_VIDEO_PORT, buf, 1, 1); if (ret < 0) return ret; if (enable != buf[0]) { err("CI not %sabled.", enable ? "en" : "dis"); return -EIO; } return 0; } static int tt3650_ci_slot_shutdown(struct dvb_ca_en50221 *ca, int slot) { return tt3650_ci_set_video_port(ca, slot, /* enable */ 0); } static int tt3650_ci_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot) { return tt3650_ci_set_video_port(ca, slot, /* enable */ 1); } static int tt3650_ci_slot_reset(struct dvb_ca_en50221 *ca, int slot) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; struct pctv452e_state *state = (struct pctv452e_state *)d->priv; u8 buf[1]; int ret; ci_dbg("%s %d", __func__, slot); if (0 != slot) return -EINVAL; buf[0] = 0; mutex_lock(&state->ca_mutex); ret = tt3650_ci_msg(d, TT3650_CMD_CI_RESET, buf, 1, 1); if (0 != ret) goto failed; msleep(500); buf[0] = 1; ret = tt3650_ci_msg(d, TT3650_CMD_CI_RESET, buf, 1, 1); if (0 != ret) goto failed; msleep(500); buf[0] = 0; /* FTA */ ret = tt3650_ci_msg(d, TT3650_CMD_CI_SET_VIDEO_PORT, buf, 1, 1); failed: mutex_unlock(&state->ca_mutex); return ret; } static int tt3650_ci_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open) { u8 buf[1]; int ret; if (0 != slot) return -EINVAL; ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_TEST, buf, 0, 1); if (0 != ret) return ret; if (1 == buf[0]) return DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY; return 0; } static void tt3650_ci_uninit(struct dvb_usb_device *d) { struct pctv452e_state *state; ci_dbg("%s", __func__); if (NULL == d) return; state = (struct pctv452e_state *)d->priv; if (NULL == state) return; if (NULL == 
state->ca.data) return; /* Error ignored. */ tt3650_ci_set_video_port(&state->ca, /* slot */ 0, /* enable */ 0); dvb_ca_en50221_release(&state->ca); memset(&state->ca, 0, sizeof(state->ca)); } static int tt3650_ci_init(struct dvb_usb_adapter *a) { struct dvb_usb_device *d = a->dev; struct pctv452e_state *state = (struct pctv452e_state *)d->priv; int ret; ci_dbg("%s", __func__); mutex_init(&state->ca_mutex); state->ca.owner = THIS_MODULE; state->ca.read_attribute_mem = tt3650_ci_read_attribute_mem; state->ca.write_attribute_mem = tt3650_ci_write_attribute_mem; state->ca.read_cam_control = tt3650_ci_read_cam_control; state->ca.write_cam_control = tt3650_ci_write_cam_control; state->ca.slot_reset = tt3650_ci_slot_reset; state->ca.slot_shutdown = tt3650_ci_slot_shutdown; state->ca.slot_ts_enable = tt3650_ci_slot_ts_enable; state->ca.poll_slot_status = tt3650_ci_poll_slot_status; state->ca.data = d; ret = dvb_ca_en50221_init(&a->dvb_adap, &state->ca, /* flags */ 0, /* n_slots */ 1); if (0 != ret) { err("Cannot initialize CI: Error %d.", ret); memset(&state->ca, 0, sizeof(state->ca)); return ret; } info("CI initialized."); return 0; } #define CMD_BUFFER_SIZE 0x28 static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr, const u8 *snd_buf, u8 snd_len, u8 *rcv_buf, u8 rcv_len) { struct pctv452e_state *state = (struct pctv452e_state *)d->priv; u8 buf[64]; u8 id; int ret; id = state->c++; ret = -EINVAL; if (snd_len > 64 - 7 || rcv_len > 64 - 7) goto failed; buf[0] = SYNC_BYTE_OUT; buf[1] = id; buf[2] = PCTV_CMD_I2C; buf[3] = snd_len + 3; buf[4] = addr << 1; buf[5] = snd_len; buf[6] = rcv_len; memcpy(buf + 7, snd_buf, snd_len); ret = dvb_usb_generic_rw(d, buf, 7 + snd_len, buf, /* rcv_len */ 64, /* delay_ms */ 0); if (ret < 0) goto failed; /* TT USB protocol error. */ ret = -EIO; if (SYNC_BYTE_IN != buf[0] || id != buf[1]) goto failed; /* I2C device didn't respond as expected. 
*/ ret = -EREMOTEIO; if (buf[5] < snd_len || buf[6] < rcv_len) goto failed; memcpy(rcv_buf, buf + 7, rcv_len); return rcv_len; failed: err("I2C error %d; %02X %02X %02X %02X %02X -> " "%02X %02X %02X %02X %02X.", ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len, buf[0], buf[1], buf[4], buf[5], buf[6]); return ret; } static int pctv452e_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msg, int num) { struct dvb_usb_device *d = i2c_get_adapdata(adapter); int i; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (i = 0; i < num; i++) { u8 addr, snd_len, rcv_len, *snd_buf, *rcv_buf; int ret; if (msg[i].flags & I2C_M_RD) { addr = msg[i].addr; snd_buf = NULL; snd_len = 0; rcv_buf = msg[i].buf; rcv_len = msg[i].len; } else { addr = msg[i].addr; snd_buf = msg[i].buf; snd_len = msg[i].len; rcv_buf = NULL; rcv_len = 0; } ret = pctv452e_i2c_msg(d, addr, snd_buf, snd_len, rcv_buf, rcv_len); if (ret < rcv_len) break; } mutex_unlock(&d->i2c_mutex); return i; } static u32 pctv452e_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i) { struct pctv452e_state *state = (struct pctv452e_state *)d->priv; u8 b0[] = { 0xaa, 0, PCTV_CMD_RESET, 1, 0 }; u8 rx[PCTV_ANSWER_LEN]; int ret; info("%s: %d\n", __func__, i); if (!i) return 0; if (state->initialized) return 0; /* hmm where shoud this should go? */ ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE); if (ret != 0) info("%s: Warning set interface returned: %d\n", __func__, ret); /* this is a one-time initialization, dont know where to put */ b0[1] = state->c++; /* reset board */ ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0); if (ret) return ret; b0[1] = state->c++; b0[4] = 1; /* reset board (again?) 
*/ ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0); if (ret) return ret; state->initialized = 1; return 0; } static int pctv452e_rc_query(struct dvb_usb_device *d) { struct pctv452e_state *state = (struct pctv452e_state *)d->priv; u8 b[CMD_BUFFER_SIZE]; u8 rx[PCTV_ANSWER_LEN]; int ret, i; u8 id = state->c++; /* prepare command header */ b[0] = SYNC_BYTE_OUT; b[1] = id; b[2] = PCTV_CMD_IR; b[3] = 0; /* send ir request */ ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0); if (ret != 0) return ret; if (debug > 3) { info("%s: read: %2d: %02x %02x %02x: ", __func__, ret, rx[0], rx[1], rx[2]); for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++) info(" %02x", rx[i+3]); info("\n"); } if ((rx[3] == 9) && (rx[12] & 0x01)) { /* got a "press" event */ state->last_rc_key = (rx[7] << 8) | rx[6]; if (debug > 2) info("%s: cmd=0x%02x sys=0x%02x\n", __func__, rx[6], rx[7]); rc_keydown(d->rc_dev, state->last_rc_key, 0); } else if (state->last_rc_key) { rc_keyup(d->rc_dev); state->last_rc_key = 0; } return 0; } static int pctv452e_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) { const u8 mem_addr[] = { 0x1f, 0xcc }; u8 encoded_mac[20]; int ret; ret = -EAGAIN; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) goto failed; ret = pctv452e_i2c_msg(d, I2C_ADDR_24C16, mem_addr + 1, /* snd_len */ 1, encoded_mac, /* rcv_len */ 20); if (-EREMOTEIO == ret) /* Caution! A 24C16 interprets 0xA2 0x1F 0xCC as a byte write if /WC is low. 
*/ ret = pctv452e_i2c_msg(d, I2C_ADDR_24C64, mem_addr, 2, encoded_mac, 20); mutex_unlock(&d->i2c_mutex); if (20 != ret) goto failed; ret = ttpci_eeprom_decode_mac(mac, encoded_mac); if (0 != ret) goto failed; return 0; failed: memset(mac, 0, 6); return ret; } static const struct stb0899_s1_reg pctv452e_init_dev[] = { { STB0899_DISCNTRL1, 0x26 }, { STB0899_DISCNTRL2, 0x80 }, { STB0899_DISRX_ST0, 0x04 }, { STB0899_DISRX_ST1, 0x20 }, { STB0899_DISPARITY, 0x00 }, { STB0899_DISFIFO, 0x00 }, { STB0899_DISF22, 0x99 }, { STB0899_DISF22RX, 0x85 }, /* 0xa8 */ { STB0899_ACRPRESC, 0x11 }, { STB0899_ACRDIV1, 0x0a }, { STB0899_ACRDIV2, 0x05 }, { STB0899_DACR1 , 0x00 }, { STB0899_DACR2 , 0x00 }, { STB0899_OUTCFG, 0x00 }, { STB0899_MODECFG, 0x00 }, /* Inversion */ { STB0899_IRQMSK_3, 0xf3 }, { STB0899_IRQMSK_2, 0xfc }, { STB0899_IRQMSK_1, 0xff }, { STB0899_IRQMSK_0, 0xff }, { STB0899_I2CCFG, 0x88 }, { STB0899_I2CRPT, 0x58 }, { STB0899_GPIO00CFG, 0x82 }, { STB0899_GPIO01CFG, 0x82 }, /* LED: 0x02 green, 0x82 orange */ { STB0899_GPIO02CFG, 0x82 }, { STB0899_GPIO03CFG, 0x82 }, { STB0899_GPIO04CFG, 0x82 }, { STB0899_GPIO05CFG, 0x82 }, { STB0899_GPIO06CFG, 0x82 }, { STB0899_GPIO07CFG, 0x82 }, { STB0899_GPIO08CFG, 0x82 }, { STB0899_GPIO09CFG, 0x82 }, { STB0899_GPIO10CFG, 0x82 }, { STB0899_GPIO11CFG, 0x82 }, { STB0899_GPIO12CFG, 0x82 }, { STB0899_GPIO13CFG, 0x82 }, { STB0899_GPIO14CFG, 0x82 }, { STB0899_GPIO15CFG, 0x82 }, { STB0899_GPIO16CFG, 0x82 }, { STB0899_GPIO17CFG, 0x82 }, { STB0899_GPIO18CFG, 0x82 }, { STB0899_GPIO19CFG, 0x82 }, { STB0899_GPIO20CFG, 0x82 }, { STB0899_SDATCFG, 0xb8 }, { STB0899_SCLTCFG, 0xba }, { STB0899_AGCRFCFG, 0x1c }, /* 0x11 DVB-S; 0x1c DVB-S2 (1c, rjkm) */ { STB0899_GPIO22, 0x82 }, { STB0899_GPIO21, 0x91 }, { STB0899_DIRCLKCFG, 0x82 }, { STB0899_CLKOUT27CFG, 0x7e }, { STB0899_STDBYCFG, 0x82 }, { STB0899_CS0CFG, 0x82 }, { STB0899_CS1CFG, 0x82 }, { STB0899_DISEQCOCFG, 0x20 }, { STB0899_NCOARSE, 0x15 }, /* 0x15 27Mhz, F/3 198MHz, F/6 108MHz */ { STB0899_SYNTCTRL, 
0x00 }, /* 0x00 CLKI, 0x02 XTALI */ { STB0899_FILTCTRL, 0x00 }, { STB0899_SYSCTRL, 0x00 }, { STB0899_STOPCLK1, 0x20 }, /* orig: 0x00 budget-ci: 0x20 */ { STB0899_STOPCLK2, 0x00 }, { STB0899_INTBUFCTRL, 0x0a }, { STB0899_AGC2I1, 0x00 }, { STB0899_AGC2I2, 0x00 }, { STB0899_AGCIQIN, 0x00 }, { STB0899_TSTRES, 0x40 }, /* rjkm */ { 0xffff, 0xff }, }; static const struct stb0899_s1_reg pctv452e_init_s1_demod[] = { { STB0899_DEMOD, 0x00 }, { STB0899_RCOMPC, 0xc9 }, { STB0899_AGC1CN, 0x01 }, { STB0899_AGC1REF, 0x10 }, { STB0899_RTC, 0x23 }, { STB0899_TMGCFG, 0x4e }, { STB0899_AGC2REF, 0x34 }, { STB0899_TLSR, 0x84 }, { STB0899_CFD, 0xf7 }, { STB0899_ACLC, 0x87 }, { STB0899_BCLC, 0x94 }, { STB0899_EQON, 0x41 }, { STB0899_LDT, 0xf1 }, { STB0899_LDT2, 0xe3 }, { STB0899_EQUALREF, 0xb4 }, { STB0899_TMGRAMP, 0x10 }, { STB0899_TMGTHD, 0x30 }, { STB0899_IDCCOMP, 0xfd }, { STB0899_QDCCOMP, 0xff }, { STB0899_POWERI, 0x0c }, { STB0899_POWERQ, 0x0f }, { STB0899_RCOMP, 0x6c }, { STB0899_AGCIQIN, 0x80 }, { STB0899_AGC2I1, 0x06 }, { STB0899_AGC2I2, 0x00 }, { STB0899_TLIR, 0x30 }, { STB0899_RTF, 0x7f }, { STB0899_DSTATUS, 0x00 }, { STB0899_LDI, 0xbc }, { STB0899_CFRM, 0xea }, { STB0899_CFRL, 0x31 }, { STB0899_NIRM, 0x2b }, { STB0899_NIRL, 0x80 }, { STB0899_ISYMB, 0x1d }, { STB0899_QSYMB, 0xa6 }, { STB0899_SFRH, 0x2f }, { STB0899_SFRM, 0x68 }, { STB0899_SFRL, 0x40 }, { STB0899_SFRUPH, 0x2f }, { STB0899_SFRUPM, 0x68 }, { STB0899_SFRUPL, 0x40 }, { STB0899_EQUAI1, 0x02 }, { STB0899_EQUAQ1, 0xff }, { STB0899_EQUAI2, 0x04 }, { STB0899_EQUAQ2, 0x05 }, { STB0899_EQUAI3, 0x02 }, { STB0899_EQUAQ3, 0xfd }, { STB0899_EQUAI4, 0x03 }, { STB0899_EQUAQ4, 0x07 }, { STB0899_EQUAI5, 0x08 }, { STB0899_EQUAQ5, 0xf5 }, { STB0899_DSTATUS2, 0x00 }, { STB0899_VSTATUS, 0x00 }, { STB0899_VERROR, 0x86 }, { STB0899_IQSWAP, 0x2a }, { STB0899_ECNT1M, 0x00 }, { STB0899_ECNT1L, 0x00 }, { STB0899_ECNT2M, 0x00 }, { STB0899_ECNT2L, 0x00 }, { STB0899_ECNT3M, 0x0a }, { STB0899_ECNT3L, 0xad }, { STB0899_FECAUTO1, 0x06 }, { 
STB0899_FECM, 0x01 }, { STB0899_VTH12, 0xb0 }, { STB0899_VTH23, 0x7a }, { STB0899_VTH34, 0x58 }, { STB0899_VTH56, 0x38 }, { STB0899_VTH67, 0x34 }, { STB0899_VTH78, 0x24 }, { STB0899_PRVIT, 0xff }, { STB0899_VITSYNC, 0x19 }, { STB0899_RSULC, 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */ { STB0899_TSULC, 0x42 }, { STB0899_RSLLC, 0x41 }, { STB0899_TSLPL, 0x12 }, { STB0899_TSCFGH, 0x0c }, { STB0899_TSCFGM, 0x00 }, { STB0899_TSCFGL, 0x00 }, { STB0899_TSOUT, 0x69 }, /* 0x0d for CAM */ { STB0899_RSSYNCDEL, 0x00 }, { STB0899_TSINHDELH, 0x02 }, { STB0899_TSINHDELM, 0x00 }, { STB0899_TSINHDELL, 0x00 }, { STB0899_TSLLSTKM, 0x1b }, { STB0899_TSLLSTKL, 0xb3 }, { STB0899_TSULSTKM, 0x00 }, { STB0899_TSULSTKL, 0x00 }, { STB0899_PCKLENUL, 0xbc }, { STB0899_PCKLENLL, 0xcc }, { STB0899_RSPCKLEN, 0xbd }, { STB0899_TSSTATUS, 0x90 }, { STB0899_ERRCTRL1, 0xb6 }, { STB0899_ERRCTRL2, 0x95 }, { STB0899_ERRCTRL3, 0x8d }, { STB0899_DMONMSK1, 0x27 }, { STB0899_DMONMSK0, 0x03 }, { STB0899_DEMAPVIT, 0x5c }, { STB0899_PLPARM, 0x19 }, { STB0899_PDELCTRL, 0x48 }, { STB0899_PDELCTRL2, 0x00 }, { STB0899_BBHCTRL1, 0x00 }, { STB0899_BBHCTRL2, 0x00 }, { STB0899_HYSTTHRESH, 0x77 }, { STB0899_MATCSTM, 0x00 }, { STB0899_MATCSTL, 0x00 }, { STB0899_UPLCSTM, 0x00 }, { STB0899_UPLCSTL, 0x00 }, { STB0899_DFLCSTM, 0x00 }, { STB0899_DFLCSTL, 0x00 }, { STB0899_SYNCCST, 0x00 }, { STB0899_SYNCDCSTM, 0x00 }, { STB0899_SYNCDCSTL, 0x00 }, { STB0899_ISI_ENTRY, 0x00 }, { STB0899_ISI_BIT_EN, 0x00 }, { STB0899_MATSTRM, 0xf0 }, { STB0899_MATSTRL, 0x02 }, { STB0899_UPLSTRM, 0x45 }, { STB0899_UPLSTRL, 0x60 }, { STB0899_DFLSTRM, 0xe3 }, { STB0899_DFLSTRL, 0x00 }, { STB0899_SYNCSTR, 0x47 }, { STB0899_SYNCDSTRM, 0x05 }, { STB0899_SYNCDSTRL, 0x18 }, { STB0899_CFGPDELSTATUS1, 0x19 }, { STB0899_CFGPDELSTATUS2, 0x2b }, { STB0899_BBFERRORM, 0x00 }, { STB0899_BBFERRORL, 0x01 }, { STB0899_UPKTERRORM, 0x00 }, { STB0899_UPKTERRORL, 0x00 }, { 0xffff, 0xff }, }; static struct stb0899_config stb0899_config = { .init_dev = pctv452e_init_dev, 
.init_s2_demod = stb0899_s2_init_2, .init_s1_demod = pctv452e_init_s1_demod, .init_s2_fec = stb0899_s2_init_4, .init_tst = stb0899_s1_init_5, .demod_address = I2C_ADDR_STB0899, /* I2C Address */ .block_sync_mode = STB0899_SYNC_FORCED, /* ? */ .xtal_freq = 27000000, /* Assume Hz ? */ .inversion = IQ_SWAP_ON, /* ? */ .lo_clk = 76500000, .hi_clk = 99000000, .ts_output_mode = 0, /* Use parallel mode */ .clock_polarity = 0, .data_clk_parity = 0, .fec_mode = 0, .esno_ave = STB0899_DVBS2_ESNO_AVE, .esno_quant = STB0899_DVBS2_ESNO_QUANT, .avframes_coarse = STB0899_DVBS2_AVFRAMES_COARSE, .avframes_fine = STB0899_DVBS2_AVFRAMES_FINE, .miss_threshold = STB0899_DVBS2_MISS_THRESHOLD, .uwp_threshold_acq = STB0899_DVBS2_UWP_THRESHOLD_ACQ, .uwp_threshold_track = STB0899_DVBS2_UWP_THRESHOLD_TRACK, .uwp_threshold_sof = STB0899_DVBS2_UWP_THRESHOLD_SOF, .sof_search_timeout = STB0899_DVBS2_SOF_SEARCH_TIMEOUT, .btr_nco_bits = STB0899_DVBS2_BTR_NCO_BITS, .btr_gain_shift_offset = STB0899_DVBS2_BTR_GAIN_SHIFT_OFFSET, .crl_nco_bits = STB0899_DVBS2_CRL_NCO_BITS, .ldpc_max_iter = STB0899_DVBS2_LDPC_MAX_ITER, .tuner_get_frequency = stb6100_get_frequency, .tuner_set_frequency = stb6100_set_frequency, .tuner_set_bandwidth = stb6100_set_bandwidth, .tuner_get_bandwidth = stb6100_get_bandwidth, .tuner_set_rfsiggain = NULL, /* helper for switching LED green/orange */ .postproc = pctv45e_postproc }; static struct stb6100_config stb6100_config = { .tuner_address = I2C_ADDR_STB6100, .refclock = 27000000 }; static struct i2c_algorithm pctv452e_i2c_algo = { .master_xfer = pctv452e_i2c_xfer, .functionality = pctv452e_i2c_func }; static int pctv452e_frontend_attach(struct dvb_usb_adapter *a) { struct usb_device_id *id; a->fe_adap[0].fe = dvb_attach(stb0899_attach, &stb0899_config, &a->dev->i2c_adap); if (!a->fe_adap[0].fe) return -ENODEV; if ((dvb_attach(lnbp22_attach, a->fe_adap[0].fe, &a->dev->i2c_adap)) == 0) err("Cannot attach lnbp22\n"); id = a->dev->desc->warm_ids[0]; if (USB_VID_TECHNOTREND == 
id->idVendor && USB_PID_TECHNOTREND_CONNECT_S2_3650_CI == id->idProduct) /* Error ignored. */ tt3650_ci_init(a); return 0; } static int pctv452e_tuner_attach(struct dvb_usb_adapter *a) { if (!a->fe_adap[0].fe) return -ENODEV; if (dvb_attach(stb6100_attach, a->fe_adap[0].fe, &stb6100_config, &a->dev->i2c_adap) == 0) { err("%s failed\n", __func__); return -ENODEV; } return 0; } static struct usb_device_id pctv452e_usb_table[] = { {USB_DEVICE(USB_VID_PINNACLE, USB_PID_PCTV_452E)}, {USB_DEVICE(USB_VID_TECHNOTREND, USB_PID_TECHNOTREND_CONNECT_S2_3600)}, {USB_DEVICE(USB_VID_TECHNOTREND, USB_PID_TECHNOTREND_CONNECT_S2_3650_CI)}, {} }; MODULE_DEVICE_TABLE(usb, pctv452e_usb_table); static struct dvb_usb_device_properties pctv452e_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, /* more ? */ .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = sizeof(struct pctv452e_state), .power_ctrl = pctv452e_power_ctrl, .rc.core = { .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .allowed_protos = RC_TYPE_UNKNOWN, .rc_query = pctv452e_rc_query, .rc_interval = 100, }, .num_adapters = 1, .adapter = {{ .num_frontends = 1, .fe = {{ .frontend_attach = pctv452e_frontend_attach, .tuner_attach = pctv452e_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_ISOC, .count = 4, .endpoint = 0x02, .u = { .isoc = { .framesperurb = 4, .framesize = 940, .interval = 1 } } }, } }, } }, .i2c_algo = &pctv452e_i2c_algo, .generic_bulk_ctrl_endpoint = 1, /* allow generice rw function */ .num_device_descs = 1, .devices = { { .name = "PCTV HDTV USB", .cold_ids = { NULL, NULL }, /* this is a warm only device */ .warm_ids = { &pctv452e_usb_table[0], NULL } }, { 0 }, } }; static struct dvb_usb_device_properties tt_connect_s2_3600_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, /* more ? 
*/ .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = sizeof(struct pctv452e_state), .power_ctrl = pctv452e_power_ctrl, .read_mac_address = pctv452e_read_mac_address, .rc.core = { .rc_codes = RC_MAP_TT_1500, .allowed_protos = RC_TYPE_UNKNOWN, .rc_query = pctv452e_rc_query, .rc_interval = 100, }, .num_adapters = 1, .adapter = {{ .num_frontends = 1, .fe = {{ .frontend_attach = pctv452e_frontend_attach, .tuner_attach = pctv452e_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_ISOC, .count = 7, .endpoint = 0x02, .u = { .isoc = { .framesperurb = 4, .framesize = 940, .interval = 1 } } }, } }, } }, .i2c_algo = &pctv452e_i2c_algo, .generic_bulk_ctrl_endpoint = 1, /* allow generic rw function*/ .num_device_descs = 2, .devices = { { .name = "Technotrend TT Connect S2-3600", .cold_ids = { NULL, NULL }, /* this is a warm only device */ .warm_ids = { &pctv452e_usb_table[1], NULL } }, { .name = "Technotrend TT Connect S2-3650-CI", .cold_ids = { NULL, NULL }, .warm_ids = { &pctv452e_usb_table[2], NULL } }, { 0 }, } }; static void pctv452e_usb_disconnect(struct usb_interface *intf) { struct dvb_usb_device *d = usb_get_intfdata(intf); tt3650_ci_uninit(d); dvb_usb_device_exit(intf); } static int pctv452e_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { if (0 == dvb_usb_device_init(intf, &pctv452e_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &tt_connect_s2_3600_properties, THIS_MODULE, NULL, adapter_nr)) return 0; return -ENODEV; } static struct usb_driver pctv452e_usb_driver = { .name = "pctv452e", .probe = pctv452e_usb_probe, .disconnect = pctv452e_usb_disconnect, .id_table = pctv452e_usb_table, }; module_usb_driver(pctv452e_usb_driver); MODULE_AUTHOR("Dominik Kuhlen <dkuhlen@gmx.net>"); MODULE_AUTHOR("Andre Weidemann <Andre.Weidemann@web.de>"); MODULE_AUTHOR("Michael H. Schimek <mschimek@gmx.at>"); MODULE_DESCRIPTION("Pinnacle PCTV HDTV USB DVB / TT connect S2-3600 Driver"); MODULE_LICENSE("GPL");
gpl-2.0
CyanogenMod/android_kernel_bn_omap
drivers/usb/host/uhci-debug.c
8121
15306
/*
 * UHCI-specific debugging code. Invaluable when something
 * goes wrong, but don't get in my face.
 *
 * Kernel visible pointers are surrounded in []s and bus
 * visible pointers are surrounded in ()s
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2001 Johannes Erdfelt
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <asm/io.h>

#include "uhci-hcd.h"

static struct dentry *uhci_debugfs_root;

#ifdef DEBUG

/*
 * Handle REALLY large printks so we don't overflow buffers.
 * Splits the buffer at newlines and emits one printk per line.
 */
static void lprintk(char *buf)
{
	char *p;

	/* Just write one line at a time */
	while (buf) {
		p = strchr(buf, '\n');
		if (p)
			*p = 0;
		printk(KERN_DEBUG "%s\n", buf);
		buf = p;
		if (buf)
			buf++;
	}
}

/*
 * Dump one TD into buf (at most len bytes), indented by 'space'.
 * Returns the number of characters written; 0 if len looks too small.
 */
static int uhci_show_td(struct uhci_hcd *uhci, struct uhci_td *td, char *buf,
			int len, int space)
{
	char *out = buf;
	char *spid;
	u32 status, token;

	/* Try to make sure there's enough memory */
	if (len < 160)
		return 0;

	status = td_status(uhci, td);
	out += sprintf(out, "%*s[%p] link (%08x) ", space, "", td,
		hc32_to_cpu(uhci, td->link));
	out += sprintf(out,
		"e%d %s%s%s%s%s%s%s%s%s%sLength=%x ",
		((status >> 27) & 3),
		(status & TD_CTRL_SPD) ?      "SPD " : "",
		(status & TD_CTRL_LS) ?       "LS " : "",
		(status & TD_CTRL_IOC) ?      "IOC " : "",
		(status & TD_CTRL_ACTIVE) ?   "Active " : "",
		(status & TD_CTRL_STALLED) ?  "Stalled " : "",
		(status & TD_CTRL_DBUFERR) ?  "DataBufErr " : "",
		(status & TD_CTRL_BABBLE) ?   "Babble " : "",
		(status & TD_CTRL_NAK) ?      "NAK " : "",
		(status & TD_CTRL_CRCTIMEO) ? "CRC/Timeo " : "",
		(status & TD_CTRL_BITSTUFF) ? "BitStuff " : "",
		status & 0x7ff);

	token = td_token(uhci, td);
	switch (uhci_packetid(token)) {
	case USB_PID_SETUP:
		spid = "SETUP";
		break;
	case USB_PID_OUT:
		spid = "OUT";
		break;
	case USB_PID_IN:
		spid = "IN";
		break;
	default:
		spid = "?";
		break;
	}

	out += sprintf(out, "MaxLen=%x DT%d EndPt=%x Dev=%x, PID=%x(%s) ",
		token >> 21,
		((token >> 19) & 1),
		(token >> 15) & 15,
		(token >> 8) & 127,
		(token & 0xff),
		spid);
	out += sprintf(out, "(buf=%08x)\n", hc32_to_cpu(uhci, td->buffer));

	return out - buf;
}

/*
 * Dump one urb_priv and (up to 10 of) its TDs.  For non-isochronous
 * queues beyond the tenth TD only active/inactive counts are printed,
 * unless 'debug' (module parameter of the including file) is > 2.
 */
static int uhci_show_urbp(struct uhci_hcd *uhci, struct urb_priv *urbp,
			char *buf, int len, int space)
{
	char *out = buf;
	struct uhci_td *td;
	int i, nactive, ninactive;
	char *ptype;

	if (len < 200)
		return 0;

	out += sprintf(out, "urb_priv [%p] ", urbp);
	out += sprintf(out, "urb [%p] ", urbp->urb);
	out += sprintf(out, "qh [%p] ", urbp->qh);
	out += sprintf(out, "Dev=%d ", usb_pipedevice(urbp->urb->pipe));
	out += sprintf(out, "EP=%x(%s) ", usb_pipeendpoint(urbp->urb->pipe),
			(usb_pipein(urbp->urb->pipe) ? "IN" : "OUT"));

	switch (usb_pipetype(urbp->urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		ptype = "ISO";
		break;
	case PIPE_INTERRUPT:
		ptype = "INT";
		break;
	case PIPE_BULK:
		ptype = "BLK";
		break;
	default:
	case PIPE_CONTROL:
		ptype = "CTL";
		break;
	}

	out += sprintf(out, "%s%s", ptype, (urbp->fsbr ? " FSBR" : ""));
	out += sprintf(out, " Actlen=%d%s", urbp->urb->actual_length,
			(urbp->qh->type == USB_ENDPOINT_XFER_CONTROL ?
				"-8" : ""));

	if (urbp->urb->unlinked)
		out += sprintf(out, " Unlinked=%d", urbp->urb->unlinked);
	out += sprintf(out, "\n");

	i = nactive = ninactive = 0;
	list_for_each_entry(td, &urbp->td_list, list) {
		if (urbp->qh->type != USB_ENDPOINT_XFER_ISOC &&
				(++i <= 10 || debug > 2)) {
			out += sprintf(out, "%*s%d: ", space + 2, "", i);
			out += uhci_show_td(uhci, td, out,
					len - (out - buf), 0);
		} else {
			if (td_status(uhci, td) & TD_CTRL_ACTIVE)
				++nactive;
			else
				++ninactive;
		}
	}
	if (nactive + ninactive > 0)
		out += sprintf(out, "%*s[skipped %d inactive and %d active "
				"TDs]\n",
				space, "", ninactive, nactive);

	return out - buf;
}

/*
 * Dump one QH: its type, link/element words, period/phase/load for
 * periodic QHs, sanity checks on the element pointer, the queued URBs
 * (up to 10) and the dummy TD if present.
 */
static int uhci_show_qh(struct uhci_hcd *uhci,
		struct uhci_qh *qh, char *buf, int len, int space)
{
	char *out = buf;
	int i, nurbs;
	__hc32 element = qh_element(qh);
	char *qtype;

	/* Try to make sure there's enough memory */
	if (len < 80 * 7)
		return 0;

	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
		qtype = "ISO";
		break;
	case USB_ENDPOINT_XFER_INT:
		qtype = "INT";
		break;
	case USB_ENDPOINT_XFER_BULK:
		qtype = "BLK";
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		qtype = "CTL";
		break;
	default:
		qtype = "Skel";
		break;
	}

	out += sprintf(out, "%*s[%p] %s QH link (%08x) element (%08x)\n",
			space, "", qh, qtype,
			hc32_to_cpu(uhci, qh->link),
			hc32_to_cpu(uhci, element));
	if (qh->type == USB_ENDPOINT_XFER_ISOC)
		out += sprintf(out, "%*s    period %d phase %d load %d us, "
				"frame %x desc [%p]\n",
				space, "", qh->period, qh->phase, qh->load,
				qh->iso_frame, qh->iso_packet_desc);
	else if (qh->type == USB_ENDPOINT_XFER_INT)
		out += sprintf(out, "%*s    period %d phase %d load %d us\n",
				space, "", qh->period, qh->phase, qh->load);

	if (element & UHCI_PTR_QH(uhci))
		out += sprintf(out, "%*s  Element points to QH (bug?)\n",
				space, "");

	if (element & UHCI_PTR_DEPTH(uhci))
		out += sprintf(out, "%*s  Depth traverse\n", space, "");

	if (element & cpu_to_hc32(uhci, 8))
		out += sprintf(out, "%*s  Bit 3 set (bug?)\n", space, "");

	if (!(element & ~(UHCI_PTR_QH(uhci) | UHCI_PTR_DEPTH(uhci))))
		out += sprintf(out, "%*s  Element is NULL (bug?)\n",
				space, "");

	if (list_empty(&qh->queue)) {
		out += sprintf(out, "%*s  queue is empty\n", space, "");
		if (qh == uhci->skel_async_qh)
			out += uhci_show_td(uhci, uhci->term_td, out,
					len - (out - buf), 0);
	} else {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		if (element != LINK_TO_TD(uhci, td))
			out += sprintf(out, "%*s Element != First TD\n",
					space, "");
		i = nurbs = 0;
		list_for_each_entry(urbp, &qh->queue, node) {
			if (++i <= 10)
				out += uhci_show_urbp(uhci, urbp, out,
						len - (out - buf), space + 2);
			else
				++nurbs;
		}
		if (nurbs > 0)
			out += sprintf(out, "%*s Skipped %d URBs\n",
					space, "", nurbs);
	}

	if (qh->dummy_td) {
		out += sprintf(out, "%*s  Dummy TD\n", space, "");
		out += uhci_show_td(uhci, qh->dummy_td, out,
				len - (out - buf), 0);
	}

	return out - buf;
}

/* Decode one PORTSC register's status bits into readable text. */
static int uhci_show_sc(int port, unsigned short status, char *buf, int len)
{
	char *out = buf;

	/* Try to make sure there's enough memory */
	if (len < 160)
		return 0;

	out += sprintf(out, "  stat%d     =     %04x  %s%s%s%s%s%s%s%s%s%s\n",
		port,
		status,
		(status & USBPORTSC_SUSP) ?	" Suspend" : "",
		(status & USBPORTSC_OCC) ?	" OverCurrentChange" : "",
		(status & USBPORTSC_OC) ?	" OverCurrent" : "",
		(status & USBPORTSC_PR) ?	" Reset" : "",
		(status & USBPORTSC_LSDA) ?	" LowSpeed" : "",
		(status & USBPORTSC_RD) ?	" ResumeDetect" : "",
		(status & USBPORTSC_PEC) ?	" EnableChange" : "",
		(status & USBPORTSC_PE) ?	" Enabled" : "",
		(status & USBPORTSC_CSC) ?	" ConnectChange" : "",
		(status & USBPORTSC_CCS) ?	" Connected" : "");

	return out - buf;
}

/* Print the root hub state machine's current state and the FSBR flag. */
static int uhci_show_root_hub_state(struct uhci_hcd *uhci, char *buf, int len)
{
	char *out = buf;
	char *rh_state;

	/* Try to make sure there's enough memory */
	if (len < 60)
		return 0;

	switch (uhci->rh_state) {
	case UHCI_RH_RESET:
		rh_state = "reset";
		break;
	case UHCI_RH_SUSPENDED:
		rh_state = "suspended";
		break;
	case UHCI_RH_AUTO_STOPPED:
		rh_state = "auto-stopped";
		break;
	case UHCI_RH_RESUMING:
		rh_state = "resuming";
		break;
	case UHCI_RH_SUSPENDING:
		rh_state = "suspending";
		break;
	case UHCI_RH_RUNNING:
		rh_state = "running";
		break;
	case UHCI_RH_RUNNING_NODEVS:
		rh_state = "running, no devs";
		break;
	default:
		rh_state = "?";
		break;
	}

	out += sprintf(out, "Root-hub state: %s   FSBR: %d\n",
			rh_state, uhci->fsbr_is_on);
	return out - buf;
}

/* Read and decode the controller's I/O registers (CMD/STS/ports/...). */
static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len)
{
	char *out = buf;
	unsigned short usbcmd, usbstat, usbint, usbfrnum;
	unsigned int flbaseadd;
	unsigned char sof;
	unsigned short portsc1, portsc2;

	/* Try to make sure there's enough memory */
	if (len < 80 * 9)
		return 0;

	usbcmd    = uhci_readw(uhci, 0);
	usbstat   = uhci_readw(uhci, 2);
	usbint    = uhci_readw(uhci, 4);
	usbfrnum  = uhci_readw(uhci, 6);
	flbaseadd = uhci_readl(uhci, 8);
	sof       = uhci_readb(uhci, 12);
	portsc1   = uhci_readw(uhci, 16);
	portsc2   = uhci_readw(uhci, 18);

	out += sprintf(out, "  usbcmd    =     %04x   %s%s%s%s%s%s%s%s\n",
		usbcmd,
		(usbcmd & USBCMD_MAXP) ?    "Maxp64 " : "Maxp32 ",
		(usbcmd & USBCMD_CF) ?      "CF " : "",
		(usbcmd & USBCMD_SWDBG) ?   "SWDBG " : "",
		(usbcmd & USBCMD_FGR) ?     "FGR " : "",
		(usbcmd & USBCMD_EGSM) ?    "EGSM " : "",
		(usbcmd & USBCMD_GRESET) ?  "GRESET " : "",
		(usbcmd & USBCMD_HCRESET) ? "HCRESET " : "",
		(usbcmd & USBCMD_RS) ?      "RS " : "");

	out += sprintf(out, "  usbstat   =     %04x   %s%s%s%s%s%s\n",
		usbstat,
		(usbstat & USBSTS_HCH) ?    "HCHalted " : "",
		(usbstat & USBSTS_HCPE) ?   "HostControllerProcessError " : "",
		(usbstat & USBSTS_HSE) ?    "HostSystemError " : "",
		(usbstat & USBSTS_RD) ?     "ResumeDetect " : "",
		(usbstat & USBSTS_ERROR) ?  "USBError " : "",
		(usbstat & USBSTS_USBINT) ? "USBINT " : "");

	out += sprintf(out, "  usbint    =     %04x\n", usbint);
	out += sprintf(out, "  usbfrnum  =   (%d)%03x\n", (usbfrnum >> 10) & 1,
			0xfff & (4*(unsigned int)usbfrnum));
	out += sprintf(out, "  flbaseadd = %08x\n", flbaseadd);
	out += sprintf(out, "  sof       =       %02x\n", sof);
	out += uhci_show_sc(1, portsc1, out, len - (out - buf));
	out += uhci_show_sc(2, portsc2, out, len - (out - buf));
	out += sprintf(out,
			"Most recent frame: %x (%d)   "
			"Last ISO frame: %x (%d)\n",
			uhci->frame_number, uhci->frame_number & 1023,
			uhci->last_iso_frame, uhci->last_iso_frame & 1023);

	return out - buf;
}

/*
 * Dump the entire schedule: root-hub state, register status, periodic
 * load table, the frame list (cross-checked against the TD links), and
 * the skeleton QH chains (cross-checked against the expected links).
 * The frame list and QH chains are only dumped when debug > 1.
 */
static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
{
	char *out = buf;
	int i, j;
	struct uhci_qh *qh;
	struct uhci_td *td;
	struct list_head *tmp, *head;
	int nframes, nerrs;
	__hc32 link;
	__hc32 fsbr_link;

	static const char * const qh_names[] = {
		"unlink", "iso", "int128",
		"int64", "int32", "int16",
		"int8", "int4", "int2",
		"async", "term"
	};

	out += uhci_show_root_hub_state(uhci, out, len - (out - buf));
	out += sprintf(out, "HC status\n");
	out += uhci_show_status(uhci, out, len - (out - buf));

	out += sprintf(out, "Periodic load table\n");
	for (i = 0; i < MAX_PHASE; ++i) {
		out += sprintf(out, "\t%d", uhci->load[i]);
		if (i % 8 == 7)
			*out++ = '\n';
	}
	out += sprintf(out, "Total: %d, #INT: %d, #ISO: %d\n",
			uhci->total_load,
			uhci_to_hcd(uhci)->self.bandwidth_int_reqs,
			uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs);
	if (debug <= 1)
		return out - buf;

	out += sprintf(out, "Frame List\n");
	nframes = 10;	/* Dump at most 10 frames in full detail */
	nerrs = 0;
	for (i = 0; i < UHCI_NUMFRAMES; ++i) {
		__hc32 qh_dma;

		j = 0;
		td = uhci->frame_cpu[i];
		link = uhci->frame[i];
		if (!td)
			goto check_link;

		if (nframes > 0) {
			out += sprintf(out, "- Frame %d -> (%08x)\n",
					i, hc32_to_cpu(uhci, link));
			j = 1;
		}

		head = &td->fl_list;
		tmp = head;
		do {
			td = list_entry(tmp, struct uhci_td, fl_list);
			tmp = tmp->next;
			if (link != LINK_TO_TD(uhci, td)) {
				if (nframes > 0)
					out += sprintf(out,
						"    link does not match "
						"list entry!\n");
				else
					++nerrs;
			}
			if (nframes > 0)
				out += uhci_show_td(uhci, td, out,
						len - (out - buf), 4);
			link = td->link;
		} while (tmp != head);

check_link:
		qh_dma = uhci_frame_skel_link(uhci, i);
		if (link != qh_dma) {
			if (nframes > 0) {
				if (!j) {
					out += sprintf(out,
						"- Frame %d -> (%08x)\n",
						i, hc32_to_cpu(uhci, link));
					j = 1;
				}
				out += sprintf(out,
					"    link does not match "
					"QH (%08x)!\n",
					hc32_to_cpu(uhci, qh_dma));
			} else
				++nerrs;
		}
		nframes -= j;
	}
	if (nerrs > 0)
		out += sprintf(out, "Skipped %d bad links\n", nerrs);

	out += sprintf(out, "Skeleton QHs\n");

	fsbr_link = 0;
	for (i = 0; i < UHCI_NUM_SKELQH; ++i) {
		int cnt = 0;

		qh = uhci->skelqh[i];
		/* NOTE(review): the original had a stray '\' line
		 * continuation after this statement; removed. */
		out += sprintf(out, "- skel_%s_qh\n", qh_names[i]);
		out += uhci_show_qh(uhci, qh, out, len - (out - buf), 4);

		/* Last QH is the Terminating QH, it's different */
		if (i == SKEL_TERM) {
			if (qh_element(qh) != LINK_TO_TD(uhci, uhci->term_td))
				out += sprintf(out,
					"    skel_term_qh element is not set to term_td!\n");
			link = fsbr_link;
			if (!link)
				link = LINK_TO_QH(uhci, uhci->skel_term_qh);
			goto check_qh_link;
		}

		head = &qh->node;
		tmp = head->next;

		while (tmp != head) {
			qh = list_entry(tmp, struct uhci_qh, node);
			tmp = tmp->next;
			if (++cnt <= 10)
				out += uhci_show_qh(uhci, qh, out,
						len - (out - buf), 4);
			if (!fsbr_link && qh->skel >= SKEL_FSBR)
				fsbr_link = LINK_TO_QH(uhci, qh);
		}
		if ((cnt -= 10) > 0)
			out += sprintf(out, "    Skipped %d QHs\n", cnt);

		link = UHCI_PTR_TERM(uhci);
		if (i <= SKEL_ISO)
			;
		else if (i < SKEL_ASYNC)
			link = LINK_TO_QH(uhci, uhci->skel_async_qh);
		else if (!uhci->fsbr_is_on)
			;
		else
			link = LINK_TO_QH(uhci, uhci->skel_term_qh);
check_qh_link:
		if (qh->link != link)
			out += sprintf(out,
				"    last QH not linked to next skeleton!\n");
	}

	return out - buf;
}

#ifdef CONFIG_DEBUG_FS

#define MAX_OUTPUT	(64 * 1024)

/* Snapshot of the schedule dump, captured at open() time. */
struct uhci_debug {
	int size;
	char *data;
};

/* Capture the schedule into a private buffer under the HCD lock. */
static int uhci_debug_open(struct inode *inode, struct file *file)
{
	struct uhci_hcd *uhci = inode->i_private;
	struct uhci_debug *up;
	unsigned long flags;

	up = kmalloc(sizeof(*up), GFP_KERNEL);
	if (!up)
		return -ENOMEM;

	up->data = kmalloc(MAX_OUTPUT, GFP_KERNEL);
	if (!up->data) {
		kfree(up);
		return -ENOMEM;
	}

	up->size = 0;
	spin_lock_irqsave(&uhci->lock, flags);
	if (uhci->is_initialized)
		up->size = uhci_sprint_schedule(uhci, up->data, MAX_OUTPUT);
	spin_unlock_irqrestore(&uhci->lock, flags);

	file->private_data = up;

	return 0;
}

static loff_t uhci_debug_lseek(struct file *file, loff_t off, int whence)
{
	struct uhci_debug *up;
	loff_t new = -1;

	up = file->private_data;

	/* XXX: atomic 64bit seek access, but that needs to be fixed in the
	 * VFS */
	switch (whence) {
	case 0:			/* SEEK_SET */
		new = off;
		break;
	case 1:			/* SEEK_CUR */
		new = file->f_pos + off;
		break;
	}

	if (new < 0 || new > up->size)
		return -EINVAL;

	return (file->f_pos = new);
}

static ssize_t uhci_debug_read(struct file *file, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	struct uhci_debug *up = file->private_data;
	return simple_read_from_buffer(buf, nbytes, ppos, up->data, up->size);
}

/* Free the snapshot captured at open() time. */
static int uhci_debug_release(struct inode *inode, struct file *file)
{
	struct uhci_debug *up = file->private_data;

	kfree(up->data);
	kfree(up);

	return 0;
}

static const struct file_operations uhci_debug_operations = {
	.owner = THIS_MODULE,
	.open = uhci_debug_open,
	.llseek = uhci_debug_lseek,
	.read = uhci_debug_read,
	.release = uhci_debug_release,
};
#define UHCI_DEBUG_OPS

#endif	/* CONFIG_DEBUG_FS */

#else	/* DEBUG */

/* Stubs so callers compile unchanged when debugging is disabled. */
static inline void lprintk(char *buf)
{}

static inline int uhci_show_qh(struct uhci_hcd *uhci,
		struct uhci_qh *qh, char *buf, int len, int space)
{
	return 0;
}

static inline int uhci_sprint_schedule(struct uhci_hcd *uhci,
		char *buf, int len)
{
	return 0;
}

#endif
gpl-2.0
qubir/PhoenixA20_linux_sourcecode
drivers/mfd/wm8350-gpio.c
10425
6211
/*
 * wm8350-gpio.c -- GPIO access for Wolfson WM8350
 * (original header said "wm8350-core.c"; corrected to match this file)
 *
 * Copyright 2007, 2008 Wolfson Microelectronics PLC.
 *
 * Author: Liam Girdwood
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mfd/wm8350/core.h>
#include <linux/mfd/wm8350/gpio.h>
#include <linux/mfd/wm8350/pmic.h>

/*
 * Set the direction of one GPIO pin.  The direction register is
 * write-protected, so the chip's register lock is dropped around the
 * read-modify-write and re-taken afterwards.
 * Clearing the bit selects output, setting it selects input.
 */
static int gpio_set_dir(struct wm8350 *wm8350, int gpio, int dir)
{
	int ret;

	wm8350_reg_unlock(wm8350);
	if (dir == WM8350_GPIO_DIR_OUT)
		ret = wm8350_clear_bits(wm8350,
					WM8350_GPIO_CONFIGURATION_I_O,
					1 << gpio);
	else
		ret = wm8350_set_bits(wm8350,
				      WM8350_GPIO_CONFIGURATION_I_O,
				      1 << gpio);
	wm8350_reg_lock(wm8350);
	return ret;
}

/* Enable or disable hardware debounce on one GPIO (one bit per pin). */
static int wm8350_gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db)
{
	if (db == WM8350_GPIO_DEBOUNCE_ON)
		return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE,
				       1 << gpio);
	else
		return wm8350_clear_bits(wm8350,
					 WM8350_GPIO_DEBOUNCE, 1 << gpio);
}

/*
 * Select the alternate function for one GPIO pin.
 * Each pin's function is a 4-bit field; four pins share each of the four
 * FUNCTION_SELECT registers (GP0-3 in reg 1, GP4-7 in reg 2, GP8-11 in
 * reg 3, GP12 in reg 4).  The registers are write-protected, so the
 * register lock is dropped for the read-modify-write.
 * Returns 0 on success or -EINVAL for an out-of-range gpio number.
 */
static int gpio_set_func(struct wm8350 *wm8350, int gpio, int func)
{
	u16 reg;

	wm8350_reg_unlock(wm8350);
	switch (gpio) {
	case 0:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1)
		    & ~WM8350_GP0_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1,
				 reg | ((func & 0xf) << 0));
		break;
	case 1:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1)
		    & ~WM8350_GP1_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1,
				 reg | ((func & 0xf) << 4));
		break;
	case 2:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1)
		    & ~WM8350_GP2_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1,
				 reg | ((func & 0xf) << 8));
		break;
	case 3:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1)
		    & ~WM8350_GP3_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1,
				 reg | ((func & 0xf) << 12));
		break;
	case 4:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2)
		    & ~WM8350_GP4_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2,
				 reg | ((func & 0xf) << 0));
		break;
	case 5:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2)
		    & ~WM8350_GP5_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2,
				 reg | ((func & 0xf) << 4));
		break;
	case 6:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2)
		    & ~WM8350_GP6_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2,
				 reg | ((func & 0xf) << 8));
		break;
	case 7:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2)
		    & ~WM8350_GP7_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2,
				 reg | ((func & 0xf) << 12));
		break;
	case 8:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3)
		    & ~WM8350_GP8_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3,
				 reg | ((func & 0xf) << 0));
		break;
	case 9:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3)
		    & ~WM8350_GP9_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3,
				 reg | ((func & 0xf) << 4));
		break;
	case 10:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3)
		    & ~WM8350_GP10_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3,
				 reg | ((func & 0xf) << 8));
		break;
	case 11:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3)
		    & ~WM8350_GP11_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3,
				 reg | ((func & 0xf) << 12));
		break;
	case 12:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_4)
		    & ~WM8350_GP12_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_4,
				 reg | ((func & 0xf) << 0));
		break;
	default:
		/* re-lock before bailing out on an invalid gpio number */
		wm8350_reg_lock(wm8350);
		return -EINVAL;
	}

	wm8350_reg_lock(wm8350);
	return 0;
}

/* Enable (up != 0) or disable the pull-up on one GPIO pin. */
static int gpio_set_pull_up(struct wm8350 *wm8350, int gpio, int up)
{
	if (up)
		return wm8350_set_bits(wm8350,
				       WM8350_GPIO_PIN_PULL_UP_CONTROL,
				       1 << gpio);
	else
		return wm8350_clear_bits(wm8350,
					 WM8350_GPIO_PIN_PULL_UP_CONTROL,
					 1 << gpio);
}
static int gpio_set_pull_down(struct wm8350 *wm8350, int gpio, int down) { if (down) return wm8350_set_bits(wm8350, WM8350_GPIO_PULL_DOWN_CONTROL, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_PULL_DOWN_CONTROL, 1 << gpio); } static int gpio_set_polarity(struct wm8350 *wm8350, int gpio, int pol) { if (pol == WM8350_GPIO_ACTIVE_HIGH) return wm8350_set_bits(wm8350, WM8350_GPIO_PIN_POLARITY_TYPE, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_PIN_POLARITY_TYPE, 1 << gpio); } static int gpio_set_invert(struct wm8350 *wm8350, int gpio, int invert) { if (invert == WM8350_GPIO_INVERT_ON) return wm8350_set_bits(wm8350, WM8350_GPIO_INT_MODE, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_INT_MODE, 1 << gpio); } int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func, int pol, int pull, int invert, int debounce) { /* make sure we never pull up and down at the same time */ if (pull == WM8350_GPIO_PULL_NONE) { if (gpio_set_pull_up(wm8350, gpio, 0)) goto err; if (gpio_set_pull_down(wm8350, gpio, 0)) goto err; } else if (pull == WM8350_GPIO_PULL_UP) { if (gpio_set_pull_down(wm8350, gpio, 0)) goto err; if (gpio_set_pull_up(wm8350, gpio, 1)) goto err; } else if (pull == WM8350_GPIO_PULL_DOWN) { if (gpio_set_pull_up(wm8350, gpio, 0)) goto err; if (gpio_set_pull_down(wm8350, gpio, 1)) goto err; } if (gpio_set_invert(wm8350, gpio, invert)) goto err; if (gpio_set_polarity(wm8350, gpio, pol)) goto err; if (wm8350_gpio_set_debounce(wm8350, gpio, debounce)) goto err; if (gpio_set_dir(wm8350, gpio, dir)) goto err; return gpio_set_func(wm8350, gpio, func); err: return -EIO; } EXPORT_SYMBOL_GPL(wm8350_gpio_config);
gpl-2.0