repo_name
string
path
string
copies
string
size
string
content
string
license
string
Ateeq72/weekly-kernel
drivers/usb/gadget/dbgp.c
2370
8517
/* * dbgp.c -- EHCI Debug Port device gadget * * Copyright (C) 2010 Stephane Duverger * * Released under the GPLv2. * */ /* verbose messages */ #include <linux/kernel.h> #include <linux/device.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> /* See comments in "zero.c" */ #include "epautoconf.c" #ifdef CONFIG_USB_G_DBGP_SERIAL #include "u_serial.c" #endif #define DRIVER_VENDOR_ID 0x0525 /* NetChip */ #define DRIVER_PRODUCT_ID 0xc0de /* undefined */ #define USB_DEBUG_MAX_PACKET_SIZE 8 #define DBGP_REQ_EP0_LEN 128 #define DBGP_REQ_LEN 512 static struct dbgp { struct usb_gadget *gadget; struct usb_request *req; struct usb_ep *i_ep; struct usb_ep *o_ep; #ifdef CONFIG_USB_G_DBGP_SERIAL struct gserial *serial; #endif } dbgp; static struct usb_device_descriptor device_desc = { .bLength = sizeof device_desc, .bDescriptorType = USB_DT_DEVICE, .bcdUSB = __constant_cpu_to_le16(0x0200), .bDeviceClass = USB_CLASS_VENDOR_SPEC, .idVendor = __constant_cpu_to_le16(DRIVER_VENDOR_ID), .idProduct = __constant_cpu_to_le16(DRIVER_PRODUCT_ID), .bNumConfigurations = 1, }; static struct usb_debug_descriptor dbg_desc = { .bLength = sizeof dbg_desc, .bDescriptorType = USB_DT_DEBUG, }; static struct usb_endpoint_descriptor i_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .bEndpointAddress = USB_DIR_IN, }; static struct usb_endpoint_descriptor o_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .bEndpointAddress = USB_DIR_OUT, }; #ifdef CONFIG_USB_G_DBGP_PRINTK static int dbgp_consume(char *buf, unsigned len) { char c; if (!len) return 0; c = buf[len-1]; if (c != 0) buf[len-1] = 0; printk(KERN_NOTICE "%s%c", buf, c); return 0; } static void __disable_ep(struct usb_ep *ep) { if (ep && ep->driver_data == dbgp.gadget) { usb_ep_disable(ep); ep->driver_data = NULL; } } static void dbgp_disable_ep(void) { __disable_ep(dbgp.i_ep); 
__disable_ep(dbgp.o_ep); } static void dbgp_complete(struct usb_ep *ep, struct usb_request *req) { int stp; int err = 0; int status = req->status; if (ep == dbgp.i_ep) { stp = 1; goto fail; } if (status != 0) { stp = 2; goto release_req; } dbgp_consume(req->buf, req->actual); req->length = DBGP_REQ_LEN; err = usb_ep_queue(ep, req, GFP_ATOMIC); if (err < 0) { stp = 3; goto release_req; } return; release_req: kfree(req->buf); usb_ep_free_request(dbgp.o_ep, req); dbgp_disable_ep(); fail: dev_dbg(&dbgp.gadget->dev, "complete: failure (%d:%d) ==> %d\n", stp, err, status); } static int dbgp_enable_ep_req(struct usb_ep *ep) { int err, stp; struct usb_request *req; req = usb_ep_alloc_request(ep, GFP_KERNEL); if (!req) { err = -ENOMEM; stp = 1; goto fail_1; } req->buf = kmalloc(DBGP_REQ_LEN, GFP_KERNEL); if (!req->buf) { err = -ENOMEM; stp = 2; goto fail_2; } req->complete = dbgp_complete; req->length = DBGP_REQ_LEN; err = usb_ep_queue(ep, req, GFP_ATOMIC); if (err < 0) { stp = 3; goto fail_3; } return 0; fail_3: kfree(req->buf); fail_2: usb_ep_free_request(dbgp.o_ep, req); fail_1: dev_dbg(&dbgp.gadget->dev, "enable ep req: failure (%d:%d)\n", stp, err); return err; } static int __enable_ep(struct usb_ep *ep, struct usb_endpoint_descriptor *desc) { int err = usb_ep_enable(ep, desc); ep->driver_data = dbgp.gadget; return err; } static int dbgp_enable_ep(void) { int err, stp; err = __enable_ep(dbgp.i_ep, &i_desc); if (err < 0) { stp = 1; goto fail_1; } err = __enable_ep(dbgp.o_ep, &o_desc); if (err < 0) { stp = 2; goto fail_2; } err = dbgp_enable_ep_req(dbgp.o_ep); if (err < 0) { stp = 3; goto fail_3; } return 0; fail_3: __disable_ep(dbgp.o_ep); fail_2: __disable_ep(dbgp.i_ep); fail_1: dev_dbg(&dbgp.gadget->dev, "enable ep: failure (%d:%d)\n", stp, err); return err; } #endif static void dbgp_disconnect(struct usb_gadget *gadget) { #ifdef CONFIG_USB_G_DBGP_PRINTK dbgp_disable_ep(); #else gserial_disconnect(dbgp.serial); #endif } static void dbgp_unbind(struct usb_gadget 
*gadget) { #ifdef CONFIG_USB_G_DBGP_SERIAL kfree(dbgp.serial); #endif if (dbgp.req) { kfree(dbgp.req->buf); usb_ep_free_request(gadget->ep0, dbgp.req); } gadget->ep0->driver_data = NULL; } static int __init dbgp_configure_endpoints(struct usb_gadget *gadget) { int stp; usb_ep_autoconfig_reset(gadget); dbgp.i_ep = usb_ep_autoconfig(gadget, &i_desc); if (!dbgp.i_ep) { stp = 1; goto fail_1; } dbgp.i_ep->driver_data = gadget; i_desc.wMaxPacketSize = __constant_cpu_to_le16(USB_DEBUG_MAX_PACKET_SIZE); dbgp.o_ep = usb_ep_autoconfig(gadget, &o_desc); if (!dbgp.o_ep) { dbgp.i_ep->driver_data = NULL; stp = 2; goto fail_2; } dbgp.o_ep->driver_data = gadget; o_desc.wMaxPacketSize = __constant_cpu_to_le16(USB_DEBUG_MAX_PACKET_SIZE); dbg_desc.bDebugInEndpoint = i_desc.bEndpointAddress; dbg_desc.bDebugOutEndpoint = o_desc.bEndpointAddress; #ifdef CONFIG_USB_G_DBGP_SERIAL dbgp.serial->in = dbgp.i_ep; dbgp.serial->out = dbgp.o_ep; dbgp.serial->in_desc = &i_desc; dbgp.serial->out_desc = &o_desc; if (gserial_setup(gadget, 1) < 0) { stp = 3; goto fail_3; } return 0; fail_3: dbgp.o_ep->driver_data = NULL; #else return 0; #endif fail_2: dbgp.i_ep->driver_data = NULL; fail_1: dev_dbg(&dbgp.gadget->dev, "ep config: failure (%d)\n", stp); return -ENODEV; } static int __init dbgp_bind(struct usb_gadget *gadget) { int err, stp; dbgp.gadget = gadget; dbgp.req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL); if (!dbgp.req) { err = -ENOMEM; stp = 1; goto fail; } dbgp.req->buf = kmalloc(DBGP_REQ_EP0_LEN, GFP_KERNEL); if (!dbgp.req->buf) { err = -ENOMEM; stp = 2; goto fail; } dbgp.req->length = DBGP_REQ_EP0_LEN; gadget->ep0->driver_data = gadget; device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket; #ifdef CONFIG_USB_G_DBGP_SERIAL dbgp.serial = kzalloc(sizeof(struct gserial), GFP_KERNEL); if (!dbgp.serial) { stp = 3; err = -ENOMEM; goto fail; } #endif err = dbgp_configure_endpoints(gadget); if (err < 0) { stp = 4; goto fail; } dev_dbg(&dbgp.gadget->dev, "bind: success\n"); return 0; fail: 
dev_dbg(&gadget->dev, "bind: failure (%d:%d)\n", stp, err); dbgp_unbind(gadget); return err; } static void dbgp_setup_complete(struct usb_ep *ep, struct usb_request *req) { dev_dbg(&dbgp.gadget->dev, "setup complete: %d, %d/%d\n", req->status, req->actual, req->length); } static int dbgp_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) { struct usb_request *req = dbgp.req; u8 request = ctrl->bRequest; u16 value = le16_to_cpu(ctrl->wValue); u16 length = le16_to_cpu(ctrl->wLength); int err = -EOPNOTSUPP; void *data = NULL; u16 len = 0; gadget->ep0->driver_data = gadget; if (request == USB_REQ_GET_DESCRIPTOR) { switch (value>>8) { case USB_DT_DEVICE: dev_dbg(&dbgp.gadget->dev, "setup: desc device\n"); len = sizeof device_desc; data = &device_desc; break; case USB_DT_DEBUG: dev_dbg(&dbgp.gadget->dev, "setup: desc debug\n"); len = sizeof dbg_desc; data = &dbg_desc; break; default: goto fail; } err = 0; } else if (request == USB_REQ_SET_FEATURE && value == USB_DEVICE_DEBUG_MODE) { dev_dbg(&dbgp.gadget->dev, "setup: feat debug\n"); #ifdef CONFIG_USB_G_DBGP_PRINTK err = dbgp_enable_ep(); #else err = gserial_connect(dbgp.serial, 0); #endif if (err < 0) goto fail; } else goto fail; req->length = min(length, len); req->zero = len < req->length; if (data && req->length) memcpy(req->buf, data, req->length); req->complete = dbgp_setup_complete; return usb_ep_queue(gadget->ep0, req, GFP_ATOMIC); fail: dev_dbg(&dbgp.gadget->dev, "setup: failure req %x v %x\n", request, value); return err; } static struct usb_gadget_driver dbgp_driver = { .function = "dbgp", .speed = USB_SPEED_HIGH, .unbind = dbgp_unbind, .setup = dbgp_setup, .disconnect = dbgp_disconnect, .driver = { .owner = THIS_MODULE, .name = "dbgp" }, }; static int __init dbgp_init(void) { return usb_gadget_probe_driver(&dbgp_driver, dbgp_bind); } static void __exit dbgp_exit(void) { usb_gadget_unregister_driver(&dbgp_driver); #ifdef CONFIG_USB_G_DBGP_SERIAL gserial_cleanup(); #endif } 
MODULE_AUTHOR("Stephane Duverger"); MODULE_LICENSE("GPL"); module_init(dbgp_init); module_exit(dbgp_exit);
gpl-2.0
FrancescoCG/CrazySuperKernel-TW-MM-KLTE
fs/ext3/super.c
3138
85488
/* * linux/fs/ext3/super.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/parser.h> #include <linux/exportfs.h> #include <linux/statfs.h> #include <linux/random.h> #include <linux/mount.h> #include <linux/quotaops.h> #include <linux/seq_file.h> #include <linux/log2.h> #include <linux/cleancache.h> #include <asm/uaccess.h> #define CREATE_TRACE_POINTS #include "ext3.h" #include "xattr.h" #include "acl.h" #include "namei.h" #ifdef CONFIG_EXT3_DEFAULTS_TO_ORDERED #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_ORDERED_DATA #else #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_WRITEBACK_DATA #endif static int ext3_load_journal(struct super_block *, struct ext3_super_block *, unsigned long journal_devnum); static int ext3_create_journal(struct super_block *, struct ext3_super_block *, unsigned int); static int ext3_commit_super(struct super_block *sb, struct ext3_super_block *es, int sync); static void ext3_mark_recovery_complete(struct super_block * sb, struct ext3_super_block * es); static void ext3_clear_journal_err(struct super_block * sb, struct ext3_super_block * es); static int ext3_sync_fs(struct super_block *sb, int wait); static const char *ext3_decode_error(struct super_block * sb, int errno, char nbuf[16]); static int ext3_remount (struct super_block * sb, int * flags, char * data); static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf); static int ext3_unfreeze(struct super_block *sb); static int ext3_freeze(struct super_block *sb); /* * Wrappers for journal_start/end. 
* * The only special thing we need to do here is to make sure that all * journal_end calls result in the superblock being marked dirty, so * that sync() will call the filesystem's write_super callback if * appropriate. */ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks) { journal_t *journal; if (sb->s_flags & MS_RDONLY) return ERR_PTR(-EROFS); /* Special case here: if the journal has aborted behind our * backs (eg. EIO in the commit thread), then we still need to * take the FS itself readonly cleanly. */ journal = EXT3_SB(sb)->s_journal; if (is_journal_aborted(journal)) { ext3_abort(sb, __func__, "Detected aborted journal"); return ERR_PTR(-EROFS); } return journal_start(journal, nblocks); } /* * The only special thing we need to do here is to make sure that all * journal_stop calls result in the superblock being marked dirty, so * that sync() will call the filesystem's write_super callback if * appropriate. */ int __ext3_journal_stop(const char *where, handle_t *handle) { struct super_block *sb; int err; int rc; sb = handle->h_transaction->t_journal->j_private; err = handle->h_err; rc = journal_stop(handle); if (!err) err = rc; if (err) __ext3_std_error(sb, where, err); return err; } void ext3_journal_abort_handle(const char *caller, const char *err_fn, struct buffer_head *bh, handle_t *handle, int err) { char nbuf[16]; const char *errstr = ext3_decode_error(NULL, err, nbuf); if (bh) BUFFER_TRACE(bh, "abort"); if (!handle->h_err) handle->h_err = err; if (is_handle_aborted(handle)) return; printk(KERN_ERR "EXT3-fs: %s: aborting transaction: %s in %s\n", caller, errstr, err_fn); journal_abort_handle(handle); } void ext3_msg(struct super_block *sb, const char *prefix, const char *fmt, ...) 
{ struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk("%sEXT3-fs (%s): %pV\n", prefix, sb->s_id, &vaf); va_end(args); } /* Deal with the reporting of failure conditions on a filesystem such as * inconsistencies detected or read IO failures. * * On ext2, we can store the error state of the filesystem in the * superblock. That is not possible on ext3, because we may have other * write ordering constraints on the superblock which prevent us from * writing it out straight away; and given that the journal is about to * be aborted, we can't rely on the current, or future, transactions to * write out the superblock safely. * * We'll just use the journal_abort() error code to record an error in * the journal instead. On recovery, the journal will complain about * that error until we've noted it down and cleared it. */ static void ext3_handle_error(struct super_block *sb) { struct ext3_super_block *es = EXT3_SB(sb)->s_es; EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS; es->s_state |= cpu_to_le16(EXT3_ERROR_FS); if (sb->s_flags & MS_RDONLY) return; if (!test_opt (sb, ERRORS_CONT)) { journal_t *journal = EXT3_SB(sb)->s_journal; set_opt(EXT3_SB(sb)->s_mount_opt, ABORT); if (journal) journal_abort(journal, -EIO); } if (test_opt (sb, ERRORS_RO)) { ext3_msg(sb, KERN_CRIT, "error: remounting filesystem read-only"); sb->s_flags |= MS_RDONLY; } ext3_commit_super(sb, es, 1); if (test_opt(sb, ERRORS_PANIC)) panic("EXT3-fs (%s): panic forced after error\n", sb->s_id); } void ext3_error(struct super_block *sb, const char *function, const char *fmt, ...) 
{ struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT3-fs error (device %s): %s: %pV\n", sb->s_id, function, &vaf); va_end(args); ext3_handle_error(sb); } static const char *ext3_decode_error(struct super_block * sb, int errno, char nbuf[16]) { char *errstr = NULL; switch (errno) { case -EIO: errstr = "IO failure"; break; case -ENOMEM: errstr = "Out of memory"; break; case -EROFS: if (!sb || EXT3_SB(sb)->s_journal->j_flags & JFS_ABORT) errstr = "Journal has aborted"; else errstr = "Readonly filesystem"; break; default: /* If the caller passed in an extra buffer for unknown * errors, textualise them now. Else we just return * NULL. */ if (nbuf) { /* Check for truncated error codes... */ if (snprintf(nbuf, 16, "error %d", -errno) >= 0) errstr = nbuf; } break; } return errstr; } /* __ext3_std_error decodes expected errors from journaling functions * automatically and invokes the appropriate error response. */ void __ext3_std_error (struct super_block * sb, const char * function, int errno) { char nbuf[16]; const char *errstr; /* Special case: if the error is EROFS, and we're not already * inside a transaction, then there's really no point in logging * an error. */ if (errno == -EROFS && journal_current_handle() == NULL && (sb->s_flags & MS_RDONLY)) return; errstr = ext3_decode_error(sb, errno, nbuf); ext3_msg(sb, KERN_CRIT, "error in %s: %s", function, errstr); ext3_handle_error(sb); } /* * ext3_abort is a much stronger failure handler than ext3_error. The * abort function may be used to deal with unrecoverable failures such * as journal IO errors or ENOMEM at a critical moment in log management. * * We unconditionally force the filesystem into an ABORT|READONLY state, * unless the error response on the fs has been set to panic in which * case we take the easy way out and panic immediately. */ void ext3_abort(struct super_block *sb, const char *function, const char *fmt, ...) 
{ struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT3-fs (%s): error: %s: %pV\n", sb->s_id, function, &vaf); va_end(args); if (test_opt(sb, ERRORS_PANIC)) panic("EXT3-fs: panic from previous error\n"); if (sb->s_flags & MS_RDONLY) return; ext3_msg(sb, KERN_CRIT, "error: remounting filesystem read-only"); EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS; sb->s_flags |= MS_RDONLY; set_opt(EXT3_SB(sb)->s_mount_opt, ABORT); if (EXT3_SB(sb)->s_journal) journal_abort(EXT3_SB(sb)->s_journal, -EIO); } void ext3_warning(struct super_block *sb, const char *function, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_WARNING "EXT3-fs (%s): warning: %s: %pV\n", sb->s_id, function, &vaf); va_end(args); } void ext3_update_dynamic_rev(struct super_block *sb) { struct ext3_super_block *es = EXT3_SB(sb)->s_es; if (le32_to_cpu(es->s_rev_level) > EXT3_GOOD_OLD_REV) return; ext3_msg(sb, KERN_WARNING, "warning: updating to rev %d because of " "new feature flag, running e2fsck is recommended", EXT3_DYNAMIC_REV); es->s_first_ino = cpu_to_le32(EXT3_GOOD_OLD_FIRST_INO); es->s_inode_size = cpu_to_le16(EXT3_GOOD_OLD_INODE_SIZE); es->s_rev_level = cpu_to_le32(EXT3_DYNAMIC_REV); /* leave es->s_feature_*compat flags alone */ /* es->s_uuid will be set by e2fsck if empty */ /* * The rest of the superblock fields should be zero, and if not it * means they are likely already in use, so leave them alone. We * can leave it up to e2fsck to clean up any inconsistencies there. 
*/ } /* * Open the external journal device */ static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb) { struct block_device *bdev; char b[BDEVNAME_SIZE]; bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); if (IS_ERR(bdev)) goto fail; return bdev; fail: ext3_msg(sb, "error: failed to open journal device %s: %ld", __bdevname(dev, b), PTR_ERR(bdev)); return NULL; } /* * Release the journal device */ static int ext3_blkdev_put(struct block_device *bdev) { return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } static int ext3_blkdev_remove(struct ext3_sb_info *sbi) { struct block_device *bdev; int ret = -ENODEV; bdev = sbi->journal_bdev; if (bdev) { ret = ext3_blkdev_put(bdev); sbi->journal_bdev = NULL; } return ret; } static inline struct inode *orphan_list_entry(struct list_head *l) { return &list_entry(l, struct ext3_inode_info, i_orphan)->vfs_inode; } static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi) { struct list_head *l; ext3_msg(sb, KERN_ERR, "error: sb orphan head is %d", le32_to_cpu(sbi->s_es->s_last_orphan)); ext3_msg(sb, KERN_ERR, "sb_info orphan list:"); list_for_each(l, &sbi->s_orphan) { struct inode *inode = orphan_list_entry(l); ext3_msg(sb, KERN_ERR, " " "inode %s:%lu at %p: mode %o, nlink %d, next %d\n", inode->i_sb->s_id, inode->i_ino, inode, inode->i_mode, inode->i_nlink, NEXT_ORPHAN(inode)); } } static void ext3_put_super (struct super_block * sb) { struct ext3_sb_info *sbi = EXT3_SB(sb); struct ext3_super_block *es = sbi->s_es; int i, err; dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); ext3_xattr_put_super(sb); err = journal_destroy(sbi->s_journal); sbi->s_journal = NULL; if (err < 0) ext3_abort(sb, __func__, "Couldn't clean up the journal"); if (!(sb->s_flags & MS_RDONLY)) { EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); es->s_state = cpu_to_le16(sbi->s_mount_state); BUFFER_TRACE(sbi->s_sbh, "marking dirty"); 
mark_buffer_dirty(sbi->s_sbh); ext3_commit_super(sb, es, 1); } for (i = 0; i < sbi->s_gdb_count; i++) brelse(sbi->s_group_desc[i]); kfree(sbi->s_group_desc); percpu_counter_destroy(&sbi->s_freeblocks_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); brelse(sbi->s_sbh); #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif /* Debugging code just in case the in-memory inode orphan list * isn't empty. The on-disk one can be non-empty if we've * detected an error and taken the fs readonly, but the * in-memory list had better be clean by this point. */ if (!list_empty(&sbi->s_orphan)) dump_orphan_list(sb, sbi); J_ASSERT(list_empty(&sbi->s_orphan)); invalidate_bdev(sb->s_bdev); if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) { /* * Invalidate the journal device's buffers. We don't want them * floating about in memory - the physical journal device may * hotswapped, and it breaks the `ro-after' testing code. 
*/ sync_blockdev(sbi->journal_bdev); invalidate_bdev(sbi->journal_bdev); ext3_blkdev_remove(sbi); } sb->s_fs_info = NULL; kfree(sbi->s_blockgroup_lock); kfree(sbi); } static struct kmem_cache *ext3_inode_cachep; /* * Called inside transaction, so use GFP_NOFS */ static struct inode *ext3_alloc_inode(struct super_block *sb) { struct ext3_inode_info *ei; ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS); if (!ei) return NULL; ei->i_block_alloc_info = NULL; ei->vfs_inode.i_version = 1; atomic_set(&ei->i_datasync_tid, 0); atomic_set(&ei->i_sync_tid, 0); return &ei->vfs_inode; } static int ext3_drop_inode(struct inode *inode) { int drop = generic_drop_inode(inode); trace_ext3_drop_inode(inode, drop); return drop; } static void ext3_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(ext3_inode_cachep, EXT3_I(inode)); } static void ext3_destroy_inode(struct inode *inode) { if (!list_empty(&(EXT3_I(inode)->i_orphan))) { printk("EXT3 Inode %p: orphan list check failed!\n", EXT3_I(inode)); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4, EXT3_I(inode), sizeof(struct ext3_inode_info), false); dump_stack(); } call_rcu(&inode->i_rcu, ext3_i_callback); } static void init_once(void *foo) { struct ext3_inode_info *ei = (struct ext3_inode_info *) foo; INIT_LIST_HEAD(&ei->i_orphan); #ifdef CONFIG_EXT3_FS_XATTR init_rwsem(&ei->xattr_sem); #endif mutex_init(&ei->truncate_mutex); inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { ext3_inode_cachep = kmem_cache_create("ext3_inode_cache", sizeof(struct ext3_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD), init_once); if (ext3_inode_cachep == NULL) return -ENOMEM; return 0; } static void destroy_inodecache(void) { kmem_cache_destroy(ext3_inode_cachep); } static inline void ext3_show_quota_options(struct seq_file *seq, struct super_block *sb) { #if defined(CONFIG_QUOTA) struct ext3_sb_info *sbi = EXT3_SB(sb); if (sbi->s_jquota_fmt) { char 
*fmtname = ""; switch (sbi->s_jquota_fmt) { case QFMT_VFS_OLD: fmtname = "vfsold"; break; case QFMT_VFS_V0: fmtname = "vfsv0"; break; case QFMT_VFS_V1: fmtname = "vfsv1"; break; } seq_printf(seq, ",jqfmt=%s", fmtname); } if (sbi->s_qf_names[USRQUOTA]) seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]); if (sbi->s_qf_names[GRPQUOTA]) seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]); if (test_opt(sb, USRQUOTA)) seq_puts(seq, ",usrquota"); if (test_opt(sb, GRPQUOTA)) seq_puts(seq, ",grpquota"); #endif } static char *data_mode_string(unsigned long mode) { switch (mode) { case EXT3_MOUNT_JOURNAL_DATA: return "journal"; case EXT3_MOUNT_ORDERED_DATA: return "ordered"; case EXT3_MOUNT_WRITEBACK_DATA: return "writeback"; } return "unknown"; } /* * Show an option if * - it's set to a non-default value OR * - if the per-sb default is different from the global default */ static int ext3_show_options(struct seq_file *seq, struct dentry *root) { struct super_block *sb = root->d_sb; struct ext3_sb_info *sbi = EXT3_SB(sb); struct ext3_super_block *es = sbi->s_es; unsigned long def_mount_opts; def_mount_opts = le32_to_cpu(es->s_default_mount_opts); if (sbi->s_sb_block != 1) seq_printf(seq, ",sb=%lu", sbi->s_sb_block); if (test_opt(sb, MINIX_DF)) seq_puts(seq, ",minixdf"); if (test_opt(sb, GRPID)) seq_puts(seq, ",grpid"); if (!test_opt(sb, GRPID) && (def_mount_opts & EXT3_DEFM_BSDGROUPS)) seq_puts(seq, ",nogrpid"); if (sbi->s_resuid != EXT3_DEF_RESUID || le16_to_cpu(es->s_def_resuid) != EXT3_DEF_RESUID) { seq_printf(seq, ",resuid=%u", sbi->s_resuid); } if (sbi->s_resgid != EXT3_DEF_RESGID || le16_to_cpu(es->s_def_resgid) != EXT3_DEF_RESGID) { seq_printf(seq, ",resgid=%u", sbi->s_resgid); } if (test_opt(sb, ERRORS_RO)) { int def_errors = le16_to_cpu(es->s_errors); if (def_errors == EXT3_ERRORS_PANIC || def_errors == EXT3_ERRORS_CONTINUE) { seq_puts(seq, ",errors=remount-ro"); } } if (test_opt(sb, ERRORS_CONT)) seq_puts(seq, ",errors=continue"); if (test_opt(sb, 
ERRORS_PANIC)) seq_puts(seq, ",errors=panic"); if (test_opt(sb, NO_UID32)) seq_puts(seq, ",nouid32"); if (test_opt(sb, DEBUG)) seq_puts(seq, ",debug"); #ifdef CONFIG_EXT3_FS_XATTR if (test_opt(sb, XATTR_USER)) seq_puts(seq, ",user_xattr"); if (!test_opt(sb, XATTR_USER) && (def_mount_opts & EXT3_DEFM_XATTR_USER)) { seq_puts(seq, ",nouser_xattr"); } #endif #ifdef CONFIG_EXT3_FS_POSIX_ACL if (test_opt(sb, POSIX_ACL)) seq_puts(seq, ",acl"); if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT3_DEFM_ACL)) seq_puts(seq, ",noacl"); #endif if (!test_opt(sb, RESERVATION)) seq_puts(seq, ",noreservation"); if (sbi->s_commit_interval) { seq_printf(seq, ",commit=%u", (unsigned) (sbi->s_commit_interval / HZ)); } /* * Always display barrier state so it's clear what the status is. */ seq_puts(seq, ",barrier="); seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0"); seq_printf(seq, ",data=%s", data_mode_string(test_opt(sb, DATA_FLAGS))); if (test_opt(sb, DATA_ERR_ABORT)) seq_puts(seq, ",data_err=abort"); if (test_opt(sb, NOLOAD)) seq_puts(seq, ",norecovery"); ext3_show_quota_options(seq, sb); return 0; } static struct inode *ext3_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) { struct inode *inode; if (ino < EXT3_FIRST_INO(sb) && ino != EXT3_ROOT_INO) return ERR_PTR(-ESTALE); if (ino > le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count)) return ERR_PTR(-ESTALE); /* iget isn't really right if the inode is currently unallocated!! * * ext3_read_inode will return a bad_inode if the inode had been * deleted, so we should be safe. 
* * Currently we don't know the generation for parent directory, so * a generation of 0 means "accept any" */ inode = ext3_iget(sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); if (generation && inode->i_generation != generation) { iput(inode); return ERR_PTR(-ESTALE); } return inode; } static struct dentry *ext3_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, ext3_nfs_get_inode); } static struct dentry *ext3_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, ext3_nfs_get_inode); } /* * Try to release metadata pages (indirect blocks, directories) which are * mapped via the block device. Since these pages could have journal heads * which would prevent try_to_free_buffers() from freeing them, we must use * jbd layer's try_to_free_buffers() function to release them. */ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, gfp_t wait) { journal_t *journal = EXT3_SB(sb)->s_journal; WARN_ON(PageChecked(page)); if (!page_has_buffers(page)) return 0; if (journal) return journal_try_to_free_buffers(journal, page, wait & ~__GFP_WAIT); return try_to_free_buffers(page); } #ifdef CONFIG_QUOTA #define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group") #define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA)) static int ext3_write_dquot(struct dquot *dquot); static int ext3_acquire_dquot(struct dquot *dquot); static int ext3_release_dquot(struct dquot *dquot); static int ext3_mark_dquot_dirty(struct dquot *dquot); static int ext3_write_info(struct super_block *sb, int type); static int ext3_quota_on(struct super_block *sb, int type, int format_id, struct path *path); static int ext3_quota_on_mount(struct super_block *sb, int type); static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off); static ssize_t ext3_quota_write(struct 
super_block *sb, int type, const char *data, size_t len, loff_t off); static const struct dquot_operations ext3_quota_operations = { .write_dquot = ext3_write_dquot, .acquire_dquot = ext3_acquire_dquot, .release_dquot = ext3_release_dquot, .mark_dirty = ext3_mark_dquot_dirty, .write_info = ext3_write_info, .alloc_dquot = dquot_alloc, .destroy_dquot = dquot_destroy, }; static const struct quotactl_ops ext3_qctl_operations = { .quota_on = ext3_quota_on, .quota_off = dquot_quota_off, .quota_sync = dquot_quota_sync, .get_info = dquot_get_dqinfo, .set_info = dquot_set_dqinfo, .get_dqblk = dquot_get_dqblk, .set_dqblk = dquot_set_dqblk }; #endif static const struct super_operations ext3_sops = { .alloc_inode = ext3_alloc_inode, .destroy_inode = ext3_destroy_inode, .write_inode = ext3_write_inode, .dirty_inode = ext3_dirty_inode, .drop_inode = ext3_drop_inode, .evict_inode = ext3_evict_inode, .put_super = ext3_put_super, .sync_fs = ext3_sync_fs, .freeze_fs = ext3_freeze, .unfreeze_fs = ext3_unfreeze, .statfs = ext3_statfs, .remount_fs = ext3_remount, .show_options = ext3_show_options, #ifdef CONFIG_QUOTA .quota_read = ext3_quota_read, .quota_write = ext3_quota_write, #endif .bdev_try_to_free_page = bdev_try_to_free_page, }; static const struct export_operations ext3_export_ops = { .fh_to_dentry = ext3_fh_to_dentry, .fh_to_parent = ext3_fh_to_parent, .get_parent = ext3_get_parent, }; enum { Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid, Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov, Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl, Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh, Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev, Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback, Opt_data_err_abort, Opt_data_err_ignore, Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, 
Opt_jqfmt_vfsv1, Opt_quota, Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err, Opt_resize, Opt_usrquota, Opt_grpquota }; static const match_table_t tokens = { {Opt_bsd_df, "bsddf"}, {Opt_minix_df, "minixdf"}, {Opt_grpid, "grpid"}, {Opt_grpid, "bsdgroups"}, {Opt_nogrpid, "nogrpid"}, {Opt_nogrpid, "sysvgroups"}, {Opt_resgid, "resgid=%u"}, {Opt_resuid, "resuid=%u"}, {Opt_sb, "sb=%u"}, {Opt_err_cont, "errors=continue"}, {Opt_err_panic, "errors=panic"}, {Opt_err_ro, "errors=remount-ro"}, {Opt_nouid32, "nouid32"}, {Opt_nocheck, "nocheck"}, {Opt_nocheck, "check=none"}, {Opt_debug, "debug"}, {Opt_oldalloc, "oldalloc"}, {Opt_orlov, "orlov"}, {Opt_user_xattr, "user_xattr"}, {Opt_nouser_xattr, "nouser_xattr"}, {Opt_acl, "acl"}, {Opt_noacl, "noacl"}, {Opt_reservation, "reservation"}, {Opt_noreservation, "noreservation"}, {Opt_noload, "noload"}, {Opt_noload, "norecovery"}, {Opt_nobh, "nobh"}, {Opt_bh, "bh"}, {Opt_commit, "commit=%u"}, {Opt_journal_update, "journal=update"}, {Opt_journal_inum, "journal=%u"}, {Opt_journal_dev, "journal_dev=%u"}, {Opt_abort, "abort"}, {Opt_data_journal, "data=journal"}, {Opt_data_ordered, "data=ordered"}, {Opt_data_writeback, "data=writeback"}, {Opt_data_err_abort, "data_err=abort"}, {Opt_data_err_ignore, "data_err=ignore"}, {Opt_offusrjquota, "usrjquota="}, {Opt_usrjquota, "usrjquota=%s"}, {Opt_offgrpjquota, "grpjquota="}, {Opt_grpjquota, "grpjquota=%s"}, {Opt_jqfmt_vfsold, "jqfmt=vfsold"}, {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"}, {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"}, {Opt_grpquota, "grpquota"}, {Opt_noquota, "noquota"}, {Opt_quota, "quota"}, {Opt_usrquota, "usrquota"}, {Opt_barrier, "barrier=%u"}, {Opt_barrier, "barrier"}, {Opt_nobarrier, "nobarrier"}, {Opt_resize, "resize"}, {Opt_err, NULL}, }; static ext3_fsblk_t get_sb_block(void **data, struct super_block *sb) { ext3_fsblk_t sb_block; char *options = (char *) *data; if (!options || strncmp(options, "sb=", 3) != 0) return 1; /* Default location */ options += 3; /*todo: use simple_strtoll with 
>32bit ext3 */ sb_block = simple_strtoul(options, &options, 0); if (*options && *options != ',') { ext3_msg(sb, "error: invalid sb specification: %s", (char *) *data); return 1; } if (*options == ',') options++; *data = (void *) options; return sb_block; } #ifdef CONFIG_QUOTA static int set_qf_name(struct super_block *sb, int qtype, substring_t *args) { struct ext3_sb_info *sbi = EXT3_SB(sb); char *qname; if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) { ext3_msg(sb, KERN_ERR, "Cannot change journaled " "quota options when quota turned on"); return 0; } qname = match_strdup(args); if (!qname) { ext3_msg(sb, KERN_ERR, "Not enough memory for storing quotafile name"); return 0; } if (sbi->s_qf_names[qtype] && strcmp(sbi->s_qf_names[qtype], qname)) { ext3_msg(sb, KERN_ERR, "%s quota file already specified", QTYPE2NAME(qtype)); kfree(qname); return 0; } sbi->s_qf_names[qtype] = qname; if (strchr(sbi->s_qf_names[qtype], '/')) { ext3_msg(sb, KERN_ERR, "quotafile must be on filesystem root"); kfree(sbi->s_qf_names[qtype]); sbi->s_qf_names[qtype] = NULL; return 0; } set_opt(sbi->s_mount_opt, QUOTA); return 1; } static int clear_qf_name(struct super_block *sb, int qtype) { struct ext3_sb_info *sbi = EXT3_SB(sb); if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) { ext3_msg(sb, KERN_ERR, "Cannot change journaled quota options" " when quota turned on"); return 0; } /* * The space will be released later when all options are confirmed * to be correct */ sbi->s_qf_names[qtype] = NULL; return 1; } #endif static int parse_options (char *options, struct super_block *sb, unsigned int *inum, unsigned long *journal_devnum, ext3_fsblk_t *n_blocks_count, int is_remount) { struct ext3_sb_info *sbi = EXT3_SB(sb); char * p; substring_t args[MAX_OPT_ARGS]; int data_opt = 0; int option; #ifdef CONFIG_QUOTA int qfmt; #endif if (!options) return 1; while ((p = strsep (&options, ",")) != NULL) { int token; if (!*p) continue; /* * Initialize args struct so we know whether arg was * 
found; some options take optional arguments. */ args[0].to = args[0].from = 0; token = match_token(p, tokens, args); switch (token) { case Opt_bsd_df: clear_opt (sbi->s_mount_opt, MINIX_DF); break; case Opt_minix_df: set_opt (sbi->s_mount_opt, MINIX_DF); break; case Opt_grpid: set_opt (sbi->s_mount_opt, GRPID); break; case Opt_nogrpid: clear_opt (sbi->s_mount_opt, GRPID); break; case Opt_resuid: if (match_int(&args[0], &option)) return 0; sbi->s_resuid = option; break; case Opt_resgid: if (match_int(&args[0], &option)) return 0; sbi->s_resgid = option; break; case Opt_sb: /* handled by get_sb_block() instead of here */ /* *sb_block = match_int(&args[0]); */ break; case Opt_err_panic: clear_opt (sbi->s_mount_opt, ERRORS_CONT); clear_opt (sbi->s_mount_opt, ERRORS_RO); set_opt (sbi->s_mount_opt, ERRORS_PANIC); break; case Opt_err_ro: clear_opt (sbi->s_mount_opt, ERRORS_CONT); clear_opt (sbi->s_mount_opt, ERRORS_PANIC); set_opt (sbi->s_mount_opt, ERRORS_RO); break; case Opt_err_cont: clear_opt (sbi->s_mount_opt, ERRORS_RO); clear_opt (sbi->s_mount_opt, ERRORS_PANIC); set_opt (sbi->s_mount_opt, ERRORS_CONT); break; case Opt_nouid32: set_opt (sbi->s_mount_opt, NO_UID32); break; case Opt_nocheck: clear_opt (sbi->s_mount_opt, CHECK); break; case Opt_debug: set_opt (sbi->s_mount_opt, DEBUG); break; case Opt_oldalloc: ext3_msg(sb, KERN_WARNING, "Ignoring deprecated oldalloc option"); break; case Opt_orlov: ext3_msg(sb, KERN_WARNING, "Ignoring deprecated orlov option"); break; #ifdef CONFIG_EXT3_FS_XATTR case Opt_user_xattr: set_opt (sbi->s_mount_opt, XATTR_USER); break; case Opt_nouser_xattr: clear_opt (sbi->s_mount_opt, XATTR_USER); break; #else case Opt_user_xattr: case Opt_nouser_xattr: ext3_msg(sb, KERN_INFO, "(no)user_xattr options not supported"); break; #endif #ifdef CONFIG_EXT3_FS_POSIX_ACL case Opt_acl: set_opt(sbi->s_mount_opt, POSIX_ACL); break; case Opt_noacl: clear_opt(sbi->s_mount_opt, POSIX_ACL); break; #else case Opt_acl: case Opt_noacl: ext3_msg(sb, 
KERN_INFO, "(no)acl options not supported"); break; #endif case Opt_reservation: set_opt(sbi->s_mount_opt, RESERVATION); break; case Opt_noreservation: clear_opt(sbi->s_mount_opt, RESERVATION); break; case Opt_journal_update: /* @@@ FIXME */ /* Eventually we will want to be able to create a journal file here. For now, only allow the user to specify an existing inode to be the journal file. */ if (is_remount) { ext3_msg(sb, KERN_ERR, "error: cannot specify " "journal on remount"); return 0; } set_opt (sbi->s_mount_opt, UPDATE_JOURNAL); break; case Opt_journal_inum: if (is_remount) { ext3_msg(sb, KERN_ERR, "error: cannot specify " "journal on remount"); return 0; } if (match_int(&args[0], &option)) return 0; *inum = option; break; case Opt_journal_dev: if (is_remount) { ext3_msg(sb, KERN_ERR, "error: cannot specify " "journal on remount"); return 0; } if (match_int(&args[0], &option)) return 0; *journal_devnum = option; break; case Opt_noload: set_opt (sbi->s_mount_opt, NOLOAD); break; case Opt_commit: if (match_int(&args[0], &option)) return 0; if (option < 0) return 0; if (option == 0) option = JBD_DEFAULT_MAX_COMMIT_AGE; sbi->s_commit_interval = HZ * option; break; case Opt_data_journal: data_opt = EXT3_MOUNT_JOURNAL_DATA; goto datacheck; case Opt_data_ordered: data_opt = EXT3_MOUNT_ORDERED_DATA; goto datacheck; case Opt_data_writeback: data_opt = EXT3_MOUNT_WRITEBACK_DATA; datacheck: if (is_remount) { if (test_opt(sb, DATA_FLAGS) == data_opt) break; ext3_msg(sb, KERN_ERR, "error: cannot change " "data mode on remount. 
The filesystem " "is mounted in data=%s mode and you " "try to remount it in data=%s mode.", data_mode_string(test_opt(sb, DATA_FLAGS)), data_mode_string(data_opt)); return 0; } else { clear_opt(sbi->s_mount_opt, DATA_FLAGS); sbi->s_mount_opt |= data_opt; } break; case Opt_data_err_abort: set_opt(sbi->s_mount_opt, DATA_ERR_ABORT); break; case Opt_data_err_ignore: clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT); break; #ifdef CONFIG_QUOTA case Opt_usrjquota: if (!set_qf_name(sb, USRQUOTA, &args[0])) return 0; break; case Opt_grpjquota: if (!set_qf_name(sb, GRPQUOTA, &args[0])) return 0; break; case Opt_offusrjquota: if (!clear_qf_name(sb, USRQUOTA)) return 0; break; case Opt_offgrpjquota: if (!clear_qf_name(sb, GRPQUOTA)) return 0; break; case Opt_jqfmt_vfsold: qfmt = QFMT_VFS_OLD; goto set_qf_format; case Opt_jqfmt_vfsv0: qfmt = QFMT_VFS_V0; goto set_qf_format; case Opt_jqfmt_vfsv1: qfmt = QFMT_VFS_V1; set_qf_format: if (sb_any_quota_loaded(sb) && sbi->s_jquota_fmt != qfmt) { ext3_msg(sb, KERN_ERR, "error: cannot change " "journaled quota options when " "quota turned on."); return 0; } sbi->s_jquota_fmt = qfmt; break; case Opt_quota: case Opt_usrquota: set_opt(sbi->s_mount_opt, QUOTA); set_opt(sbi->s_mount_opt, USRQUOTA); break; case Opt_grpquota: set_opt(sbi->s_mount_opt, QUOTA); set_opt(sbi->s_mount_opt, GRPQUOTA); break; case Opt_noquota: if (sb_any_quota_loaded(sb)) { ext3_msg(sb, KERN_ERR, "error: cannot change " "quota options when quota turned on."); return 0; } clear_opt(sbi->s_mount_opt, QUOTA); clear_opt(sbi->s_mount_opt, USRQUOTA); clear_opt(sbi->s_mount_opt, GRPQUOTA); break; #else case Opt_quota: case Opt_usrquota: case Opt_grpquota: ext3_msg(sb, KERN_ERR, "error: quota options not supported."); break; case Opt_usrjquota: case Opt_grpjquota: case Opt_offusrjquota: case Opt_offgrpjquota: case Opt_jqfmt_vfsold: case Opt_jqfmt_vfsv0: case Opt_jqfmt_vfsv1: ext3_msg(sb, KERN_ERR, "error: journaled quota options not " "supported."); break; case Opt_noquota: 
break; #endif case Opt_abort: set_opt(sbi->s_mount_opt, ABORT); break; case Opt_nobarrier: clear_opt(sbi->s_mount_opt, BARRIER); break; case Opt_barrier: if (args[0].from) { if (match_int(&args[0], &option)) return 0; } else option = 1; /* No argument, default to 1 */ if (option) set_opt(sbi->s_mount_opt, BARRIER); else clear_opt(sbi->s_mount_opt, BARRIER); break; case Opt_ignore: break; case Opt_resize: if (!is_remount) { ext3_msg(sb, KERN_ERR, "error: resize option only available " "for remount"); return 0; } if (match_int(&args[0], &option) != 0) return 0; *n_blocks_count = option; break; case Opt_nobh: ext3_msg(sb, KERN_WARNING, "warning: ignoring deprecated nobh option"); break; case Opt_bh: ext3_msg(sb, KERN_WARNING, "warning: ignoring deprecated bh option"); break; default: ext3_msg(sb, KERN_ERR, "error: unrecognized mount option \"%s\" " "or missing value", p); return 0; } } #ifdef CONFIG_QUOTA if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA]) clear_opt(sbi->s_mount_opt, USRQUOTA); if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA]) clear_opt(sbi->s_mount_opt, GRPQUOTA); if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) { ext3_msg(sb, KERN_ERR, "error: old and new quota " "format mixing."); return 0; } if (!sbi->s_jquota_fmt) { ext3_msg(sb, KERN_ERR, "error: journaled quota format " "not specified."); return 0; } } else { if (sbi->s_jquota_fmt) { ext3_msg(sb, KERN_ERR, "error: journaled quota format " "specified with no journaling " "enabled."); return 0; } } #endif return 1; } static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es, int read_only) { struct ext3_sb_info *sbi = EXT3_SB(sb); int res = 0; if (le32_to_cpu(es->s_rev_level) > EXT3_MAX_SUPP_REV) { ext3_msg(sb, KERN_ERR, "error: revision level too high, " "forcing read-only mode"); res = MS_RDONLY; } if (read_only) return res; if (!(sbi->s_mount_state & EXT3_VALID_FS)) ext3_msg(sb, KERN_WARNING, 
"warning: mounting unchecked fs, " "running e2fsck is recommended"); else if ((sbi->s_mount_state & EXT3_ERROR_FS)) ext3_msg(sb, KERN_WARNING, "warning: mounting fs with errors, " "running e2fsck is recommended"); else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 && le16_to_cpu(es->s_mnt_count) >= le16_to_cpu(es->s_max_mnt_count)) ext3_msg(sb, KERN_WARNING, "warning: maximal mount count reached, " "running e2fsck is recommended"); else if (le32_to_cpu(es->s_checkinterval) && (le32_to_cpu(es->s_lastcheck) + le32_to_cpu(es->s_checkinterval) <= get_seconds())) ext3_msg(sb, KERN_WARNING, "warning: checktime reached, " "running e2fsck is recommended"); #if 0 /* @@@ We _will_ want to clear the valid bit if we find inconsistencies, to force a fsck at reboot. But for a plain journaled filesystem we can keep it set as valid forever! :) */ es->s_state &= cpu_to_le16(~EXT3_VALID_FS); #endif if (!le16_to_cpu(es->s_max_mnt_count)) es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT); le16_add_cpu(&es->s_mnt_count, 1); es->s_mtime = cpu_to_le32(get_seconds()); ext3_update_dynamic_rev(sb); EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); ext3_commit_super(sb, es, 1); if (test_opt(sb, DEBUG)) ext3_msg(sb, KERN_INFO, "[bs=%lu, gc=%lu, " "bpg=%lu, ipg=%lu, mo=%04lx]", sb->s_blocksize, sbi->s_groups_count, EXT3_BLOCKS_PER_GROUP(sb), EXT3_INODES_PER_GROUP(sb), sbi->s_mount_opt); if (EXT3_SB(sb)->s_journal->j_inode == NULL) { char b[BDEVNAME_SIZE]; ext3_msg(sb, KERN_INFO, "using external journal on %s", bdevname(EXT3_SB(sb)->s_journal->j_dev, b)); } else { ext3_msg(sb, KERN_INFO, "using internal journal"); } cleancache_init_fs(sb); return res; } /* Called at mount-time, super-block is locked */ static int ext3_check_descriptors(struct super_block *sb) { struct ext3_sb_info *sbi = EXT3_SB(sb); int i; ext3_debug ("Checking group descriptors"); for (i = 0; i < sbi->s_groups_count; i++) { struct ext3_group_desc *gdp = ext3_get_group_desc(sb, i, NULL); ext3_fsblk_t 
first_block = ext3_group_first_block_no(sb, i); ext3_fsblk_t last_block; if (i == sbi->s_groups_count - 1) last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1; else last_block = first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1); if (le32_to_cpu(gdp->bg_block_bitmap) < first_block || le32_to_cpu(gdp->bg_block_bitmap) > last_block) { ext3_error (sb, "ext3_check_descriptors", "Block bitmap for group %d" " not in group (block %lu)!", i, (unsigned long) le32_to_cpu(gdp->bg_block_bitmap)); return 0; } if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block || le32_to_cpu(gdp->bg_inode_bitmap) > last_block) { ext3_error (sb, "ext3_check_descriptors", "Inode bitmap for group %d" " not in group (block %lu)!", i, (unsigned long) le32_to_cpu(gdp->bg_inode_bitmap)); return 0; } if (le32_to_cpu(gdp->bg_inode_table) < first_block || le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 > last_block) { ext3_error (sb, "ext3_check_descriptors", "Inode table for group %d" " not in group (block %lu)!", i, (unsigned long) le32_to_cpu(gdp->bg_inode_table)); return 0; } } sbi->s_es->s_free_blocks_count=cpu_to_le32(ext3_count_free_blocks(sb)); sbi->s_es->s_free_inodes_count=cpu_to_le32(ext3_count_free_inodes(sb)); return 1; } /* ext3_orphan_cleanup() walks a singly-linked list of inodes (starting at * the superblock) which were deleted from all directories, but held open by * a process at the time of a crash. We walk the list and try to delete these * inodes at recovery time (only with a read-write filesystem). * * In order to keep the orphan inode chain consistent during traversal (in * case of crash during recovery), we link each inode into the superblock * orphan list_head and handle it the same way as an inode deletion during * normal operation (which journals the operations for us). * * We only do an iget() and an iput() on each inode, which is very safe if we * accidentally point at an in-use or already deleted inode. 
The worst that * can happen in this case is that we get a "bit already cleared" message from * ext3_free_inode(). The only reason we would point at a wrong inode is if * e2fsck was run on this filesystem, and it must have already done the orphan * inode cleanup for us, so we can safely abort without any further action. */ static void ext3_orphan_cleanup (struct super_block * sb, struct ext3_super_block * es) { unsigned int s_flags = sb->s_flags; int nr_orphans = 0, nr_truncates = 0; #ifdef CONFIG_QUOTA int i; #endif if (!es->s_last_orphan) { jbd_debug(4, "no orphan inodes to clean up\n"); return; } if (bdev_read_only(sb->s_bdev)) { ext3_msg(sb, KERN_ERR, "error: write access " "unavailable, skipping orphan cleanup."); return; } /* Check if feature set allows readwrite operations */ if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) { ext3_msg(sb, KERN_INFO, "Skipping orphan cleanup due to " "unknown ROCOMPAT features"); return; } if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) { if (es->s_last_orphan) jbd_debug(1, "Errors on filesystem, " "clearing orphan list.\n"); es->s_last_orphan = 0; jbd_debug(1, "Skipping orphan recovery on fs with errors.\n"); return; } if (s_flags & MS_RDONLY) { ext3_msg(sb, KERN_INFO, "orphan cleanup on readonly fs"); sb->s_flags &= ~MS_RDONLY; } #ifdef CONFIG_QUOTA /* Needed for iput() to work correctly and not trash data */ sb->s_flags |= MS_ACTIVE; /* Turn on quotas so that they are updated correctly */ for (i = 0; i < MAXQUOTAS; i++) { if (EXT3_SB(sb)->s_qf_names[i]) { int ret = ext3_quota_on_mount(sb, i); if (ret < 0) ext3_msg(sb, KERN_ERR, "error: cannot turn on journaled " "quota: %d", ret); } } #endif while (es->s_last_orphan) { struct inode *inode; inode = ext3_orphan_get(sb, le32_to_cpu(es->s_last_orphan)); if (IS_ERR(inode)) { es->s_last_orphan = 0; break; } list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan); dquot_initialize(inode); if (inode->i_nlink) { printk(KERN_DEBUG "%s: truncating inode %lu to %Ld 
bytes\n", __func__, inode->i_ino, inode->i_size); jbd_debug(2, "truncating inode %lu to %Ld bytes\n", inode->i_ino, inode->i_size); ext3_truncate(inode); nr_truncates++; } else { printk(KERN_DEBUG "%s: deleting unreferenced inode %lu\n", __func__, inode->i_ino); jbd_debug(2, "deleting unreferenced inode %lu\n", inode->i_ino); nr_orphans++; } iput(inode); /* The delete magic happens here! */ } #define PLURAL(x) (x), ((x)==1) ? "" : "s" if (nr_orphans) ext3_msg(sb, KERN_INFO, "%d orphan inode%s deleted", PLURAL(nr_orphans)); if (nr_truncates) ext3_msg(sb, KERN_INFO, "%d truncate%s cleaned up", PLURAL(nr_truncates)); #ifdef CONFIG_QUOTA /* Turn quotas off */ for (i = 0; i < MAXQUOTAS; i++) { if (sb_dqopt(sb)->files[i]) dquot_quota_off(sb, i); } #endif sb->s_flags = s_flags; /* Restore MS_RDONLY status */ } /* * Maximal file size. There is a direct, and {,double-,triple-}indirect * block limit, and also a limit of (2^32 - 1) 512-byte sectors in i_blocks. * We need to be 1 filesystem block less than the 2^32 sector limit. 
*/
/*
 * ext3_max_size - largest representable file size for a given block size.
 * @bits: filesystem block size in bits (i.e. blocksize == 1 << bits).
 *
 * The limit is the smaller of the direct/indirect block-mapping limit and
 * the i_blocks limit (2^32 - 1 sectors of 512 bytes), capped further by
 * MAX_LFS_FILESIZE for the VFS.
 */
static loff_t ext3_max_size(int bits)
{
	loff_t res = EXT3_NDIR_BLOCKS;
	int meta_blocks;
	loff_t upper_limit;

	/* This is calculated to be the largest file size for a
	 * dense file such that the total number of
	 * sectors in the file, including data and all indirect blocks,
	 * does not exceed 2^32 - 1:
	 * __u32 i_blocks represents the total number of
	 * 512-byte blocks of the file
	 */
	upper_limit = (1LL << 32) - 1;

	/* total blocks in file system block size */
	upper_limit >>= (bits - 9);

	/* indirect blocks */
	meta_blocks = 1;
	/* double indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2));
	/* triple indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));

	/* i_blocks counts metadata too, so subtract it before converting
	 * the remaining sector budget back into bytes. */
	upper_limit -= meta_blocks;
	upper_limit <<= bits;

	/* Block-mapping limit: direct + single + double + triple indirect. */
	res += 1LL << (bits-2);
	res += 1LL << (2*(bits-2));
	res += 1LL << (3*(bits-2));
	res <<= bits;
	if (res > upper_limit)
		res = upper_limit;

	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}

/*
 * descriptor_loc - block number holding group descriptor block @nr.
 * @logic_sb_block: block containing the superblock.
 *
 * Without the META_BG incompat feature (or below s_first_meta_bg),
 * descriptors simply follow the superblock; otherwise the descriptor lives
 * in its own group, after a backup superblock when that group has one.
 */
static ext3_fsblk_t descriptor_loc(struct super_block *sb,
				   ext3_fsblk_t logic_sb_block,
				   int nr)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	unsigned long bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_META_BG) ||
	    nr < first_meta_bg)
		return (logic_sb_block + nr + 1);
	bg = sbi->s_desc_per_block * nr;
	if (ext3_bg_has_super(sb, bg))
		has_super = 1;
	return (has_super + ext3_group_first_block_no(sb, bg));
}

/*
 * ext3_fill_super - read the on-disk superblock and set up the in-core
 * superblock at mount time.  @data is the raw mount option string; @silent
 * suppresses "can't find ext3" noise when probing filesystem types.
 * Returns 0 on success or a negative errno.
 */
static int ext3_fill_super (struct super_block *sb, void *data, int silent)
{
	struct buffer_head * bh;
	struct ext3_super_block *es = NULL;
	struct ext3_sb_info *sbi;
	ext3_fsblk_t block;
	/* get_sb_block() consumes a leading "sb=N" option, if any. */
	ext3_fsblk_t sb_block = get_sb_block(&data, sb);
	ext3_fsblk_t logic_sb_block;
	unsigned long offset = 0;
	unsigned int journal_inum = 0;
	unsigned long journal_devnum = 0;
	unsigned long def_mount_opts;
	struct inode *root;
	int blocksize;
	int hblock;
	int db_count;
	int i;
	int needs_recovery;
	int ret = -EINVAL;
	__le32 features;
	int err;

	sbi = kzalloc(sizeof(*sbi),
GFP_KERNEL); if (!sbi) return -ENOMEM; sbi->s_blockgroup_lock = kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL); if (!sbi->s_blockgroup_lock) { kfree(sbi); return -ENOMEM; } sb->s_fs_info = sbi; sbi->s_mount_opt = 0; sbi->s_resuid = EXT3_DEF_RESUID; sbi->s_resgid = EXT3_DEF_RESGID; sbi->s_sb_block = sb_block; blocksize = sb_min_blocksize(sb, EXT3_MIN_BLOCK_SIZE); if (!blocksize) { ext3_msg(sb, KERN_ERR, "error: unable to set blocksize"); goto out_fail; } /* * The ext3 superblock will not be buffer aligned for other than 1kB * block sizes. We need to calculate the offset from buffer start. */ if (blocksize != EXT3_MIN_BLOCK_SIZE) { logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize; offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize; } else { logic_sb_block = sb_block; } if (!(bh = sb_bread(sb, logic_sb_block))) { ext3_msg(sb, KERN_ERR, "error: unable to read superblock"); goto out_fail; } /* * Note: s_es must be initialized as soon as possible because * some ext3 macro-instructions depend on its value */ es = (struct ext3_super_block *) (bh->b_data + offset); sbi->s_es = es; sb->s_magic = le16_to_cpu(es->s_magic); if (sb->s_magic != EXT3_SUPER_MAGIC) goto cantfind_ext3; /* Set defaults before we parse the mount options */ def_mount_opts = le32_to_cpu(es->s_default_mount_opts); if (def_mount_opts & EXT3_DEFM_DEBUG) set_opt(sbi->s_mount_opt, DEBUG); if (def_mount_opts & EXT3_DEFM_BSDGROUPS) set_opt(sbi->s_mount_opt, GRPID); if (def_mount_opts & EXT3_DEFM_UID16) set_opt(sbi->s_mount_opt, NO_UID32); #ifdef CONFIG_EXT3_FS_XATTR if (def_mount_opts & EXT3_DEFM_XATTR_USER) set_opt(sbi->s_mount_opt, XATTR_USER); #endif #ifdef CONFIG_EXT3_FS_POSIX_ACL if (def_mount_opts & EXT3_DEFM_ACL) set_opt(sbi->s_mount_opt, POSIX_ACL); #endif if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_DATA) set_opt(sbi->s_mount_opt, JOURNAL_DATA); else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_ORDERED) set_opt(sbi->s_mount_opt, ORDERED_DATA); else if 
((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_WBACK) set_opt(sbi->s_mount_opt, WRITEBACK_DATA); if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC) set_opt(sbi->s_mount_opt, ERRORS_PANIC); else if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_CONTINUE) set_opt(sbi->s_mount_opt, ERRORS_CONT); else set_opt(sbi->s_mount_opt, ERRORS_RO); sbi->s_resuid = le16_to_cpu(es->s_def_resuid); sbi->s_resgid = le16_to_cpu(es->s_def_resgid); /* enable barriers by default */ set_opt(sbi->s_mount_opt, BARRIER); set_opt(sbi->s_mount_opt, RESERVATION); if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum, NULL, 0)) goto failed_mount; sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV && (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) || EXT3_HAS_RO_COMPAT_FEATURE(sb, ~0U) || EXT3_HAS_INCOMPAT_FEATURE(sb, ~0U))) ext3_msg(sb, KERN_WARNING, "warning: feature flags set on rev 0 fs, " "running e2fsck is recommended"); /* * Check feature flags regardless of the revision level, since we * previously didn't change the revision level when setting the flags, * so there is a chance incompat flags are set on a rev 0 filesystem. 
*/ features = EXT3_HAS_INCOMPAT_FEATURE(sb, ~EXT3_FEATURE_INCOMPAT_SUPP); if (features) { ext3_msg(sb, KERN_ERR, "error: couldn't mount because of unsupported " "optional features (%x)", le32_to_cpu(features)); goto failed_mount; } features = EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP); if (!(sb->s_flags & MS_RDONLY) && features) { ext3_msg(sb, KERN_ERR, "error: couldn't mount RDWR because of unsupported " "optional features (%x)", le32_to_cpu(features)); goto failed_mount; } blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); if (blocksize < EXT3_MIN_BLOCK_SIZE || blocksize > EXT3_MAX_BLOCK_SIZE) { ext3_msg(sb, KERN_ERR, "error: couldn't mount because of unsupported " "filesystem blocksize %d", blocksize); goto failed_mount; } hblock = bdev_logical_block_size(sb->s_bdev); if (sb->s_blocksize != blocksize) { /* * Make sure the blocksize for the filesystem is larger * than the hardware sectorsize for the machine. */ if (blocksize < hblock) { ext3_msg(sb, KERN_ERR, "error: fsblocksize %d too small for " "hardware sectorsize %d", blocksize, hblock); goto failed_mount; } brelse (bh); if (!sb_set_blocksize(sb, blocksize)) { ext3_msg(sb, KERN_ERR, "error: bad blocksize %d", blocksize); goto out_fail; } logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize; offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize; bh = sb_bread(sb, logic_sb_block); if (!bh) { ext3_msg(sb, KERN_ERR, "error: can't read superblock on 2nd try"); goto failed_mount; } es = (struct ext3_super_block *)(bh->b_data + offset); sbi->s_es = es; if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) { ext3_msg(sb, KERN_ERR, "error: magic mismatch"); goto failed_mount; } } sb->s_maxbytes = ext3_max_size(sb->s_blocksize_bits); if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV) { sbi->s_inode_size = EXT3_GOOD_OLD_INODE_SIZE; sbi->s_first_ino = EXT3_GOOD_OLD_FIRST_INO; } else { sbi->s_inode_size = le16_to_cpu(es->s_inode_size); sbi->s_first_ino = le32_to_cpu(es->s_first_ino); 
if ((sbi->s_inode_size < EXT3_GOOD_OLD_INODE_SIZE) || (!is_power_of_2(sbi->s_inode_size)) || (sbi->s_inode_size > blocksize)) { ext3_msg(sb, KERN_ERR, "error: unsupported inode size: %d", sbi->s_inode_size); goto failed_mount; } } sbi->s_frag_size = EXT3_MIN_FRAG_SIZE << le32_to_cpu(es->s_log_frag_size); if (blocksize != sbi->s_frag_size) { ext3_msg(sb, KERN_ERR, "error: fragsize %lu != blocksize %u (unsupported)", sbi->s_frag_size, blocksize); goto failed_mount; } sbi->s_frags_per_block = 1; sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group); sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); if (EXT3_INODE_SIZE(sb) == 0 || EXT3_INODES_PER_GROUP(sb) == 0) goto cantfind_ext3; sbi->s_inodes_per_block = blocksize / EXT3_INODE_SIZE(sb); if (sbi->s_inodes_per_block == 0) goto cantfind_ext3; sbi->s_itb_per_group = sbi->s_inodes_per_group / sbi->s_inodes_per_block; sbi->s_desc_per_block = blocksize / sizeof(struct ext3_group_desc); sbi->s_sbh = bh; sbi->s_mount_state = le16_to_cpu(es->s_state); sbi->s_addr_per_block_bits = ilog2(EXT3_ADDR_PER_BLOCK(sb)); sbi->s_desc_per_block_bits = ilog2(EXT3_DESC_PER_BLOCK(sb)); for (i=0; i < 4; i++) sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); sbi->s_def_hash_version = es->s_def_hash_version; i = le32_to_cpu(es->s_flags); if (i & EXT2_FLAGS_UNSIGNED_HASH) sbi->s_hash_unsigned = 3; else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { #ifdef __CHAR_UNSIGNED__ es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); sbi->s_hash_unsigned = 3; #else es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); #endif } if (sbi->s_blocks_per_group > blocksize * 8) { ext3_msg(sb, KERN_ERR, "#blocks per group too big: %lu", sbi->s_blocks_per_group); goto failed_mount; } if (sbi->s_frags_per_group > blocksize * 8) { ext3_msg(sb, KERN_ERR, "error: #fragments per group too big: %lu", sbi->s_frags_per_group); goto failed_mount; } if (sbi->s_inodes_per_group > blocksize * 8) 
{ ext3_msg(sb, KERN_ERR, "error: #inodes per group too big: %lu", sbi->s_inodes_per_group); goto failed_mount; } err = generic_check_addressable(sb->s_blocksize_bits, le32_to_cpu(es->s_blocks_count)); if (err) { ext3_msg(sb, KERN_ERR, "error: filesystem is too large to mount safely"); if (sizeof(sector_t) < 8) ext3_msg(sb, KERN_ERR, "error: CONFIG_LBDAF not enabled"); ret = err; goto failed_mount; } if (EXT3_BLOCKS_PER_GROUP(sb) == 0) goto cantfind_ext3; sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) - le32_to_cpu(es->s_first_data_block) - 1) / EXT3_BLOCKS_PER_GROUP(sb)) + 1; db_count = DIV_ROUND_UP(sbi->s_groups_count, EXT3_DESC_PER_BLOCK(sb)); sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *), GFP_KERNEL); if (sbi->s_group_desc == NULL) { ext3_msg(sb, KERN_ERR, "error: not enough memory"); ret = -ENOMEM; goto failed_mount; } bgl_lock_init(sbi->s_blockgroup_lock); for (i = 0; i < db_count; i++) { block = descriptor_loc(sb, logic_sb_block, i); sbi->s_group_desc[i] = sb_bread(sb, block); if (!sbi->s_group_desc[i]) { ext3_msg(sb, KERN_ERR, "error: can't read group descriptor %d", i); db_count = i; goto failed_mount2; } } if (!ext3_check_descriptors (sb)) { ext3_msg(sb, KERN_ERR, "error: group descriptors corrupted"); goto failed_mount2; } sbi->s_gdb_count = db_count; get_random_bytes(&sbi->s_next_generation, sizeof(u32)); spin_lock_init(&sbi->s_next_gen_lock); /* per fileystem reservation list head & lock */ spin_lock_init(&sbi->s_rsv_window_lock); sbi->s_rsv_window_root = RB_ROOT; /* Add a single, static dummy reservation to the start of the * reservation window list --- it gives us a placeholder for * append-at-start-of-list which makes the allocation logic * _much_ simpler. 
*/ sbi->s_rsv_window_head.rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; sbi->s_rsv_window_head.rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; sbi->s_rsv_window_head.rsv_alloc_hit = 0; sbi->s_rsv_window_head.rsv_goal_size = 0; ext3_rsv_window_add(sb, &sbi->s_rsv_window_head); /* * set up enough so that it can read an inode */ sb->s_op = &ext3_sops; sb->s_export_op = &ext3_export_ops; sb->s_xattr = ext3_xattr_handlers; #ifdef CONFIG_QUOTA sb->s_qcop = &ext3_qctl_operations; sb->dq_op = &ext3_quota_operations; #endif memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid)); INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ mutex_init(&sbi->s_orphan_lock); mutex_init(&sbi->s_resize_lock); sb->s_root = NULL; needs_recovery = (es->s_last_orphan != 0 || EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)); /* * The first inode we look at is the journal inode. Don't try * root first: it may be modified in the journal! */ if (!test_opt(sb, NOLOAD) && EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) { if (ext3_load_journal(sb, es, journal_devnum)) goto failed_mount2; } else if (journal_inum) { if (ext3_create_journal(sb, es, journal_inum)) goto failed_mount2; } else { if (!silent) ext3_msg(sb, KERN_ERR, "error: no journal found. " "mounting ext3 over ext2?"); goto failed_mount2; } err = percpu_counter_init(&sbi->s_freeblocks_counter, ext3_count_free_blocks(sb)); if (!err) { err = percpu_counter_init(&sbi->s_freeinodes_counter, ext3_count_free_inodes(sb)); } if (!err) { err = percpu_counter_init(&sbi->s_dirs_counter, ext3_count_dirs(sb)); } if (err) { ext3_msg(sb, KERN_ERR, "error: insufficient memory"); ret = err; goto failed_mount3; } /* We have now updated the journal if required, so we can * validate the data journaling mode. 
*/ switch (test_opt(sb, DATA_FLAGS)) { case 0: /* No mode set, assume a default based on the journal capabilities: ORDERED_DATA if the journal can cope, else JOURNAL_DATA */ if (journal_check_available_features (sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) set_opt(sbi->s_mount_opt, DEFAULT_DATA_MODE); else set_opt(sbi->s_mount_opt, JOURNAL_DATA); break; case EXT3_MOUNT_ORDERED_DATA: case EXT3_MOUNT_WRITEBACK_DATA: if (!journal_check_available_features (sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) { ext3_msg(sb, KERN_ERR, "error: journal does not support " "requested data journaling mode"); goto failed_mount3; } default: break; } /* * The journal_load will have done any necessary log recovery, * so we can safely mount the rest of the filesystem now. */ root = ext3_iget(sb, EXT3_ROOT_INO); if (IS_ERR(root)) { ext3_msg(sb, KERN_ERR, "error: get root inode failed"); ret = PTR_ERR(root); goto failed_mount3; } if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { iput(root); ext3_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck"); goto failed_mount3; } sb->s_root = d_make_root(root); if (!sb->s_root) { ext3_msg(sb, KERN_ERR, "error: get root dentry failed"); ret = -ENOMEM; goto failed_mount3; } ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY); EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS; ext3_orphan_cleanup(sb, es); EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS; if (needs_recovery) { ext3_mark_recovery_complete(sb, es); ext3_msg(sb, KERN_INFO, "recovery complete"); } ext3_msg(sb, KERN_INFO, "mounted filesystem with %s data mode", test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ? "journal": test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? 
"ordered": "writeback"); return 0; cantfind_ext3: if (!silent) ext3_msg(sb, KERN_INFO, "error: can't find ext3 filesystem on dev %s.", sb->s_id); goto failed_mount; failed_mount3: percpu_counter_destroy(&sbi->s_freeblocks_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); journal_destroy(sbi->s_journal); failed_mount2: for (i = 0; i < db_count; i++) brelse(sbi->s_group_desc[i]); kfree(sbi->s_group_desc); failed_mount: #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif ext3_blkdev_remove(sbi); brelse(bh); out_fail: sb->s_fs_info = NULL; kfree(sbi->s_blockgroup_lock); kfree(sbi); return ret; } /* * Setup any per-fs journal parameters now. We'll do this both on * initial mount, once the journal has been initialised but before we've * done any recovery; and again on any subsequent remount. */ static void ext3_init_journal_params(struct super_block *sb, journal_t *journal) { struct ext3_sb_info *sbi = EXT3_SB(sb); if (sbi->s_commit_interval) journal->j_commit_interval = sbi->s_commit_interval; /* We could also set up an ext3-specific default for the commit * interval here, but for now we'll just fall back to the jbd * default. */ spin_lock(&journal->j_state_lock); if (test_opt(sb, BARRIER)) journal->j_flags |= JFS_BARRIER; else journal->j_flags &= ~JFS_BARRIER; if (test_opt(sb, DATA_ERR_ABORT)) journal->j_flags |= JFS_ABORT_ON_SYNCDATA_ERR; else journal->j_flags &= ~JFS_ABORT_ON_SYNCDATA_ERR; spin_unlock(&journal->j_state_lock); } static journal_t *ext3_get_journal(struct super_block *sb, unsigned int journal_inum) { struct inode *journal_inode; journal_t *journal; /* First, test for the existence of a valid inode on disk. Bad * things happen if we iget() an unused inode, as the subsequent * iput() will try to delete it. 
*/ journal_inode = ext3_iget(sb, journal_inum); if (IS_ERR(journal_inode)) { ext3_msg(sb, KERN_ERR, "error: no journal found"); return NULL; } if (!journal_inode->i_nlink) { make_bad_inode(journal_inode); iput(journal_inode); ext3_msg(sb, KERN_ERR, "error: journal inode is deleted"); return NULL; } jbd_debug(2, "Journal inode found at %p: %Ld bytes\n", journal_inode, journal_inode->i_size); if (!S_ISREG(journal_inode->i_mode)) { ext3_msg(sb, KERN_ERR, "error: invalid journal inode"); iput(journal_inode); return NULL; } journal = journal_init_inode(journal_inode); if (!journal) { ext3_msg(sb, KERN_ERR, "error: could not load journal inode"); iput(journal_inode); return NULL; } journal->j_private = sb; ext3_init_journal_params(sb, journal); return journal; } static journal_t *ext3_get_dev_journal(struct super_block *sb, dev_t j_dev) { struct buffer_head * bh; journal_t *journal; ext3_fsblk_t start; ext3_fsblk_t len; int hblock, blocksize; ext3_fsblk_t sb_block; unsigned long offset; struct ext3_super_block * es; struct block_device *bdev; bdev = ext3_blkdev_get(j_dev, sb); if (bdev == NULL) return NULL; blocksize = sb->s_blocksize; hblock = bdev_logical_block_size(bdev); if (blocksize < hblock) { ext3_msg(sb, KERN_ERR, "error: blocksize too small for journal device"); goto out_bdev; } sb_block = EXT3_MIN_BLOCK_SIZE / blocksize; offset = EXT3_MIN_BLOCK_SIZE % blocksize; set_blocksize(bdev, blocksize); if (!(bh = __bread(bdev, sb_block, blocksize))) { ext3_msg(sb, KERN_ERR, "error: couldn't read superblock of " "external journal"); goto out_bdev; } es = (struct ext3_super_block *) (bh->b_data + offset); if ((le16_to_cpu(es->s_magic) != EXT3_SUPER_MAGIC) || !(le32_to_cpu(es->s_feature_incompat) & EXT3_FEATURE_INCOMPAT_JOURNAL_DEV)) { ext3_msg(sb, KERN_ERR, "error: external journal has " "bad superblock"); brelse(bh); goto out_bdev; } if (memcmp(EXT3_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) { ext3_msg(sb, KERN_ERR, "error: journal UUID does not match"); brelse(bh); 
goto out_bdev; } len = le32_to_cpu(es->s_blocks_count); start = sb_block + 1; brelse(bh); /* we're done with the superblock */ journal = journal_init_dev(bdev, sb->s_bdev, start, len, blocksize); if (!journal) { ext3_msg(sb, KERN_ERR, "error: failed to create device journal"); goto out_bdev; } journal->j_private = sb; if (!bh_uptodate_or_lock(journal->j_sb_buffer)) { if (bh_submit_read(journal->j_sb_buffer)) { ext3_msg(sb, KERN_ERR, "I/O error on journal device"); goto out_journal; } } if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) { ext3_msg(sb, KERN_ERR, "error: external journal has more than one " "user (unsupported) - %d", be32_to_cpu(journal->j_superblock->s_nr_users)); goto out_journal; } EXT3_SB(sb)->journal_bdev = bdev; ext3_init_journal_params(sb, journal); return journal; out_journal: journal_destroy(journal); out_bdev: ext3_blkdev_put(bdev); return NULL; } static int ext3_load_journal(struct super_block *sb, struct ext3_super_block *es, unsigned long journal_devnum) { journal_t *journal; unsigned int journal_inum = le32_to_cpu(es->s_journal_inum); dev_t journal_dev; int err = 0; int really_read_only; if (journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { ext3_msg(sb, KERN_INFO, "external journal device major/minor " "numbers have changed"); journal_dev = new_decode_dev(journal_devnum); } else journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); really_read_only = bdev_read_only(sb->s_bdev); /* * Are we loading a blank journal or performing recovery after a * crash? For recovery, we need to check in advance whether we * can get read-write access to the device. 
*/ if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)) { if (sb->s_flags & MS_RDONLY) { ext3_msg(sb, KERN_INFO, "recovery required on readonly filesystem"); if (really_read_only) { ext3_msg(sb, KERN_ERR, "error: write access " "unavailable, cannot proceed"); return -EROFS; } ext3_msg(sb, KERN_INFO, "write access will be enabled during recovery"); } } if (journal_inum && journal_dev) { ext3_msg(sb, KERN_ERR, "error: filesystem has both journal " "and inode journals"); return -EINVAL; } if (journal_inum) { if (!(journal = ext3_get_journal(sb, journal_inum))) return -EINVAL; } else { if (!(journal = ext3_get_dev_journal(sb, journal_dev))) return -EINVAL; } if (!(journal->j_flags & JFS_BARRIER)) printk(KERN_INFO "EXT3-fs: barriers not enabled\n"); if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) { err = journal_update_format(journal); if (err) { ext3_msg(sb, KERN_ERR, "error updating journal"); journal_destroy(journal); return err; } } if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)) err = journal_wipe(journal, !really_read_only); if (!err) err = journal_load(journal); if (err) { ext3_msg(sb, KERN_ERR, "error loading journal"); journal_destroy(journal); return err; } EXT3_SB(sb)->s_journal = journal; ext3_clear_journal_err(sb, es); if (!really_read_only && journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { es->s_journal_dev = cpu_to_le32(journal_devnum); /* Make sure we flush the recovery flag to disk. 
*/ ext3_commit_super(sb, es, 1); } return 0; } static int ext3_create_journal(struct super_block *sb, struct ext3_super_block *es, unsigned int journal_inum) { journal_t *journal; int err; if (sb->s_flags & MS_RDONLY) { ext3_msg(sb, KERN_ERR, "error: readonly filesystem when trying to " "create journal"); return -EROFS; } journal = ext3_get_journal(sb, journal_inum); if (!journal) return -EINVAL; ext3_msg(sb, KERN_INFO, "creating new journal on inode %u", journal_inum); err = journal_create(journal); if (err) { ext3_msg(sb, KERN_ERR, "error creating journal"); journal_destroy(journal); return -EIO; } EXT3_SB(sb)->s_journal = journal; ext3_update_dynamic_rev(sb); EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL); es->s_journal_inum = cpu_to_le32(journal_inum); /* Make sure we flush the recovery flag to disk. */ ext3_commit_super(sb, es, 1); return 0; } static int ext3_commit_super(struct super_block *sb, struct ext3_super_block *es, int sync) { struct buffer_head *sbh = EXT3_SB(sb)->s_sbh; int error = 0; if (!sbh) return error; if (buffer_write_io_error(sbh)) { /* * Oh, dear. A previous attempt to write the * superblock failed. This could happen because the * USB device was yanked out. Or it could happen to * be a transient write error and maybe the block will * be remapped. Nothing we can do but to retry the * write and hope for the best. */ ext3_msg(sb, KERN_ERR, "previous I/O error to " "superblock detected"); clear_buffer_write_io_error(sbh); set_buffer_uptodate(sbh); } /* * If the file system is mounted read-only, don't update the * superblock write time. 
This avoids updating the superblock * write time when we are mounting the root file system * read/only but we need to replay the journal; at that point, * for people who are east of GMT and who make their clock * tick in localtime for Windows bug-for-bug compatibility, * the clock is set in the future, and this will cause e2fsck * to complain and force a full file system check. */ if (!(sb->s_flags & MS_RDONLY)) es->s_wtime = cpu_to_le32(get_seconds()); es->s_free_blocks_count = cpu_to_le32(ext3_count_free_blocks(sb)); es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb)); BUFFER_TRACE(sbh, "marking dirty"); mark_buffer_dirty(sbh); if (sync) { error = sync_dirty_buffer(sbh); if (buffer_write_io_error(sbh)) { ext3_msg(sb, KERN_ERR, "I/O error while writing " "superblock"); clear_buffer_write_io_error(sbh); set_buffer_uptodate(sbh); } } return error; } /* * Have we just finished recovery? If so, and if we are mounting (or * remounting) the filesystem readonly, then we will end up with a * consistent fs on disk. Record that fact. */ static void ext3_mark_recovery_complete(struct super_block * sb, struct ext3_super_block * es) { journal_t *journal = EXT3_SB(sb)->s_journal; journal_lock_updates(journal); if (journal_flush(journal) < 0) goto out; if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) && sb->s_flags & MS_RDONLY) { EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); ext3_commit_super(sb, es, 1); } out: journal_unlock_updates(journal); } /* * If we are mounting (or read-write remounting) a filesystem whose journal * has recorded an error from a previous lifetime, move that error to the * main filesystem now. 
*/ static void ext3_clear_journal_err(struct super_block *sb, struct ext3_super_block *es) { journal_t *journal; int j_errno; const char *errstr; journal = EXT3_SB(sb)->s_journal; /* * Now check for any error status which may have been recorded in the * journal by a prior ext3_error() or ext3_abort() */ j_errno = journal_errno(journal); if (j_errno) { char nbuf[16]; errstr = ext3_decode_error(sb, j_errno, nbuf); ext3_warning(sb, __func__, "Filesystem error recorded " "from previous mount: %s", errstr); ext3_warning(sb, __func__, "Marking fs in need of " "filesystem check."); EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS; es->s_state |= cpu_to_le16(EXT3_ERROR_FS); ext3_commit_super (sb, es, 1); journal_clear_err(journal); } } /* * Force the running and committing transactions to commit, * and wait on the commit. */ int ext3_force_commit(struct super_block *sb) { journal_t *journal; int ret; if (sb->s_flags & MS_RDONLY) return 0; journal = EXT3_SB(sb)->s_journal; ret = ext3_journal_force_commit(journal); return ret; } static int ext3_sync_fs(struct super_block *sb, int wait) { tid_t target; trace_ext3_sync_fs(sb, wait); if (journal_start_commit(EXT3_SB(sb)->s_journal, &target)) { if (wait) log_wait_commit(EXT3_SB(sb)->s_journal, target); } return 0; } /* * LVM calls this function before a (read-only) snapshot is created. This * gives us a chance to flush the journal completely and mark the fs clean. */ static int ext3_freeze(struct super_block *sb) { int error = 0; journal_t *journal; if (!(sb->s_flags & MS_RDONLY)) { journal = EXT3_SB(sb)->s_journal; /* Now we set up the journal barrier. */ journal_lock_updates(journal); /* * We don't want to clear needs_recovery flag when we failed * to flush the journal. */ error = journal_flush(journal); if (error < 0) goto out; /* Journal blocked and flushed, clear needs_recovery flag. 
*/ EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); error = ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1); if (error) goto out; } return 0; out: journal_unlock_updates(journal); return error; } /* * Called by LVM after the snapshot is done. We need to reset the RECOVER * flag here, even though the filesystem is not technically dirty yet. */ static int ext3_unfreeze(struct super_block *sb) { if (!(sb->s_flags & MS_RDONLY)) { lock_super(sb); /* Reser the needs_recovery flag before the fs is unlocked. */ EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1); unlock_super(sb); journal_unlock_updates(EXT3_SB(sb)->s_journal); } return 0; } static int ext3_remount (struct super_block * sb, int * flags, char * data) { struct ext3_super_block * es; struct ext3_sb_info *sbi = EXT3_SB(sb); ext3_fsblk_t n_blocks_count = 0; unsigned long old_sb_flags; struct ext3_mount_options old_opts; int enable_quota = 0; int err; #ifdef CONFIG_QUOTA int i; #endif /* Store the original options */ lock_super(sb); old_sb_flags = sb->s_flags; old_opts.s_mount_opt = sbi->s_mount_opt; old_opts.s_resuid = sbi->s_resuid; old_opts.s_resgid = sbi->s_resgid; old_opts.s_commit_interval = sbi->s_commit_interval; #ifdef CONFIG_QUOTA old_opts.s_jquota_fmt = sbi->s_jquota_fmt; for (i = 0; i < MAXQUOTAS; i++) old_opts.s_qf_names[i] = sbi->s_qf_names[i]; #endif /* * Allow the "check" option to be passed as a remount option. */ if (!parse_options(data, sb, NULL, NULL, &n_blocks_count, 1)) { err = -EINVAL; goto restore_opts; } if (test_opt(sb, ABORT)) ext3_abort(sb, __func__, "Abort forced by user"); sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sb, POSIX_ACL) ? 
MS_POSIXACL : 0); es = sbi->s_es; ext3_init_journal_params(sb, sbi->s_journal); if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) || n_blocks_count > le32_to_cpu(es->s_blocks_count)) { if (test_opt(sb, ABORT)) { err = -EROFS; goto restore_opts; } if (*flags & MS_RDONLY) { err = dquot_suspend(sb, -1); if (err < 0) goto restore_opts; /* * First of all, the unconditional stuff we have to do * to disable replay of the journal when we next remount */ sb->s_flags |= MS_RDONLY; /* * OK, test if we are remounting a valid rw partition * readonly, and if so set the rdonly flag and then * mark the partition as valid again. */ if (!(es->s_state & cpu_to_le16(EXT3_VALID_FS)) && (sbi->s_mount_state & EXT3_VALID_FS)) es->s_state = cpu_to_le16(sbi->s_mount_state); ext3_mark_recovery_complete(sb, es); } else { __le32 ret; if ((ret = EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP))) { ext3_msg(sb, KERN_WARNING, "warning: couldn't remount RDWR " "because of unsupported optional " "features (%x)", le32_to_cpu(ret)); err = -EROFS; goto restore_opts; } /* * If we have an unprocessed orphan list hanging * around from a previously readonly bdev mount, * require a full umount & mount for now. */ if (es->s_last_orphan) { ext3_msg(sb, KERN_WARNING, "warning: couldn't " "remount RDWR because of unprocessed " "orphan inode list. Please " "umount & mount instead."); err = -EINVAL; goto restore_opts; } /* * Mounting a RDONLY partition read-write, so reread * and store the current valid flag. (It may have * been changed by e2fsck since we originally mounted * the partition.) 
*/ ext3_clear_journal_err(sb, es); sbi->s_mount_state = le16_to_cpu(es->s_state); if ((err = ext3_group_extend(sb, es, n_blocks_count))) goto restore_opts; if (!ext3_setup_super (sb, es, 0)) sb->s_flags &= ~MS_RDONLY; enable_quota = 1; } } #ifdef CONFIG_QUOTA /* Release old quota file names */ for (i = 0; i < MAXQUOTAS; i++) if (old_opts.s_qf_names[i] && old_opts.s_qf_names[i] != sbi->s_qf_names[i]) kfree(old_opts.s_qf_names[i]); #endif unlock_super(sb); if (enable_quota) dquot_resume(sb, -1); return 0; restore_opts: sb->s_flags = old_sb_flags; sbi->s_mount_opt = old_opts.s_mount_opt; sbi->s_resuid = old_opts.s_resuid; sbi->s_resgid = old_opts.s_resgid; sbi->s_commit_interval = old_opts.s_commit_interval; #ifdef CONFIG_QUOTA sbi->s_jquota_fmt = old_opts.s_jquota_fmt; for (i = 0; i < MAXQUOTAS; i++) { if (sbi->s_qf_names[i] && old_opts.s_qf_names[i] != sbi->s_qf_names[i]) kfree(sbi->s_qf_names[i]); sbi->s_qf_names[i] = old_opts.s_qf_names[i]; } #endif unlock_super(sb); return err; } static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf) { struct super_block *sb = dentry->d_sb; struct ext3_sb_info *sbi = EXT3_SB(sb); struct ext3_super_block *es = sbi->s_es; u64 fsid; if (test_opt(sb, MINIX_DF)) { sbi->s_overhead_last = 0; } else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) { unsigned long ngroups = sbi->s_groups_count, i; ext3_fsblk_t overhead = 0; smp_rmb(); /* * Compute the overhead (FS structures). This is constant * for a given filesystem unless the number of block groups * changes so we cache the previous value until it does. */ /* * All of the blocks before first_data_block are * overhead */ overhead = le32_to_cpu(es->s_first_data_block); /* * Add the overhead attributed to the superblock and * block group descriptors. If the sparse superblocks * feature is turned on, then not all groups have this. 
*/ for (i = 0; i < ngroups; i++) { overhead += ext3_bg_has_super(sb, i) + ext3_bg_num_gdb(sb, i); cond_resched(); } /* * Every block group has an inode bitmap, a block * bitmap, and an inode table. */ overhead += ngroups * (2 + sbi->s_itb_per_group); sbi->s_overhead_last = overhead; smp_wmb(); sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count); } buf->f_type = EXT3_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = le32_to_cpu(es->s_blocks_count) - sbi->s_overhead_last; buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter); buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count); if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count)) buf->f_bavail = 0; buf->f_files = le32_to_cpu(es->s_inodes_count); buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter); buf->f_namelen = EXT3_NAME_LEN; fsid = le64_to_cpup((void *)es->s_uuid) ^ le64_to_cpup((void *)es->s_uuid + sizeof(u64)); buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL; buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL; return 0; } /* Helper function for writing quotas on sync - we need to start transaction before quota file * is locked for write. 
Otherwise the are possible deadlocks: * Process 1 Process 2 * ext3_create() quota_sync() * journal_start() write_dquot() * dquot_initialize() down(dqio_mutex) * down(dqio_mutex) journal_start() * */ #ifdef CONFIG_QUOTA static inline struct inode *dquot_to_inode(struct dquot *dquot) { return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; } static int ext3_write_dquot(struct dquot *dquot) { int ret, err; handle_t *handle; struct inode *inode; inode = dquot_to_inode(dquot); handle = ext3_journal_start(inode, EXT3_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); ret = dquot_commit(dquot); err = ext3_journal_stop(handle); if (!ret) ret = err; return ret; } static int ext3_acquire_dquot(struct dquot *dquot) { int ret, err; handle_t *handle; handle = ext3_journal_start(dquot_to_inode(dquot), EXT3_QUOTA_INIT_BLOCKS(dquot->dq_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); ret = dquot_acquire(dquot); err = ext3_journal_stop(handle); if (!ret) ret = err; return ret; } static int ext3_release_dquot(struct dquot *dquot) { int ret, err; handle_t *handle; handle = ext3_journal_start(dquot_to_inode(dquot), EXT3_QUOTA_DEL_BLOCKS(dquot->dq_sb)); if (IS_ERR(handle)) { /* Release dquot anyway to avoid endless cycle in dqput() */ dquot_release(dquot); return PTR_ERR(handle); } ret = dquot_release(dquot); err = ext3_journal_stop(handle); if (!ret) ret = err; return ret; } static int ext3_mark_dquot_dirty(struct dquot *dquot) { /* Are we journaling quotas? 
*/ if (EXT3_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] || EXT3_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) { dquot_mark_dquot_dirty(dquot); return ext3_write_dquot(dquot); } else { return dquot_mark_dquot_dirty(dquot); } } static int ext3_write_info(struct super_block *sb, int type) { int ret, err; handle_t *handle; /* Data block + inode block */ handle = ext3_journal_start(sb->s_root->d_inode, 2); if (IS_ERR(handle)) return PTR_ERR(handle); ret = dquot_commit_info(sb, type); err = ext3_journal_stop(handle); if (!ret) ret = err; return ret; } /* * Turn on quotas during mount time - we need to find * the quota file and such... */ static int ext3_quota_on_mount(struct super_block *sb, int type) { return dquot_quota_on_mount(sb, EXT3_SB(sb)->s_qf_names[type], EXT3_SB(sb)->s_jquota_fmt, type); } /* * Standard function to be called on quota_on */ static int ext3_quota_on(struct super_block *sb, int type, int format_id, struct path *path) { int err; if (!test_opt(sb, QUOTA)) return -EINVAL; /* Quotafile not on the same filesystem? */ if (path->dentry->d_sb != sb) return -EXDEV; /* Journaling quota? */ if (EXT3_SB(sb)->s_qf_names[type]) { /* Quotafile not of fs root? */ if (path->dentry->d_parent != sb->s_root) ext3_msg(sb, KERN_WARNING, "warning: Quota file not on filesystem root. " "Journaled quota will not work."); } /* * When we journal data on quota file, we have to flush journal to see * all updates to the file when we bypass pagecache... */ if (ext3_should_journal_data(path->dentry->d_inode)) { /* * We don't need to lock updates but journal_flush() could * otherwise be livelocked... */ journal_lock_updates(EXT3_SB(sb)->s_journal); err = journal_flush(EXT3_SB(sb)->s_journal); journal_unlock_updates(EXT3_SB(sb)->s_journal); if (err) return err; } return dquot_quota_on(sb, type, format_id, path); } /* Read data from quotafile - avoid pagecache and such because we cannot afford * acquiring the locks... 
As quota files are never truncated and quota code * itself serializes the operations (and no one else should touch the files) * we don't have to be afraid of races */ static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb); int err = 0; int offset = off & (sb->s_blocksize - 1); int tocopy; size_t toread; struct buffer_head *bh; loff_t i_size = i_size_read(inode); if (off > i_size) return 0; if (off+len > i_size) len = i_size-off; toread = len; while (toread > 0) { tocopy = sb->s_blocksize - offset < toread ? sb->s_blocksize - offset : toread; bh = ext3_bread(NULL, inode, blk, 0, &err); if (err) return err; if (!bh) /* A hole? */ memset(data, 0, tocopy); else memcpy(data, bh->b_data+offset, tocopy); brelse(bh); offset = 0; toread -= tocopy; data += tocopy; blk++; } return len; } /* Write to quotafile (we know the transaction is already started and has * enough credits) */ static ssize_t ext3_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb); int err = 0; int offset = off & (sb->s_blocksize - 1); int journal_quota = EXT3_SB(sb)->s_qf_names[type] != NULL; struct buffer_head *bh; handle_t *handle = journal_current_handle(); if (!handle) { ext3_msg(sb, KERN_WARNING, "warning: quota write (off=%llu, len=%llu)" " cancelled because transaction is not started.", (unsigned long long)off, (unsigned long long)len); return -EIO; } /* * Since we account only one data block in transaction credits, * then it is impossible to cross a block boundary. 
*/ if (sb->s_blocksize - offset < len) { ext3_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" " cancelled because not block aligned", (unsigned long long)off, (unsigned long long)len); return -EIO; } mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); bh = ext3_bread(handle, inode, blk, 1, &err); if (!bh) goto out; if (journal_quota) { err = ext3_journal_get_write_access(handle, bh); if (err) { brelse(bh); goto out; } } lock_buffer(bh); memcpy(bh->b_data+offset, data, len); flush_dcache_page(bh->b_page); unlock_buffer(bh); if (journal_quota) err = ext3_journal_dirty_metadata(handle, bh); else { /* Always do at least ordered writes for quotas */ err = ext3_journal_dirty_data(handle, bh); mark_buffer_dirty(bh); } brelse(bh); out: if (err) { mutex_unlock(&inode->i_mutex); return err; } if (inode->i_size < off + len) { i_size_write(inode, off + len); EXT3_I(inode)->i_disksize = inode->i_size; } inode->i_version++; inode->i_mtime = inode->i_ctime = CURRENT_TIME; ext3_mark_inode_dirty(handle, inode); mutex_unlock(&inode->i_mutex); return len; } #endif static struct dentry *ext3_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, ext3_fill_super); } static struct file_system_type ext3_fs_type = { .owner = THIS_MODULE, .name = "ext3", .mount = ext3_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; static int __init init_ext3_fs(void) { int err = init_ext3_xattr(); if (err) return err; err = init_inodecache(); if (err) goto out1; err = register_filesystem(&ext3_fs_type); if (err) goto out; return 0; out: destroy_inodecache(); out1: exit_ext3_xattr(); return err; } static void __exit exit_ext3_fs(void) { unregister_filesystem(&ext3_fs_type); destroy_inodecache(); exit_ext3_xattr(); } MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); MODULE_DESCRIPTION("Second Extended Filesystem with journaling extensions"); 
MODULE_LICENSE("GPL"); module_init(init_ext3_fs) module_exit(exit_ext3_fs)
gpl-2.0
bilalliberty/android_kernel_htc_memul
fs/ext3/super.c
3138
85488
/* * linux/fs/ext3/super.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/parser.h> #include <linux/exportfs.h> #include <linux/statfs.h> #include <linux/random.h> #include <linux/mount.h> #include <linux/quotaops.h> #include <linux/seq_file.h> #include <linux/log2.h> #include <linux/cleancache.h> #include <asm/uaccess.h> #define CREATE_TRACE_POINTS #include "ext3.h" #include "xattr.h" #include "acl.h" #include "namei.h" #ifdef CONFIG_EXT3_DEFAULTS_TO_ORDERED #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_ORDERED_DATA #else #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_WRITEBACK_DATA #endif static int ext3_load_journal(struct super_block *, struct ext3_super_block *, unsigned long journal_devnum); static int ext3_create_journal(struct super_block *, struct ext3_super_block *, unsigned int); static int ext3_commit_super(struct super_block *sb, struct ext3_super_block *es, int sync); static void ext3_mark_recovery_complete(struct super_block * sb, struct ext3_super_block * es); static void ext3_clear_journal_err(struct super_block * sb, struct ext3_super_block * es); static int ext3_sync_fs(struct super_block *sb, int wait); static const char *ext3_decode_error(struct super_block * sb, int errno, char nbuf[16]); static int ext3_remount (struct super_block * sb, int * flags, char * data); static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf); static int ext3_unfreeze(struct super_block *sb); static int ext3_freeze(struct super_block *sb); /* * Wrappers for journal_start/end. 
* * The only special thing we need to do here is to make sure that all * journal_end calls result in the superblock being marked dirty, so * that sync() will call the filesystem's write_super callback if * appropriate. */ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks) { journal_t *journal; if (sb->s_flags & MS_RDONLY) return ERR_PTR(-EROFS); /* Special case here: if the journal has aborted behind our * backs (eg. EIO in the commit thread), then we still need to * take the FS itself readonly cleanly. */ journal = EXT3_SB(sb)->s_journal; if (is_journal_aborted(journal)) { ext3_abort(sb, __func__, "Detected aborted journal"); return ERR_PTR(-EROFS); } return journal_start(journal, nblocks); } /* * The only special thing we need to do here is to make sure that all * journal_stop calls result in the superblock being marked dirty, so * that sync() will call the filesystem's write_super callback if * appropriate. */ int __ext3_journal_stop(const char *where, handle_t *handle) { struct super_block *sb; int err; int rc; sb = handle->h_transaction->t_journal->j_private; err = handle->h_err; rc = journal_stop(handle); if (!err) err = rc; if (err) __ext3_std_error(sb, where, err); return err; } void ext3_journal_abort_handle(const char *caller, const char *err_fn, struct buffer_head *bh, handle_t *handle, int err) { char nbuf[16]; const char *errstr = ext3_decode_error(NULL, err, nbuf); if (bh) BUFFER_TRACE(bh, "abort"); if (!handle->h_err) handle->h_err = err; if (is_handle_aborted(handle)) return; printk(KERN_ERR "EXT3-fs: %s: aborting transaction: %s in %s\n", caller, errstr, err_fn); journal_abort_handle(handle); } void ext3_msg(struct super_block *sb, const char *prefix, const char *fmt, ...) 
{ struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk("%sEXT3-fs (%s): %pV\n", prefix, sb->s_id, &vaf); va_end(args); } /* Deal with the reporting of failure conditions on a filesystem such as * inconsistencies detected or read IO failures. * * On ext2, we can store the error state of the filesystem in the * superblock. That is not possible on ext3, because we may have other * write ordering constraints on the superblock which prevent us from * writing it out straight away; and given that the journal is about to * be aborted, we can't rely on the current, or future, transactions to * write out the superblock safely. * * We'll just use the journal_abort() error code to record an error in * the journal instead. On recovery, the journal will complain about * that error until we've noted it down and cleared it. */ static void ext3_handle_error(struct super_block *sb) { struct ext3_super_block *es = EXT3_SB(sb)->s_es; EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS; es->s_state |= cpu_to_le16(EXT3_ERROR_FS); if (sb->s_flags & MS_RDONLY) return; if (!test_opt (sb, ERRORS_CONT)) { journal_t *journal = EXT3_SB(sb)->s_journal; set_opt(EXT3_SB(sb)->s_mount_opt, ABORT); if (journal) journal_abort(journal, -EIO); } if (test_opt (sb, ERRORS_RO)) { ext3_msg(sb, KERN_CRIT, "error: remounting filesystem read-only"); sb->s_flags |= MS_RDONLY; } ext3_commit_super(sb, es, 1); if (test_opt(sb, ERRORS_PANIC)) panic("EXT3-fs (%s): panic forced after error\n", sb->s_id); } void ext3_error(struct super_block *sb, const char *function, const char *fmt, ...) 
{ struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT3-fs error (device %s): %s: %pV\n", sb->s_id, function, &vaf); va_end(args); ext3_handle_error(sb); } static const char *ext3_decode_error(struct super_block * sb, int errno, char nbuf[16]) { char *errstr = NULL; switch (errno) { case -EIO: errstr = "IO failure"; break; case -ENOMEM: errstr = "Out of memory"; break; case -EROFS: if (!sb || EXT3_SB(sb)->s_journal->j_flags & JFS_ABORT) errstr = "Journal has aborted"; else errstr = "Readonly filesystem"; break; default: /* If the caller passed in an extra buffer for unknown * errors, textualise them now. Else we just return * NULL. */ if (nbuf) { /* Check for truncated error codes... */ if (snprintf(nbuf, 16, "error %d", -errno) >= 0) errstr = nbuf; } break; } return errstr; } /* __ext3_std_error decodes expected errors from journaling functions * automatically and invokes the appropriate error response. */ void __ext3_std_error (struct super_block * sb, const char * function, int errno) { char nbuf[16]; const char *errstr; /* Special case: if the error is EROFS, and we're not already * inside a transaction, then there's really no point in logging * an error. */ if (errno == -EROFS && journal_current_handle() == NULL && (sb->s_flags & MS_RDONLY)) return; errstr = ext3_decode_error(sb, errno, nbuf); ext3_msg(sb, KERN_CRIT, "error in %s: %s", function, errstr); ext3_handle_error(sb); } /* * ext3_abort is a much stronger failure handler than ext3_error. The * abort function may be used to deal with unrecoverable failures such * as journal IO errors or ENOMEM at a critical moment in log management. * * We unconditionally force the filesystem into an ABORT|READONLY state, * unless the error response on the fs has been set to panic in which * case we take the easy way out and panic immediately. */ void ext3_abort(struct super_block *sb, const char *function, const char *fmt, ...) 
{ struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT3-fs (%s): error: %s: %pV\n", sb->s_id, function, &vaf); va_end(args); if (test_opt(sb, ERRORS_PANIC)) panic("EXT3-fs: panic from previous error\n"); if (sb->s_flags & MS_RDONLY) return; ext3_msg(sb, KERN_CRIT, "error: remounting filesystem read-only"); EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS; sb->s_flags |= MS_RDONLY; set_opt(EXT3_SB(sb)->s_mount_opt, ABORT); if (EXT3_SB(sb)->s_journal) journal_abort(EXT3_SB(sb)->s_journal, -EIO); } void ext3_warning(struct super_block *sb, const char *function, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_WARNING "EXT3-fs (%s): warning: %s: %pV\n", sb->s_id, function, &vaf); va_end(args); } void ext3_update_dynamic_rev(struct super_block *sb) { struct ext3_super_block *es = EXT3_SB(sb)->s_es; if (le32_to_cpu(es->s_rev_level) > EXT3_GOOD_OLD_REV) return; ext3_msg(sb, KERN_WARNING, "warning: updating to rev %d because of " "new feature flag, running e2fsck is recommended", EXT3_DYNAMIC_REV); es->s_first_ino = cpu_to_le32(EXT3_GOOD_OLD_FIRST_INO); es->s_inode_size = cpu_to_le16(EXT3_GOOD_OLD_INODE_SIZE); es->s_rev_level = cpu_to_le32(EXT3_DYNAMIC_REV); /* leave es->s_feature_*compat flags alone */ /* es->s_uuid will be set by e2fsck if empty */ /* * The rest of the superblock fields should be zero, and if not it * means they are likely already in use, so leave them alone. We * can leave it up to e2fsck to clean up any inconsistencies there. 
*/ } /* * Open the external journal device */ static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb) { struct block_device *bdev; char b[BDEVNAME_SIZE]; bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); if (IS_ERR(bdev)) goto fail; return bdev; fail: ext3_msg(sb, "error: failed to open journal device %s: %ld", __bdevname(dev, b), PTR_ERR(bdev)); return NULL; } /* * Release the journal device */ static int ext3_blkdev_put(struct block_device *bdev) { return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } static int ext3_blkdev_remove(struct ext3_sb_info *sbi) { struct block_device *bdev; int ret = -ENODEV; bdev = sbi->journal_bdev; if (bdev) { ret = ext3_blkdev_put(bdev); sbi->journal_bdev = NULL; } return ret; } static inline struct inode *orphan_list_entry(struct list_head *l) { return &list_entry(l, struct ext3_inode_info, i_orphan)->vfs_inode; } static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi) { struct list_head *l; ext3_msg(sb, KERN_ERR, "error: sb orphan head is %d", le32_to_cpu(sbi->s_es->s_last_orphan)); ext3_msg(sb, KERN_ERR, "sb_info orphan list:"); list_for_each(l, &sbi->s_orphan) { struct inode *inode = orphan_list_entry(l); ext3_msg(sb, KERN_ERR, " " "inode %s:%lu at %p: mode %o, nlink %d, next %d\n", inode->i_sb->s_id, inode->i_ino, inode, inode->i_mode, inode->i_nlink, NEXT_ORPHAN(inode)); } } static void ext3_put_super (struct super_block * sb) { struct ext3_sb_info *sbi = EXT3_SB(sb); struct ext3_super_block *es = sbi->s_es; int i, err; dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); ext3_xattr_put_super(sb); err = journal_destroy(sbi->s_journal); sbi->s_journal = NULL; if (err < 0) ext3_abort(sb, __func__, "Couldn't clean up the journal"); if (!(sb->s_flags & MS_RDONLY)) { EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); es->s_state = cpu_to_le16(sbi->s_mount_state); BUFFER_TRACE(sbi->s_sbh, "marking dirty"); 
mark_buffer_dirty(sbi->s_sbh);
		ext3_commit_super(sb, es, 1);
	}

	/* release group descriptor buffers and per-cpu counters */
	for (i = 0; i < sbi->s_gdb_count; i++)
		brelse(sbi->s_group_desc[i]);
	kfree(sbi->s_group_desc);
	percpu_counter_destroy(&sbi->s_freeblocks_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	brelse(sbi->s_sbh);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	J_ASSERT(list_empty(&sbi->s_orphan));

	invalidate_bdev(sb->s_bdev);
	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
		/*
		 * Invalidate the journal device's buffers.  We don't want them
		 * floating about in memory - the physical journal device may
		 * hotswapped, and it breaks the `ro-after' testing code.
		 */
		sync_blockdev(sbi->journal_bdev);
		invalidate_bdev(sbi->journal_bdev);
		ext3_blkdev_remove(sbi);
	}
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
	kfree(sbi);
}

/* slab cache backing all in-memory ext3 inodes */
static struct kmem_cache *ext3_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext3_alloc_inode(struct super_block *sb)
{
	struct ext3_inode_info *ei;

	ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;
	ei->i_block_alloc_info = NULL;
	ei->vfs_inode.i_version = 1;
	atomic_set(&ei->i_datasync_tid, 0);
	atomic_set(&ei->i_sync_tid, 0);
	return &ei->vfs_inode;
}

/* Let the VFS decide whether to drop; we only add a tracepoint around it. */
static int ext3_drop_inode(struct inode *inode)
{
	int drop = generic_drop_inode(inode);

	trace_ext3_drop_inode(inode, drop);
	return drop;
}

/* RCU callback: actually free the inode memory after a grace period. */
static void ext3_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(ext3_inode_cachep, EXT3_I(inode));
}

/* Sanity-check and free an inode; complains loudly if it is still orphaned. */
static void ext3_destroy_inode(struct inode *inode)
{
	if (!list_empty(&(EXT3_I(inode)->i_orphan))) {
		printk("EXT3 Inode %p: orphan list check failed!\n",
			EXT3_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
				EXT3_I(inode), sizeof(struct ext3_inode_info),
				false);
		dump_stack();
	}
	call_rcu(&inode->i_rcu, ext3_i_callback);
}

/* Slab constructor: runs once per object when a slab page is created. */
static void init_once(void *foo)
{
	struct ext3_inode_info *ei = (struct ext3_inode_info *) foo;

	INIT_LIST_HEAD(&ei->i_orphan);
#ifdef CONFIG_EXT3_FS_XATTR
	init_rwsem(&ei->xattr_sem);
#endif
	mutex_init(&ei->truncate_mutex);
	inode_init_once(&ei->vfs_inode);
}

/* Create the inode slab cache; returns 0 or -ENOMEM. */
static int init_inodecache(void)
{
	ext3_inode_cachep = kmem_cache_create("ext3_inode_cache",
					     sizeof(struct ext3_inode_info),
					     0, (SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD),
					     init_once);
	if (ext3_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	kmem_cache_destroy(ext3_inode_cachep);
}

/* Append journaled-quota related mount options to /proc/mounts output. */
static inline void ext3_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
	struct ext3_sb_info *sbi = EXT3_SB(sb);

	if (sbi->s_jquota_fmt) {
		char
*fmtname = "";

		switch (sbi->s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (sbi->s_qf_names[USRQUOTA])
		seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);

	if (sbi->s_qf_names[GRPQUOTA])
		seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);

	if (test_opt(sb, USRQUOTA))
		seq_puts(seq, ",usrquota");

	if (test_opt(sb, GRPQUOTA))
		seq_puts(seq, ",grpquota");
#endif
}

/* Human-readable name for the data= journalling mode bits. */
static char *data_mode_string(unsigned long mode)
{
	switch (mode) {
	case EXT3_MOUNT_JOURNAL_DATA:
		return "journal";
	case EXT3_MOUNT_ORDERED_DATA:
		return "ordered";
	case EXT3_MOUNT_WRITEBACK_DATA:
		return "writeback";
	}
	return "unknown";
}

/*
 * Show an option if
 *  - it's set to a non-default value OR
 *  - if the per-sb default is different from the global default
 */
static int ext3_show_options(struct seq_file *seq, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	struct ext3_super_block *es = sbi->s_es;
	unsigned long def_mount_opts;

	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);

	if (sbi->s_sb_block != 1)
		seq_printf(seq, ",sb=%lu", sbi->s_sb_block);
	if (test_opt(sb, MINIX_DF))
		seq_puts(seq, ",minixdf");
	if (test_opt(sb, GRPID))
		seq_puts(seq, ",grpid");
	if (!test_opt(sb, GRPID) && (def_mount_opts & EXT3_DEFM_BSDGROUPS))
		seq_puts(seq, ",nogrpid");
	if (sbi->s_resuid != EXT3_DEF_RESUID ||
	    le16_to_cpu(es->s_def_resuid) != EXT3_DEF_RESUID) {
		seq_printf(seq, ",resuid=%u", sbi->s_resuid);
	}
	if (sbi->s_resgid != EXT3_DEF_RESGID ||
	    le16_to_cpu(es->s_def_resgid) != EXT3_DEF_RESGID) {
		seq_printf(seq, ",resgid=%u", sbi->s_resgid);
	}
	if (test_opt(sb, ERRORS_RO)) {
		int def_errors = le16_to_cpu(es->s_errors);

		/* only shown when it differs from the on-disk default */
		if (def_errors == EXT3_ERRORS_PANIC ||
		    def_errors == EXT3_ERRORS_CONTINUE) {
			seq_puts(seq, ",errors=remount-ro");
		}
	}
	if (test_opt(sb, ERRORS_CONT))
		seq_puts(seq, ",errors=continue");
	if (test_opt(sb, ERRORS_PANIC))
		seq_puts(seq, ",errors=panic");
	if (test_opt(sb, NO_UID32))
		seq_puts(seq, ",nouid32");
	if (test_opt(sb, DEBUG))
		seq_puts(seq, ",debug");
#ifdef CONFIG_EXT3_FS_XATTR
	if (test_opt(sb, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	if (!test_opt(sb, XATTR_USER) &&
	    (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
		seq_puts(seq, ",nouser_xattr");
	}
#endif
#ifdef CONFIG_EXT3_FS_POSIX_ACL
	if (test_opt(sb, POSIX_ACL))
		seq_puts(seq, ",acl");
	if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT3_DEFM_ACL))
		seq_puts(seq, ",noacl");
#endif
	if (!test_opt(sb, RESERVATION))
		seq_puts(seq, ",noreservation");
	if (sbi->s_commit_interval) {
		seq_printf(seq, ",commit=%u",
			   (unsigned) (sbi->s_commit_interval / HZ));
	}

	/*
	 * Always display barrier state so it's clear what the status is.
	 */
	seq_puts(seq, ",barrier=");
	seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
	seq_printf(seq, ",data=%s", data_mode_string(test_opt(sb, DATA_FLAGS)));
	if (test_opt(sb, DATA_ERR_ABORT))
		seq_puts(seq, ",data_err=abort");

	if (test_opt(sb, NOLOAD))
		seq_puts(seq, ",norecovery");

	ext3_show_quota_options(seq, sb);
	return 0;
}

/* NFS export helper: turn an inode number + generation into an inode. */
static struct inode *ext3_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct inode *inode;

	/* reject reserved or out-of-range inode numbers up front */
	if (ino < EXT3_FIRST_INO(sb) && ino != EXT3_ROOT_INO)
		return ERR_PTR(-ESTALE);
	if (ino > le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count))
		return ERR_PTR(-ESTALE);

	/* iget isn't really right if the inode is currently unallocated!!
	 *
	 * ext3_read_inode will return a bad_inode if the inode had been
	 * deleted, so we should be safe.
*
	 * Currently we don't know the generation for parent directory, so
	 * a generation of 0 means "accept any"
	 */
	inode = ext3_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		/* stale file handle: the inode number was reused */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

/* Decode an NFS file handle into a dentry for the object itself. */
static struct dentry *ext3_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext3_nfs_get_inode);
}

/* Decode an NFS file handle into a dentry for the object's parent. */
static struct dentry *ext3_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext3_nfs_get_inode);
}

/*
 * Try to release metadata pages (indirect blocks, directories) which are
 * mapped via the block device.  Since these pages could have journal heads
 * which would prevent try_to_free_buffers() from freeing them, we must use
 * jbd layer's try_to_free_buffers() function to release them.
 */
static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
				 gfp_t wait)
{
	journal_t *journal = EXT3_SB(sb)->s_journal;

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	if (journal)
		return journal_try_to_free_buffers(journal, page,
						   wait & ~__GFP_WAIT);
	return try_to_free_buffers(page);
}

#ifdef CONFIG_QUOTA
#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))

static int ext3_write_dquot(struct dquot *dquot);
static int ext3_acquire_dquot(struct dquot *dquot);
static int ext3_release_dquot(struct dquot *dquot);
static int ext3_mark_dquot_dirty(struct dquot *dquot);
static int ext3_write_info(struct super_block *sb, int type);
static int ext3_quota_on(struct super_block *sb, int type, int format_id,
			 struct path *path);
static int ext3_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext3_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);

static const struct dquot_operations ext3_quota_operations = {
	.write_dquot	= ext3_write_dquot,
	.acquire_dquot	= ext3_acquire_dquot,
	.release_dquot	= ext3_release_dquot,
	.mark_dirty	= ext3_mark_dquot_dirty,
	.write_info	= ext3_write_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
};

static const struct quotactl_ops ext3_qctl_operations = {
	.quota_on	= ext3_quota_on,
	.quota_off	= dquot_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_info	= dquot_get_dqinfo,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk
};
#endif

static const struct super_operations ext3_sops = {
	.alloc_inode	= ext3_alloc_inode,
	.destroy_inode	= ext3_destroy_inode,
	.write_inode	= ext3_write_inode,
	.dirty_inode	= ext3_dirty_inode,
	.drop_inode	= ext3_drop_inode,
	.evict_inode	= ext3_evict_inode,
	.put_super	= ext3_put_super,
	.sync_fs	= ext3_sync_fs,
	.freeze_fs	= ext3_freeze,
	.unfreeze_fs	= ext3_unfreeze,
	.statfs		= ext3_statfs,
	.remount_fs	= ext3_remount,
	.show_options	= ext3_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext3_quota_read,
	.quota_write	= ext3_quota_write,
#endif
	.bdev_try_to_free_page = bdev_try_to_free_page,
};

static const struct export_operations ext3_export_ops = {
	.fh_to_dentry = ext3_fh_to_dentry,
	.fh_to_parent = ext3_fh_to_parent,
	.get_parent = ext3_get_parent,
};

/* Mount-option token identifiers, matched against the tokens[] table below. */
enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic,
	Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc,
	Opt_orlov, Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
	Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
	Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore,
	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0,
Opt_jqfmt_vfsv1, Opt_quota, Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err, Opt_resize, Opt_usrquota, Opt_grpquota }; static const match_table_t tokens = { {Opt_bsd_df, "bsddf"}, {Opt_minix_df, "minixdf"}, {Opt_grpid, "grpid"}, {Opt_grpid, "bsdgroups"}, {Opt_nogrpid, "nogrpid"}, {Opt_nogrpid, "sysvgroups"}, {Opt_resgid, "resgid=%u"}, {Opt_resuid, "resuid=%u"}, {Opt_sb, "sb=%u"}, {Opt_err_cont, "errors=continue"}, {Opt_err_panic, "errors=panic"}, {Opt_err_ro, "errors=remount-ro"}, {Opt_nouid32, "nouid32"}, {Opt_nocheck, "nocheck"}, {Opt_nocheck, "check=none"}, {Opt_debug, "debug"}, {Opt_oldalloc, "oldalloc"}, {Opt_orlov, "orlov"}, {Opt_user_xattr, "user_xattr"}, {Opt_nouser_xattr, "nouser_xattr"}, {Opt_acl, "acl"}, {Opt_noacl, "noacl"}, {Opt_reservation, "reservation"}, {Opt_noreservation, "noreservation"}, {Opt_noload, "noload"}, {Opt_noload, "norecovery"}, {Opt_nobh, "nobh"}, {Opt_bh, "bh"}, {Opt_commit, "commit=%u"}, {Opt_journal_update, "journal=update"}, {Opt_journal_inum, "journal=%u"}, {Opt_journal_dev, "journal_dev=%u"}, {Opt_abort, "abort"}, {Opt_data_journal, "data=journal"}, {Opt_data_ordered, "data=ordered"}, {Opt_data_writeback, "data=writeback"}, {Opt_data_err_abort, "data_err=abort"}, {Opt_data_err_ignore, "data_err=ignore"}, {Opt_offusrjquota, "usrjquota="}, {Opt_usrjquota, "usrjquota=%s"}, {Opt_offgrpjquota, "grpjquota="}, {Opt_grpjquota, "grpjquota=%s"}, {Opt_jqfmt_vfsold, "jqfmt=vfsold"}, {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"}, {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"}, {Opt_grpquota, "grpquota"}, {Opt_noquota, "noquota"}, {Opt_quota, "quota"}, {Opt_usrquota, "usrquota"}, {Opt_barrier, "barrier=%u"}, {Opt_barrier, "barrier"}, {Opt_nobarrier, "nobarrier"}, {Opt_resize, "resize"}, {Opt_err, NULL}, }; static ext3_fsblk_t get_sb_block(void **data, struct super_block *sb) { ext3_fsblk_t sb_block; char *options = (char *) *data; if (!options || strncmp(options, "sb=", 3) != 0) return 1; /* Default location */ options += 3; /*todo: use simple_strtoll with 
>32bit ext3 */ sb_block = simple_strtoul(options, &options, 0); if (*options && *options != ',') { ext3_msg(sb, "error: invalid sb specification: %s", (char *) *data); return 1; } if (*options == ',') options++; *data = (void *) options; return sb_block; } #ifdef CONFIG_QUOTA static int set_qf_name(struct super_block *sb, int qtype, substring_t *args) { struct ext3_sb_info *sbi = EXT3_SB(sb); char *qname; if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) { ext3_msg(sb, KERN_ERR, "Cannot change journaled " "quota options when quota turned on"); return 0; } qname = match_strdup(args); if (!qname) { ext3_msg(sb, KERN_ERR, "Not enough memory for storing quotafile name"); return 0; } if (sbi->s_qf_names[qtype] && strcmp(sbi->s_qf_names[qtype], qname)) { ext3_msg(sb, KERN_ERR, "%s quota file already specified", QTYPE2NAME(qtype)); kfree(qname); return 0; } sbi->s_qf_names[qtype] = qname; if (strchr(sbi->s_qf_names[qtype], '/')) { ext3_msg(sb, KERN_ERR, "quotafile must be on filesystem root"); kfree(sbi->s_qf_names[qtype]); sbi->s_qf_names[qtype] = NULL; return 0; } set_opt(sbi->s_mount_opt, QUOTA); return 1; } static int clear_qf_name(struct super_block *sb, int qtype) { struct ext3_sb_info *sbi = EXT3_SB(sb); if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) { ext3_msg(sb, KERN_ERR, "Cannot change journaled quota options" " when quota turned on"); return 0; } /* * The space will be released later when all options are confirmed * to be correct */ sbi->s_qf_names[qtype] = NULL; return 1; } #endif static int parse_options (char *options, struct super_block *sb, unsigned int *inum, unsigned long *journal_devnum, ext3_fsblk_t *n_blocks_count, int is_remount) { struct ext3_sb_info *sbi = EXT3_SB(sb); char * p; substring_t args[MAX_OPT_ARGS]; int data_opt = 0; int option; #ifdef CONFIG_QUOTA int qfmt; #endif if (!options) return 1; while ((p = strsep (&options, ",")) != NULL) { int token; if (!*p) continue; /* * Initialize args struct so we know whether arg was * 
found; some options take optional arguments.
		 */
		args[0].to = args[0].from = 0;
		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_bsd_df:
			clear_opt (sbi->s_mount_opt, MINIX_DF);
			break;
		case Opt_minix_df:
			set_opt (sbi->s_mount_opt, MINIX_DF);
			break;
		case Opt_grpid:
			set_opt (sbi->s_mount_opt, GRPID);
			break;
		case Opt_nogrpid:
			clear_opt (sbi->s_mount_opt, GRPID);
			break;
		case Opt_resuid:
			if (match_int(&args[0], &option))
				return 0;
			sbi->s_resuid = option;
			break;
		case Opt_resgid:
			if (match_int(&args[0], &option))
				return 0;
			sbi->s_resgid = option;
			break;
		case Opt_sb:
			/* handled by get_sb_block() instead of here */
			/* *sb_block = match_int(&args[0]); */
			break;
		case Opt_err_panic:
			clear_opt (sbi->s_mount_opt, ERRORS_CONT);
			clear_opt (sbi->s_mount_opt, ERRORS_RO);
			set_opt (sbi->s_mount_opt, ERRORS_PANIC);
			break;
		case Opt_err_ro:
			clear_opt (sbi->s_mount_opt, ERRORS_CONT);
			clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
			set_opt (sbi->s_mount_opt, ERRORS_RO);
			break;
		case Opt_err_cont:
			clear_opt (sbi->s_mount_opt, ERRORS_RO);
			clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
			set_opt (sbi->s_mount_opt, ERRORS_CONT);
			break;
		case Opt_nouid32:
			set_opt (sbi->s_mount_opt, NO_UID32);
			break;
		case Opt_nocheck:
			clear_opt (sbi->s_mount_opt, CHECK);
			break;
		case Opt_debug:
			set_opt (sbi->s_mount_opt, DEBUG);
			break;
		case Opt_oldalloc:
			ext3_msg(sb, KERN_WARNING,
				"Ignoring deprecated oldalloc option");
			break;
		case Opt_orlov:
			ext3_msg(sb, KERN_WARNING,
				"Ignoring deprecated orlov option");
			break;
#ifdef CONFIG_EXT3_FS_XATTR
		case Opt_user_xattr:
			set_opt (sbi->s_mount_opt, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt (sbi->s_mount_opt, XATTR_USER);
			break;
#else
		case Opt_user_xattr:
		case Opt_nouser_xattr:
			ext3_msg(sb, KERN_INFO,
				"(no)user_xattr options not supported");
			break;
#endif
#ifdef CONFIG_EXT3_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi->s_mount_opt, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi->s_mount_opt, POSIX_ACL);
			break;
#else
		case Opt_acl:
		case Opt_noacl:
			ext3_msg(sb, KERN_INFO,
				"(no)acl options not supported");
			break;
#endif
		case Opt_reservation:
			set_opt(sbi->s_mount_opt, RESERVATION);
			break;
		case Opt_noreservation:
			clear_opt(sbi->s_mount_opt, RESERVATION);
			break;
		case Opt_journal_update:
			/* @@@ FIXME */
			/* Eventually we will want to be able to create
			   a journal file here.  For now, only allow the
			   user to specify an existing inode to be the
			   journal file. */
			if (is_remount) {
				ext3_msg(sb, KERN_ERR, "error: cannot specify "
					"journal on remount");
				return 0;
			}
			set_opt (sbi->s_mount_opt, UPDATE_JOURNAL);
			break;
		case Opt_journal_inum:
			if (is_remount) {
				ext3_msg(sb, KERN_ERR, "error: cannot specify "
				       "journal on remount");
				return 0;
			}
			if (match_int(&args[0], &option))
				return 0;
			*inum = option;
			break;
		case Opt_journal_dev:
			if (is_remount) {
				ext3_msg(sb, KERN_ERR, "error: cannot specify "
				       "journal on remount");
				return 0;
			}
			if (match_int(&args[0], &option))
				return 0;
			*journal_devnum = option;
			break;
		case Opt_noload:
			set_opt (sbi->s_mount_opt, NOLOAD);
			break;
		case Opt_commit:
			if (match_int(&args[0], &option))
				return 0;
			if (option < 0)
				return 0;
			/* commit=0 means "use the JBD default age" */
			if (option == 0)
				option = JBD_DEFAULT_MAX_COMMIT_AGE;
			sbi->s_commit_interval = HZ * option;
			break;
		case Opt_data_journal:
			data_opt = EXT3_MOUNT_JOURNAL_DATA;
			goto datacheck;
		case Opt_data_ordered:
			data_opt = EXT3_MOUNT_ORDERED_DATA;
			goto datacheck;
		case Opt_data_writeback:
			data_opt = EXT3_MOUNT_WRITEBACK_DATA;
		datacheck:
			if (is_remount) {
				/* the journalling mode cannot change on remount */
				if (test_opt(sb, DATA_FLAGS) == data_opt)
					break;
				ext3_msg(sb, KERN_ERR,
					"error: cannot change "
					"data mode on remount. The filesystem "
					"is mounted in data=%s mode and you "
					"try to remount it in data=%s mode.",
					data_mode_string(test_opt(sb,
							DATA_FLAGS)),
					data_mode_string(data_opt));
				return 0;
			} else {
				clear_opt(sbi->s_mount_opt, DATA_FLAGS);
				sbi->s_mount_opt |= data_opt;
			}
			break;
		case Opt_data_err_abort:
			set_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
			break;
		case Opt_data_err_ignore:
			clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
			break;
#ifdef CONFIG_QUOTA
		case Opt_usrjquota:
			if (!set_qf_name(sb, USRQUOTA, &args[0]))
				return 0;
			break;
		case Opt_grpjquota:
			if (!set_qf_name(sb, GRPQUOTA, &args[0]))
				return 0;
			break;
		case Opt_offusrjquota:
			if (!clear_qf_name(sb, USRQUOTA))
				return 0;
			break;
		case Opt_offgrpjquota:
			if (!clear_qf_name(sb, GRPQUOTA))
				return 0;
			break;
		case Opt_jqfmt_vfsold:
			qfmt = QFMT_VFS_OLD;
			goto set_qf_format;
		case Opt_jqfmt_vfsv0:
			qfmt = QFMT_VFS_V0;
			goto set_qf_format;
		case Opt_jqfmt_vfsv1:
			qfmt = QFMT_VFS_V1;
set_qf_format:
			if (sb_any_quota_loaded(sb) &&
			    sbi->s_jquota_fmt != qfmt) {
				ext3_msg(sb, KERN_ERR, "error: cannot change "
					"journaled quota options when "
					"quota turned on.");
				return 0;
			}
			sbi->s_jquota_fmt = qfmt;
			break;
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi->s_mount_opt, QUOTA);
			set_opt(sbi->s_mount_opt, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi->s_mount_opt, QUOTA);
			set_opt(sbi->s_mount_opt, GRPQUOTA);
			break;
		case Opt_noquota:
			if (sb_any_quota_loaded(sb)) {
				ext3_msg(sb, KERN_ERR, "error: cannot change "
					"quota options when quota turned on.");
				return 0;
			}
			clear_opt(sbi->s_mount_opt, QUOTA);
			clear_opt(sbi->s_mount_opt, USRQUOTA);
			clear_opt(sbi->s_mount_opt, GRPQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
			ext3_msg(sb, KERN_ERR,
				"error: quota options not supported.");
			break;
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
			ext3_msg(sb, KERN_ERR,
				"error: journaled quota options not "
				"supported.");
			break;
		case Opt_noquota:
			break;
#endif
		case Opt_abort:
			set_opt(sbi->s_mount_opt, ABORT);
			break;
		case Opt_nobarrier:
			clear_opt(sbi->s_mount_opt, BARRIER);
			break;
		case Opt_barrier:
			if (args[0].from) {
				if (match_int(&args[0], &option))
					return 0;
			} else
				option = 1;	/* No argument, default to 1 */
			if (option)
				set_opt(sbi->s_mount_opt, BARRIER);
			else
				clear_opt(sbi->s_mount_opt, BARRIER);
			break;
		case Opt_ignore:
			break;
		case Opt_resize:
			if (!is_remount) {
				ext3_msg(sb, KERN_ERR,
					"error: resize option only available "
					"for remount");
				return 0;
			}
			if (match_int(&args[0], &option) != 0)
				return 0;
			*n_blocks_count = option;
			break;
		case Opt_nobh:
			ext3_msg(sb, KERN_WARNING,
				"warning: ignoring deprecated nobh option");
			break;
		case Opt_bh:
			ext3_msg(sb, KERN_WARNING,
				"warning: ignoring deprecated bh option");
			break;
		default:
			ext3_msg(sb, KERN_ERR,
				"error: unrecognized mount option \"%s\" "
				"or missing value", p);
			return 0;
		}
	}
#ifdef CONFIG_QUOTA
	/* cross-check: journaled and non-journaled quota must not mix */
	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
			clear_opt(sbi->s_mount_opt, USRQUOTA);
		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
			clear_opt(sbi->s_mount_opt, GRPQUOTA);

		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
			ext3_msg(sb, KERN_ERR, "error: old and new quota "
					"format mixing.");
			return 0;
		}

		if (!sbi->s_jquota_fmt) {
			ext3_msg(sb, KERN_ERR, "error: journaled quota format "
					"not specified.");
			return 0;
		}
	} else {
		if (sbi->s_jquota_fmt) {
			ext3_msg(sb, KERN_ERR, "error: journaled quota format "
					"specified with no journaling "
					"enabled.");
			return 0;
		}
	}
#endif
	return 1;
}

/* Post-parse sanity checks and superblock bookkeeping at mount time. */
static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
			    int read_only)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	int res = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT3_MAX_SUPP_REV) {
		ext3_msg(sb, KERN_ERR,
			"error: revision level too high, "
			"forcing read-only mode");
		res = MS_RDONLY;
	}
	if (read_only)
		return res;
	if (!(sbi->s_mount_state & EXT3_VALID_FS))
		ext3_msg(sb, KERN_WARNING,
"warning: mounting unchecked fs, "
			"running e2fsck is recommended");
	else if ((sbi->s_mount_state & EXT3_ERROR_FS))
		ext3_msg(sb, KERN_WARNING,
			"warning: mounting fs with errors, "
			"running e2fsck is recommended");
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
		le16_to_cpu(es->s_mnt_count) >=
			le16_to_cpu(es->s_max_mnt_count))
		ext3_msg(sb, KERN_WARNING,
			"warning: maximal mount count reached, "
			"running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		(le32_to_cpu(es->s_lastcheck) +
			le32_to_cpu(es->s_checkinterval) <= get_seconds()))
		ext3_msg(sb, KERN_WARNING,
			"warning: checktime reached, "
			"running e2fsck is recommended");
#if 0
		/* @@@ We _will_ want to clear the valid bit if we find
		   inconsistencies, to force a fsck at reboot.  But for
		   a plain journaled filesystem we can keep it set as
		   valid forever! :) */
	es->s_state &= cpu_to_le16(~EXT3_VALID_FS);
#endif
	/* record this mount: bump the mount count and stamp the mount time */
	if (!le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	es->s_mtime = cpu_to_le32(get_seconds());
	ext3_update_dynamic_rev(sb);
	/* read-write mount: recovery will be needed if we crash from here */
	EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);

	ext3_commit_super(sb, es, 1);
	if (test_opt(sb, DEBUG))
		ext3_msg(sb, KERN_INFO, "[bs=%lu, gc=%lu, "
				"bpg=%lu, ipg=%lu, mo=%04lx]",
			sb->s_blocksize,
			sbi->s_groups_count,
			EXT3_BLOCKS_PER_GROUP(sb),
			EXT3_INODES_PER_GROUP(sb),
			sbi->s_mount_opt);

	if (EXT3_SB(sb)->s_journal->j_inode == NULL) {
		char b[BDEVNAME_SIZE];

		ext3_msg(sb, KERN_INFO, "using external journal on %s",
			bdevname(EXT3_SB(sb)->s_journal->j_dev, b));
	} else {
		ext3_msg(sb, KERN_INFO, "using internal journal");
	}
	cleancache_init_fs(sb);
	return res;
}

/* Called at mount-time, super-block is locked */
static int ext3_check_descriptors(struct super_block *sb)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	int i;

	ext3_debug ("Checking group descriptors");

	/* verify each group's bitmaps and inode table lie inside the group */
	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext3_group_desc *gdp = ext3_get_group_desc(sb, i, NULL);
		ext3_fsblk_t first_block = ext3_group_first_block_no(sb, i);
		ext3_fsblk_t last_block;

		if (i == sbi->s_groups_count - 1)
			last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
		else
			last_block = first_block +
				(EXT3_BLOCKS_PER_GROUP(sb) - 1);

		if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
		    le32_to_cpu(gdp->bg_block_bitmap) > last_block)
		{
			ext3_error (sb, "ext3_check_descriptors",
				    "Block bitmap for group %d"
				    " not in group (block %lu)!",
				    i, (unsigned long)
					le32_to_cpu(gdp->bg_block_bitmap));
			return 0;
		}
		if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block ||
		    le32_to_cpu(gdp->bg_inode_bitmap) > last_block)
		{
			ext3_error (sb, "ext3_check_descriptors",
				    "Inode bitmap for group %d"
				    " not in group (block %lu)!",
				    i, (unsigned long)
					le32_to_cpu(gdp->bg_inode_bitmap));
			return 0;
		}
		if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
		    le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 >
		    last_block)
		{
			ext3_error (sb, "ext3_check_descriptors",
				    "Inode table for group %d"
				    " not in group (block %lu)!",
				    i, (unsigned long)
					le32_to_cpu(gdp->bg_inode_table));
			return 0;
		}
	}

	sbi->s_es->s_free_blocks_count=cpu_to_le32(ext3_count_free_blocks(sb));
	sbi->s_es->s_free_inodes_count=cpu_to_le32(ext3_count_free_inodes(sb));
	return 1;
}

/* ext3_orphan_cleanup() walks a singly-linked list of inodes (starting at
 * the superblock) which were deleted from all directories, but held open by
 * a process at the time of a crash.  We walk the list and try to delete these
 * inodes at recovery time (only with a read-write filesystem).
 *
 * In order to keep the orphan inode chain consistent during traversal (in
 * case of crash during recovery), we link each inode into the superblock
 * orphan list_head and handle it the same way as an inode deletion during
 * normal operation (which journals the operations for us).
 *
 * We only do an iget() and an iput() on each inode, which is very safe if we
 * accidentally point at an in-use or already deleted inode.
The worst that
 * can happen in this case is that we get a "bit already cleared" message from
 * ext3_free_inode().  The only reason we would point at a wrong inode is if
 * e2fsck was run on this filesystem, and it must have already done the orphan
 * inode cleanup for us, so we can safely abort without any further action.
 */
static void ext3_orphan_cleanup (struct super_block * sb,
				 struct ext3_super_block * es)
{
	unsigned int s_flags = sb->s_flags;
	int nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
	int i;
#endif
	if (!es->s_last_orphan) {
		jbd_debug(4, "no orphan inodes to clean up\n");
		return;
	}

	if (bdev_read_only(sb->s_bdev)) {
		ext3_msg(sb, KERN_ERR, "error: write access "
			"unavailable, skipping orphan cleanup.");
		return;
	}

	/* Check if feature set allows readwrite operations */
	if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) {
		ext3_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
			 "unknown ROCOMPAT features");
		return;
	}

	if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
		/* fs with errors: leave the on-disk list to e2fsck */
		if (es->s_last_orphan)
			jbd_debug(1, "Errors on filesystem, "
				  "clearing orphan list.\n");
		es->s_last_orphan = 0;
		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
		return;
	}

	if (s_flags & MS_RDONLY) {
		/* temporarily go read-write; restored at the end */
		ext3_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
		sb->s_flags &= ~MS_RDONLY;
	}
#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sb->s_flags |= MS_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	for (i = 0; i < MAXQUOTAS; i++) {
		if (EXT3_SB(sb)->s_qf_names[i]) {
			int ret = ext3_quota_on_mount(sb, i);
			if (ret < 0)
				ext3_msg(sb, KERN_ERR,
					"error: cannot turn on journaled "
					"quota: %d", ret);
		}
	}
#endif

	/* process the on-disk orphan chain one head at a time */
	while (es->s_last_orphan) {
		struct inode *inode;

		inode = ext3_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
		if (IS_ERR(inode)) {
			es->s_last_orphan = 0;
			break;
		}

		list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
		dquot_initialize(inode);
		if (inode->i_nlink) {
			/* still linked: crash mid-truncate, finish it */
			printk(KERN_DEBUG
				"%s: truncating inode %lu to %Ld bytes\n",
				__func__, inode->i_ino, inode->i_size);
			jbd_debug(2, "truncating inode %lu to %Ld bytes\n",
				  inode->i_ino, inode->i_size);
			ext3_truncate(inode);
			nr_truncates++;
		} else {
			printk(KERN_DEBUG
				"%s: deleting unreferenced inode %lu\n",
				__func__, inode->i_ino);
			jbd_debug(2, "deleting unreferenced inode %lu\n",
				  inode->i_ino);
			nr_orphans++;
		}
		iput(inode);  /* The delete magic happens here! */
	}

#define PLURAL(x) (x), ((x)==1) ? "" : "s"

	if (nr_orphans)
		ext3_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
		       PLURAL(nr_orphans));
	if (nr_truncates)
		ext3_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
		       PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	for (i = 0; i < MAXQUOTAS; i++) {
		if (sb_dqopt(sb)->files[i])
			dquot_quota_off(sb, i);
	}
#endif
	sb->s_flags = s_flags; /* Restore MS_RDONLY status */
}

/*
 * Maximal file size.  There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^32 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^32 sector limit.
*/
static loff_t ext3_max_size(int bits)
{
	loff_t res = EXT3_NDIR_BLOCKS;
	int meta_blocks;
	loff_t upper_limit;

	/* This is calculated to be the largest file size for a
	 * dense, file such that the total number of
	 * sectors in the file, including data and all indirect blocks,
	 * does not exceed 2^32 -1
	 * __u32 i_blocks representing the total number of
	 * 512 bytes blocks of the file
	 */
	upper_limit = (1LL << 32) - 1;

	/* total blocks in file system block size */
	upper_limit >>= (bits - 9);

	/* indirect blocks */
	meta_blocks = 1;
	/* double indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2));
	/* tripple indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));

	upper_limit -= meta_blocks;
	upper_limit <<= bits;

	/* sum of direct + single + double + triple indirect data blocks */
	res += 1LL << (bits-2);
	res += 1LL << (2*(bits-2));
	res += 1LL << (3*(bits-2));
	res <<= bits;
	if (res > upper_limit)
		res = upper_limit;

	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}

/* Locate group descriptor block nr, honouring the META_BG layout if set. */
static ext3_fsblk_t descriptor_loc(struct super_block *sb,
				   ext3_fsblk_t logic_sb_block,
				   int nr)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	unsigned long bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_META_BG) ||
	    nr < first_meta_bg)
		return (logic_sb_block + nr + 1);
	bg = sbi->s_desc_per_block * nr;
	if (ext3_bg_has_super(sb, bg))
		has_super = 1;
	return (has_super + ext3_group_first_block_no(sb, bg));
}

/* Mount entry point: read and validate the superblock, set up the fs. */
static int ext3_fill_super (struct super_block *sb, void *data, int silent)
{
	struct buffer_head * bh;
	struct ext3_super_block *es = NULL;
	struct ext3_sb_info *sbi;
	ext3_fsblk_t block;
	ext3_fsblk_t sb_block = get_sb_block(&data, sb);
	ext3_fsblk_t logic_sb_block;
	unsigned long offset = 0;
	unsigned int journal_inum = 0;
	unsigned long journal_devnum = 0;
	unsigned long def_mount_opts;
	struct inode *root;
	int blocksize;
	int hblock;
	int db_count;
	int i;
	int needs_recovery;
	int ret = -EINVAL;
	__le32 features;
	int err;

	sbi = kzalloc(sizeof(*sbi),
GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock) {
		kfree(sbi);
		return -ENOMEM;
	}
	sb->s_fs_info = sbi;
	sbi->s_mount_opt = 0;
	sbi->s_resuid = EXT3_DEF_RESUID;
	sbi->s_resgid = EXT3_DEF_RESGID;
	sbi->s_sb_block = sb_block;

	blocksize = sb_min_blocksize(sb, EXT3_MIN_BLOCK_SIZE);
	if (!blocksize) {
		ext3_msg(sb, KERN_ERR, "error: unable to set blocksize");
		goto out_fail;
	}

	/*
	 * The ext3 superblock will not be buffer aligned for other than 1kB
	 * block sizes.  We need to calculate the offset from buffer start.
	 */
	if (blocksize != EXT3_MIN_BLOCK_SIZE) {
		logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
		offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
	} else {
		logic_sb_block = sb_block;
	}

	if (!(bh = sb_bread(sb, logic_sb_block))) {
		ext3_msg(sb, KERN_ERR, "error: unable to read superblock");
		goto out_fail;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 *       some ext3 macro-instructions depend on its value
	 */
	es = (struct ext3_super_block *) (bh->b_data + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);
	if (sb->s_magic != EXT3_SUPER_MAGIC)
		goto cantfind_ext3;

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	if (def_mount_opts & EXT3_DEFM_DEBUG)
		set_opt(sbi->s_mount_opt, DEBUG);
	if (def_mount_opts & EXT3_DEFM_BSDGROUPS)
		set_opt(sbi->s_mount_opt, GRPID);
	if (def_mount_opts & EXT3_DEFM_UID16)
		set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT3_FS_XATTR
	if (def_mount_opts & EXT3_DEFM_XATTR_USER)
		set_opt(sbi->s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT3_FS_POSIX_ACL
	if (def_mount_opts & EXT3_DEFM_ACL)
		set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif
	if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_DATA)
		set_opt(sbi->s_mount_opt, JOURNAL_DATA);
	else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_ORDERED)
		set_opt(sbi->s_mount_opt, ORDERED_DATA);
	else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_WBACK)
		set_opt(sbi->s_mount_opt, WRITEBACK_DATA);

	if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC)
		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_CONTINUE)
		set_opt(sbi->s_mount_opt, ERRORS_CONT);
	else
		set_opt(sbi->s_mount_opt, ERRORS_RO);

	sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
	sbi->s_resgid = le16_to_cpu(es->s_def_resgid);

	/* enable barriers by default */
	set_opt(sbi->s_mount_opt, BARRIER);
	set_opt(sbi->s_mount_opt, RESERVATION);

	if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum,
			    NULL, 0))
		goto failed_mount;

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);

	if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV &&
	    (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) ||
	     EXT3_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
	     EXT3_HAS_INCOMPAT_FEATURE(sb, ~0U)))
		ext3_msg(sb, KERN_WARNING,
			"warning: feature flags set on rev 0 fs, "
			"running e2fsck is recommended");
	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	features = EXT3_HAS_INCOMPAT_FEATURE(sb, ~EXT3_FEATURE_INCOMPAT_SUPP);
	if (features) {
		ext3_msg(sb, KERN_ERR,
			"error: couldn't mount because of unsupported "
			"optional features (%x)", le32_to_cpu(features));
		goto failed_mount;
	}
	features = EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP);
	if (!(sb->s_flags & MS_RDONLY) && features) {
		ext3_msg(sb, KERN_ERR,
			"error: couldn't mount RDWR because of unsupported "
			"optional features (%x)", le32_to_cpu(features));
		goto failed_mount;
	}
	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);

	if (blocksize < EXT3_MIN_BLOCK_SIZE ||
	    blocksize > EXT3_MAX_BLOCK_SIZE) {
		ext3_msg(sb, KERN_ERR,
			"error: couldn't mount because of unsupported "
			"filesystem blocksize %d", blocksize);
		goto failed_mount;
	}

	hblock = bdev_logical_block_size(sb->s_bdev);
	if (sb->s_blocksize != blocksize) {
		/*
		 * Make sure the blocksize for the filesystem is larger
		 * than the hardware sectorsize for the machine.
		 */
		if (blocksize < hblock) {
			ext3_msg(sb, KERN_ERR,
				"error: fsblocksize %d too small for "
				"hardware sectorsize %d", blocksize, hblock);
			goto failed_mount;
		}

		/* re-read the superblock at the fs's real block size */
		brelse (bh);
		if (!sb_set_blocksize(sb, blocksize)) {
			ext3_msg(sb, KERN_ERR,
				"error: bad blocksize %d", blocksize);
			goto out_fail;
		}
		logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
		offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
		bh = sb_bread(sb, logic_sb_block);
		if (!bh) {
			ext3_msg(sb, KERN_ERR,
			       "error: can't read superblock on 2nd try");
			goto failed_mount;
		}
		es = (struct ext3_super_block *)(bh->b_data + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) {
			ext3_msg(sb, KERN_ERR,
				"error: magic mismatch");
			goto failed_mount;
		}
	}

	sb->s_maxbytes = ext3_max_size(sb->s_blocksize_bits);

	if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT3_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT3_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
if ((sbi->s_inode_size < EXT3_GOOD_OLD_INODE_SIZE) || (!is_power_of_2(sbi->s_inode_size)) || (sbi->s_inode_size > blocksize)) { ext3_msg(sb, KERN_ERR, "error: unsupported inode size: %d", sbi->s_inode_size); goto failed_mount; } } sbi->s_frag_size = EXT3_MIN_FRAG_SIZE << le32_to_cpu(es->s_log_frag_size); if (blocksize != sbi->s_frag_size) { ext3_msg(sb, KERN_ERR, "error: fragsize %lu != blocksize %u (unsupported)", sbi->s_frag_size, blocksize); goto failed_mount; } sbi->s_frags_per_block = 1; sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group); sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); if (EXT3_INODE_SIZE(sb) == 0 || EXT3_INODES_PER_GROUP(sb) == 0) goto cantfind_ext3; sbi->s_inodes_per_block = blocksize / EXT3_INODE_SIZE(sb); if (sbi->s_inodes_per_block == 0) goto cantfind_ext3; sbi->s_itb_per_group = sbi->s_inodes_per_group / sbi->s_inodes_per_block; sbi->s_desc_per_block = blocksize / sizeof(struct ext3_group_desc); sbi->s_sbh = bh; sbi->s_mount_state = le16_to_cpu(es->s_state); sbi->s_addr_per_block_bits = ilog2(EXT3_ADDR_PER_BLOCK(sb)); sbi->s_desc_per_block_bits = ilog2(EXT3_DESC_PER_BLOCK(sb)); for (i=0; i < 4; i++) sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); sbi->s_def_hash_version = es->s_def_hash_version; i = le32_to_cpu(es->s_flags); if (i & EXT2_FLAGS_UNSIGNED_HASH) sbi->s_hash_unsigned = 3; else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { #ifdef __CHAR_UNSIGNED__ es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); sbi->s_hash_unsigned = 3; #else es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); #endif } if (sbi->s_blocks_per_group > blocksize * 8) { ext3_msg(sb, KERN_ERR, "#blocks per group too big: %lu", sbi->s_blocks_per_group); goto failed_mount; } if (sbi->s_frags_per_group > blocksize * 8) { ext3_msg(sb, KERN_ERR, "error: #fragments per group too big: %lu", sbi->s_frags_per_group); goto failed_mount; } if (sbi->s_inodes_per_group > blocksize * 8) 
{ ext3_msg(sb, KERN_ERR, "error: #inodes per group too big: %lu", sbi->s_inodes_per_group); goto failed_mount; } err = generic_check_addressable(sb->s_blocksize_bits, le32_to_cpu(es->s_blocks_count)); if (err) { ext3_msg(sb, KERN_ERR, "error: filesystem is too large to mount safely"); if (sizeof(sector_t) < 8) ext3_msg(sb, KERN_ERR, "error: CONFIG_LBDAF not enabled"); ret = err; goto failed_mount; } if (EXT3_BLOCKS_PER_GROUP(sb) == 0) goto cantfind_ext3; sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) - le32_to_cpu(es->s_first_data_block) - 1) / EXT3_BLOCKS_PER_GROUP(sb)) + 1; db_count = DIV_ROUND_UP(sbi->s_groups_count, EXT3_DESC_PER_BLOCK(sb)); sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *), GFP_KERNEL); if (sbi->s_group_desc == NULL) { ext3_msg(sb, KERN_ERR, "error: not enough memory"); ret = -ENOMEM; goto failed_mount; } bgl_lock_init(sbi->s_blockgroup_lock); for (i = 0; i < db_count; i++) { block = descriptor_loc(sb, logic_sb_block, i); sbi->s_group_desc[i] = sb_bread(sb, block); if (!sbi->s_group_desc[i]) { ext3_msg(sb, KERN_ERR, "error: can't read group descriptor %d", i); db_count = i; goto failed_mount2; } } if (!ext3_check_descriptors (sb)) { ext3_msg(sb, KERN_ERR, "error: group descriptors corrupted"); goto failed_mount2; } sbi->s_gdb_count = db_count; get_random_bytes(&sbi->s_next_generation, sizeof(u32)); spin_lock_init(&sbi->s_next_gen_lock); /* per fileystem reservation list head & lock */ spin_lock_init(&sbi->s_rsv_window_lock); sbi->s_rsv_window_root = RB_ROOT; /* Add a single, static dummy reservation to the start of the * reservation window list --- it gives us a placeholder for * append-at-start-of-list which makes the allocation logic * _much_ simpler. 
*/ sbi->s_rsv_window_head.rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; sbi->s_rsv_window_head.rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; sbi->s_rsv_window_head.rsv_alloc_hit = 0; sbi->s_rsv_window_head.rsv_goal_size = 0; ext3_rsv_window_add(sb, &sbi->s_rsv_window_head); /* * set up enough so that it can read an inode */ sb->s_op = &ext3_sops; sb->s_export_op = &ext3_export_ops; sb->s_xattr = ext3_xattr_handlers; #ifdef CONFIG_QUOTA sb->s_qcop = &ext3_qctl_operations; sb->dq_op = &ext3_quota_operations; #endif memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid)); INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ mutex_init(&sbi->s_orphan_lock); mutex_init(&sbi->s_resize_lock); sb->s_root = NULL; needs_recovery = (es->s_last_orphan != 0 || EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)); /* * The first inode we look at is the journal inode. Don't try * root first: it may be modified in the journal! */ if (!test_opt(sb, NOLOAD) && EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) { if (ext3_load_journal(sb, es, journal_devnum)) goto failed_mount2; } else if (journal_inum) { if (ext3_create_journal(sb, es, journal_inum)) goto failed_mount2; } else { if (!silent) ext3_msg(sb, KERN_ERR, "error: no journal found. " "mounting ext3 over ext2?"); goto failed_mount2; } err = percpu_counter_init(&sbi->s_freeblocks_counter, ext3_count_free_blocks(sb)); if (!err) { err = percpu_counter_init(&sbi->s_freeinodes_counter, ext3_count_free_inodes(sb)); } if (!err) { err = percpu_counter_init(&sbi->s_dirs_counter, ext3_count_dirs(sb)); } if (err) { ext3_msg(sb, KERN_ERR, "error: insufficient memory"); ret = err; goto failed_mount3; } /* We have now updated the journal if required, so we can * validate the data journaling mode. 
*/ switch (test_opt(sb, DATA_FLAGS)) { case 0: /* No mode set, assume a default based on the journal capabilities: ORDERED_DATA if the journal can cope, else JOURNAL_DATA */ if (journal_check_available_features (sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) set_opt(sbi->s_mount_opt, DEFAULT_DATA_MODE); else set_opt(sbi->s_mount_opt, JOURNAL_DATA); break; case EXT3_MOUNT_ORDERED_DATA: case EXT3_MOUNT_WRITEBACK_DATA: if (!journal_check_available_features (sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) { ext3_msg(sb, KERN_ERR, "error: journal does not support " "requested data journaling mode"); goto failed_mount3; } default: break; } /* * The journal_load will have done any necessary log recovery, * so we can safely mount the rest of the filesystem now. */ root = ext3_iget(sb, EXT3_ROOT_INO); if (IS_ERR(root)) { ext3_msg(sb, KERN_ERR, "error: get root inode failed"); ret = PTR_ERR(root); goto failed_mount3; } if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { iput(root); ext3_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck"); goto failed_mount3; } sb->s_root = d_make_root(root); if (!sb->s_root) { ext3_msg(sb, KERN_ERR, "error: get root dentry failed"); ret = -ENOMEM; goto failed_mount3; } ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY); EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS; ext3_orphan_cleanup(sb, es); EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS; if (needs_recovery) { ext3_mark_recovery_complete(sb, es); ext3_msg(sb, KERN_INFO, "recovery complete"); } ext3_msg(sb, KERN_INFO, "mounted filesystem with %s data mode", test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ? "journal": test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? 
"ordered": "writeback"); return 0; cantfind_ext3: if (!silent) ext3_msg(sb, KERN_INFO, "error: can't find ext3 filesystem on dev %s.", sb->s_id); goto failed_mount; failed_mount3: percpu_counter_destroy(&sbi->s_freeblocks_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); journal_destroy(sbi->s_journal); failed_mount2: for (i = 0; i < db_count; i++) brelse(sbi->s_group_desc[i]); kfree(sbi->s_group_desc); failed_mount: #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif ext3_blkdev_remove(sbi); brelse(bh); out_fail: sb->s_fs_info = NULL; kfree(sbi->s_blockgroup_lock); kfree(sbi); return ret; } /* * Setup any per-fs journal parameters now. We'll do this both on * initial mount, once the journal has been initialised but before we've * done any recovery; and again on any subsequent remount. */ static void ext3_init_journal_params(struct super_block *sb, journal_t *journal) { struct ext3_sb_info *sbi = EXT3_SB(sb); if (sbi->s_commit_interval) journal->j_commit_interval = sbi->s_commit_interval; /* We could also set up an ext3-specific default for the commit * interval here, but for now we'll just fall back to the jbd * default. */ spin_lock(&journal->j_state_lock); if (test_opt(sb, BARRIER)) journal->j_flags |= JFS_BARRIER; else journal->j_flags &= ~JFS_BARRIER; if (test_opt(sb, DATA_ERR_ABORT)) journal->j_flags |= JFS_ABORT_ON_SYNCDATA_ERR; else journal->j_flags &= ~JFS_ABORT_ON_SYNCDATA_ERR; spin_unlock(&journal->j_state_lock); } static journal_t *ext3_get_journal(struct super_block *sb, unsigned int journal_inum) { struct inode *journal_inode; journal_t *journal; /* First, test for the existence of a valid inode on disk. Bad * things happen if we iget() an unused inode, as the subsequent * iput() will try to delete it. 
*/ journal_inode = ext3_iget(sb, journal_inum); if (IS_ERR(journal_inode)) { ext3_msg(sb, KERN_ERR, "error: no journal found"); return NULL; } if (!journal_inode->i_nlink) { make_bad_inode(journal_inode); iput(journal_inode); ext3_msg(sb, KERN_ERR, "error: journal inode is deleted"); return NULL; } jbd_debug(2, "Journal inode found at %p: %Ld bytes\n", journal_inode, journal_inode->i_size); if (!S_ISREG(journal_inode->i_mode)) { ext3_msg(sb, KERN_ERR, "error: invalid journal inode"); iput(journal_inode); return NULL; } journal = journal_init_inode(journal_inode); if (!journal) { ext3_msg(sb, KERN_ERR, "error: could not load journal inode"); iput(journal_inode); return NULL; } journal->j_private = sb; ext3_init_journal_params(sb, journal); return journal; } static journal_t *ext3_get_dev_journal(struct super_block *sb, dev_t j_dev) { struct buffer_head * bh; journal_t *journal; ext3_fsblk_t start; ext3_fsblk_t len; int hblock, blocksize; ext3_fsblk_t sb_block; unsigned long offset; struct ext3_super_block * es; struct block_device *bdev; bdev = ext3_blkdev_get(j_dev, sb); if (bdev == NULL) return NULL; blocksize = sb->s_blocksize; hblock = bdev_logical_block_size(bdev); if (blocksize < hblock) { ext3_msg(sb, KERN_ERR, "error: blocksize too small for journal device"); goto out_bdev; } sb_block = EXT3_MIN_BLOCK_SIZE / blocksize; offset = EXT3_MIN_BLOCK_SIZE % blocksize; set_blocksize(bdev, blocksize); if (!(bh = __bread(bdev, sb_block, blocksize))) { ext3_msg(sb, KERN_ERR, "error: couldn't read superblock of " "external journal"); goto out_bdev; } es = (struct ext3_super_block *) (bh->b_data + offset); if ((le16_to_cpu(es->s_magic) != EXT3_SUPER_MAGIC) || !(le32_to_cpu(es->s_feature_incompat) & EXT3_FEATURE_INCOMPAT_JOURNAL_DEV)) { ext3_msg(sb, KERN_ERR, "error: external journal has " "bad superblock"); brelse(bh); goto out_bdev; } if (memcmp(EXT3_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) { ext3_msg(sb, KERN_ERR, "error: journal UUID does not match"); brelse(bh); 
goto out_bdev; } len = le32_to_cpu(es->s_blocks_count); start = sb_block + 1; brelse(bh); /* we're done with the superblock */ journal = journal_init_dev(bdev, sb->s_bdev, start, len, blocksize); if (!journal) { ext3_msg(sb, KERN_ERR, "error: failed to create device journal"); goto out_bdev; } journal->j_private = sb; if (!bh_uptodate_or_lock(journal->j_sb_buffer)) { if (bh_submit_read(journal->j_sb_buffer)) { ext3_msg(sb, KERN_ERR, "I/O error on journal device"); goto out_journal; } } if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) { ext3_msg(sb, KERN_ERR, "error: external journal has more than one " "user (unsupported) - %d", be32_to_cpu(journal->j_superblock->s_nr_users)); goto out_journal; } EXT3_SB(sb)->journal_bdev = bdev; ext3_init_journal_params(sb, journal); return journal; out_journal: journal_destroy(journal); out_bdev: ext3_blkdev_put(bdev); return NULL; } static int ext3_load_journal(struct super_block *sb, struct ext3_super_block *es, unsigned long journal_devnum) { journal_t *journal; unsigned int journal_inum = le32_to_cpu(es->s_journal_inum); dev_t journal_dev; int err = 0; int really_read_only; if (journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { ext3_msg(sb, KERN_INFO, "external journal device major/minor " "numbers have changed"); journal_dev = new_decode_dev(journal_devnum); } else journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); really_read_only = bdev_read_only(sb->s_bdev); /* * Are we loading a blank journal or performing recovery after a * crash? For recovery, we need to check in advance whether we * can get read-write access to the device. 
*/ if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)) { if (sb->s_flags & MS_RDONLY) { ext3_msg(sb, KERN_INFO, "recovery required on readonly filesystem"); if (really_read_only) { ext3_msg(sb, KERN_ERR, "error: write access " "unavailable, cannot proceed"); return -EROFS; } ext3_msg(sb, KERN_INFO, "write access will be enabled during recovery"); } } if (journal_inum && journal_dev) { ext3_msg(sb, KERN_ERR, "error: filesystem has both journal " "and inode journals"); return -EINVAL; } if (journal_inum) { if (!(journal = ext3_get_journal(sb, journal_inum))) return -EINVAL; } else { if (!(journal = ext3_get_dev_journal(sb, journal_dev))) return -EINVAL; } if (!(journal->j_flags & JFS_BARRIER)) printk(KERN_INFO "EXT3-fs: barriers not enabled\n"); if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) { err = journal_update_format(journal); if (err) { ext3_msg(sb, KERN_ERR, "error updating journal"); journal_destroy(journal); return err; } } if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)) err = journal_wipe(journal, !really_read_only); if (!err) err = journal_load(journal); if (err) { ext3_msg(sb, KERN_ERR, "error loading journal"); journal_destroy(journal); return err; } EXT3_SB(sb)->s_journal = journal; ext3_clear_journal_err(sb, es); if (!really_read_only && journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { es->s_journal_dev = cpu_to_le32(journal_devnum); /* Make sure we flush the recovery flag to disk. 
*/ ext3_commit_super(sb, es, 1); } return 0; } static int ext3_create_journal(struct super_block *sb, struct ext3_super_block *es, unsigned int journal_inum) { journal_t *journal; int err; if (sb->s_flags & MS_RDONLY) { ext3_msg(sb, KERN_ERR, "error: readonly filesystem when trying to " "create journal"); return -EROFS; } journal = ext3_get_journal(sb, journal_inum); if (!journal) return -EINVAL; ext3_msg(sb, KERN_INFO, "creating new journal on inode %u", journal_inum); err = journal_create(journal); if (err) { ext3_msg(sb, KERN_ERR, "error creating journal"); journal_destroy(journal); return -EIO; } EXT3_SB(sb)->s_journal = journal; ext3_update_dynamic_rev(sb); EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL); es->s_journal_inum = cpu_to_le32(journal_inum); /* Make sure we flush the recovery flag to disk. */ ext3_commit_super(sb, es, 1); return 0; } static int ext3_commit_super(struct super_block *sb, struct ext3_super_block *es, int sync) { struct buffer_head *sbh = EXT3_SB(sb)->s_sbh; int error = 0; if (!sbh) return error; if (buffer_write_io_error(sbh)) { /* * Oh, dear. A previous attempt to write the * superblock failed. This could happen because the * USB device was yanked out. Or it could happen to * be a transient write error and maybe the block will * be remapped. Nothing we can do but to retry the * write and hope for the best. */ ext3_msg(sb, KERN_ERR, "previous I/O error to " "superblock detected"); clear_buffer_write_io_error(sbh); set_buffer_uptodate(sbh); } /* * If the file system is mounted read-only, don't update the * superblock write time. 
This avoids updating the superblock * write time when we are mounting the root file system * read/only but we need to replay the journal; at that point, * for people who are east of GMT and who make their clock * tick in localtime for Windows bug-for-bug compatibility, * the clock is set in the future, and this will cause e2fsck * to complain and force a full file system check. */ if (!(sb->s_flags & MS_RDONLY)) es->s_wtime = cpu_to_le32(get_seconds()); es->s_free_blocks_count = cpu_to_le32(ext3_count_free_blocks(sb)); es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb)); BUFFER_TRACE(sbh, "marking dirty"); mark_buffer_dirty(sbh); if (sync) { error = sync_dirty_buffer(sbh); if (buffer_write_io_error(sbh)) { ext3_msg(sb, KERN_ERR, "I/O error while writing " "superblock"); clear_buffer_write_io_error(sbh); set_buffer_uptodate(sbh); } } return error; } /* * Have we just finished recovery? If so, and if we are mounting (or * remounting) the filesystem readonly, then we will end up with a * consistent fs on disk. Record that fact. */ static void ext3_mark_recovery_complete(struct super_block * sb, struct ext3_super_block * es) { journal_t *journal = EXT3_SB(sb)->s_journal; journal_lock_updates(journal); if (journal_flush(journal) < 0) goto out; if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) && sb->s_flags & MS_RDONLY) { EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); ext3_commit_super(sb, es, 1); } out: journal_unlock_updates(journal); } /* * If we are mounting (or read-write remounting) a filesystem whose journal * has recorded an error from a previous lifetime, move that error to the * main filesystem now. 
 */
static void ext3_clear_journal_err(struct super_block *sb,
				   struct ext3_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	journal = EXT3_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext3_error() or ext3_abort()
	 */
	j_errno = journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext3_decode_error(sb, j_errno, nbuf);
		ext3_warning(sb, __func__, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
		ext3_warning(sb, __func__, "Marking fs in need of "
			     "filesystem check.");

		EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT3_ERROR_FS);
		ext3_commit_super (sb, es, 1);

		journal_clear_err(journal);
	}
}

/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.
 */
int ext3_force_commit(struct super_block *sb)
{
	journal_t *journal;
	int ret;

	if (sb->s_flags & MS_RDONLY)
		return 0;

	journal = EXT3_SB(sb)->s_journal;
	ret = ext3_journal_force_commit(journal);
	return ret;
}

/*
 * Kick off a journal commit; if @wait, block until that commit has
 * reached disk.  Returns 0 unconditionally.
 */
static int ext3_sync_fs(struct super_block *sb, int wait)
{
	tid_t target;

	trace_ext3_sync_fs(sb, wait);
	if (journal_start_commit(EXT3_SB(sb)->s_journal, &target)) {
		if (wait)
			log_wait_commit(EXT3_SB(sb)->s_journal, target);
	}
	return 0;
}

/*
 * LVM calls this function before a (read-only) snapshot is created.  This
 * gives us a chance to flush the journal completely and mark the fs clean.
 */
static int ext3_freeze(struct super_block *sb)
{
	int error = 0;
	journal_t *journal;

	if (!(sb->s_flags & MS_RDONLY)) {
		journal = EXT3_SB(sb)->s_journal;

		/* Now we set up the journal barrier. */
		journal_lock_updates(journal);

		/*
		 * We don't want to clear needs_recovery flag when we failed
		 * to flush the journal.
		 */
		error = journal_flush(journal);
		if (error < 0)
			goto out;

		/* Journal blocked and flushed, clear needs_recovery flag. */
		EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
		error = ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1);
		if (error)
			goto out;
	}
	/* NOTE: on success the journal stays locked until ext3_unfreeze(). */
	return 0;

out:
	journal_unlock_updates(journal);
	return error;
}

/*
 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
 * flag here, even though the filesystem is not technically dirty yet.
 */
static int ext3_unfreeze(struct super_block *sb)
{
	if (!(sb->s_flags & MS_RDONLY)) {
		lock_super(sb);
		/* Reset the needs_recovery flag before the fs is unlocked. */
		EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
		ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1);
		unlock_super(sb);
		/* Pairs with the journal_lock_updates() in ext3_freeze(). */
		journal_unlock_updates(EXT3_SB(sb)->s_journal);
	}
	return 0;
}

/*
 * Re-parse mount options and apply ro<->rw transitions and online resize.
 * On any failure every option is rolled back to its pre-remount value
 * via restore_opts.
 */
static int ext3_remount (struct super_block * sb, int * flags, char * data)
{
	struct ext3_super_block * es;
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	ext3_fsblk_t n_blocks_count = 0;
	unsigned long old_sb_flags;
	struct ext3_mount_options old_opts;
	int enable_quota = 0;
	int err;
#ifdef CONFIG_QUOTA
	int i;
#endif

	/* Store the original options */
	lock_super(sb);
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;
	old_opts.s_commit_interval = sbi->s_commit_interval;
#ifdef CONFIG_QUOTA
	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++)
		old_opts.s_qf_names[i] = sbi->s_qf_names[i];
#endif

	/*
	 * Allow the "check" option to be passed as a remount option.
	 */
	if (!parse_options(data, sb, NULL, NULL, &n_blocks_count, 1)) {
		err = -EINVAL;
		goto restore_opts;
	}

	if (test_opt(sb, ABORT))
		ext3_abort(sb, __func__, "Abort forced by user");

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);

	es = sbi->s_es;

	ext3_init_journal_params(sb, sbi->s_journal);

	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) ||
		n_blocks_count > le32_to_cpu(es->s_blocks_count)) {
		if (test_opt(sb, ABORT)) {
			err = -EROFS;
			goto restore_opts;
		}

		if (*flags & MS_RDONLY) {
			err = dquot_suspend(sb, -1);
			if (err < 0)
				goto restore_opts;

			/*
			 * First of all, the unconditional stuff we have to do
			 * to disable replay of the journal when we next remount
			 */
			sb->s_flags |= MS_RDONLY;

			/*
			 * OK, test if we are remounting a valid rw partition
			 * readonly, and if so set the rdonly flag and then
			 * mark the partition as valid again.
			 */
			if (!(es->s_state & cpu_to_le16(EXT3_VALID_FS)) &&
			    (sbi->s_mount_state & EXT3_VALID_FS))
				es->s_state = cpu_to_le16(sbi->s_mount_state);

			ext3_mark_recovery_complete(sb, es);
		} else {
			__le32 ret;
			if ((ret = EXT3_HAS_RO_COMPAT_FEATURE(sb,
					~EXT3_FEATURE_RO_COMPAT_SUPP))) {
				ext3_msg(sb, KERN_WARNING,
					"warning: couldn't remount RDWR "
					"because of unsupported optional "
					"features (%x)", le32_to_cpu(ret));
				err = -EROFS;
				goto restore_opts;
			}

			/*
			 * If we have an unprocessed orphan list hanging
			 * around from a previously readonly bdev mount,
			 * require a full umount & mount for now.
			 */
			if (es->s_last_orphan) {
				ext3_msg(sb, KERN_WARNING, "warning: couldn't "
					"remount RDWR because of unprocessed "
					"orphan inode list.  Please "
					"umount & mount instead.");
				err = -EINVAL;
				goto restore_opts;
			}

			/*
			 * Mounting a RDONLY partition read-write, so reread
			 * and store the current valid flag.  (It may have
			 * been changed by e2fsck since we originally mounted
			 * the partition.)
			 */
			ext3_clear_journal_err(sb, es);
			sbi->s_mount_state = le16_to_cpu(es->s_state);
			if ((err = ext3_group_extend(sb, es, n_blocks_count)))
				goto restore_opts;
			if (!ext3_setup_super (sb, es, 0))
				sb->s_flags &= ~MS_RDONLY;
			enable_quota = 1;
		}
	}
#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < MAXQUOTAS; i++)
		if (old_opts.s_qf_names[i] &&
		    old_opts.s_qf_names[i] != sbi->s_qf_names[i])
			kfree(old_opts.s_qf_names[i]);
#endif
	unlock_super(sb);

	if (enable_quota)
		dquot_resume(sb, -1);
	return 0;

restore_opts:
	/* Undo everything parse_options changed. */
	sb->s_flags = old_sb_flags;
	sbi->s_mount_opt = old_opts.s_mount_opt;
	sbi->s_resuid = old_opts.s_resuid;
	sbi->s_resgid = old_opts.s_resgid;
	sbi->s_commit_interval = old_opts.s_commit_interval;
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		if (sbi->s_qf_names[i] &&
		    old_opts.s_qf_names[i] != sbi->s_qf_names[i])
			kfree(sbi->s_qf_names[i]);
		sbi->s_qf_names[i] = old_opts.s_qf_names[i];
	}
#endif
	unlock_super(sb);
	return err;
}

/*
 * Fill in statfs(2) data.  The metadata overhead is expensive to compute,
 * so it is cached in s_overhead_last and only recomputed when the block
 * count changes (or ignored entirely with the minix_df option).
 */
static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	struct ext3_super_block *es = sbi->s_es;
	u64 fsid;

	if (test_opt(sb, MINIX_DF)) {
		sbi->s_overhead_last = 0;
	} else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) {
		unsigned long ngroups = sbi->s_groups_count, i;
		ext3_fsblk_t overhead = 0;
		smp_rmb();

		/*
		 * Compute the overhead (FS structures).  This is constant
		 * for a given filesystem unless the number of block groups
		 * changes so we cache the previous value until it does.
		 */

		/*
		 * All of the blocks before first_data_block are
		 * overhead
		 */
		overhead = le32_to_cpu(es->s_first_data_block);

		/*
		 * Add the overhead attributed to the superblock and
		 * block group descriptors.  If the sparse superblocks
		 * feature is turned on, then not all groups have this.
		 */
		for (i = 0; i < ngroups; i++) {
			overhead += ext3_bg_has_super(sb, i) +
				ext3_bg_num_gdb(sb, i);
			cond_resched();
		}

		/*
		 * Every block group has an inode bitmap, a block
		 * bitmap, and an inode table.
		 */
		overhead += ngroups * (2 + sbi->s_itb_per_group);
		sbi->s_overhead_last = overhead;
		smp_wmb();
		sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count);
	}

	buf->f_type = EXT3_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = le32_to_cpu(es->s_blocks_count) - sbi->s_overhead_last;
	buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter);
	buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
	if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
	buf->f_namelen = EXT3_NAME_LEN;
	/* f_fsid is derived by folding the 128-bit fs UUID to 64 bits. */
	fsid = le64_to_cpup((void *)es->s_uuid) ^
	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
	return 0;
}

/* Helper function for writing quotas on sync - we need to start transaction
 * before quota file is locked for write.
 * Otherwise there are possible deadlocks:
 *   Process 1                         Process 2
 *   ext3_create()                     quota_sync()
 *     journal_start()                   write_dquot()
 *     dquot_initialize()                  down(dqio_mutex)
 *       down(dqio_mutex)                    journal_start()
 *
 */

#ifdef CONFIG_QUOTA

/* Return the quota file inode for this dquot's type. */
static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
}

/* Commit a dquot to the quota file inside its own journal transaction. */
static int ext3_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext3_journal_start(inode,
					EXT3_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	err = ext3_journal_stop(handle);
	/* report the first failure, but always stop the handle */
	if (!ret)
		ret = err;
	return ret;
}

/* Read/initialize a dquot from disk under a journal transaction. */
static int ext3_acquire_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext3_journal_start(dquot_to_inode(dquot),
					EXT3_QUOTA_INIT_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

/* Release a dquot back to the quota file under a journal transaction. */
static int ext3_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext3_journal_start(dquot_to_inode(dquot),
					EXT3_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

/*
 * With journaled quota (a quota file name was given at mount time),
 * write the dquot immediately so the update lands in the journal;
 * otherwise just mark it dirty for later syncing.
 */
static int ext3_mark_dquot_dirty(struct dquot *dquot)
{
	/* Are we journaling quotas? */
	if (EXT3_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] ||
	    EXT3_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) {
		dquot_mark_dquot_dirty(dquot);
		return ext3_write_dquot(dquot);
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}

/* Write quota format info for @type under a small journal transaction. */
static int ext3_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext3_journal_start(sb->s_root->d_inode, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
 */
static int ext3_quota_on_mount(struct super_block *sb, int type)
{
	return dquot_quota_on_mount(sb, EXT3_SB(sb)->s_qf_names[type],
					EXT3_SB(sb)->s_jquota_fmt, type);
}

/*
 * Standard function to be called on quota_on
 */
static int ext3_quota_on(struct super_block *sb, int type, int format_id,
			 struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;
	/* Journaling quota? */
	if (EXT3_SB(sb)->s_qf_names[type]) {
		/* Quotafile not of fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext3_msg(sb, KERN_WARNING,
				"warning: Quota file not on filesystem root. "
				"Journaled quota will not work.");
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
	if (ext3_should_journal_data(path->dentry->d_inode)) {
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
		 */
		journal_lock_updates(EXT3_SB(sb)->s_journal);
		err = journal_flush(EXT3_SB(sb)->s_journal);
		journal_unlock_updates(EXT3_SB(sb)->s_journal);
		if (err)
			return err;
	}

	return dquot_quota_on(sb, type, format_id, path);
}

/*
 * Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks...
As quota files are never truncated and quota code * itself serializes the operations (and no one else should touch the files) * we don't have to be afraid of races */ static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb); int err = 0; int offset = off & (sb->s_blocksize - 1); int tocopy; size_t toread; struct buffer_head *bh; loff_t i_size = i_size_read(inode); if (off > i_size) return 0; if (off+len > i_size) len = i_size-off; toread = len; while (toread > 0) { tocopy = sb->s_blocksize - offset < toread ? sb->s_blocksize - offset : toread; bh = ext3_bread(NULL, inode, blk, 0, &err); if (err) return err; if (!bh) /* A hole? */ memset(data, 0, tocopy); else memcpy(data, bh->b_data+offset, tocopy); brelse(bh); offset = 0; toread -= tocopy; data += tocopy; blk++; } return len; } /* Write to quotafile (we know the transaction is already started and has * enough credits) */ static ssize_t ext3_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb); int err = 0; int offset = off & (sb->s_blocksize - 1); int journal_quota = EXT3_SB(sb)->s_qf_names[type] != NULL; struct buffer_head *bh; handle_t *handle = journal_current_handle(); if (!handle) { ext3_msg(sb, KERN_WARNING, "warning: quota write (off=%llu, len=%llu)" " cancelled because transaction is not started.", (unsigned long long)off, (unsigned long long)len); return -EIO; } /* * Since we account only one data block in transaction credits, * then it is impossible to cross a block boundary. 
*/ if (sb->s_blocksize - offset < len) { ext3_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" " cancelled because not block aligned", (unsigned long long)off, (unsigned long long)len); return -EIO; } mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); bh = ext3_bread(handle, inode, blk, 1, &err); if (!bh) goto out; if (journal_quota) { err = ext3_journal_get_write_access(handle, bh); if (err) { brelse(bh); goto out; } } lock_buffer(bh); memcpy(bh->b_data+offset, data, len); flush_dcache_page(bh->b_page); unlock_buffer(bh); if (journal_quota) err = ext3_journal_dirty_metadata(handle, bh); else { /* Always do at least ordered writes for quotas */ err = ext3_journal_dirty_data(handle, bh); mark_buffer_dirty(bh); } brelse(bh); out: if (err) { mutex_unlock(&inode->i_mutex); return err; } if (inode->i_size < off + len) { i_size_write(inode, off + len); EXT3_I(inode)->i_disksize = inode->i_size; } inode->i_version++; inode->i_mtime = inode->i_ctime = CURRENT_TIME; ext3_mark_inode_dirty(handle, inode); mutex_unlock(&inode->i_mutex); return len; } #endif static struct dentry *ext3_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, ext3_fill_super); } static struct file_system_type ext3_fs_type = { .owner = THIS_MODULE, .name = "ext3", .mount = ext3_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; static int __init init_ext3_fs(void) { int err = init_ext3_xattr(); if (err) return err; err = init_inodecache(); if (err) goto out1; err = register_filesystem(&ext3_fs_type); if (err) goto out; return 0; out: destroy_inodecache(); out1: exit_ext3_xattr(); return err; } static void __exit exit_ext3_fs(void) { unregister_filesystem(&ext3_fs_type); destroy_inodecache(); exit_ext3_xattr(); } MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); MODULE_DESCRIPTION("Second Extended Filesystem with journaling extensions"); 
MODULE_LICENSE("GPL"); module_init(init_ext3_fs) module_exit(exit_ext3_fs)
gpl-2.0
TEAM-RAZOR-DEVICES/kernel_oneplus_msm8994
drivers/char/ppdev.c
4162
19549
/* * linux/drivers/char/ppdev.c * * This is the code behind /dev/parport* -- it allows a user-space * application to use the parport subsystem. * * Copyright (C) 1998-2000, 2002 Tim Waugh <tim@cyberelk.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * A /dev/parportx device node represents an arbitrary device * on port 'x'. The following operations are possible: * * open do nothing, set up default IEEE 1284 protocol to be COMPAT * close release port and unregister device (if necessary) * ioctl * EXCL register device exclusively (may fail) * CLAIM (register device first time) parport_claim_or_block * RELEASE parport_release * SETMODE set the IEEE 1284 protocol to use for read/write * SETPHASE set the IEEE 1284 phase of a particular mode. Not to be * confused with ioctl(fd, SETPHASER, &stun). ;-) * DATADIR data_forward / data_reverse * WDATA write_data * RDATA read_data * WCONTROL write_control * RCONTROL read_control * FCONTROL frob_control * RSTATUS read_status * NEGOT parport_negotiate * YIELD parport_yield_blocking * WCTLONIRQ on interrupt, set control lines * CLRIRQ clear (and return) interrupt count * SETTIME sets device timeout (struct timeval) * GETTIME gets device timeout (struct timeval) * GETMODES gets hardware supported modes (unsigned int) * GETMODE gets the current IEEE1284 mode * GETPHASE gets the current IEEE1284 phase * GETFLAGS gets current (user-visible) flags * SETFLAGS sets current (user-visible) flags * read/write read or write in current IEEE 1284 protocol * select wait for interrupt (in readfds) * * Changes: * Added SETTIME/GETTIME ioctl, Fred Barnes, 1999. 
* * Arnaldo Carvalho de Melo <acme@conectiva.com.br> 2000/08/25 * - On error, copy_from_user and copy_to_user do not return -EFAULT, * They return the positive number of bytes *not* copied due to address * space errors. * * Added GETMODES/GETMODE/GETPHASE ioctls, Fred Barnes <frmb2@ukc.ac.uk>, 03/01/2001. * Added GETFLAGS/SETFLAGS ioctls, Fred Barnes, 04/2001 */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/device.h> #include <linux/ioctl.h> #include <linux/parport.h> #include <linux/ctype.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/major.h> #include <linux/ppdev.h> #include <linux/mutex.h> #include <linux/uaccess.h> #define PP_VERSION "ppdev: user-space parallel port driver" #define CHRDEV "ppdev" struct pp_struct { struct pardevice * pdev; wait_queue_head_t irq_wait; atomic_t irqc; unsigned int flags; int irqresponse; unsigned char irqctl; struct ieee1284_info state; struct ieee1284_info saved_state; long default_inactivity; }; /* pp_struct.flags bitfields */ #define PP_CLAIMED (1<<0) #define PP_EXCL (1<<1) /* Other constants */ #define PP_INTERRUPT_TIMEOUT (10 * HZ) /* 10s */ #define PP_BUFFER_SIZE 1024 #define PARDEVICE_MAX 8 /* ROUND_UP macro from fs/select.c */ #define ROUND_UP(x,y) (((x)+(y)-1)/(y)) static DEFINE_MUTEX(pp_do_mutex); static inline void pp_enable_irq (struct pp_struct *pp) { struct parport *port = pp->pdev->port; port->ops->enable_irq (port); } static ssize_t pp_read (struct file * file, char __user * buf, size_t count, loff_t * ppos) { unsigned int minor = iminor(file_inode(file)); struct pp_struct *pp = file->private_data; char * kbuffer; ssize_t bytes_read = 0; struct parport *pport; int mode; if (!(pp->flags & PP_CLAIMED)) { /* Don't have the port claimed */ pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } /* Trivial case. 
*/ if (count == 0) return 0; kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); if (!kbuffer) { return -ENOMEM; } pport = pp->pdev->port; mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); parport_set_timeout (pp->pdev, (file->f_flags & O_NONBLOCK) ? PARPORT_INACTIVITY_O_NONBLOCK : pp->default_inactivity); while (bytes_read == 0) { ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE); if (mode == IEEE1284_MODE_EPP) { /* various specials for EPP mode */ int flags = 0; size_t (*fn)(struct parport *, void *, size_t, int); if (pp->flags & PP_W91284PIC) { flags |= PARPORT_W91284PIC; } if (pp->flags & PP_FASTREAD) { flags |= PARPORT_EPP_FAST; } if (pport->ieee1284.mode & IEEE1284_ADDR) { fn = pport->ops->epp_read_addr; } else { fn = pport->ops->epp_read_data; } bytes_read = (*fn)(pport, kbuffer, need, flags); } else { bytes_read = parport_read (pport, kbuffer, need); } if (bytes_read != 0) break; if (file->f_flags & O_NONBLOCK) { bytes_read = -EAGAIN; break; } if (signal_pending (current)) { bytes_read = -ERESTARTSYS; break; } cond_resched(); } parport_set_timeout (pp->pdev, pp->default_inactivity); if (bytes_read > 0 && copy_to_user (buf, kbuffer, bytes_read)) bytes_read = -EFAULT; kfree (kbuffer); pp_enable_irq (pp); return bytes_read; } static ssize_t pp_write (struct file * file, const char __user * buf, size_t count, loff_t * ppos) { unsigned int minor = iminor(file_inode(file)); struct pp_struct *pp = file->private_data; char * kbuffer; ssize_t bytes_written = 0; ssize_t wrote; int mode; struct parport *pport; if (!(pp->flags & PP_CLAIMED)) { /* Don't have the port claimed */ pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); if (!kbuffer) { return -ENOMEM; } pport = pp->pdev->port; mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); parport_set_timeout (pp->pdev, (file->f_flags & O_NONBLOCK) ? 
PARPORT_INACTIVITY_O_NONBLOCK : pp->default_inactivity); while (bytes_written < count) { ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE); if (copy_from_user (kbuffer, buf + bytes_written, n)) { bytes_written = -EFAULT; break; } if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) { /* do a fast EPP write */ if (pport->ieee1284.mode & IEEE1284_ADDR) { wrote = pport->ops->epp_write_addr (pport, kbuffer, n, PARPORT_EPP_FAST); } else { wrote = pport->ops->epp_write_data (pport, kbuffer, n, PARPORT_EPP_FAST); } } else { wrote = parport_write (pp->pdev->port, kbuffer, n); } if (wrote <= 0) { if (!bytes_written) { bytes_written = wrote; } break; } bytes_written += wrote; if (file->f_flags & O_NONBLOCK) { if (!bytes_written) bytes_written = -EAGAIN; break; } if (signal_pending (current)) break; cond_resched(); } parport_set_timeout (pp->pdev, pp->default_inactivity); kfree (kbuffer); pp_enable_irq (pp); return bytes_written; } static void pp_irq (void *private) { struct pp_struct *pp = private; if (pp->irqresponse) { parport_write_control (pp->pdev->port, pp->irqctl); pp->irqresponse = 0; } atomic_inc (&pp->irqc); wake_up_interruptible (&pp->irq_wait); } static int register_device (int minor, struct pp_struct *pp) { struct parport *port; struct pardevice * pdev = NULL; char *name; int fl; name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); if (name == NULL) return -ENOMEM; port = parport_find_number (minor); if (!port) { printk (KERN_WARNING "%s: no associated port!\n", name); kfree (name); return -ENXIO; } fl = (pp->flags & PP_EXCL) ? 
PARPORT_FLAG_EXCL : 0; pdev = parport_register_device (port, name, NULL, NULL, pp_irq, fl, pp); parport_put_port (port); if (!pdev) { printk (KERN_WARNING "%s: failed to register device!\n", name); kfree (name); return -ENXIO; } pp->pdev = pdev; pr_debug("%s: registered pardevice\n", name); return 0; } static enum ieee1284_phase init_phase (int mode) { switch (mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR)) { case IEEE1284_MODE_NIBBLE: case IEEE1284_MODE_BYTE: return IEEE1284_PH_REV_IDLE; } return IEEE1284_PH_FWD_IDLE; } static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned int minor = iminor(file_inode(file)); struct pp_struct *pp = file->private_data; struct parport * port; void __user *argp = (void __user *)arg; /* First handle the cases that don't take arguments. */ switch (cmd) { case PPCLAIM: { struct ieee1284_info *info; int ret; if (pp->flags & PP_CLAIMED) { pr_debug(CHRDEV "%x: you've already got it!\n", minor); return -EINVAL; } /* Deferred device registration. */ if (!pp->pdev) { int err = register_device (minor, pp); if (err) { return err; } } ret = parport_claim_or_block (pp->pdev); if (ret < 0) return ret; pp->flags |= PP_CLAIMED; /* For interrupt-reporting to work, we need to be * informed of each interrupt. */ pp_enable_irq (pp); /* We may need to fix up the state machine. */ info = &pp->pdev->port->ieee1284; pp->saved_state.mode = info->mode; pp->saved_state.phase = info->phase; info->mode = pp->state.mode; info->phase = pp->state.phase; pp->default_inactivity = parport_set_timeout (pp->pdev, 0); parport_set_timeout (pp->pdev, pp->default_inactivity); return 0; } case PPEXCL: if (pp->pdev) { pr_debug(CHRDEV "%x: too late for PPEXCL; " "already registered\n", minor); if (pp->flags & PP_EXCL) /* But it's not really an error. */ return 0; /* There's no chance of making the driver happy. */ return -EINVAL; } /* Just remember to register the device exclusively * when we finally do the registration. 
*/ pp->flags |= PP_EXCL; return 0; case PPSETMODE: { int mode; if (copy_from_user (&mode, argp, sizeof (mode))) return -EFAULT; /* FIXME: validate mode */ pp->state.mode = mode; pp->state.phase = init_phase (mode); if (pp->flags & PP_CLAIMED) { pp->pdev->port->ieee1284.mode = mode; pp->pdev->port->ieee1284.phase = pp->state.phase; } return 0; } case PPGETMODE: { int mode; if (pp->flags & PP_CLAIMED) { mode = pp->pdev->port->ieee1284.mode; } else { mode = pp->state.mode; } if (copy_to_user (argp, &mode, sizeof (mode))) { return -EFAULT; } return 0; } case PPSETPHASE: { int phase; if (copy_from_user (&phase, argp, sizeof (phase))) { return -EFAULT; } /* FIXME: validate phase */ pp->state.phase = phase; if (pp->flags & PP_CLAIMED) { pp->pdev->port->ieee1284.phase = phase; } return 0; } case PPGETPHASE: { int phase; if (pp->flags & PP_CLAIMED) { phase = pp->pdev->port->ieee1284.phase; } else { phase = pp->state.phase; } if (copy_to_user (argp, &phase, sizeof (phase))) { return -EFAULT; } return 0; } case PPGETMODES: { unsigned int modes; port = parport_find_number (minor); if (!port) return -ENODEV; modes = port->modes; parport_put_port(port); if (copy_to_user (argp, &modes, sizeof (modes))) { return -EFAULT; } return 0; } case PPSETFLAGS: { int uflags; if (copy_from_user (&uflags, argp, sizeof (uflags))) { return -EFAULT; } pp->flags &= ~PP_FLAGMASK; pp->flags |= (uflags & PP_FLAGMASK); return 0; } case PPGETFLAGS: { int uflags; uflags = pp->flags & PP_FLAGMASK; if (copy_to_user (argp, &uflags, sizeof (uflags))) { return -EFAULT; } return 0; } } /* end switch() */ /* Everything else requires the port to be claimed, so check * that now. 
*/ if ((pp->flags & PP_CLAIMED) == 0) { pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } port = pp->pdev->port; switch (cmd) { struct ieee1284_info *info; unsigned char reg; unsigned char mask; int mode; int ret; struct timeval par_timeout; long to_jiffies; case PPRSTATUS: reg = parport_read_status (port); if (copy_to_user (argp, &reg, sizeof (reg))) return -EFAULT; return 0; case PPRDATA: reg = parport_read_data (port); if (copy_to_user (argp, &reg, sizeof (reg))) return -EFAULT; return 0; case PPRCONTROL: reg = parport_read_control (port); if (copy_to_user (argp, &reg, sizeof (reg))) return -EFAULT; return 0; case PPYIELD: parport_yield_blocking (pp->pdev); return 0; case PPRELEASE: /* Save the state machine's state. */ info = &pp->pdev->port->ieee1284; pp->state.mode = info->mode; pp->state.phase = info->phase; info->mode = pp->saved_state.mode; info->phase = pp->saved_state.phase; parport_release (pp->pdev); pp->flags &= ~PP_CLAIMED; return 0; case PPWCONTROL: if (copy_from_user (&reg, argp, sizeof (reg))) return -EFAULT; parport_write_control (port, reg); return 0; case PPWDATA: if (copy_from_user (&reg, argp, sizeof (reg))) return -EFAULT; parport_write_data (port, reg); return 0; case PPFCONTROL: if (copy_from_user (&mask, argp, sizeof (mask))) return -EFAULT; if (copy_from_user (&reg, 1 + (unsigned char __user *) arg, sizeof (reg))) return -EFAULT; parport_frob_control (port, mask, reg); return 0; case PPDATADIR: if (copy_from_user (&mode, argp, sizeof (mode))) return -EFAULT; if (mode) port->ops->data_reverse (port); else port->ops->data_forward (port); return 0; case PPNEGOT: if (copy_from_user (&mode, argp, sizeof (mode))) return -EFAULT; switch ((ret = parport_negotiate (port, mode))) { case 0: break; case -1: /* handshake failed, peripheral not IEEE 1284 */ ret = -EIO; break; case 1: /* handshake succeeded, peripheral rejected mode */ ret = -ENXIO; break; } pp_enable_irq (pp); return ret; case PPWCTLONIRQ: if (copy_from_user 
(&reg, argp, sizeof (reg))) return -EFAULT; /* Remember what to set the control lines to, for next * time we get an interrupt. */ pp->irqctl = reg; pp->irqresponse = 1; return 0; case PPCLRIRQ: ret = atomic_read (&pp->irqc); if (copy_to_user (argp, &ret, sizeof (ret))) return -EFAULT; atomic_sub (ret, &pp->irqc); return 0; case PPSETTIME: if (copy_from_user (&par_timeout, argp, sizeof(struct timeval))) { return -EFAULT; } /* Convert to jiffies, place in pp->pdev->timeout */ if ((par_timeout.tv_sec < 0) || (par_timeout.tv_usec < 0)) { return -EINVAL; } to_jiffies = ROUND_UP(par_timeout.tv_usec, 1000000/HZ); to_jiffies += par_timeout.tv_sec * (long)HZ; if (to_jiffies <= 0) { return -EINVAL; } pp->pdev->timeout = to_jiffies; return 0; case PPGETTIME: to_jiffies = pp->pdev->timeout; memset(&par_timeout, 0, sizeof(par_timeout)); par_timeout.tv_sec = to_jiffies / HZ; par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ); if (copy_to_user (argp, &par_timeout, sizeof(struct timeval))) return -EFAULT; return 0; default: pr_debug(CHRDEV "%x: What? (cmd=0x%x)\n", minor, cmd); return -EINVAL; } /* Keep the compiler happy */ return 0; } static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret; mutex_lock(&pp_do_mutex); ret = pp_do_ioctl(file, cmd, arg); mutex_unlock(&pp_do_mutex); return ret; } static int pp_open (struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); struct pp_struct *pp; if (minor >= PARPORT_MAX) return -ENXIO; pp = kmalloc (sizeof (struct pp_struct), GFP_KERNEL); if (!pp) return -ENOMEM; pp->state.mode = IEEE1284_MODE_COMPAT; pp->state.phase = init_phase (pp->state.mode); pp->flags = 0; pp->irqresponse = 0; atomic_set (&pp->irqc, 0); init_waitqueue_head (&pp->irq_wait); /* Defer the actual device registration until the first claim. * That way, we know whether or not the driver wants to have * exclusive access to the port (PPEXCL). 
*/ pp->pdev = NULL; file->private_data = pp; return 0; } static int pp_release (struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); struct pp_struct *pp = file->private_data; int compat_negot; compat_negot = 0; if (!(pp->flags & PP_CLAIMED) && pp->pdev && (pp->state.mode != IEEE1284_MODE_COMPAT)) { struct ieee1284_info *info; /* parport released, but not in compatibility mode */ parport_claim_or_block (pp->pdev); pp->flags |= PP_CLAIMED; info = &pp->pdev->port->ieee1284; pp->saved_state.mode = info->mode; pp->saved_state.phase = info->phase; info->mode = pp->state.mode; info->phase = pp->state.phase; compat_negot = 1; } else if ((pp->flags & PP_CLAIMED) && pp->pdev && (pp->pdev->port->ieee1284.mode != IEEE1284_MODE_COMPAT)) { compat_negot = 2; } if (compat_negot) { parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT); pr_debug(CHRDEV "%x: negotiated back to compatibility " "mode because user-space forgot\n", minor); } if (pp->flags & PP_CLAIMED) { struct ieee1284_info *info; info = &pp->pdev->port->ieee1284; pp->state.mode = info->mode; pp->state.phase = info->phase; info->mode = pp->saved_state.mode; info->phase = pp->saved_state.phase; parport_release (pp->pdev); if (compat_negot != 1) { pr_debug(CHRDEV "%x: released pardevice " "because user-space forgot\n", minor); } } if (pp->pdev) { const char *name = pp->pdev->name; parport_unregister_device (pp->pdev); kfree (name); pp->pdev = NULL; pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); } kfree (pp); return 0; } /* No kernel lock held - fine */ static unsigned int pp_poll (struct file * file, poll_table * wait) { struct pp_struct *pp = file->private_data; unsigned int mask = 0; poll_wait (file, &pp->irq_wait, wait); if (atomic_read (&pp->irqc)) mask |= POLLIN | POLLRDNORM; return mask; } static struct class *ppdev_class; static const struct file_operations pp_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = pp_read, .write = pp_write, .poll = pp_poll, 
.unlocked_ioctl = pp_ioctl, .open = pp_open, .release = pp_release, }; static void pp_attach(struct parport *port) { device_create(ppdev_class, port->dev, MKDEV(PP_MAJOR, port->number), NULL, "parport%d", port->number); } static void pp_detach(struct parport *port) { device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number)); } static struct parport_driver pp_driver = { .name = CHRDEV, .attach = pp_attach, .detach = pp_detach, }; static int __init ppdev_init (void) { int err = 0; if (register_chrdev (PP_MAJOR, CHRDEV, &pp_fops)) { printk (KERN_WARNING CHRDEV ": unable to get major %d\n", PP_MAJOR); return -EIO; } ppdev_class = class_create(THIS_MODULE, CHRDEV); if (IS_ERR(ppdev_class)) { err = PTR_ERR(ppdev_class); goto out_chrdev; } err = parport_register_driver(&pp_driver); if (err < 0) { printk (KERN_WARNING CHRDEV ": unable to register with parport\n"); goto out_class; } printk (KERN_INFO PP_VERSION "\n"); goto out; out_class: class_destroy(ppdev_class); out_chrdev: unregister_chrdev(PP_MAJOR, CHRDEV); out: return err; } static void __exit ppdev_cleanup (void) { /* Clean up all parport stuff */ parport_unregister_driver(&pp_driver); class_destroy(ppdev_class); unregister_chrdev (PP_MAJOR, CHRDEV); } module_init(ppdev_init); module_exit(ppdev_cleanup); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(PP_MAJOR);
gpl-2.0
n3ocort3x/android_kernel_htc_m7
arch/sh/boards/mach-migor/setup.c
4418
16364
/* * Renesas System Solutions Asia Pte. Ltd - Migo-R * * Copyright (C) 2008 Magnus Damm * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/input/sh_keysc.h> #include <linux/mmc/host.h> #include <linux/mmc/sh_mobile_sdhi.h> #include <linux/mtd/physmap.h> #include <linux/mtd/nand.h> #include <linux/i2c.h> #include <linux/smc91x.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/gpio.h> #include <linux/videodev2.h> #include <video/sh_mobile_lcdc.h> #include <media/sh_mobile_ceu.h> #include <media/ov772x.h> #include <media/soc_camera.h> #include <media/tw9910.h> #include <asm/clock.h> #include <asm/machvec.h> #include <asm/io.h> #include <asm/suspend.h> #include <mach/migor.h> #include <cpu/sh7722.h> /* Address IRQ Size Bus Description * 0x00000000 64MB 16 NOR Flash (SP29PL256N) * 0x0c000000 64MB 64 SDRAM (2xK4M563233G) * 0x10000000 IRQ0 16 Ethernet (SMC91C111) * 0x14000000 IRQ4 16 USB 2.0 Host Controller (M66596) * 0x18000000 8GB 8 NAND Flash (K9K8G08U0A) */ static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, }; static struct resource smc91x_eth_resources[] = { [0] = { .name = "SMC91C111" , .start = 0x10000300, .end = 0x1000030f, .flags = IORESOURCE_MEM, }, [1] = { .start = 32, /* IRQ0 */ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device smc91x_eth_device = { .name = "smc91x", .num_resources = ARRAY_SIZE(smc91x_eth_resources), .resource = smc91x_eth_resources, .dev = { .platform_data = &smc91x_info, }, }; static struct sh_keysc_info sh_keysc_info = { .mode = SH_KEYSC_MODE_2, /* KEYOUT0->4, KEYIN1->5 */ .scan_timing = 3, .delay = 5, .keycodes = { 0, KEY_UP, KEY_DOWN, KEY_LEFT, KEY_RIGHT, KEY_ENTER, 0, KEY_F, KEY_C, KEY_D, KEY_H, 
KEY_1, 0, KEY_2, KEY_3, KEY_4, KEY_5, KEY_6, 0, KEY_7, KEY_8, KEY_9, KEY_S, KEY_0, 0, KEY_P, KEY_STOP, KEY_REWIND, KEY_PLAY, KEY_FASTFORWARD, }, }; static struct resource sh_keysc_resources[] = { [0] = { .start = 0x044b0000, .end = 0x044b000f, .flags = IORESOURCE_MEM, }, [1] = { .start = 79, .flags = IORESOURCE_IRQ, }, }; static struct platform_device sh_keysc_device = { .name = "sh_keysc", .id = 0, /* "keysc0" clock */ .num_resources = ARRAY_SIZE(sh_keysc_resources), .resource = sh_keysc_resources, .dev = { .platform_data = &sh_keysc_info, }, }; static struct mtd_partition migor_nor_flash_partitions[] = { { .name = "uboot", .offset = 0, .size = (1 * 1024 * 1024), .mask_flags = MTD_WRITEABLE, /* Read-only */ }, { .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = (15 * 1024 * 1024), }, { .name = "other", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static struct physmap_flash_data migor_nor_flash_data = { .width = 2, .parts = migor_nor_flash_partitions, .nr_parts = ARRAY_SIZE(migor_nor_flash_partitions), }; static struct resource migor_nor_flash_resources[] = { [0] = { .name = "NOR Flash", .start = 0x00000000, .end = 0x03ffffff, .flags = IORESOURCE_MEM, } }; static struct platform_device migor_nor_flash_device = { .name = "physmap-flash", .resource = migor_nor_flash_resources, .num_resources = ARRAY_SIZE(migor_nor_flash_resources), .dev = { .platform_data = &migor_nor_flash_data, }, }; static struct mtd_partition migor_nand_flash_partitions[] = { { .name = "nanddata1", .offset = 0x0, .size = 512 * 1024 * 1024, }, { .name = "nanddata2", .offset = MTDPART_OFS_APPEND, .size = 512 * 1024 * 1024, }, }; static void migor_nand_flash_cmd_ctl(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct nand_chip *chip = mtd->priv; if (cmd == NAND_CMD_NONE) return; if (ctrl & NAND_CLE) writeb(cmd, chip->IO_ADDR_W + 0x00400000); else if (ctrl & NAND_ALE) writeb(cmd, chip->IO_ADDR_W + 0x00800000); else writeb(cmd, chip->IO_ADDR_W); } static int 
migor_nand_flash_ready(struct mtd_info *mtd) { return gpio_get_value(GPIO_PTA1); /* NAND_RBn */ } static struct platform_nand_data migor_nand_flash_data = { .chip = { .nr_chips = 1, .partitions = migor_nand_flash_partitions, .nr_partitions = ARRAY_SIZE(migor_nand_flash_partitions), .chip_delay = 20, .part_probe_types = (const char *[]) { "cmdlinepart", NULL }, }, .ctrl = { .dev_ready = migor_nand_flash_ready, .cmd_ctrl = migor_nand_flash_cmd_ctl, }, }; static struct resource migor_nand_flash_resources[] = { [0] = { .name = "NAND Flash", .start = 0x18000000, .end = 0x18ffffff, .flags = IORESOURCE_MEM, }, }; static struct platform_device migor_nand_flash_device = { .name = "gen_nand", .resource = migor_nand_flash_resources, .num_resources = ARRAY_SIZE(migor_nand_flash_resources), .dev = { .platform_data = &migor_nand_flash_data, } }; static const struct fb_videomode migor_lcd_modes[] = { { #if defined(CONFIG_SH_MIGOR_RTA_WVGA) .name = "LB070WV1", .xres = 800, .yres = 480, .left_margin = 64, .right_margin = 16, .hsync_len = 120, .sync = 0, #elif defined(CONFIG_SH_MIGOR_QVGA) .name = "PH240320T", .xres = 320, .yres = 240, .left_margin = 0, .right_margin = 16, .hsync_len = 8, .sync = FB_SYNC_HOR_HIGH_ACT, #endif .upper_margin = 1, .lower_margin = 17, .vsync_len = 2, }, }; static struct sh_mobile_lcdc_info sh_mobile_lcdc_info = { #if defined(CONFIG_SH_MIGOR_RTA_WVGA) .clock_source = LCDC_CLK_BUS, .ch[0] = { .chan = LCDC_CHAN_MAINLCD, .fourcc = V4L2_PIX_FMT_RGB565, .interface_type = RGB16, .clock_divider = 2, .lcd_modes = migor_lcd_modes, .num_modes = ARRAY_SIZE(migor_lcd_modes), .panel_cfg = { /* 7.0 inch */ .width = 152, .height = 91, }, } #elif defined(CONFIG_SH_MIGOR_QVGA) .clock_source = LCDC_CLK_PERIPHERAL, .ch[0] = { .chan = LCDC_CHAN_MAINLCD, .fourcc = V4L2_PIX_FMT_RGB565, .interface_type = SYS16A, .clock_divider = 10, .lcd_modes = migor_lcd_modes, .num_modes = ARRAY_SIZE(migor_lcd_modes), .panel_cfg = { .width = 49, /* 2.4 inch */ .height = 37, .setup_sys = 
migor_lcd_qvga_setup, }, .sys_bus_cfg = { .ldmt2r = 0x06000a09, .ldmt3r = 0x180e3418, /* set 1s delay to encourage fsync() */ .deferred_io_msec = 1000, }, } #endif }; static struct resource migor_lcdc_resources[] = { [0] = { .name = "LCDC", .start = 0xfe940000, /* P4-only space */ .end = 0xfe942fff, .flags = IORESOURCE_MEM, }, [1] = { .start = 28, .flags = IORESOURCE_IRQ, }, }; static struct platform_device migor_lcdc_device = { .name = "sh_mobile_lcdc_fb", .num_resources = ARRAY_SIZE(migor_lcdc_resources), .resource = migor_lcdc_resources, .dev = { .platform_data = &sh_mobile_lcdc_info, }, }; static struct clk *camera_clk; static DEFINE_MUTEX(camera_lock); static void camera_power_on(int is_tw) { mutex_lock(&camera_lock); /* Use 10 MHz VIO_CKO instead of 24 MHz to work * around signal quality issues on Panel Board V2.1. */ camera_clk = clk_get(NULL, "video_clk"); clk_set_rate(camera_clk, 10000000); clk_enable(camera_clk); /* start VIO_CKO */ /* use VIO_RST to take camera out of reset */ mdelay(10); if (is_tw) { gpio_set_value(GPIO_PTT2, 0); gpio_set_value(GPIO_PTT0, 0); } else { gpio_set_value(GPIO_PTT0, 1); } gpio_set_value(GPIO_PTT3, 0); mdelay(10); gpio_set_value(GPIO_PTT3, 1); mdelay(10); /* wait to let chip come out of reset */ } static void camera_power_off(void) { clk_disable(camera_clk); /* stop VIO_CKO */ clk_put(camera_clk); gpio_set_value(GPIO_PTT3, 0); mutex_unlock(&camera_lock); } static int ov7725_power(struct device *dev, int mode) { if (mode) camera_power_on(0); else camera_power_off(); return 0; } static int tw9910_power(struct device *dev, int mode) { if (mode) camera_power_on(1); else camera_power_off(); return 0; } static struct sh_mobile_ceu_info sh_mobile_ceu_info = { .flags = SH_CEU_FLAG_USE_8BIT_BUS, }; static struct resource migor_ceu_resources[] = { [0] = { .name = "CEU", .start = 0xfe910000, .end = 0xfe91009f, .flags = IORESOURCE_MEM, }, [1] = { .start = 52, .flags = IORESOURCE_IRQ, }, [2] = { /* place holder for contiguous memory */ }, 
}; static struct platform_device migor_ceu_device = { .name = "sh_mobile_ceu", .id = 0, /* "ceu0" clock */ .num_resources = ARRAY_SIZE(migor_ceu_resources), .resource = migor_ceu_resources, .dev = { .platform_data = &sh_mobile_ceu_info, }, }; static struct resource sdhi_cn9_resources[] = { [0] = { .name = "SDHI", .start = 0x04ce0000, .end = 0x04ce00ff, .flags = IORESOURCE_MEM, }, [1] = { .start = 100, .flags = IORESOURCE_IRQ, }, }; static struct sh_mobile_sdhi_info sh7724_sdhi_data = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, .tmio_caps = MMC_CAP_SDIO_IRQ, }; static struct platform_device sdhi_cn9_device = { .name = "sh_mobile_sdhi", .num_resources = ARRAY_SIZE(sdhi_cn9_resources), .resource = sdhi_cn9_resources, .dev = { .platform_data = &sh7724_sdhi_data, }, }; static struct i2c_board_info migor_i2c_devices[] = { { I2C_BOARD_INFO("rs5c372b", 0x32), }, { I2C_BOARD_INFO("migor_ts", 0x51), .irq = 38, /* IRQ6 */ }, { I2C_BOARD_INFO("wm8978", 0x1a), }, }; static struct i2c_board_info migor_i2c_camera[] = { { I2C_BOARD_INFO("ov772x", 0x21), }, { I2C_BOARD_INFO("tw9910", 0x45), }, }; static struct ov772x_camera_info ov7725_info; static struct soc_camera_link ov7725_link = { .power = ov7725_power, .board_info = &migor_i2c_camera[0], .i2c_adapter_id = 0, .priv = &ov7725_info, }; static struct tw9910_video_info tw9910_info = { .buswidth = SOCAM_DATAWIDTH_8, .mpout = TW9910_MPO_FIELD, }; static struct soc_camera_link tw9910_link = { .power = tw9910_power, .board_info = &migor_i2c_camera[1], .i2c_adapter_id = 0, .priv = &tw9910_info, }; static struct platform_device migor_camera[] = { { .name = "soc-camera-pdrv", .id = 0, .dev = { .platform_data = &ov7725_link, }, }, { .name = "soc-camera-pdrv", .id = 1, .dev = { .platform_data = &tw9910_link, }, }, }; static struct platform_device *migor_devices[] __initdata = { &smc91x_eth_device, &sh_keysc_device, &migor_lcdc_device, &migor_ceu_device, &migor_nor_flash_device, &migor_nand_flash_device, 
&sdhi_cn9_device, &migor_camera[0], &migor_camera[1], }; extern char migor_sdram_enter_start; extern char migor_sdram_enter_end; extern char migor_sdram_leave_start; extern char migor_sdram_leave_end; static int __init migor_devices_setup(void) { /* register board specific self-refresh code */ sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF, &migor_sdram_enter_start, &migor_sdram_enter_end, &migor_sdram_leave_start, &migor_sdram_leave_end); /* Let D11 LED show STATUS0 */ gpio_request(GPIO_FN_STATUS0, NULL); /* Lit D12 LED show PDSTATUS */ gpio_request(GPIO_FN_PDSTATUS, NULL); /* SMC91C111 - Enable IRQ0, Setup CS4 for 16-bit fast access */ gpio_request(GPIO_FN_IRQ0, NULL); __raw_writel(0x00003400, BSC_CS4BCR); __raw_writel(0x00110080, BSC_CS4WCR); /* KEYSC */ gpio_request(GPIO_FN_KEYOUT0, NULL); gpio_request(GPIO_FN_KEYOUT1, NULL); gpio_request(GPIO_FN_KEYOUT2, NULL); gpio_request(GPIO_FN_KEYOUT3, NULL); gpio_request(GPIO_FN_KEYOUT4_IN6, NULL); gpio_request(GPIO_FN_KEYIN1, NULL); gpio_request(GPIO_FN_KEYIN2, NULL); gpio_request(GPIO_FN_KEYIN3, NULL); gpio_request(GPIO_FN_KEYIN4, NULL); gpio_request(GPIO_FN_KEYOUT5_IN5, NULL); /* NAND Flash */ gpio_request(GPIO_FN_CS6A_CE2B, NULL); __raw_writel((__raw_readl(BSC_CS6ABCR) & ~0x0600) | 0x0200, BSC_CS6ABCR); gpio_request(GPIO_PTA1, NULL); gpio_direction_input(GPIO_PTA1); /* SDHI */ gpio_request(GPIO_FN_SDHICD, NULL); gpio_request(GPIO_FN_SDHIWP, NULL); gpio_request(GPIO_FN_SDHID3, NULL); gpio_request(GPIO_FN_SDHID2, NULL); gpio_request(GPIO_FN_SDHID1, NULL); gpio_request(GPIO_FN_SDHID0, NULL); gpio_request(GPIO_FN_SDHICMD, NULL); gpio_request(GPIO_FN_SDHICLK, NULL); /* Touch Panel */ gpio_request(GPIO_FN_IRQ6, NULL); /* LCD Panel */ #ifdef CONFIG_SH_MIGOR_QVGA /* LCDC - QVGA - Enable SYS Interface signals */ gpio_request(GPIO_FN_LCDD17, NULL); gpio_request(GPIO_FN_LCDD16, NULL); gpio_request(GPIO_FN_LCDD15, NULL); gpio_request(GPIO_FN_LCDD14, NULL); gpio_request(GPIO_FN_LCDD13, NULL); 
gpio_request(GPIO_FN_LCDD12, NULL); gpio_request(GPIO_FN_LCDD11, NULL); gpio_request(GPIO_FN_LCDD10, NULL); gpio_request(GPIO_FN_LCDD8, NULL); gpio_request(GPIO_FN_LCDD7, NULL); gpio_request(GPIO_FN_LCDD6, NULL); gpio_request(GPIO_FN_LCDD5, NULL); gpio_request(GPIO_FN_LCDD4, NULL); gpio_request(GPIO_FN_LCDD3, NULL); gpio_request(GPIO_FN_LCDD2, NULL); gpio_request(GPIO_FN_LCDD1, NULL); gpio_request(GPIO_FN_LCDRS, NULL); gpio_request(GPIO_FN_LCDCS, NULL); gpio_request(GPIO_FN_LCDRD, NULL); gpio_request(GPIO_FN_LCDWR, NULL); gpio_request(GPIO_PTH2, NULL); /* LCD_DON */ gpio_direction_output(GPIO_PTH2, 1); #endif #ifdef CONFIG_SH_MIGOR_RTA_WVGA /* LCDC - WVGA - Enable RGB Interface signals */ gpio_request(GPIO_FN_LCDD15, NULL); gpio_request(GPIO_FN_LCDD14, NULL); gpio_request(GPIO_FN_LCDD13, NULL); gpio_request(GPIO_FN_LCDD12, NULL); gpio_request(GPIO_FN_LCDD11, NULL); gpio_request(GPIO_FN_LCDD10, NULL); gpio_request(GPIO_FN_LCDD9, NULL); gpio_request(GPIO_FN_LCDD8, NULL); gpio_request(GPIO_FN_LCDD7, NULL); gpio_request(GPIO_FN_LCDD6, NULL); gpio_request(GPIO_FN_LCDD5, NULL); gpio_request(GPIO_FN_LCDD4, NULL); gpio_request(GPIO_FN_LCDD3, NULL); gpio_request(GPIO_FN_LCDD2, NULL); gpio_request(GPIO_FN_LCDD1, NULL); gpio_request(GPIO_FN_LCDD0, NULL); gpio_request(GPIO_FN_LCDLCLK, NULL); gpio_request(GPIO_FN_LCDDCK, NULL); gpio_request(GPIO_FN_LCDVEPWC, NULL); gpio_request(GPIO_FN_LCDVCPWC, NULL); gpio_request(GPIO_FN_LCDVSYN, NULL); gpio_request(GPIO_FN_LCDHSYN, NULL); gpio_request(GPIO_FN_LCDDISP, NULL); gpio_request(GPIO_FN_LCDDON, NULL); #endif /* CEU */ gpio_request(GPIO_FN_VIO_CLK2, NULL); gpio_request(GPIO_FN_VIO_VD2, NULL); gpio_request(GPIO_FN_VIO_HD2, NULL); gpio_request(GPIO_FN_VIO_FLD, NULL); gpio_request(GPIO_FN_VIO_CKO, NULL); gpio_request(GPIO_FN_VIO_D15, NULL); gpio_request(GPIO_FN_VIO_D14, NULL); gpio_request(GPIO_FN_VIO_D13, NULL); gpio_request(GPIO_FN_VIO_D12, NULL); gpio_request(GPIO_FN_VIO_D11, NULL); gpio_request(GPIO_FN_VIO_D10, NULL); 
gpio_request(GPIO_FN_VIO_D9, NULL); gpio_request(GPIO_FN_VIO_D8, NULL); gpio_request(GPIO_PTT3, NULL); /* VIO_RST */ gpio_direction_output(GPIO_PTT3, 0); gpio_request(GPIO_PTT2, NULL); /* TV_IN_EN */ gpio_direction_output(GPIO_PTT2, 1); gpio_request(GPIO_PTT0, NULL); /* CAM_EN */ #ifdef CONFIG_SH_MIGOR_RTA_WVGA gpio_direction_output(GPIO_PTT0, 0); #else gpio_direction_output(GPIO_PTT0, 1); #endif __raw_writew(__raw_readw(PORT_MSELCRB) | 0x2000, PORT_MSELCRB); /* D15->D8 */ platform_resource_setup_memory(&migor_ceu_device, "ceu", 4 << 20); /* SIU: Port B */ gpio_request(GPIO_FN_SIUBOLR, NULL); gpio_request(GPIO_FN_SIUBOBT, NULL); gpio_request(GPIO_FN_SIUBISLD, NULL); gpio_request(GPIO_FN_SIUBOSLD, NULL); gpio_request(GPIO_FN_SIUMCKB, NULL); /* * The original driver sets SIUB OLR/OBT, ILR/IBT, and SIUA OLR/OBT to * output. Need only SIUB, set to output for master mode (table 34.2) */ __raw_writew(__raw_readw(PORT_MSELCRA) | 1, PORT_MSELCRA); i2c_register_board_info(0, migor_i2c_devices, ARRAY_SIZE(migor_i2c_devices)); return platform_add_devices(migor_devices, ARRAY_SIZE(migor_devices)); } arch_initcall(migor_devices_setup); /* Return the board specific boot mode pin configuration */ static int migor_mode_pins(void) { /* MD0=1, MD1=1, MD2=0: Clock Mode 3 * MD3=0: 16-bit Area0 Bus Width * MD5=1: Little Endian * TSTMD=1, MD8=0: Test Mode Disabled */ return MODE_PIN0 | MODE_PIN1 | MODE_PIN5; } /* * The Machine Vector */ static struct sh_machine_vector mv_migor __initmv = { .mv_name = "Migo-R", .mv_mode_pins = migor_mode_pins, };
gpl-2.0
MoKee/android_kernel_zte_nx507j
net/ipv4/ah4.c
4418
12498
/*
 * IPv4 IPsec Authentication Header (AH, IPPROTO_AH) transform.
 *
 * Implements the xfrm_type hooks for AH: ICV computation on output and
 * ICV verification plus header removal on input, using the kernel's
 * asynchronous hash (ahash) API.  Mutable IP-header fields are zeroed
 * for the ICV pass and restored afterwards, per RFC 4302.
 */
#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/scatterlist.h>
#include <net/icmp.h>
#include <net/protocol.h>

/* Per-skb control block: the generic xfrm cb plus a pointer to the
 * scratch buffer from ah_alloc_tmp(), so async completion handlers can
 * find and free it. */
struct ah_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate one scratch buffer laid out as: @size bytes of caller data,
 * the ICV (digest) area, an ahash_request (plus its context), and
 * @nfrags scatterlist entries — each region aligned for its use.
 * GFP_ATOMIC because this runs in the packet path.  Returns NULL on
 * allocation failure.  The ah_tmp_*()/ah_req_sg() helpers below locate
 * the individual regions inside this buffer.
 */
static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
			  unsigned int size)
{
	unsigned int len;

	len = size + crypto_ahash_digestsize(ahash) +
	      (crypto_ahash_alignmask(ahash) &
	       ~(crypto_tfm_ctx_alignment() - 1));

	len = ALIGN(len, crypto_tfm_ctx_alignment());

	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

/* Caller-data region inside the scratch buffer. */
static inline u8 *ah_tmp_auth(void *tmp, unsigned int offset)
{
	return tmp + offset;
}

/* ICV area: @offset bytes into @tmp, aligned to the hash's alignmask. */
static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
			     unsigned int offset)
{
	return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
}

/* The ahash_request slot following the ICV; also binds it to @ahash. */
static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
					       u8 *icv)
{
	struct ahash_request *req;

	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
				crypto_tfm_ctx_alignment());

	ahash_request_set_tfm(req, ahash);

	return req;
}

/* Scatterlist array following the ahash_request and its context. */
static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
					    struct ahash_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_ahash_reqsize(ahash),
			     __alignof__(struct scatterlist));
}

/* Clear mutable options and find final destination to substitute
 * into IP header for icv calculation. Options are already checked
 * for validity, so paranoia is not required. */
static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
{
	unsigned char *optptr = (unsigned char *)(iph+1);
	int  l = iph->ihl*4 - sizeof(struct iphdr);
	int  optlen;

	while (l > 0) {
		switch (*optptr) {
		case IPOPT_END:
			return 0;
		case IPOPT_NOOP:
			l--;
			optptr++;
			continue;
		}
		optlen = optptr[1];
		if (optlen < 2 || optlen > l)
			return -EINVAL;
		switch (*optptr) {
		case IPOPT_SEC:
		case 0x85:	/* Some "Extended Security" crap. */
		case IPOPT_CIPSO:
		case IPOPT_RA:
		case 0x80|21:	/* RFC1770 */
			/* Immutable options: included in the ICV as-is. */
			break;
		case IPOPT_LSRR:
		case IPOPT_SSRR:
			if (optlen < 6)
				return -EINVAL;
			/* Source route: the last listed hop is the final
			 * destination to use for the ICV. */
			memcpy(daddr, optptr+optlen-4, 4);
			/* Fall through */
		default:
			memset(optptr, 0, optlen);
		}
		l -= optlen;
		optptr += optlen;
	}
	return 0;
}

/*
 * Async completion for ah_output(): copy the computed ICV into the AH
 * header, restore the mutable IP-header fields (and any IP options)
 * from the scratch copy, free the scratch buffer and resume xfrm
 * output processing.
 */
static void ah_output_done(struct crypto_async_request *base, int err)
{
	u8 *icv;
	struct iphdr *iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	struct ah_data *ahp = x->data;
	struct iphdr *top_iph = ip_hdr(skb);
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);

	iph = AH_SKB_CB(skb)->tmp;
	icv = ah_tmp_icv(ahp->ahash, iph, ihl);
	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		/* Restore the options that were zeroed for the ICV. */
		top_iph->daddr = iph->daddr;
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
	}

	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}

/*
 * xfrm output hook: build the AH header and compute the ICV over the
 * packet with the mutable IP fields (tos/ttl/frag_off/check, options)
 * zeroed.  The original values are stashed in a scratch copy of the IP
 * header and restored after hashing — here on the synchronous path, or
 * in ah_output_done() when the digest completes asynchronously.
 *
 * Returns 0 on success, -EINPROGRESS if the hash is async,
 * NET_XMIT_DROP if the crypto backend is busy, or a negative errno.
 */
static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int nfrags;
	int ihl;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *top_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;

	ahp = x->data;
	ahash = ahp->ahash;

	/* Linearize enough to map the packet into a scatterlist. */
	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	skb_push(skb, -skb_network_offset(skb));
	ah = ip_auth_hdr(skb);
	ihl = ip_hdrlen(skb);

	err = -ENOMEM;
	iph = ah_alloc_tmp(ahash, nfrags, ihl);
	if (!iph)
		goto out;

	icv = ah_tmp_icv(ahash, iph, ihl);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);

	/* ICV field must be zero while it is being computed. */
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	top_iph = ip_hdr(skb);

	/* Save mutable fields, then zero them below for the ICV pass. */
	iph->tos = top_iph->tos;
	iph->ttl = top_iph->ttl;
	iph->frag_off = top_iph->frag_off;

	if (top_iph->ihl != 5) {
		iph->daddr = top_iph->daddr;
		memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
		err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
		if (err)
			goto out_free;
	}

	ah->nexthdr = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_AH;

	top_iph->tos = 0;
	top_iph->tot_len = htons(skb->len);
	top_iph->frag_off = 0;
	top_iph->ttl = 0;
	top_iph->check = 0;

	/* AH hdrlen is expressed in 32-bit words minus 2 (RFC 4302). */
	if (x->props.flags & XFRM_STATE_ALIGN4)
		ah->hdrlen  = (XFRM_ALIGN4(sizeof(*ah) +
					   ahp->icv_trunc_len) >> 2) - 2;
	else
		ah->hdrlen  = (XFRM_ALIGN8(sizeof(*ah) +
					   ahp->icv_trunc_len) >> 2) - 2;

	ah->reserved = 0;
	ah->spi = x->id.spi;
	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	ahash_request_set_crypt(req, sg, icv, skb->len);
	ahash_request_set_callback(req, 0, ah_output_done, skb);

	AH_SKB_CB(skb)->tmp = iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;	/* ah_output_done() finishes up */

		if (err == -EBUSY)
			err = NET_XMIT_DROP;
		goto out_free;
	}

	/* Synchronous completion: same fixups as ah_output_done(). */
	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
	}

out_free:
	kfree(iph);
out:
	return err;
}

/*
 * Async completion for ah_input(): compare the freshly computed ICV
 * against the received one (saved in the scratch buffer), and on match
 * strip the AH header exactly as the synchronous path does.  On
 * mismatch, resumes xfrm input with -EBADMSG.
 */
static void ah_input_done(struct crypto_async_request *base, int err)
{
	u8 *auth_data;
	u8 *icv;
	struct iphdr *work_iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ah_data *ahp = x->data;
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);
	int ah_hlen = (ah->hdrlen + 2) << 2;

	work_iph = AH_SKB_CB(skb)->tmp;
	auth_data = ah_tmp_auth(work_iph, ihl);
	icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);

	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0;
	if (err)
		goto out;

	err = ah->nexthdr;

	/* Remove the AH header: shift the saved IP header forward. */
	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);
	skb_set_transport_header(skb, -ihl);
out:
	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_input_resume(skb, err);
}

/*
 * xfrm input hook: verify the ICV of a received AH packet and strip the
 * AH header.  A scratch copy of the IP header plus the received ICV is
 * kept while the packet is re-hashed with mutable fields zeroed; the
 * comparison and header removal happen here synchronously or in
 * ah_input_done() for async hashes.
 *
 * Returns the inner protocol number on success, -EINPROGRESS (async),
 * -EBADMSG on ICV mismatch, or another negative errno.
 */
static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int ah_hlen;
	int ihl;
	int nexthdr;
	int nfrags;
	u8 *auth_data;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *work_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	int err = -ENOMEM;

	if (!pskb_may_pull(skb, sizeof(*ah)))
		goto out;

	ah = (struct ip_auth_hdr *)skb->data;
	ahp = x->data;
	ahash = ahp->ahash;

	nexthdr = ah->nexthdr;
	ah_hlen = (ah->hdrlen + 2) << 2;

	/* Accept either full-length or truncated ICV encodings. */
	if (x->props.flags & XFRM_STATE_ALIGN4) {
		if (ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_full_len) &&
		    ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len))
			goto out;
	} else {
		if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
		    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
			goto out;
	}

	if (!pskb_may_pull(skb, ah_hlen))
		goto out;

	/* We are going to _remove_ AH header to keep sockets happy,
	 * so... Later this can change. */
	if (skb_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	ah = (struct ip_auth_hdr *)skb->data;
	iph = ip_hdr(skb);
	ihl = ip_hdrlen(skb);

	/* Scratch holds the IP header copy plus the received ICV. */
	work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
	if (!work_iph)
		goto out;

	auth_data = ah_tmp_auth(work_iph, ihl);
	icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);

	memcpy(work_iph, iph, ihl);
	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	/* Zero mutable fields before re-computing the ICV. */
	iph->ttl = 0;
	iph->tos = 0;
	iph->frag_off = 0;
	iph->check = 0;
	if (ihl > sizeof(*iph)) {
		__be32 dummy;
		err = ip_clear_mutable_options(iph, &dummy);
		if (err)
			goto out_free;
	}

	skb_push(skb, ihl);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	ahash_request_set_crypt(req, sg, icv, skb->len);
	ahash_request_set_callback(req, 0, ah_input_done, skb);

	AH_SKB_CB(skb)->tmp = work_iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;	/* ah_input_done() finishes up */

		goto out_free;
	}

	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0;
	if (err)
		goto out_free;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);
	skb_set_transport_header(skb, -ihl);

	err = nexthdr;

out_free:
	kfree (work_iph);
out:
	return err;
}

/*
 * ICMP error handler for AH: on a fragmentation-needed error, look up
 * the matching SA (by SPI and destination) and log the PMTU event.
 */
static void ah4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
	struct xfrm_state *x;

	if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
	    icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
		return;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      ah->spi, IPPROTO_AH, AF_INET);
	if (!x)
		return;
	printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n",
	       ntohl(ah->spi), ntohl(iph->daddr));
	xfrm_state_put(x);
}

/*
 * Initialize AH state for an SA: allocate the ahash transform named by
 * the SA's auth algorithm, set its key, validate the digest size
 * against the xfrm algorithm description, record full/truncated ICV
 * lengths and compute the header length the SA adds.
 * Returns 0, -ENOMEM, or -EINVAL on any validation/setup failure.
 */
static int ah_init_state(struct xfrm_state *x)
{
	struct ah_data *ahp = NULL;
	struct xfrm_algo_desc *aalg_desc;
	struct crypto_ahash *ahash;

	if (!x->aalg)
		goto error;

	if (x->encap)
		goto error;

	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
	if (!ahp)
		return -ENOMEM;

	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
	if (IS_ERR(ahash))
		goto error;

	ahp->ahash = ahash;
	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
				(x->aalg->alg_key_len + 7) / 8))
		goto error;

	/*
	 * Lookup the algorithm description maintained by xfrm_algo,
	 * verify crypto transform properties, and store information
	 * we need for AH processing.  This lookup cannot fail here
	 * after a successful crypto_alloc_ahash().
	 */
	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
	    crypto_ahash_digestsize(ahash)) {
		pr_info("%s: %s digestsize %u != %hu\n",
			__func__, x->aalg->alg_name,
			crypto_ahash_digestsize(ahash),
			aalg_desc->uinfo.auth.icv_fullbits / 8);
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
	ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;

	BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);

	if (x->props.flags & XFRM_STATE_ALIGN4)
		x->props.header_len = XFRM_ALIGN4(sizeof(struct ip_auth_hdr) +
						  ahp->icv_trunc_len);
	else
		x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
						  ahp->icv_trunc_len);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	x->data = ahp;

	return 0;

error:
	if (ahp) {
		crypto_free_ahash(ahp->ahash);
		kfree(ahp);
	}
	return -EINVAL;
}

/* Tear down per-SA AH state: free the hash transform and the ah_data. */
static void ah_destroy(struct xfrm_state *x)
{
	struct ah_data *ahp = x->data;

	if (!ahp)
		return;

	crypto_free_ahash(ahp->ahash);
	kfree(ahp);
}

/* xfrm_type registration for AH over IPv4. */
static const struct xfrm_type ah_type =
{
	.description	= "AH4",
	.owner		= THIS_MODULE,
	.proto	     	= IPPROTO_AH,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= ah_init_state,
	.destructor	= ah_destroy,
	.input		= ah_input,
	.output		= ah_output
};

/* inet protocol handler for IPPROTO_AH packets. */
static const struct net_protocol ah4_protocol = {
	.handler	=	xfrm4_rcv,
	.err_handler	=	ah4_err,
	.no_policy	=	1,
	.netns_ok	=	1,
};

/* Register the AH xfrm type, then the inet protocol handler;
 * unwind the type registration if the handler fails. */
static int __init ah4_init(void)
{
	if (xfrm_register_type(&ah_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (inet_add_protocol(&ah4_protocol, IPPROTO_AH) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&ah_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

/* Unregister in the reverse order of ah4_init(). */
static void __exit ah4_fini(void)
{
	if (inet_del_protocol(&ah4_protocol, IPPROTO_AH) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&ah_type, AF_INET) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(ah4_init);
module_exit(ah4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_AH);
gpl-2.0
TEAM-RAZOR-DEVICES/kernel_lge_g3
arch/h8300/mm/init.c
4418
5438
/*
 *  linux/arch/h8300/mm/init.c
 *
 *  Copyright (C) 1998  D. Jeff Dionne <jeff@lineo.ca>,
 *                      Kenneth Albanowski <kjahds@kjahds.com>,
 *  Copyright (C) 2000  Lineo, Inc.  (www.lineo.com)
 *
 *  Based on:
 *
 *  linux/arch/m68knommu/mm/init.c
 *  linux/arch/m68k/mm/init.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 *
 *  JAN/1999 -- hacked to support ColdFire (gerg@snapgear.com)
 *  DEC/2000 -- linux 2.4 support <davidm@snapgear.com>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#undef DEBUG

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving a inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
static unsigned long empty_bad_page_table;

static unsigned long empty_bad_page;

unsigned long empty_zero_page;

/* Physical memory bounds, set up by early platform code. */
extern unsigned long rom_length;
extern unsigned long memory_start;
extern unsigned long memory_end;

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 * The parameters are pointers to where to stick the starting and ending
 * addresses of available kernel virtual memory.
 */
void __init paging_init(void)
{
	/*
	 * Make sure start_mem is page aligned, otherwise bootmem and
	 * page_alloc get different views of the world.
	 */
#ifdef DEBUG
	unsigned long start_mem = PAGE_ALIGN(memory_start);
#endif
	unsigned long end_mem   = memory_end & PAGE_MASK;

#ifdef DEBUG
	printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
		start_mem, end_mem);
#endif

	/*
	 * Initialize the bad page table and bad page to point
	 * to a couple of allocated pages.
	 */
	empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers (user data space).
	 */
	set_fs (USER_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");

	printk ("free_area_init -> start_mem is %#lx\nvirtual_end is %#lx\n",
		start_mem, end_mem);
#endif

	{
		/* Single NORMAL zone; no DMA or HIGHMEM pages on h8300. */
		unsigned long zones_size[MAX_NR_ZONES] = {0, };

		zones_size[ZONE_DMA]     = 0 >> PAGE_SHIFT;
		zones_size[ZONE_NORMAL]  = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
		zones_size[ZONE_HIGHMEM] = 0;
#endif
		free_area_init(zones_size);
	}
}

/*
 * Hand all boot memory to the page allocator and print the memory
 * layout summary (RAM/ROM totals, kernel code and data sizes).
 */
void __init mem_init(void)
{
	int codek = 0, datak = 0, initk = 0;
	/* DAVIDM look at setup memory map generically with reserved area */
	unsigned long tmp;
	extern char _etext, _stext, _sdata, _ebss, __init_begin, __init_end;
	extern unsigned long  _ramend, _ramstart;
	/* NOTE(review): _ramend/_ramstart are declared unsigned long, so
	 * &_ramend - &_ramstart is an element-count pointer difference,
	 * not a byte count — confirm against the linker script symbols. */
	unsigned long len = &_ramend - &_ramstart;
	unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */
	unsigned long end_mem   = memory_end; /* DAVIDM - this must not include kernel stack at top */

#ifdef DEBUG
	printk(KERN_DEBUG "Mem_init: start=%lx, end=%lx\n", start_mem, end_mem);
#endif

	end_mem &= PAGE_MASK;
	high_memory = (void *) end_mem;

	start_mem = PAGE_ALIGN(start_mem);
	max_mapnr = num_physpages = MAP_NR(high_memory);

	/* this will put all memory onto the freelists */
	totalram_pages = free_all_bootmem();

	codek = (&_etext - &_stext) >> 10;
	datak = (&_ebss - &_sdata) >> 10;
	/* NOTE(review): begin - end is negative; initk is also never
	 * used in the printk below — looks vestigial. */
	initk = (&__init_begin - &__init_end) >> 10;

	tmp = nr_free_pages() << PAGE_SHIFT;
	printk(KERN_INFO "Memory available: %luk/%luk RAM, %luk/%luk ROM (%dk kernel code, %dk data)\n",
	       tmp >> 10,
	       len >> 10,
	       (rom_length > 0) ? ((rom_length >> 10) - codek) : 0,
	       rom_length >> 10,
	       codek,
	       datak
	       );
}

#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Release the pages holding the initial ramdisk back to the page
 * allocator once it is no longer needed.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	int pages = 0;
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
		pages++;
	}
	/* NOTE(review): "pages" is a page count, printed with a "k"
	 * suffix as if it were kilobytes — confirm intended units. */
	printk ("Freeing initrd memory: %dk freed\n", pages);
}
#endif

/*
 * Free the __init text/data section after boot (only meaningful when
 * the kernel runs from RAM; a ROM-resident kernel cannot reclaim it).
 */
void
free_initmem(void)
{
#ifdef CONFIG_RAMKERNEL
	unsigned long addr;
	extern char __init_begin, __init_end;
/*
 *	the following code should be cool even if these sections
 *	are not page aligned.
 */
	addr = PAGE_ALIGN((unsigned long)(&__init_begin));
	/* next to check that the page we free is not a partial page */
	for (; addr + PAGE_SIZE < (unsigned long)(&__init_end); addr +=PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n",
			(addr - PAGE_ALIGN((long) &__init_begin)) >> 10,
			(int)(PAGE_ALIGN((unsigned long)(&__init_begin))),
			(int)(addr - PAGE_SIZE));
#endif
}
gpl-2.0
revjunkie/galbi-g2
arch/x86/lib/usercopy_32.c
4674
23134
/* * User address space access functions. * The non inlined parts of asm-i386/uaccess.h are here. * * Copyright 1997 Andi Kleen <ak@muc.de> * Copyright 1997 Linus Torvalds */ #include <linux/mm.h> #include <linux/highmem.h> #include <linux/blkdev.h> #include <linux/module.h> #include <linux/backing-dev.h> #include <linux/interrupt.h> #include <asm/uaccess.h> #include <asm/mmx.h> #ifdef CONFIG_X86_INTEL_USERCOPY /* * Alignment at which movsl is preferred for bulk memory copies. */ struct movsl_mask movsl_mask __read_mostly; #endif static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n) { #ifdef CONFIG_X86_INTEL_USERCOPY if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask)) return 0; #endif return 1; } #define movsl_is_ok(a1, a2, n) \ __movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n)) /* * Zero Userspace */ #define __do_clear_user(addr,size) \ do { \ int __d0; \ might_fault(); \ __asm__ __volatile__( \ "0: rep; stosl\n" \ " movl %2,%0\n" \ "1: rep; stosb\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: lea 0(%2,%0,4),%0\n" \ " jmp 2b\n" \ ".previous\n" \ _ASM_EXTABLE(0b,3b) \ _ASM_EXTABLE(1b,2b) \ : "=&c"(size), "=&D" (__d0) \ : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \ } while (0) /** * clear_user: - Zero a block of memory in user space. * @to: Destination address, in user space. * @n: Number of bytes to zero. * * Zero a block of memory in user space. * * Returns number of bytes that could not be cleared. * On success, this will be zero. */ unsigned long clear_user(void __user *to, unsigned long n) { might_fault(); if (access_ok(VERIFY_WRITE, to, n)) __do_clear_user(to, n); return n; } EXPORT_SYMBOL(clear_user); /** * __clear_user: - Zero a block of memory in user space, with less checking. * @to: Destination address, in user space. * @n: Number of bytes to zero. * * Zero a block of memory in user space. Caller must check * the specified block with access_ok() before calling this function. 
* * Returns number of bytes that could not be cleared. * On success, this will be zero. */ unsigned long __clear_user(void __user *to, unsigned long n) { __do_clear_user(to, n); return n; } EXPORT_SYMBOL(__clear_user); /** * strnlen_user: - Get the size of a string in user space. * @s: The string to measure. * @n: The maximum valid length * * Get the size of a NUL-terminated string in user space. * * Returns the size of the string INCLUDING the terminating NUL. * On exception, returns 0. * If the string is too long, returns a value greater than @n. */ long strnlen_user(const char __user *s, long n) { unsigned long mask = -__addr_ok(s); unsigned long res, tmp; might_fault(); __asm__ __volatile__( " testl %0, %0\n" " jz 3f\n" " andl %0,%%ecx\n" "0: repne; scasb\n" " setne %%al\n" " subl %%ecx,%0\n" " addl %0,%%eax\n" "1:\n" ".section .fixup,\"ax\"\n" "2: xorl %%eax,%%eax\n" " jmp 1b\n" "3: movb $1,%%al\n" " jmp 1b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 4\n" " .long 0b,2b\n" ".previous" :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp) :"0" (n), "1" (s), "2" (0), "3" (mask) :"cc"); return res & mask; } EXPORT_SYMBOL(strnlen_user); #ifdef CONFIG_X86_INTEL_USERCOPY static unsigned long __copy_user_intel(void __user *to, const void *from, unsigned long size) { int d0, d1; __asm__ __volatile__( " .align 2,0x90\n" "1: movl 32(%4), %%eax\n" " cmpl $67, %0\n" " jbe 3f\n" "2: movl 64(%4), %%eax\n" " .align 2,0x90\n" "3: movl 0(%4), %%eax\n" "4: movl 4(%4), %%edx\n" "5: movl %%eax, 0(%3)\n" "6: movl %%edx, 4(%3)\n" "7: movl 8(%4), %%eax\n" "8: movl 12(%4),%%edx\n" "9: movl %%eax, 8(%3)\n" "10: movl %%edx, 12(%3)\n" "11: movl 16(%4), %%eax\n" "12: movl 20(%4), %%edx\n" "13: movl %%eax, 16(%3)\n" "14: movl %%edx, 20(%3)\n" "15: movl 24(%4), %%eax\n" "16: movl 28(%4), %%edx\n" "17: movl %%eax, 24(%3)\n" "18: movl %%edx, 28(%3)\n" "19: movl 32(%4), %%eax\n" "20: movl 36(%4), %%edx\n" "21: movl %%eax, 32(%3)\n" "22: movl %%edx, 36(%3)\n" "23: movl 40(%4), %%eax\n" 
"24: movl 44(%4), %%edx\n" "25: movl %%eax, 40(%3)\n" "26: movl %%edx, 44(%3)\n" "27: movl 48(%4), %%eax\n" "28: movl 52(%4), %%edx\n" "29: movl %%eax, 48(%3)\n" "30: movl %%edx, 52(%3)\n" "31: movl 56(%4), %%eax\n" "32: movl 60(%4), %%edx\n" "33: movl %%eax, 56(%3)\n" "34: movl %%edx, 60(%3)\n" " addl $-64, %0\n" " addl $64, %4\n" " addl $64, %3\n" " cmpl $63, %0\n" " ja 1b\n" "35: movl %0, %%eax\n" " shrl $2, %0\n" " andl $3, %%eax\n" " cld\n" "99: rep; movsl\n" "36: movl %%eax, %0\n" "37: rep; movsb\n" "100:\n" ".section .fixup,\"ax\"\n" "101: lea 0(%%eax,%0,4),%0\n" " jmp 100b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 4\n" " .long 1b,100b\n" " .long 2b,100b\n" " .long 3b,100b\n" " .long 4b,100b\n" " .long 5b,100b\n" " .long 6b,100b\n" " .long 7b,100b\n" " .long 8b,100b\n" " .long 9b,100b\n" " .long 10b,100b\n" " .long 11b,100b\n" " .long 12b,100b\n" " .long 13b,100b\n" " .long 14b,100b\n" " .long 15b,100b\n" " .long 16b,100b\n" " .long 17b,100b\n" " .long 18b,100b\n" " .long 19b,100b\n" " .long 20b,100b\n" " .long 21b,100b\n" " .long 22b,100b\n" " .long 23b,100b\n" " .long 24b,100b\n" " .long 25b,100b\n" " .long 26b,100b\n" " .long 27b,100b\n" " .long 28b,100b\n" " .long 29b,100b\n" " .long 30b,100b\n" " .long 31b,100b\n" " .long 32b,100b\n" " .long 33b,100b\n" " .long 34b,100b\n" " .long 35b,100b\n" " .long 36b,100b\n" " .long 37b,100b\n" " .long 99b,101b\n" ".previous" : "=&c"(size), "=&D" (d0), "=&S" (d1) : "1"(to), "2"(from), "0"(size) : "eax", "edx", "memory"); return size; } static unsigned long __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) { int d0, d1; __asm__ __volatile__( " .align 2,0x90\n" "0: movl 32(%4), %%eax\n" " cmpl $67, %0\n" " jbe 2f\n" "1: movl 64(%4), %%eax\n" " .align 2,0x90\n" "2: movl 0(%4), %%eax\n" "21: movl 4(%4), %%edx\n" " movl %%eax, 0(%3)\n" " movl %%edx, 4(%3)\n" "3: movl 8(%4), %%eax\n" "31: movl 12(%4),%%edx\n" " movl %%eax, 8(%3)\n" " movl %%edx, 12(%3)\n" "4: movl 16(%4), 
%%eax\n" "41: movl 20(%4), %%edx\n" " movl %%eax, 16(%3)\n" " movl %%edx, 20(%3)\n" "10: movl 24(%4), %%eax\n" "51: movl 28(%4), %%edx\n" " movl %%eax, 24(%3)\n" " movl %%edx, 28(%3)\n" "11: movl 32(%4), %%eax\n" "61: movl 36(%4), %%edx\n" " movl %%eax, 32(%3)\n" " movl %%edx, 36(%3)\n" "12: movl 40(%4), %%eax\n" "71: movl 44(%4), %%edx\n" " movl %%eax, 40(%3)\n" " movl %%edx, 44(%3)\n" "13: movl 48(%4), %%eax\n" "81: movl 52(%4), %%edx\n" " movl %%eax, 48(%3)\n" " movl %%edx, 52(%3)\n" "14: movl 56(%4), %%eax\n" "91: movl 60(%4), %%edx\n" " movl %%eax, 56(%3)\n" " movl %%edx, 60(%3)\n" " addl $-64, %0\n" " addl $64, %4\n" " addl $64, %3\n" " cmpl $63, %0\n" " ja 0b\n" "5: movl %0, %%eax\n" " shrl $2, %0\n" " andl $3, %%eax\n" " cld\n" "6: rep; movsl\n" " movl %%eax,%0\n" "7: rep; movsb\n" "8:\n" ".section .fixup,\"ax\"\n" "9: lea 0(%%eax,%0,4),%0\n" "16: pushl %0\n" " pushl %%eax\n" " xorl %%eax,%%eax\n" " rep; stosb\n" " popl %%eax\n" " popl %0\n" " jmp 8b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 4\n" " .long 0b,16b\n" " .long 1b,16b\n" " .long 2b,16b\n" " .long 21b,16b\n" " .long 3b,16b\n" " .long 31b,16b\n" " .long 4b,16b\n" " .long 41b,16b\n" " .long 10b,16b\n" " .long 51b,16b\n" " .long 11b,16b\n" " .long 61b,16b\n" " .long 12b,16b\n" " .long 71b,16b\n" " .long 13b,16b\n" " .long 81b,16b\n" " .long 14b,16b\n" " .long 91b,16b\n" " .long 6b,9b\n" " .long 7b,16b\n" ".previous" : "=&c"(size), "=&D" (d0), "=&S" (d1) : "1"(to), "2"(from), "0"(size) : "eax", "edx", "memory"); return size; } /* * Non Temporal Hint version of __copy_user_zeroing_intel. It is cache aware. 
* hyoshiok@miraclelinux.com */ static unsigned long __copy_user_zeroing_intel_nocache(void *to, const void __user *from, unsigned long size) { int d0, d1; __asm__ __volatile__( " .align 2,0x90\n" "0: movl 32(%4), %%eax\n" " cmpl $67, %0\n" " jbe 2f\n" "1: movl 64(%4), %%eax\n" " .align 2,0x90\n" "2: movl 0(%4), %%eax\n" "21: movl 4(%4), %%edx\n" " movnti %%eax, 0(%3)\n" " movnti %%edx, 4(%3)\n" "3: movl 8(%4), %%eax\n" "31: movl 12(%4),%%edx\n" " movnti %%eax, 8(%3)\n" " movnti %%edx, 12(%3)\n" "4: movl 16(%4), %%eax\n" "41: movl 20(%4), %%edx\n" " movnti %%eax, 16(%3)\n" " movnti %%edx, 20(%3)\n" "10: movl 24(%4), %%eax\n" "51: movl 28(%4), %%edx\n" " movnti %%eax, 24(%3)\n" " movnti %%edx, 28(%3)\n" "11: movl 32(%4), %%eax\n" "61: movl 36(%4), %%edx\n" " movnti %%eax, 32(%3)\n" " movnti %%edx, 36(%3)\n" "12: movl 40(%4), %%eax\n" "71: movl 44(%4), %%edx\n" " movnti %%eax, 40(%3)\n" " movnti %%edx, 44(%3)\n" "13: movl 48(%4), %%eax\n" "81: movl 52(%4), %%edx\n" " movnti %%eax, 48(%3)\n" " movnti %%edx, 52(%3)\n" "14: movl 56(%4), %%eax\n" "91: movl 60(%4), %%edx\n" " movnti %%eax, 56(%3)\n" " movnti %%edx, 60(%3)\n" " addl $-64, %0\n" " addl $64, %4\n" " addl $64, %3\n" " cmpl $63, %0\n" " ja 0b\n" " sfence \n" "5: movl %0, %%eax\n" " shrl $2, %0\n" " andl $3, %%eax\n" " cld\n" "6: rep; movsl\n" " movl %%eax,%0\n" "7: rep; movsb\n" "8:\n" ".section .fixup,\"ax\"\n" "9: lea 0(%%eax,%0,4),%0\n" "16: pushl %0\n" " pushl %%eax\n" " xorl %%eax,%%eax\n" " rep; stosb\n" " popl %%eax\n" " popl %0\n" " jmp 8b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 4\n" " .long 0b,16b\n" " .long 1b,16b\n" " .long 2b,16b\n" " .long 21b,16b\n" " .long 3b,16b\n" " .long 31b,16b\n" " .long 4b,16b\n" " .long 41b,16b\n" " .long 10b,16b\n" " .long 51b,16b\n" " .long 11b,16b\n" " .long 61b,16b\n" " .long 12b,16b\n" " .long 71b,16b\n" " .long 13b,16b\n" " .long 81b,16b\n" " .long 14b,16b\n" " .long 91b,16b\n" " .long 6b,9b\n" " .long 7b,16b\n" ".previous" : "=&c"(size), "=&D" (d0), 
"=&S" (d1) : "1"(to), "2"(from), "0"(size) : "eax", "edx", "memory"); return size; } static unsigned long __copy_user_intel_nocache(void *to, const void __user *from, unsigned long size) { int d0, d1; __asm__ __volatile__( " .align 2,0x90\n" "0: movl 32(%4), %%eax\n" " cmpl $67, %0\n" " jbe 2f\n" "1: movl 64(%4), %%eax\n" " .align 2,0x90\n" "2: movl 0(%4), %%eax\n" "21: movl 4(%4), %%edx\n" " movnti %%eax, 0(%3)\n" " movnti %%edx, 4(%3)\n" "3: movl 8(%4), %%eax\n" "31: movl 12(%4),%%edx\n" " movnti %%eax, 8(%3)\n" " movnti %%edx, 12(%3)\n" "4: movl 16(%4), %%eax\n" "41: movl 20(%4), %%edx\n" " movnti %%eax, 16(%3)\n" " movnti %%edx, 20(%3)\n" "10: movl 24(%4), %%eax\n" "51: movl 28(%4), %%edx\n" " movnti %%eax, 24(%3)\n" " movnti %%edx, 28(%3)\n" "11: movl 32(%4), %%eax\n" "61: movl 36(%4), %%edx\n" " movnti %%eax, 32(%3)\n" " movnti %%edx, 36(%3)\n" "12: movl 40(%4), %%eax\n" "71: movl 44(%4), %%edx\n" " movnti %%eax, 40(%3)\n" " movnti %%edx, 44(%3)\n" "13: movl 48(%4), %%eax\n" "81: movl 52(%4), %%edx\n" " movnti %%eax, 48(%3)\n" " movnti %%edx, 52(%3)\n" "14: movl 56(%4), %%eax\n" "91: movl 60(%4), %%edx\n" " movnti %%eax, 56(%3)\n" " movnti %%edx, 60(%3)\n" " addl $-64, %0\n" " addl $64, %4\n" " addl $64, %3\n" " cmpl $63, %0\n" " ja 0b\n" " sfence \n" "5: movl %0, %%eax\n" " shrl $2, %0\n" " andl $3, %%eax\n" " cld\n" "6: rep; movsl\n" " movl %%eax,%0\n" "7: rep; movsb\n" "8:\n" ".section .fixup,\"ax\"\n" "9: lea 0(%%eax,%0,4),%0\n" "16: jmp 8b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 4\n" " .long 0b,16b\n" " .long 1b,16b\n" " .long 2b,16b\n" " .long 21b,16b\n" " .long 3b,16b\n" " .long 31b,16b\n" " .long 4b,16b\n" " .long 41b,16b\n" " .long 10b,16b\n" " .long 51b,16b\n" " .long 11b,16b\n" " .long 61b,16b\n" " .long 12b,16b\n" " .long 71b,16b\n" " .long 13b,16b\n" " .long 81b,16b\n" " .long 14b,16b\n" " .long 91b,16b\n" " .long 6b,9b\n" " .long 7b,16b\n" ".previous" : "=&c"(size), "=&D" (d0), "=&S" (d1) : "1"(to), "2"(from), "0"(size) : "eax", 
"edx", "memory"); return size; } #else /* * Leave these declared but undefined. They should not be any references to * them */ unsigned long __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size); unsigned long __copy_user_intel(void __user *to, const void *from, unsigned long size); unsigned long __copy_user_zeroing_intel_nocache(void *to, const void __user *from, unsigned long size); #endif /* CONFIG_X86_INTEL_USERCOPY */ /* Generic arbitrary sized copy. */ #define __copy_user(to, from, size) \ do { \ int __d0, __d1, __d2; \ __asm__ __volatile__( \ " cmp $7,%0\n" \ " jbe 1f\n" \ " movl %1,%0\n" \ " negl %0\n" \ " andl $7,%0\n" \ " subl %0,%3\n" \ "4: rep; movsb\n" \ " movl %3,%0\n" \ " shrl $2,%0\n" \ " andl $3,%3\n" \ " .align 2,0x90\n" \ "0: rep; movsl\n" \ " movl %3,%0\n" \ "1: rep; movsb\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "5: addl %3,%0\n" \ " jmp 2b\n" \ "3: lea 0(%3,%0,4),%0\n" \ " jmp 2b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .align 4\n" \ " .long 4b,5b\n" \ " .long 0b,3b\n" \ " .long 1b,2b\n" \ ".previous" \ : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \ : "3"(size), "0"(size), "1"(to), "2"(from) \ : "memory"); \ } while (0) #define __copy_user_zeroing(to, from, size) \ do { \ int __d0, __d1, __d2; \ __asm__ __volatile__( \ " cmp $7,%0\n" \ " jbe 1f\n" \ " movl %1,%0\n" \ " negl %0\n" \ " andl $7,%0\n" \ " subl %0,%3\n" \ "4: rep; movsb\n" \ " movl %3,%0\n" \ " shrl $2,%0\n" \ " andl $3,%3\n" \ " .align 2,0x90\n" \ "0: rep; movsl\n" \ " movl %3,%0\n" \ "1: rep; movsb\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "5: addl %3,%0\n" \ " jmp 6f\n" \ "3: lea 0(%3,%0,4),%0\n" \ "6: pushl %0\n" \ " pushl %%eax\n" \ " xorl %%eax,%%eax\n" \ " rep; stosb\n" \ " popl %%eax\n" \ " popl %0\n" \ " jmp 2b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .align 4\n" \ " .long 4b,5b\n" \ " .long 0b,3b\n" \ " .long 1b,6b\n" \ ".previous" \ : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \ : "3"(size), 
"0"(size), "1"(to), "2"(from) \ : "memory"); \ } while (0) unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n) { #ifndef CONFIG_X86_WP_WORKS_OK if (unlikely(boot_cpu_data.wp_works_ok == 0) && ((unsigned long)to) < TASK_SIZE) { /* * When we are in an atomic section (see * mm/filemap.c:file_read_actor), return the full * length to take the slow path. */ if (in_atomic()) return n; /* * CPU does not honor the WP bit when writing * from supervisory mode, and due to preemption or SMP, * the page tables can change at any time. * Do it manually. Manfred <manfred@colorfullife.com> */ while (n) { unsigned long offset = ((unsigned long)to)%PAGE_SIZE; unsigned long len = PAGE_SIZE - offset; int retval; struct page *pg; void *maddr; if (len > n) len = n; survive: down_read(&current->mm->mmap_sem); retval = get_user_pages(current, current->mm, (unsigned long)to, 1, 1, 0, &pg, NULL); if (retval == -ENOMEM && is_global_init(current)) { up_read(&current->mm->mmap_sem); congestion_wait(BLK_RW_ASYNC, HZ/50); goto survive; } if (retval != 1) { up_read(&current->mm->mmap_sem); break; } maddr = kmap_atomic(pg); memcpy(maddr + offset, from, len); kunmap_atomic(maddr); set_page_dirty_lock(pg); put_page(pg); up_read(&current->mm->mmap_sem); from += len; to += len; n -= len; } return n; } #endif if (movsl_is_ok(to, from, n)) __copy_user(to, from, n); else n = __copy_user_intel(to, from, n); return n; } EXPORT_SYMBOL(__copy_to_user_ll); unsigned long __copy_from_user_ll(void *to, const void __user *from, unsigned long n) { if (movsl_is_ok(to, from, n)) __copy_user_zeroing(to, from, n); else n = __copy_user_zeroing_intel(to, from, n); return n; } EXPORT_SYMBOL(__copy_from_user_ll); unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from, unsigned long n) { if (movsl_is_ok(to, from, n)) __copy_user(to, from, n); else n = __copy_user_intel((void __user *)to, (const void *)from, n); return n; } EXPORT_SYMBOL(__copy_from_user_ll_nozero); unsigned 
long __copy_from_user_ll_nocache(void *to, const void __user *from, unsigned long n) { #ifdef CONFIG_X86_INTEL_USERCOPY if (n > 64 && cpu_has_xmm2) n = __copy_user_zeroing_intel_nocache(to, from, n); else __copy_user_zeroing(to, from, n); #else __copy_user_zeroing(to, from, n); #endif return n; } EXPORT_SYMBOL(__copy_from_user_ll_nocache); unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from, unsigned long n) { #ifdef CONFIG_X86_INTEL_USERCOPY if (n > 64 && cpu_has_xmm2) n = __copy_user_intel_nocache(to, from, n); else __copy_user(to, from, n); #else __copy_user(to, from, n); #endif return n; } EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero); /** * copy_to_user: - Copy a block of data into user space. * @to: Destination address, in user space. * @from: Source address, in kernel space. * @n: Number of bytes to copy. * * Context: User context only. This function may sleep. * * Copy data from kernel space to user space. * * Returns number of bytes that could not be copied. * On success, this will be zero. */ unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) { if (access_ok(VERIFY_WRITE, to, n)) n = __copy_to_user(to, from, n); return n; } EXPORT_SYMBOL(copy_to_user); /** * copy_from_user: - Copy a block of data from user space. * @to: Destination address, in kernel space. * @from: Source address, in user space. * @n: Number of bytes to copy. * * Context: User context only. This function may sleep. * * Copy data from user space to kernel space. * * Returns number of bytes that could not be copied. * On success, this will be zero. * * If some data could not be copied, this function will pad the copied * data to the requested size using zero bytes. 
*/ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n) { if (access_ok(VERIFY_READ, from, n)) n = __copy_from_user(to, from, n); else memset(to, 0, n); return n; } EXPORT_SYMBOL(_copy_from_user); void copy_from_user_overflow(void) { WARN(1, "Buffer overflow detected!\n"); } EXPORT_SYMBOL(copy_from_user_overflow);
gpl-2.0
esgie/viennalte_p905_kernel_source
drivers/media/video/omap3isp/ispcsiphy.c
8002
6316
/* * ispcsiphy.c * * TI OMAP3 ISP - CSI PHY module * * Copyright (C) 2010 Nokia Corporation * Copyright (C) 2009 Texas Instruments, Inc. * * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * Sakari Ailus <sakari.ailus@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/delay.h> #include <linux/device.h> #include <linux/regulator/consumer.h> #include "isp.h" #include "ispreg.h" #include "ispcsiphy.h" /* * csiphy_lanes_config - Configuration of CSIPHY lanes. * * Updates HW configuration. * Called with phy->mutex taken. */ static void csiphy_lanes_config(struct isp_csiphy *phy) { unsigned int i; u32 reg; reg = isp_reg_readl(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG); for (i = 0; i < phy->num_data_lanes; i++) { reg &= ~(ISPCSI2_PHY_CFG_DATA_POL_MASK(i + 1) | ISPCSI2_PHY_CFG_DATA_POSITION_MASK(i + 1)); reg |= (phy->lanes.data[i].pol << ISPCSI2_PHY_CFG_DATA_POL_SHIFT(i + 1)); reg |= (phy->lanes.data[i].pos << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(i + 1)); } reg &= ~(ISPCSI2_PHY_CFG_CLOCK_POL_MASK | ISPCSI2_PHY_CFG_CLOCK_POSITION_MASK); reg |= phy->lanes.clk.pol << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT; reg |= phy->lanes.clk.pos << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT; isp_reg_writel(phy->isp, reg, phy->cfg_regs, ISPCSI2_PHY_CFG); } /* * csiphy_power_autoswitch_enable * @enable: Sets or clears the autoswitch function enable flag. 
*/ static void csiphy_power_autoswitch_enable(struct isp_csiphy *phy, bool enable) { isp_reg_clr_set(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG, ISPCSI2_PHY_CFG_PWR_AUTO, enable ? ISPCSI2_PHY_CFG_PWR_AUTO : 0); } /* * csiphy_set_power * @power: Power state to be set. * * Returns 0 if successful, or -EBUSY if the retry count is exceeded. */ static int csiphy_set_power(struct isp_csiphy *phy, u32 power) { u32 reg; u8 retry_count; isp_reg_clr_set(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG, ISPCSI2_PHY_CFG_PWR_CMD_MASK, power); retry_count = 0; do { udelay(50); reg = isp_reg_readl(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG) & ISPCSI2_PHY_CFG_PWR_STATUS_MASK; if (reg != power >> 2) retry_count++; } while ((reg != power >> 2) && (retry_count < 100)); if (retry_count == 100) { printk(KERN_ERR "CSI2 CIO set power failed!\n"); return -EBUSY; } return 0; } /* * csiphy_dphy_config - Configure CSI2 D-PHY parameters. * * Called with phy->mutex taken. */ static void csiphy_dphy_config(struct isp_csiphy *phy) { u32 reg; /* Set up ISPCSIPHY_REG0 */ reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG0); reg &= ~(ISPCSIPHY_REG0_THS_TERM_MASK | ISPCSIPHY_REG0_THS_SETTLE_MASK); reg |= phy->dphy.ths_term << ISPCSIPHY_REG0_THS_TERM_SHIFT; reg |= phy->dphy.ths_settle << ISPCSIPHY_REG0_THS_SETTLE_SHIFT; isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG0); /* Set up ISPCSIPHY_REG1 */ reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG1); reg &= ~(ISPCSIPHY_REG1_TCLK_TERM_MASK | ISPCSIPHY_REG1_TCLK_MISS_MASK | ISPCSIPHY_REG1_TCLK_SETTLE_MASK); reg |= phy->dphy.tclk_term << ISPCSIPHY_REG1_TCLK_TERM_SHIFT; reg |= phy->dphy.tclk_miss << ISPCSIPHY_REG1_TCLK_MISS_SHIFT; reg |= phy->dphy.tclk_settle << ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT; isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG1); } static int csiphy_config(struct isp_csiphy *phy, struct isp_csiphy_dphy_cfg *dphy, struct isp_csiphy_lanes_cfg *lanes) { unsigned int used_lanes = 0; unsigned int i; /* Clock and data 
lanes verification */ for (i = 0; i < phy->num_data_lanes; i++) { if (lanes->data[i].pol > 1 || lanes->data[i].pos > 3) return -EINVAL; if (used_lanes & (1 << lanes->data[i].pos)) return -EINVAL; used_lanes |= 1 << lanes->data[i].pos; } if (lanes->clk.pol > 1 || lanes->clk.pos > 3) return -EINVAL; if (lanes->clk.pos == 0 || used_lanes & (1 << lanes->clk.pos)) return -EINVAL; mutex_lock(&phy->mutex); phy->dphy = *dphy; phy->lanes = *lanes; mutex_unlock(&phy->mutex); return 0; } int omap3isp_csiphy_acquire(struct isp_csiphy *phy) { int rval; if (phy->vdd == NULL) { dev_err(phy->isp->dev, "Power regulator for CSI PHY not " "available\n"); return -ENODEV; } mutex_lock(&phy->mutex); rval = regulator_enable(phy->vdd); if (rval < 0) goto done; omap3isp_csi2_reset(phy->csi2); csiphy_dphy_config(phy); csiphy_lanes_config(phy); rval = csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_ON); if (rval) { regulator_disable(phy->vdd); goto done; } csiphy_power_autoswitch_enable(phy, true); phy->phy_in_use = 1; done: mutex_unlock(&phy->mutex); return rval; } void omap3isp_csiphy_release(struct isp_csiphy *phy) { mutex_lock(&phy->mutex); if (phy->phy_in_use) { csiphy_power_autoswitch_enable(phy, false); csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_OFF); regulator_disable(phy->vdd); phy->phy_in_use = 0; } mutex_unlock(&phy->mutex); } /* * omap3isp_csiphy_init - Initialize the CSI PHY frontends */ int omap3isp_csiphy_init(struct isp_device *isp) { struct isp_csiphy *phy1 = &isp->isp_csiphy1; struct isp_csiphy *phy2 = &isp->isp_csiphy2; isp->platform_cb.csiphy_config = csiphy_config; phy2->isp = isp; phy2->csi2 = &isp->isp_csi2a; phy2->num_data_lanes = ISP_CSIPHY2_NUM_DATA_LANES; phy2->cfg_regs = OMAP3_ISP_IOMEM_CSI2A_REGS1; phy2->phy_regs = OMAP3_ISP_IOMEM_CSIPHY2; mutex_init(&phy2->mutex); if (isp->revision == ISP_REVISION_15_0) { phy1->isp = isp; phy1->csi2 = &isp->isp_csi2c; phy1->num_data_lanes = ISP_CSIPHY1_NUM_DATA_LANES; phy1->cfg_regs = OMAP3_ISP_IOMEM_CSI2C_REGS1; phy1->phy_regs 
= OMAP3_ISP_IOMEM_CSIPHY1; mutex_init(&phy1->mutex); } return 0; }
gpl-2.0
Grace5921/untouched
net/irda/irproc.c
12354
2589
/********************************************************************* * * Filename: irproc.c * Version: 1.0 * Description: Various entries in the /proc file system * Status: Experimental. * Author: Thomas Davis, <ratbert@radiks.net> * Created at: Sat Feb 21 21:33:24 1998 * Modified at: Sun Nov 14 08:54:54 1999 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1998-1999, Dag Brattli <dagb@cs.uit.no> * Copyright (c) 1998, Thomas Davis, <ratbert@radiks.net>, * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * I, Thomas Davis, provide no warranty for any of this software. * This material is provided "AS-IS" and at no charge. * ********************************************************************/ #include <linux/miscdevice.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/module.h> #include <linux/init.h> #include <net/net_namespace.h> #include <net/irda/irda.h> #include <net/irda/irlap.h> #include <net/irda/irlmp.h> extern const struct file_operations discovery_seq_fops; extern const struct file_operations irlap_seq_fops; extern const struct file_operations irlmp_seq_fops; extern const struct file_operations irttp_seq_fops; extern const struct file_operations irias_seq_fops; struct irda_entry { const char *name; const struct file_operations *fops; }; struct proc_dir_entry *proc_irda; EXPORT_SYMBOL(proc_irda); static const struct irda_entry irda_dirs[] = { {"discovery", &discovery_seq_fops}, {"irttp", &irttp_seq_fops}, {"irlmp", &irlmp_seq_fops}, {"irlap", &irlap_seq_fops}, {"irias", &irias_seq_fops}, }; /* * Function irda_proc_register (void) * * Register irda entry in /proc file system * */ void __init irda_proc_register(void) { int i; proc_irda = proc_mkdir("irda", init_net.proc_net); if (proc_irda == NULL) 
return; for (i = 0; i < ARRAY_SIZE(irda_dirs); i++) (void) proc_create(irda_dirs[i].name, 0, proc_irda, irda_dirs[i].fops); } /* * Function irda_proc_unregister (void) * * Unregister irda entry in /proc file system * */ void irda_proc_unregister(void) { int i; if (proc_irda) { for (i=0; i<ARRAY_SIZE(irda_dirs); i++) remove_proc_entry(irda_dirs[i].name, proc_irda); remove_proc_entry("irda", init_net.proc_net); proc_irda = NULL; } }
gpl-2.0
arunov/Goldeneye-Bufflehead
arch/mips/bcm63xx/timer.c
12610
4510
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> */ #include <linux/kernel.h> #include <linux/err.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/clk.h> #include <bcm63xx_cpu.h> #include <bcm63xx_io.h> #include <bcm63xx_timer.h> #include <bcm63xx_regs.h> static DEFINE_RAW_SPINLOCK(timer_reg_lock); static DEFINE_RAW_SPINLOCK(timer_data_lock); static struct clk *periph_clk; static struct timer_data { void (*cb)(void *); void *data; } timer_data[BCM63XX_TIMER_COUNT]; static irqreturn_t timer_interrupt(int irq, void *dev_id) { u32 stat; int i; raw_spin_lock(&timer_reg_lock); stat = bcm_timer_readl(TIMER_IRQSTAT_REG); bcm_timer_writel(stat, TIMER_IRQSTAT_REG); raw_spin_unlock(&timer_reg_lock); for (i = 0; i < BCM63XX_TIMER_COUNT; i++) { if (!(stat & TIMER_IRQSTAT_TIMER_CAUSE(i))) continue; raw_spin_lock(&timer_data_lock); if (!timer_data[i].cb) { raw_spin_unlock(&timer_data_lock); continue; } timer_data[i].cb(timer_data[i].data); raw_spin_unlock(&timer_data_lock); } return IRQ_HANDLED; } int bcm63xx_timer_enable(int id) { u32 reg; unsigned long flags; if (id >= BCM63XX_TIMER_COUNT) return -EINVAL; raw_spin_lock_irqsave(&timer_reg_lock, flags); reg = bcm_timer_readl(TIMER_CTLx_REG(id)); reg |= TIMER_CTL_ENABLE_MASK; bcm_timer_writel(reg, TIMER_CTLx_REG(id)); reg = bcm_timer_readl(TIMER_IRQSTAT_REG); reg |= TIMER_IRQSTAT_TIMER_IR_EN(id); bcm_timer_writel(reg, TIMER_IRQSTAT_REG); raw_spin_unlock_irqrestore(&timer_reg_lock, flags); return 0; } EXPORT_SYMBOL(bcm63xx_timer_enable); int bcm63xx_timer_disable(int id) { u32 reg; unsigned long flags; if (id >= BCM63XX_TIMER_COUNT) return -EINVAL; raw_spin_lock_irqsave(&timer_reg_lock, flags); reg = bcm_timer_readl(TIMER_CTLx_REG(id)); reg &= ~TIMER_CTL_ENABLE_MASK; bcm_timer_writel(reg, 
TIMER_CTLx_REG(id)); reg = bcm_timer_readl(TIMER_IRQSTAT_REG); reg &= ~TIMER_IRQSTAT_TIMER_IR_EN(id); bcm_timer_writel(reg, TIMER_IRQSTAT_REG); raw_spin_unlock_irqrestore(&timer_reg_lock, flags); return 0; } EXPORT_SYMBOL(bcm63xx_timer_disable); int bcm63xx_timer_register(int id, void (*callback)(void *data), void *data) { unsigned long flags; int ret; if (id >= BCM63XX_TIMER_COUNT || !callback) return -EINVAL; ret = 0; raw_spin_lock_irqsave(&timer_data_lock, flags); if (timer_data[id].cb) { ret = -EBUSY; goto out; } timer_data[id].cb = callback; timer_data[id].data = data; out: raw_spin_unlock_irqrestore(&timer_data_lock, flags); return ret; } EXPORT_SYMBOL(bcm63xx_timer_register); void bcm63xx_timer_unregister(int id) { unsigned long flags; if (id >= BCM63XX_TIMER_COUNT) return; raw_spin_lock_irqsave(&timer_data_lock, flags); timer_data[id].cb = NULL; raw_spin_unlock_irqrestore(&timer_data_lock, flags); } EXPORT_SYMBOL(bcm63xx_timer_unregister); unsigned int bcm63xx_timer_countdown(unsigned int countdown_us) { return (clk_get_rate(periph_clk) / (1000 * 1000)) * countdown_us; } EXPORT_SYMBOL(bcm63xx_timer_countdown); int bcm63xx_timer_set(int id, int monotonic, unsigned int countdown_us) { u32 reg, countdown; unsigned long flags; if (id >= BCM63XX_TIMER_COUNT) return -EINVAL; countdown = bcm63xx_timer_countdown(countdown_us); if (countdown & ~TIMER_CTL_COUNTDOWN_MASK) return -EINVAL; raw_spin_lock_irqsave(&timer_reg_lock, flags); reg = bcm_timer_readl(TIMER_CTLx_REG(id)); if (monotonic) reg &= ~TIMER_CTL_MONOTONIC_MASK; else reg |= TIMER_CTL_MONOTONIC_MASK; reg &= ~TIMER_CTL_COUNTDOWN_MASK; reg |= countdown; bcm_timer_writel(reg, TIMER_CTLx_REG(id)); raw_spin_unlock_irqrestore(&timer_reg_lock, flags); return 0; } EXPORT_SYMBOL(bcm63xx_timer_set); int bcm63xx_timer_init(void) { int ret, irq; u32 reg; reg = bcm_timer_readl(TIMER_IRQSTAT_REG); reg &= ~TIMER_IRQSTAT_TIMER0_IR_EN; reg &= ~TIMER_IRQSTAT_TIMER1_IR_EN; reg &= ~TIMER_IRQSTAT_TIMER2_IR_EN; 
bcm_timer_writel(reg, TIMER_IRQSTAT_REG); periph_clk = clk_get(NULL, "periph"); if (IS_ERR(periph_clk)) return -ENODEV; irq = bcm63xx_get_irq_number(IRQ_TIMER); ret = request_irq(irq, timer_interrupt, 0, "bcm63xx_timer", NULL); if (ret) { printk(KERN_ERR "bcm63xx_timer: failed to register irq\n"); return ret; } return 0; } arch_initcall(bcm63xx_timer_init);
gpl-2.0
AOSParadox/android_kernel_oneplus_onyx
arch/m32r/mm/cache.c
13890
2672
/* * linux/arch/m32r/mm/cache.c * * Copyright (C) 2002-2005 Hirokazu Takata, Hayato Fujiwara */ #include <asm/pgtable.h> #undef MCCR #if defined(CONFIG_CHIP_XNUX2) || defined(CONFIG_CHIP_M32700) \ || defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_OPSP) /* Cache Control Register */ #define MCCR ((volatile unsigned long*)0xfffffffc) #define MCCR_CC (1UL << 7) /* Cache mode modify bit */ #define MCCR_IIV (1UL << 6) /* I-cache invalidate */ #define MCCR_DIV (1UL << 5) /* D-cache invalidate */ #define MCCR_DCB (1UL << 4) /* D-cache copy back */ #define MCCR_ICM (1UL << 1) /* I-cache mode [0:off,1:on] */ #define MCCR_DCM (1UL << 0) /* D-cache mode [0:off,1:on] */ #define MCCR_ICACHE_INV (MCCR_CC|MCCR_IIV) #define MCCR_DCACHE_CB (MCCR_CC|MCCR_DCB) #define MCCR_DCACHE_CBINV (MCCR_CC|MCCR_DIV|MCCR_DCB) #define CHECK_MCCR(mccr) (mccr = *MCCR) #elif defined(CONFIG_CHIP_M32102) #define MCCR ((volatile unsigned char*)0xfffffffe) #define MCCR_IIV (1UL << 0) /* I-cache invalidate */ #define MCCR_ICACHE_INV MCCR_IIV #elif defined(CONFIG_CHIP_M32104) #define MCCR ((volatile unsigned short*)0xfffffffe) #define MCCR_IIV (1UL << 8) /* I-cache invalidate */ #define MCCR_DIV (1UL << 9) /* D-cache invalidate */ #define MCCR_DCB (1UL << 10) /* D-cache copy back */ #define MCCR_ICM (1UL << 0) /* I-cache mode [0:off,1:on] */ #define MCCR_DCM (1UL << 1) /* D-cache mode [0:off,1:on] */ #define MCCR_ICACHE_INV MCCR_IIV #define MCCR_DCACHE_CB MCCR_DCB #define MCCR_DCACHE_CBINV (MCCR_DIV|MCCR_DCB) #endif #ifndef MCCR #error Unknown cache type. 
#endif /* Copy back and invalidate D-cache and invalidate I-cache all */ void _flush_cache_all(void) { #if defined(CONFIG_CHIP_M32102) unsigned char mccr; *MCCR = MCCR_ICACHE_INV; #elif defined(CONFIG_CHIP_M32104) unsigned short mccr; /* Copyback and invalidate D-cache */ /* Invalidate I-cache */ *MCCR |= (MCCR_ICACHE_INV | MCCR_DCACHE_CBINV); #else unsigned long mccr; /* Copyback and invalidate D-cache */ /* Invalidate I-cache */ *MCCR = MCCR_ICACHE_INV | MCCR_DCACHE_CBINV; #endif while ((mccr = *MCCR) & MCCR_IIV); /* loop while invalidating... */ } /* Copy back D-cache and invalidate I-cache all */ void _flush_cache_copyback_all(void) { #if defined(CONFIG_CHIP_M32102) unsigned char mccr; *MCCR = MCCR_ICACHE_INV; #elif defined(CONFIG_CHIP_M32104) unsigned short mccr; /* Copyback and invalidate D-cache */ /* Invalidate I-cache */ *MCCR |= (MCCR_ICACHE_INV | MCCR_DCACHE_CB); #else unsigned long mccr; /* Copyback D-cache */ /* Invalidate I-cache */ *MCCR = MCCR_ICACHE_INV | MCCR_DCACHE_CB; #endif while ((mccr = *MCCR) & MCCR_IIV); /* loop while invalidating... */ }
gpl-2.0
bluewish/tiny4412-linux-3.5
drivers/net/ethernet/mellanox/mlx4/mr.c
67
21817
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/init.h> #include <linux/errno.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/mlx4/cmd.h> #include "mlx4.h" #include "icm.h" #define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28) #define MLX4_MPT_FLAG_FREE (0x3UL << 28) #define MLX4_MPT_FLAG_MIO (1 << 17) #define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15) #define MLX4_MPT_FLAG_PHYSICAL (1 << 9) #define MLX4_MPT_FLAG_REGION (1 << 8) #define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27) #define MLX4_MPT_PD_FLAG_RAE (1 << 28) #define MLX4_MPT_PD_FLAG_EN_INV (3 << 24) #define MLX4_MPT_STATUS_SW 0xF0 #define MLX4_MPT_STATUS_HW 0x00 static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order) { int o; int m; u32 seg; spin_lock(&buddy->lock); for (o = order; o <= buddy->max_order; ++o) if (buddy->num_free[o]) { m = 1 << (buddy->max_order - o); seg = find_first_bit(buddy->bits[o], m); if (seg < m) goto found; } spin_unlock(&buddy->lock); return -1; found: clear_bit(seg, buddy->bits[o]); --buddy->num_free[o]; while (o > order) { --o; seg <<= 1; set_bit(seg ^ 1, buddy->bits[o]); ++buddy->num_free[o]; } spin_unlock(&buddy->lock); seg <<= order; return seg; } static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order) { seg >>= order; spin_lock(&buddy->lock); while (test_bit(seg ^ 1, buddy->bits[order])) { clear_bit(seg ^ 1, buddy->bits[order]); --buddy->num_free[order]; seg >>= 1; ++order; } set_bit(seg, buddy->bits[order]); ++buddy->num_free[order]; spin_unlock(&buddy->lock); } static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) { int i, s; buddy->max_order = max_order; spin_lock_init(&buddy->lock); buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), GFP_KERNEL); buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free, GFP_KERNEL); if (!buddy->bits || !buddy->num_free) goto err_out; for (i = 0; i <= buddy->max_order; ++i) { s = BITS_TO_LONGS(1 << (buddy->max_order - i)); buddy->bits[i] = kmalloc(s * sizeof (long), 
GFP_KERNEL); if (!buddy->bits[i]) goto err_out_free; bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i)); } set_bit(0, buddy->bits[buddy->max_order]); buddy->num_free[buddy->max_order] = 1; return 0; err_out_free: for (i = 0; i <= buddy->max_order; ++i) kfree(buddy->bits[i]); err_out: kfree(buddy->bits); kfree(buddy->num_free); return -ENOMEM; } static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy) { int i; for (i = 0; i <= buddy->max_order; ++i) kfree(buddy->bits[i]); kfree(buddy->bits); kfree(buddy->num_free); } u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) { struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; u32 seg; int seg_order; u32 offset; seg_order = max_t(int, order - log_mtts_per_seg, 0); seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order); if (seg == -1) return -1; offset = seg * (1 << log_mtts_per_seg); if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset, offset + (1 << order) - 1)) { mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order); return -1; } return offset; } static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) { u64 in_param; u64 out_param; int err; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, order); err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT, RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (err) return -1; return get_param_l(&out_param); } return __mlx4_alloc_mtt_range(dev, order); } int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, struct mlx4_mtt *mtt) { int i; if (!npages) { mtt->order = -1; mtt->page_shift = MLX4_ICM_PAGE_SHIFT; return 0; } else mtt->page_shift = page_shift; for (mtt->order = 0, i = 1; i < npages; i <<= 1) ++mtt->order; mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order); if (mtt->offset == -1) return -ENOMEM; return 0; } EXPORT_SYMBOL_GPL(mlx4_mtt_init); void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) { u32 first_seg; int seg_order; struct mlx4_mr_table *mr_table = 
&mlx4_priv(dev)->mr_table; seg_order = max_t(int, order - log_mtts_per_seg, 0); first_seg = offset / (1 << log_mtts_per_seg); mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order); mlx4_table_put_range(dev, &mr_table->mtt_table, offset, offset + (1 << order) - 1); } static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) { u64 in_param; int err; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, offset); set_param_h(&in_param, order); err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (err) mlx4_warn(dev, "Failed to free mtt range at:" "%d order:%d\n", offset, order); return; } __mlx4_free_mtt_range(dev, offset, order); } void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) { if (mtt->order < 0) return; mlx4_free_mtt_range(dev, mtt->offset, mtt->order); } EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup); u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt) { return (u64) mtt->offset * dev->caps.mtt_entry_sz; } EXPORT_SYMBOL_GPL(mlx4_mtt_addr); static u32 hw_index_to_key(u32 ind) { return (ind >> 24) | (ind << 8); } static u32 key_to_hw_index(u32 key) { return (key << 24) | (key >> 8); } static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int mpt_index) { return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); } static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int mpt_index) { return mlx4_cmd_box(dev, 0, mailbox ? 
mailbox->dma : 0, mpt_index, !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); } static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr) { mr->iova = iova; mr->size = size; mr->pd = pd; mr->access = access; mr->enabled = MLX4_MR_DISABLED; mr->key = hw_index_to_key(mridx); return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); } static int mlx4_WRITE_MTT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int num_entries) { return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } int __mlx4_mr_reserve(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap); } static int mlx4_mr_reserve(struct mlx4_dev *dev) { u64 out_param; if (mlx4_is_mfunc(dev)) { if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) return -1; return get_param_l(&out_param); } return __mlx4_mr_reserve(dev); } void __mlx4_mr_release(struct mlx4_dev *dev, u32 index) { struct mlx4_priv *priv = mlx4_priv(dev); mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index); } static void mlx4_mr_release(struct mlx4_dev *dev, u32 index) { u64 in_param; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, index); if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE, MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) mlx4_warn(dev, "Failed to release mr index:%d\n", index); return; } __mlx4_mr_release(dev, index); } int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index) { struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; return mlx4_table_get(dev, &mr_table->dmpt_table, index); } static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index) { u64 param; if (mlx4_is_mfunc(dev)) { set_param_l(&param, index); return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM, 
MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } return __mlx4_mr_alloc_icm(dev, index); } void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index) { struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; mlx4_table_put(dev, &mr_table->dmpt_table, index); } static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index) { u64 in_param; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, index); if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM, MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) mlx4_warn(dev, "Failed to free icm of mr index:%d\n", index); return; } return __mlx4_mr_free_icm(dev, index); } int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr) { u32 index; int err; index = mlx4_mr_reserve(dev); if (index == -1) return -ENOMEM; err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size, access, npages, page_shift, mr); if (err) mlx4_mr_release(dev, index); return err; } EXPORT_SYMBOL_GPL(mlx4_mr_alloc); static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr) { int err; if (mr->enabled == MLX4_MR_EN_HW) { err = mlx4_HW2SW_MPT(dev, NULL, key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1)); if (err) mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err); mr->enabled = MLX4_MR_EN_SW; } mlx4_mtt_cleanup(dev, &mr->mtt); } void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr) { mlx4_mr_free_reserved(dev, mr); if (mr->enabled) mlx4_mr_free_icm(dev, key_to_hw_index(mr->key)); mlx4_mr_release(dev, key_to_hw_index(mr->key)); } EXPORT_SYMBOL_GPL(mlx4_mr_free); int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) { struct mlx4_cmd_mailbox *mailbox; struct mlx4_mpt_entry *mpt_entry; int err; err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key)); if (err) return err; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) { err = PTR_ERR(mailbox); goto err_table; } mpt_entry = mailbox->buf; memset(mpt_entry, 0, sizeof *mpt_entry); 
mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO | MLX4_MPT_FLAG_REGION | mr->access); mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key)); mpt_entry->pd_flags = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV); mpt_entry->start = cpu_to_be64(mr->iova); mpt_entry->length = cpu_to_be64(mr->size); mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); if (mr->mtt.order < 0) { mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); mpt_entry->mtt_addr = 0; } else { mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt)); } if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) { /* fast register MR in free state */ mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | MLX4_MPT_PD_FLAG_RAE); mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order); } else { mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); } err = mlx4_SW2HW_MPT(dev, mailbox, key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1)); if (err) { mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err); goto err_cmd; } mr->enabled = MLX4_MR_EN_HW; mlx4_free_cmd_mailbox(dev, mailbox); return 0; err_cmd: mlx4_free_cmd_mailbox(dev, mailbox); err_table: mlx4_mr_free_icm(dev, key_to_hw_index(mr->key)); return err; } EXPORT_SYMBOL_GPL(mlx4_mr_enable); static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) { struct mlx4_priv *priv = mlx4_priv(dev); __be64 *mtts; dma_addr_t dma_handle; int i; mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset + start_index, &dma_handle); if (!mtts) return -ENOMEM; dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle, npages * sizeof (u64), DMA_TO_DEVICE); for (i = 0; i < npages; ++i) mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); dma_sync_single_for_device(&dev->pdev->dev, dma_handle, npages * sizeof (u64), DMA_TO_DEVICE); return 0; } int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 
*page_list) { int err = 0; int chunk; int mtts_per_page; int max_mtts_first_page; /* compute how may mtts fit in the first page */ mtts_per_page = PAGE_SIZE / sizeof(u64); max_mtts_first_page = mtts_per_page - (mtt->offset + start_index) % mtts_per_page; chunk = min_t(int, max_mtts_first_page, npages); while (npages > 0) { err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list); if (err) return err; npages -= chunk; start_index += chunk; page_list += chunk; chunk = min_t(int, mtts_per_page, npages); } return err; } int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) { struct mlx4_cmd_mailbox *mailbox = NULL; __be64 *inbox = NULL; int chunk; int err = 0; int i; if (mtt->order < 0) return -EINVAL; if (mlx4_is_mfunc(dev)) { mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); inbox = mailbox->buf; while (npages > 0) { chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2, npages); inbox[0] = cpu_to_be64(mtt->offset + start_index); inbox[1] = 0; for (i = 0; i < chunk; ++i) inbox[i + 2] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); err = mlx4_WRITE_MTT(dev, mailbox, chunk); if (err) { mlx4_free_cmd_mailbox(dev, mailbox); return err; } npages -= chunk; start_index += chunk; page_list += chunk; } mlx4_free_cmd_mailbox(dev, mailbox); return err; } return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list); } EXPORT_SYMBOL_GPL(mlx4_write_mtt); int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, struct mlx4_buf *buf) { u64 *page_list; int err; int i; page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL); if (!page_list) return -ENOMEM; for (i = 0; i < buf->npages; ++i) if (buf->nbufs == 1) page_list[i] = buf->direct.map + (i << buf->page_shift); else page_list[i] = buf->page_list[i].map; err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list); kfree(page_list); return err; } EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt); int 
mlx4_init_mr_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_mr_table *mr_table = &priv->mr_table; int err; if (!is_power_of_2(dev->caps.num_mpts)) return -EINVAL; /* Nothing to do for slaves - all MR handling is forwarded * to the master */ if (mlx4_is_slave(dev)) return 0; err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, ~0, dev->caps.reserved_mrws, 0); if (err) return err; err = mlx4_buddy_init(&mr_table->mtt_buddy, ilog2(dev->caps.num_mtts / (1 << log_mtts_per_seg))); if (err) goto err_buddy; if (dev->caps.reserved_mtts) { priv->reserved_mtts = mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)); if (priv->reserved_mtts < 0) { mlx4_warn(dev, "MTT table of order %d is too small.\n", mr_table->mtt_buddy.max_order); err = -ENOMEM; goto err_reserve_mtts; } } return 0; err_reserve_mtts: mlx4_buddy_cleanup(&mr_table->mtt_buddy); err_buddy: mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); return err; } void mlx4_cleanup_mr_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_mr_table *mr_table = &priv->mr_table; if (mlx4_is_slave(dev)) return; if (priv->reserved_mtts >= 0) mlx4_free_mtt_range(dev, priv->reserved_mtts, fls(dev->caps.reserved_mtts - 1)); mlx4_buddy_cleanup(&mr_table->mtt_buddy); mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); } static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list, int npages, u64 iova) { int i, page_mask; if (npages > fmr->max_pages) return -EINVAL; page_mask = (1 << fmr->page_shift) - 1; /* We are getting page lists, so va must be page aligned. 
*/ if (iova & page_mask) return -EINVAL; /* Trust the user not to pass misaligned data in page_list */ if (0) for (i = 0; i < npages; ++i) { if (page_list[i] & ~page_mask) return -EINVAL; } if (fmr->maps >= fmr->max_maps) return -EINVAL; return 0; } int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, int npages, u64 iova, u32 *lkey, u32 *rkey) { u32 key; int i, err; err = mlx4_check_fmr(fmr, page_list, npages, iova); if (err) return err; ++fmr->maps; key = key_to_hw_index(fmr->mr.key); key += dev->caps.num_mpts; *lkey = *rkey = fmr->mr.key = hw_index_to_key(key); *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW; /* Make sure MPT status is visible before writing MTT entries */ wmb(); dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle, npages * sizeof(u64), DMA_TO_DEVICE); for (i = 0; i < npages; ++i) fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle, npages * sizeof(u64), DMA_TO_DEVICE); fmr->mpt->key = cpu_to_be32(key); fmr->mpt->lkey = cpu_to_be32(key); fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift)); fmr->mpt->start = cpu_to_be64(iova); /* Make MTT entries are visible before setting MPT status */ wmb(); *(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW; /* Make sure MPT status is visible before consumer can use FMR */ wmb(); return 0; } EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr); int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, int max_maps, u8 page_shift, struct mlx4_fmr *fmr) { struct mlx4_priv *priv = mlx4_priv(dev); int err = -ENOMEM; if (max_maps > dev->caps.max_fmr_maps) return -EINVAL; if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32) return -EINVAL; /* All MTTs must fit in the same page */ if (max_pages * sizeof *fmr->mtts > PAGE_SIZE) return -EINVAL; fmr->page_shift = page_shift; fmr->max_pages = max_pages; fmr->max_maps = max_maps; fmr->maps = 0; err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages, 
page_shift, &fmr->mr); if (err) return err; fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, fmr->mr.mtt.offset, &fmr->dma_handle); if (!fmr->mtts) { err = -ENOMEM; goto err_free; } return 0; err_free: mlx4_mr_free(dev, &fmr->mr); return err; } EXPORT_SYMBOL_GPL(mlx4_fmr_alloc); int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr) { struct mlx4_priv *priv = mlx4_priv(dev); int err; err = mlx4_mr_enable(dev, &fmr->mr); if (err) return err; fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table, key_to_hw_index(fmr->mr.key), NULL); if (!fmr->mpt) return -ENOMEM; return 0; } EXPORT_SYMBOL_GPL(mlx4_fmr_enable); void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u32 *lkey, u32 *rkey) { struct mlx4_cmd_mailbox *mailbox; int err; if (!fmr->maps) return; fmr->maps = 0; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) { err = PTR_ERR(mailbox); printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox" " failed (%d)\n", err); return; } err = mlx4_HW2SW_MPT(dev, NULL, key_to_hw_index(fmr->mr.key) & (dev->caps.num_mpts - 1)); mlx4_free_cmd_mailbox(dev, mailbox); if (err) { printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err); return; } fmr->mr.enabled = MLX4_MR_EN_SW; } EXPORT_SYMBOL_GPL(mlx4_fmr_unmap); int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr) { if (fmr->maps) return -EBUSY; mlx4_mr_free(dev, &fmr->mr); fmr->mr.enabled = MLX4_MR_DISABLED; return 0; } EXPORT_SYMBOL_GPL(mlx4_fmr_free); int mlx4_SYNC_TPT(struct mlx4_dev *dev) { return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000, MLX4_CMD_NATIVE); } EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
gpl-2.0
TheNotOnly/android_kernel_lge_jagnm_lp
drivers/base/firmware_class.c
67
21852
/* * firmware_class.c - Multi purpose firmware loading support * * Copyright (c) 2003 Manuel Estrada Sainz * * Please see Documentation/firmware_class/ for more information. * */ #include <linux/capability.h> #include <linux/device.h> #include <linux/module.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/vmalloc.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/mutex.h> #include <linux/workqueue.h> #include <linux/highmem.h> #include <linux/firmware.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/io.h> #define to_dev(obj) container_of(obj, struct device, kobj) MODULE_AUTHOR("Manuel Estrada Sainz"); MODULE_DESCRIPTION("Multi purpose firmware loading support"); MODULE_LICENSE("GPL"); /* Builtin firmware support */ #ifdef CONFIG_FW_LOADER extern struct builtin_fw __start_builtin_fw[]; extern struct builtin_fw __end_builtin_fw[]; static bool fw_get_builtin_firmware(struct firmware *fw, const char *name) { struct builtin_fw *b_fw; for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) { if (strcmp(name, b_fw->name) == 0) { fw->size = b_fw->size; fw->data = b_fw->data; return true; } } return false; } static bool fw_is_builtin_firmware(const struct firmware *fw) { struct builtin_fw *b_fw; for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) if (fw->data == b_fw->data) return true; return false; } #else /* Module case - no builtin firmware support */ static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name) { return false; } static inline bool fw_is_builtin_firmware(const struct firmware *fw) { return false; } #endif enum { FW_STATUS_LOADING, FW_STATUS_DONE, FW_STATUS_ABORT, }; static int loading_timeout = 60; /* In seconds */ static inline long firmware_loading_timeout(void) { return loading_timeout > 0 ? 
loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT; } /* fw_lock could be moved to 'struct firmware_priv' but since it is just * guarding for corner cases a global lock should be OK */ static DEFINE_MUTEX(fw_lock); struct firmware_priv { struct completion completion; struct firmware *fw; unsigned long status; struct page **pages; int nr_pages; int page_array_size; phys_addr_t dest_addr; size_t dest_size; struct timer_list timeout; struct device dev; bool nowait; char fw_id[]; }; static struct firmware_priv *to_firmware_priv(struct device *dev) { return container_of(dev, struct firmware_priv, dev); } static void fw_load_abort(struct firmware_priv *fw_priv) { set_bit(FW_STATUS_ABORT, &fw_priv->status); wmb(); complete(&fw_priv->completion); } static ssize_t firmware_timeout_show(struct class *class, struct class_attribute *attr, char *buf) { return sprintf(buf, "%d\n", loading_timeout); } /** * firmware_timeout_store - set number of seconds to wait for firmware * @class: device class pointer * @attr: device attribute pointer * @buf: buffer to scan for timeout value * @count: number of bytes in @buf * * Sets the number of seconds to wait for the firmware. Once * this expires an error will be returned to the driver and no * firmware will be provided. * * Note: zero means 'wait forever'. 
**/ static ssize_t firmware_timeout_store(struct class *class, struct class_attribute *attr, const char *buf, size_t count) { loading_timeout = simple_strtol(buf, NULL, 10); if (loading_timeout < 0) loading_timeout = 0; return count; } static struct class_attribute firmware_class_attrs[] = { __ATTR(timeout, S_IWUSR | S_IRUGO, firmware_timeout_show, firmware_timeout_store), __ATTR_NULL }; static void fw_dev_release(struct device *dev) { struct firmware_priv *fw_priv = to_firmware_priv(dev); int i; for (i = 0; i < fw_priv->nr_pages; i++) __free_page(fw_priv->pages[i]); kfree(fw_priv->pages); kfree(fw_priv); module_put(THIS_MODULE); } static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env) { struct firmware_priv *fw_priv = to_firmware_priv(dev); if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->fw_id)) return -ENOMEM; if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout)) return -ENOMEM; if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait)) return -ENOMEM; return 0; } static struct class firmware_class = { .name = "firmware", .class_attrs = firmware_class_attrs, .dev_uevent = firmware_uevent, .dev_release = fw_dev_release, }; static ssize_t firmware_loading_show(struct device *dev, struct device_attribute *attr, char *buf) { struct firmware_priv *fw_priv = to_firmware_priv(dev); int loading = test_bit(FW_STATUS_LOADING, &fw_priv->status); return sprintf(buf, "%d\n", loading); } static void firmware_free_data(const struct firmware *fw) { int i; vunmap(fw->data); if (fw->pages) { for (i = 0; i < PFN_UP(fw->size); i++) __free_page(fw->pages[i]); kfree(fw->pages); } } /* Some architectures don't have PAGE_KERNEL_RO */ #ifndef PAGE_KERNEL_RO #define PAGE_KERNEL_RO PAGE_KERNEL #endif /** * firmware_loading_store - set value in the 'loading' control file * @dev: device pointer * @attr: device attribute pointer * @buf: buffer to scan for loading control value * @count: number of bytes in @buf * * The relevant values are: * * 1: Start a load, discarding any 
previous partial load. * 0: Conclude the load and hand the data to the driver code. * -1: Conclude the load with an error and discard any written data. **/ static ssize_t firmware_loading_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct firmware_priv *fw_priv = to_firmware_priv(dev); int loading = simple_strtol(buf, NULL, 10); int i; mutex_lock(&fw_lock); if (!fw_priv->fw) goto out; switch (loading) { case 1: if (fw_priv->dest_addr) { set_bit(FW_STATUS_LOADING, &fw_priv->status); break; } firmware_free_data(fw_priv->fw); memset(fw_priv->fw, 0, sizeof(struct firmware)); /* If the pages are not owned by 'struct firmware' */ for (i = 0; i < fw_priv->nr_pages; i++) __free_page(fw_priv->pages[i]); kfree(fw_priv->pages); fw_priv->pages = NULL; fw_priv->page_array_size = 0; fw_priv->nr_pages = 0; set_bit(FW_STATUS_LOADING, &fw_priv->status); break; case 0: if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) { if (fw_priv->dest_addr) { complete(&fw_priv->completion); clear_bit(FW_STATUS_LOADING, &fw_priv->status); break; } vunmap(fw_priv->fw->data); fw_priv->fw->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0, PAGE_KERNEL_RO); if (!fw_priv->fw->data) { dev_err(dev, "%s: vmap() failed\n", __func__); goto err; } /* Pages are now owned by 'struct firmware' */ fw_priv->fw->pages = fw_priv->pages; fw_priv->pages = NULL; fw_priv->page_array_size = 0; fw_priv->nr_pages = 0; complete(&fw_priv->completion); clear_bit(FW_STATUS_LOADING, &fw_priv->status); break; } /* fallthrough */ default: dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading); /* fallthrough */ case -1: err: fw_load_abort(fw_priv); break; } out: mutex_unlock(&fw_lock); return count; } static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store); static int __firmware_data_rw(struct firmware_priv *fw_priv, char *buffer, loff_t *offset, size_t count, int read) { u8 __iomem *fw_buf; int retval = count; if ((*offset + count) > 
fw_priv->dest_size) { pr_debug("%s: Failed size check.\n", __func__); retval = -EINVAL; goto out; } fw_buf = ioremap(fw_priv->dest_addr + *offset, count); if (!fw_buf) { pr_debug("%s: Failed ioremap.\n", __func__); retval = -ENOMEM; goto out; } if (read) memcpy(buffer, fw_buf, count); else memcpy(fw_buf, buffer, count); *offset += count; iounmap(fw_buf); out: return retval; } static ssize_t firmware_direct_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = to_dev(kobj); struct firmware_priv *fw_priv = to_firmware_priv(dev); struct firmware *fw; ssize_t ret_count; mutex_lock(&fw_lock); fw = fw_priv->fw; if (offset > fw->size) { ret_count = 0; goto out; } if (count > fw->size - offset) count = fw->size - offset; if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) { ret_count = -ENODEV; goto out; } ret_count = __firmware_data_rw(fw_priv, buffer, &offset, count, 1); out: mutex_unlock(&fw_lock); return ret_count; } static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = to_dev(kobj); struct firmware_priv *fw_priv = to_firmware_priv(dev); struct firmware *fw; ssize_t ret_count; mutex_lock(&fw_lock); fw = fw_priv->fw; if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) { ret_count = -ENODEV; goto out; } if (offset > fw->size) { ret_count = 0; goto out; } if (count > fw->size - offset) count = fw->size - offset; ret_count = count; while (count) { void *page_data; int page_nr = offset >> PAGE_SHIFT; int page_ofs = offset & (PAGE_SIZE-1); int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); page_data = kmap(fw_priv->pages[page_nr]); memcpy(buffer, page_data + page_ofs, page_cnt); kunmap(fw_priv->pages[page_nr]); buffer += page_cnt; offset += page_cnt; count -= page_cnt; } out: mutex_unlock(&fw_lock); return ret_count; } static int fw_realloc_buffer(struct 
firmware_priv *fw_priv, int min_size) { int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT; /* If the array of pages is too small, grow it... */ if (fw_priv->page_array_size < pages_needed) { int new_array_size = max(pages_needed, fw_priv->page_array_size * 2); struct page **new_pages; new_pages = kmalloc(new_array_size * sizeof(void *), GFP_KERNEL); if (!new_pages) { fw_load_abort(fw_priv); return -ENOMEM; } memcpy(new_pages, fw_priv->pages, fw_priv->page_array_size * sizeof(void *)); memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) * (new_array_size - fw_priv->page_array_size)); kfree(fw_priv->pages); fw_priv->pages = new_pages; fw_priv->page_array_size = new_array_size; } while (fw_priv->nr_pages < pages_needed) { fw_priv->pages[fw_priv->nr_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); if (!fw_priv->pages[fw_priv->nr_pages]) { fw_load_abort(fw_priv); return -ENOMEM; } fw_priv->nr_pages++; } return 0; } static ssize_t firmware_direct_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = to_dev(kobj); struct firmware_priv *fw_priv = to_firmware_priv(dev); struct firmware *fw; ssize_t retval; if (!capable(CAP_SYS_RAWIO)) return -EPERM; mutex_lock(&fw_lock); fw = fw_priv->fw; if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) { retval = -ENODEV; goto out; } retval = __firmware_data_rw(fw_priv, buffer, &offset, count, 0); if (retval < 0) goto out; fw->size = max_t(size_t, offset, fw->size); out: mutex_unlock(&fw_lock); return retval; } /** * firmware_data_write - write method for firmware * @filp: open sysfs file * @kobj: kobject for the device * @bin_attr: bin_attr structure * @buffer: buffer being written * @offset: buffer offset for write in total data store area * @count: buffer size * * Data written to the 'data' attribute will be later handed to * the driver as a firmware image. 
**/ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = to_dev(kobj); struct firmware_priv *fw_priv = to_firmware_priv(dev); struct firmware *fw; ssize_t retval; if (!capable(CAP_SYS_RAWIO)) return -EPERM; mutex_lock(&fw_lock); fw = fw_priv->fw; if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) { retval = -ENODEV; goto out; } retval = fw_realloc_buffer(fw_priv, offset + count); if (retval) goto out; retval = count; while (count) { void *page_data; int page_nr = offset >> PAGE_SHIFT; int page_ofs = offset & (PAGE_SIZE - 1); int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); page_data = kmap(fw_priv->pages[page_nr]); memcpy(page_data + page_ofs, buffer, page_cnt); kunmap(fw_priv->pages[page_nr]); buffer += page_cnt; offset += page_cnt; count -= page_cnt; } fw->size = max_t(size_t, offset, fw->size); out: mutex_unlock(&fw_lock); return retval; } static struct bin_attribute firmware_attr_data = { .attr = { .name = "data", .mode = 0644 }, .size = 0, .read = firmware_data_read, .write = firmware_data_write, }; static struct bin_attribute firmware_direct_attr_data = { .attr = { .name = "data", .mode = 0644 }, .size = 0, .read = firmware_direct_read, .write = firmware_direct_write, }; static void firmware_class_timeout(u_long data) { struct firmware_priv *fw_priv = (struct firmware_priv *) data; fw_load_abort(fw_priv); } static struct firmware_priv * fw_create_instance(struct firmware *firmware, const char *fw_name, struct device *device, bool uevent, bool nowait) { struct firmware_priv *fw_priv; struct device *f_dev; fw_priv = kzalloc(sizeof(*fw_priv) + strlen(fw_name) + 1 , GFP_KERNEL); if (!fw_priv) { dev_err(device, "%s: kmalloc failed\n", __func__); return ERR_PTR(-ENOMEM); } fw_priv->fw = firmware; fw_priv->nowait = nowait; strcpy(fw_priv->fw_id, fw_name); init_completion(&fw_priv->completion); setup_timer(&fw_priv->timeout, 
firmware_class_timeout, (u_long) fw_priv); f_dev = &fw_priv->dev; device_initialize(f_dev); dev_set_name(f_dev, "%s", dev_name(device)); f_dev->parent = device; f_dev->class = &firmware_class; return fw_priv; } static struct firmware_priv * _request_firmware_prepare(const struct firmware **firmware_p, const char *name, struct device *device, bool uevent, bool nowait) { struct firmware *firmware; struct firmware_priv *fw_priv; if (!firmware_p) return ERR_PTR(-EINVAL); *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); if (!firmware) { dev_err(device, "%s: kmalloc(struct firmware) failed\n", __func__); return ERR_PTR(-ENOMEM); } if (fw_get_builtin_firmware(firmware, name)) { dev_dbg(device, "firmware: using built-in firmware %s\n", name); return NULL; } fw_priv = fw_create_instance(firmware, name, device, uevent, nowait); if (IS_ERR(fw_priv)) { release_firmware(firmware); *firmware_p = NULL; } return fw_priv; } static void _request_firmware_cleanup(const struct firmware **firmware_p) { release_firmware(*firmware_p); *firmware_p = NULL; } static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent, long timeout) { int retval = 0; struct device *f_dev = &fw_priv->dev; struct bin_attribute *fw_attr_data = fw_priv->dest_addr ? 
&firmware_direct_attr_data : &firmware_attr_data; dev_set_uevent_suppress(f_dev, true); /* Need to pin this module until class device is destroyed */ __module_get(THIS_MODULE); retval = device_add(f_dev); if (retval) { dev_err(f_dev, "%s: device_register failed\n", __func__); goto err_put_dev; } retval = device_create_bin_file(f_dev, fw_attr_data); if (retval) { dev_err(f_dev, "%s: sysfs_create_bin_file failed\n", __func__); goto err_del_dev; } retval = device_create_file(f_dev, &dev_attr_loading); if (retval) { dev_err(f_dev, "%s: device_create_file failed\n", __func__); goto err_del_bin_attr; } if (uevent) { dev_set_uevent_suppress(f_dev, false); dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_id); if (timeout != MAX_SCHEDULE_TIMEOUT) mod_timer(&fw_priv->timeout, round_jiffies_up(jiffies + timeout)); kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD); } wait_for_completion(&fw_priv->completion); set_bit(FW_STATUS_DONE, &fw_priv->status); del_timer_sync(&fw_priv->timeout); mutex_lock(&fw_lock); if (!fw_priv->fw->size || test_bit(FW_STATUS_ABORT, &fw_priv->status)) retval = -ENOENT; fw_priv->fw = NULL; mutex_unlock(&fw_lock); device_remove_file(f_dev, &dev_attr_loading); err_del_bin_attr: device_remove_bin_file(f_dev, fw_attr_data); err_del_dev: device_del(f_dev); err_put_dev: put_device(f_dev); return retval; } static int __request_firmware(const struct firmware **firmware_p, const char *name, struct device *device, phys_addr_t dest_addr, size_t size) { struct firmware_priv *fw_priv; int ret; if (!name || name[0] == '\0') return -EINVAL; fw_priv = _request_firmware_prepare(firmware_p, name, device, true, false); if (IS_ERR_OR_NULL(fw_priv)) return PTR_RET(fw_priv); fw_priv->dest_addr = dest_addr; fw_priv->dest_size = size; ret = usermodehelper_read_trylock(); if (WARN_ON(ret)) { dev_err(device, "firmware: %s will not be loaded\n", name); } else { ret = _request_firmware_load(fw_priv, true, firmware_loading_timeout()); usermodehelper_read_unlock(); } if (ret) 
_request_firmware_cleanup(firmware_p); return ret; } /** * request_firmware: - send firmware request and wait for it * @firmware_p: pointer to firmware image * @name: name of firmware file * @device: device for which firmware is being loaded * * @firmware_p will be used to return a firmware image by the name * of @name for device @device. * * Should be called from user context where sleeping is allowed. * * @name will be used as $FIRMWARE in the uevent environment and * should be distinctive enough not to be confused with any other * firmware image for this or any other device. **/ int request_firmware(const struct firmware **firmware_p, const char *name, struct device *device) { return __request_firmware(firmware_p, name, device, 0, 0); } /** * request_firmware_direct: - send firmware request and wait for it * @name: name of firmware file * @device: device for which firmware is being loaded * @dest_addr: Destination address for the firmware * @dest_size: * * Similar to request_firmware, except takes in a buffer address and * copies firmware data directly to that buffer. Returns the size of * the firmware that was loaded at dest_addr. 
*/ int request_firmware_direct(const char *name, struct device *device, phys_addr_t dest_addr, size_t dest_size) { const struct firmware *fp = NULL; int ret; ret = __request_firmware(&fp, name, device, dest_addr, dest_size); if (ret) return ret; ret = fp->size; release_firmware(fp); return ret; } /** * release_firmware: - release the resource associated with a firmware image * @fw: firmware resource to release **/ void release_firmware(const struct firmware *fw) { if (fw) { if (!fw_is_builtin_firmware(fw)) firmware_free_data(fw); kfree(fw); } } /* Async support */ struct firmware_work { struct work_struct work; struct module *module; const char *name; struct device *device; void *context; void (*cont)(const struct firmware *fw, void *context); bool uevent; }; static void request_firmware_work_func(struct work_struct *work) { struct firmware_work *fw_work; const struct firmware *fw; struct firmware_priv *fw_priv; long timeout; int ret; fw_work = container_of(work, struct firmware_work, work); fw_priv = _request_firmware_prepare(&fw, fw_work->name, fw_work->device, fw_work->uevent, true); if (IS_ERR_OR_NULL(fw_priv)) { ret = PTR_RET(fw_priv); goto out; } timeout = usermodehelper_read_lock_wait(firmware_loading_timeout()); if (timeout) { ret = _request_firmware_load(fw_priv, fw_work->uevent, timeout); usermodehelper_read_unlock(); } else { dev_dbg(fw_work->device, "firmware: %s loading timed out\n", fw_work->name); ret = -EAGAIN; } if (ret) _request_firmware_cleanup(&fw); out: fw_work->cont(fw, fw_work->context); module_put(fw_work->module); kfree(fw_work); } /** * request_firmware_nowait - asynchronous version of request_firmware * @module: module requesting the firmware * @uevent: sends uevent to copy the firmware image if this flag * is non-zero else the firmware copy must be done manually. 
* @name: name of firmware file * @device: device for which firmware is being loaded * @gfp: allocation flags * @context: will be passed over to @cont, and * @fw may be %NULL if firmware request fails. * @cont: function will be called asynchronously when the firmware * request is over. * * Asynchronous variant of request_firmware() for user contexts where * it is not possible to sleep for long time. It can't be called * in atomic contexts. **/ int request_firmware_nowait( struct module *module, bool uevent, const char *name, struct device *device, gfp_t gfp, void *context, void (*cont)(const struct firmware *fw, void *context)) { struct firmware_work *fw_work; fw_work = kzalloc(sizeof (struct firmware_work), gfp); if (!fw_work) return -ENOMEM; fw_work->module = module; fw_work->name = name; fw_work->device = device; fw_work->context = context; fw_work->cont = cont; fw_work->uevent = uevent; if (!try_module_get(module)) { kfree(fw_work); return -EFAULT; } INIT_WORK(&fw_work->work, request_firmware_work_func); schedule_work(&fw_work->work); return 0; } static int __init firmware_class_init(void) { return class_register(&firmware_class); } static void __exit firmware_class_exit(void) { class_unregister(&firmware_class); } fs_initcall(firmware_class_init); module_exit(firmware_class_exit); EXPORT_SYMBOL(release_firmware); EXPORT_SYMBOL(request_firmware); EXPORT_SYMBOL(request_firmware_nowait);
gpl-2.0
mingit/mstcp
fs/xfs/xfs_dquot_buf.c
323
7791
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_qm.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"

/*
 * Number of on-disk dquot records that fit in @nbblks basic blocks.
 */
int
xfs_calc_dquots_per_chunk(
	struct xfs_mount	*mp,
	unsigned int		nbblks)	/* basic block units */
{
	unsigned int	ndquots;

	ASSERT(nbblks > 0);
	ndquots = BBTOB(nbblks);
	do_div(ndquots, sizeof(xfs_dqblk_t));

	return ndquots;
}

/*
 * Do some primitive error checking on ondisk dquot data structures.
 *
 * Returns the number of problems detected (0 == clean).  With
 * XFS_QMOPT_DOWARN each problem is logged; with XFS_QMOPT_DQREPAIR a
 * bad dquot is reinitialized in place to an empty dquot of @type/@id.
 */
int
xfs_dqcheck(
	struct xfs_mount	*mp,
	xfs_disk_dquot_t	*ddq,
	xfs_dqid_t		id,
	uint			type,	/* used only when IO_dorepair is true */
	uint			flags,
	char			*str)
{
	xfs_dqblk_t	*d = (xfs_dqblk_t *)ddq;
	int		errs = 0;

	/*
	 * We can encounter an uninitialized dquot buffer for 2 reasons:
	 * 1. If we crash while deleting the quotainode(s), and those blks got
	 *    used for user data. This is because we take the path of regular
	 *    file deletion; however, the size field of quotainodes is never
	 *    updated, so all the tricks that we play in itruncate_finish
	 *    don't quite matter.
	 *
	 * 2. We don't play the quota buffers when there's a quotaoff logitem.
	 *    But the allocation will be replayed so we'll end up with an
	 *    uninitialized quota block.
	 *
	 * This is all fine; things are still consistent, and we haven't lost
	 * any quota information. Just don't complain about bad dquot blks.
	 */
	if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
		if (flags & XFS_QMOPT_DOWARN)
			xfs_alert(mp,
			"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
			str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
		errs++;
	}
	if (ddq->d_version != XFS_DQUOT_VERSION) {
		if (flags & XFS_QMOPT_DOWARN)
			xfs_alert(mp,
			"%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
			str, id, ddq->d_version, XFS_DQUOT_VERSION);
		errs++;
	}

	if (ddq->d_flags != XFS_DQ_USER &&
	    ddq->d_flags != XFS_DQ_PROJ &&
	    ddq->d_flags != XFS_DQ_GROUP) {
		if (flags & XFS_QMOPT_DOWARN)
			xfs_alert(mp,
			"%s : XFS dquot ID 0x%x, unknown flags 0x%x",
			str, id, ddq->d_flags);
		errs++;
	}

	/* id == -1 means "caller doesn't know the expected ID"; skip check. */
	if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
		if (flags & XFS_QMOPT_DOWARN)
			xfs_alert(mp,
			"%s : ondisk-dquot 0x%p, ID mismatch: "
			"0x%x expected, found id 0x%x",
			str, ddq, id, be32_to_cpu(ddq->d_id));
		errs++;
	}

	/*
	 * Timer sanity: once a soft limit is exceeded the matching
	 * grace-period timer must have been started.  Only meaningful
	 * on an otherwise clean, non-default (d_id != 0) dquot.
	 */
	if (!errs && ddq->d_id) {
		if (ddq->d_blk_softlimit &&
		    be64_to_cpu(ddq->d_bcount) >
				be64_to_cpu(ddq->d_blk_softlimit)) {
			if (!ddq->d_btimer) {
				if (flags & XFS_QMOPT_DOWARN)
					xfs_alert(mp,
			"%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
					str, (int)be32_to_cpu(ddq->d_id), ddq);
				errs++;
			}
		}
		if (ddq->d_ino_softlimit &&
		    be64_to_cpu(ddq->d_icount) >
				be64_to_cpu(ddq->d_ino_softlimit)) {
			if (!ddq->d_itimer) {
				if (flags & XFS_QMOPT_DOWARN)
					xfs_alert(mp,
			"%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
					str, (int)be32_to_cpu(ddq->d_id), ddq);
				errs++;
			}
		}
		if (ddq->d_rtb_softlimit &&
		    be64_to_cpu(ddq->d_rtbcount) >
				be64_to_cpu(ddq->d_rtb_softlimit)) {
			if (!ddq->d_rtbtimer) {
				if (flags & XFS_QMOPT_DOWARN)
					xfs_alert(mp,
			"%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
					str, (int)be32_to_cpu(ddq->d_id), ddq);
				errs++;
			}
		}
	}

	if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
		return errs;

	if (flags & XFS_QMOPT_DOWARN)
		xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);

	/*
	 * Typically, a repair is only requested by quotacheck.
	 */
	ASSERT(id != -1);
	ASSERT(flags & XFS_QMOPT_DQREPAIR);
	memset(d, 0, sizeof(xfs_dqblk_t));

	d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
	d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
	d->dd_diskdq.d_flags = type;
	d->dd_diskdq.d_id = cpu_to_be32(id);

	/* v5 (CRC-enabled) filesystems also stamp the UUID and CRC. */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
		xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	return errs;
}

/*
 * Verify the per-dquot CRCs and UUIDs of every dquot in the buffer.
 * Always "true" on non-CRC filesystems, which carry no dquot CRCs.
 */
STATIC bool
xfs_dquot_buf_verify_crc(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
	int			ndquots;
	int			i;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return true;

	/*
	 * if we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we need
	 * to manually calculate the number of dquots in the buffer.
	 *
	 * NOTE(review): here the length is converted with XFS_BB_TO_FSB()
	 * before being handed to xfs_calc_dquots_per_chunk(), while
	 * xfs_dquot_buf_verify() below passes bp->b_length directly --
	 * the two cannot both match the helper's "basic block units"
	 * parameter.  Confirm which unit is intended before touching.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_calc_dquots_per_chunk(mp,
					XFS_BB_TO_FSB(mp, bp->b_length));

	for (i = 0; i < ndquots; i++, d++) {
		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF))
			return false;
		if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
			return false;
	}
	return true;
}

/*
 * Structural verification of every dquot in the buffer via xfs_dqcheck().
 */
STATIC bool
xfs_dquot_buf_verify(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
	xfs_dqid_t		id = 0;
	int			ndquots;
	int			i;

	/*
	 * if we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we need
	 * to manually calculate the number of dquots in the buffer.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_calc_dquots_per_chunk(mp, bp->b_length);

	/*
	 * On the first read of the buffer, verify that each dquot is valid.
	 * We don't know what the id of the dquot is supposed to be, just that
	 * they should be increasing monotonically within the buffer. If the
	 * first id is corrupt, then it will fail on the second dquot in the
	 * buffer so corruptions could point to the wrong dquot in this case.
	 */
	for (i = 0; i < ndquots; i++) {
		struct xfs_disk_dquot	*ddq;
		int			error;

		ddq = &d[i].dd_diskdq;

		if (i == 0)
			id = be32_to_cpu(ddq->d_id);

		/* xfs_dqcheck() returns a problem count; nonzero == bad */
		error = xfs_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
				       "xfs_dquot_buf_verify");
		if (error)
			return false;
	}
	return true;
}

/* Read-side buffer verifier: CRC first, then structure. */
static void
xfs_dquot_buf_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;

	if (!xfs_dquot_buf_verify_crc(mp, bp) || !xfs_dquot_buf_verify(mp, bp)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
	}
}

/*
 * we don't calculate the CRC here as that is done when the dquot is flushed to
 * the buffer after the update is done. This ensures that the dquot in the
 * buffer always has an up-to-date CRC value.
 */
static void
xfs_dquot_buf_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;

	if (!xfs_dquot_buf_verify(mp, bp)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
		return;
	}
}

const struct xfs_buf_ops xfs_dquot_buf_ops = {
	.verify_read = xfs_dquot_buf_read_verify,
	.verify_write = xfs_dquot_buf_write_verify,
};
gpl-2.0
barakinflorida/tmo_tab-open
drivers/watchdog/bcm47xx_wdt.c
579
6277
/*
 * Watchdog driver for Broadcom BCM47XX
 *
 * Copyright (C) 2008 Aleksandar Radovanovic <biblbroks@sezampro.rs>
 * Copyright (C) 2009 Matthieu CASTET <castet.matthieu@free.fr>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/reboot.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/ssb/ssb_embedded.h>
#include <asm/mach-bcm47xx/bcm47xx.h>

#define DRV_NAME		"bcm47xx_wdt"

#define WDT_DEFAULT_TIME	30	/* seconds */
#define WDT_MAX_TIME		255	/* seconds */

static int wdt_time = WDT_DEFAULT_TIME;
static int nowayout = WATCHDOG_NOWAYOUT;

module_param(wdt_time, int, 0);
MODULE_PARM_DESC(wdt_time, "Watchdog time in seconds. (default="
				__MODULE_STRING(WDT_DEFAULT_TIME) ")");
#ifdef CONFIG_WATCHDOG_NOWAYOUT
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout,
		"Watchdog cannot be stopped once started (default="
				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
#endif

static unsigned long bcm47xx_wdt_busy;	/* bit 0: device is open */
static char expect_release;		/* 42 after magic 'V' close char seen */
static struct timer_list wdt_timer;	/* software timer feeding the hw wdt */
static atomic_t ticks;			/* seconds left until we stop feeding */

/* Arm the SSB hardware watchdog for its maximum period. */
static inline void bcm47xx_wdt_hw_start(void)
{
	/* this is 2,5s on 100Mhz clock and 2s on 133 Mhz */
	ssb_watchdog_timer_set(&ssb_bcm47xx, 0xfffffff);
}

static inline int bcm47xx_wdt_hw_stop(void)
{
	return ssb_watchdog_timer_set(&ssb_bcm47xx, 0);
}

/*
 * Per-second software tick: re-arm the short hardware watchdog while
 * the userspace-maintained counter has time left.  When the counter
 * expires we deliberately stop feeding, so the hardware resets the box.
 */
static void bcm47xx_timer_tick(unsigned long unused)
{
	if (!atomic_dec_and_test(&ticks)) {
		bcm47xx_wdt_hw_start();
		mod_timer(&wdt_timer, jiffies + HZ);
	} else {
		/*
		 * Fix: DRV_NAME needs the ": " separator like every other
		 * message in this driver; without it the log read
		 * "bcm47xx_wdtWatchdog will fire soon!!!".
		 */
		printk(KERN_CRIT DRV_NAME ": Watchdog will fire soon!!!\n");
	}
}

/* Keepalive: reload the software countdown to the full timeout. */
static inline void bcm47xx_wdt_pet(void)
{
	atomic_set(&ticks, wdt_time);
}

static void bcm47xx_wdt_start(void)
{
	bcm47xx_wdt_pet();
	bcm47xx_timer_tick(0);
}

static void bcm47xx_wdt_pause(void)
{
	del_timer_sync(&wdt_timer);
	bcm47xx_wdt_hw_stop();
}

static void bcm47xx_wdt_stop(void)
{
	bcm47xx_wdt_pause();
}

/* Validate and set the timeout; returns -EINVAL if out of 1..WDT_MAX_TIME. */
static int bcm47xx_wdt_settimeout(int new_time)
{
	if ((new_time <= 0) || (new_time > WDT_MAX_TIME))
		return -EINVAL;

	wdt_time = new_time;
	return 0;
}

/* Only one opener at a time; opening starts the watchdog. */
static int bcm47xx_wdt_open(struct inode *inode, struct file *file)
{
	if (test_and_set_bit(0, &bcm47xx_wdt_busy))
		return -EBUSY;

	bcm47xx_wdt_start();
	return nonseekable_open(inode, file);
}

/*
 * Close stops the watchdog only after a magic-character ('V') write;
 * otherwise keep it running to catch the unexpected exit.
 */
static int bcm47xx_wdt_release(struct inode *inode, struct file *file)
{
	if (expect_release == 42) {
		bcm47xx_wdt_stop();
	} else {
		printk(KERN_CRIT DRV_NAME
			": Unexpected close, not stopping watchdog!\n");
		bcm47xx_wdt_start();
	}

	clear_bit(0, &bcm47xx_wdt_busy);
	expect_release = 0;
	return 0;
}

/* Any write is a keepalive; scan for the magic 'V' unless nowayout. */
static ssize_t bcm47xx_wdt_write(struct file *file, const char __user *data,
				size_t len, loff_t *ppos)
{
	if (len) {
		if (!nowayout) {
			size_t i;

			expect_release = 0;

			for (i = 0; i != len; i++) {
				char c;
				if (get_user(c, data + i))
					return -EFAULT;
				if (c == 'V')
					expect_release = 42;
			}
		}
		bcm47xx_wdt_pet();
	}
	return len;
}

static struct watchdog_info bcm47xx_wdt_info = {
	.identity 	= DRV_NAME,
	.options 	= WDIOF_SETTIMEOUT |
				WDIOF_KEEPALIVEPING |
				WDIOF_MAGICCLOSE,
};

/* Standard watchdog char-device ioctls (see Documentation/watchdog). */
static long bcm47xx_wdt_ioctl(struct file *file,
					unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	int new_value, retval = -EINVAL;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp, &bcm47xx_wdt_info,
				sizeof(bcm47xx_wdt_info)) ? -EFAULT : 0;

	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);

	case WDIOC_SETOPTIONS:
		if (get_user(new_value, p))
			return -EFAULT;

		if (new_value & WDIOS_DISABLECARD) {
			bcm47xx_wdt_stop();
			retval = 0;
		}

		if (new_value & WDIOS_ENABLECARD) {
			bcm47xx_wdt_start();
			retval = 0;
		}

		return retval;

	case WDIOC_KEEPALIVE:
		bcm47xx_wdt_pet();
		return 0;

	case WDIOC_SETTIMEOUT:
		if (get_user(new_value, p))
			return -EFAULT;

		if (bcm47xx_wdt_settimeout(new_value))
			return -EINVAL;

		bcm47xx_wdt_pet();
		/* fallthrough: SETTIMEOUT reports the (new) timeout back */

	case WDIOC_GETTIMEOUT:
		return put_user(wdt_time, p);

	default:
		return -ENOTTY;
	}
}

/* Stop feeding on shutdown/halt so the hw wdt can't reset mid-poweroff. */
static int bcm47xx_wdt_notify_sys(struct notifier_block *this,
	unsigned long code, void *unused)
{
	if (code == SYS_DOWN || code == SYS_HALT)
		bcm47xx_wdt_stop();
	return NOTIFY_DONE;
}

static const struct file_operations bcm47xx_wdt_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.unlocked_ioctl	= bcm47xx_wdt_ioctl,
	.open		= bcm47xx_wdt_open,
	.release	= bcm47xx_wdt_release,
	.write		= bcm47xx_wdt_write,
};

static struct miscdevice bcm47xx_wdt_miscdev = {
	.minor		= WATCHDOG_MINOR,
	.name		= "watchdog",
	.fops		= &bcm47xx_wdt_fops,
};

static struct notifier_block bcm47xx_wdt_notifier = {
	.notifier_call = bcm47xx_wdt_notify_sys,
};

static int __init bcm47xx_wdt_init(void)
{
	int ret;

	/* Make sure the hardware watchdog is quiet before we take over. */
	if (bcm47xx_wdt_hw_stop() < 0)
		return -ENODEV;

	setup_timer(&wdt_timer, bcm47xx_timer_tick, 0L);

	if (bcm47xx_wdt_settimeout(wdt_time)) {
		bcm47xx_wdt_settimeout(WDT_DEFAULT_TIME);
		printk(KERN_INFO DRV_NAME ": "
			"wdt_time value must be 0 < wdt_time < %d, using %d\n",
			(WDT_MAX_TIME + 1), wdt_time);
	}

	ret = register_reboot_notifier(&bcm47xx_wdt_notifier);
	if (ret)
		return ret;

	ret = misc_register(&bcm47xx_wdt_miscdev);
	if (ret) {
		unregister_reboot_notifier(&bcm47xx_wdt_notifier);
		return ret;
	}

	printk(KERN_INFO "BCM47xx Watchdog Timer enabled (%d seconds%s)\n",
				wdt_time, nowayout ? ", nowayout" : "");
	return 0;
}

static void __exit bcm47xx_wdt_exit(void)
{
	if (!nowayout)
		bcm47xx_wdt_stop();

	misc_deregister(&bcm47xx_wdt_miscdev);

	unregister_reboot_notifier(&bcm47xx_wdt_notifier);
}

module_init(bcm47xx_wdt_init);
module_exit(bcm47xx_wdt_exit);

MODULE_AUTHOR("Aleksandar Radovanovic");
MODULE_DESCRIPTION("Watchdog driver for Broadcom BCM47xx");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
fritsch/linux
arch/powerpc/kernel/eeh_cache.c
579
8761
/*
 * PCI address cache; allows the lookup of PCI devices based on I/O address
 *
 * Copyright IBM Corporation 2004
 * Copyright Linas Vepstas <linas@austin.ibm.com> 2004
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>

/**
 * The pci address cache subsystem.  This subsystem places
 * PCI device address resources into a red-black tree, sorted
 * according to the address range, so that given only an i/o
 * address, the corresponding PCI device can be **quickly**
 * found. It is safe to perform an address lookup in an interrupt
 * context; this ability is an important feature.
 *
 * Currently, the only customer of this code is the EEH subsystem;
 * thus, this code has been somewhat tailored to suit EEH better.
 * In particular, the cache does *not* hold the addresses of devices
 * for which EEH is not enabled.
 *
 * (Implementation Note: The RB tree seems to be better/faster
 * than any hash algo I could think of for this problem, even
 * with the penalty of slow pointer chases for d-cache misses).
 */

/* One cached resource range [addr_lo, addr_hi] and the device owning it. */
struct pci_io_addr_range {
	struct rb_node rb_node;
	unsigned long addr_lo;
	unsigned long addr_hi;
	struct eeh_dev *edev;
	struct pci_dev *pcidev;
	unsigned int flags;	/* resource flags (IORESOURCE_IO/MEM) */
};

static struct pci_io_addr_cache {
	struct rb_root rb_root;
	spinlock_t piar_lock;	/* guards rb_root; taken irq-safe */
} pci_io_addr_cache_root;

/* Tree walk; caller must hold piar_lock. */
static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr)
{
	struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node;

	while (n) {
		struct pci_io_addr_range *piar;
		piar = rb_entry(n, struct pci_io_addr_range, rb_node);

		if (addr < piar->addr_lo)
			n = n->rb_left;
		else if (addr > piar->addr_hi)
			n = n->rb_right;
		else
			return piar->edev;
	}

	return NULL;
}

/**
 * eeh_addr_cache_get_dev - Get device, given only address
 * @addr: mmio (PIO) phys address or i/o port number
 *
 * Given an mmio phys address, or a port number, find a pci device
 * that implements this address.  Be sure to pci_dev_put the device
 * when finished.  I/O port numbers are assumed to be offset
 * from zero (that is, they do *not* have pci_io_addr added in).
 * It is safe to call this function within an interrupt.
 */
struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr)
{
	struct eeh_dev *edev;
	unsigned long flags;

	spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
	edev = __eeh_addr_cache_get_device(addr);
	spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
	return edev;
}

#ifdef DEBUG
/*
 * Handy-dandy debug print routine, does nothing more
 * than print out the contents of our addr cache.
 */
static void eeh_addr_cache_print(struct pci_io_addr_cache *cache)
{
	struct rb_node *n;
	int cnt = 0;

	n = rb_first(&cache->rb_root);
	while (n) {
		struct pci_io_addr_range *piar;
		piar = rb_entry(n, struct pci_io_addr_range, rb_node);
		pr_debug("PCI: %s addr range %d [%lx-%lx]: %s\n",
		       (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
		       piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev));
		cnt++;
		n = rb_next(n);
	}
}
#endif

/*
 * Insert address range into the rb tree.  Caller must hold piar_lock.
 * An exact duplicate is returned as-is; a genuinely overlapping but
 * different range is only warned about.  GFP_ATOMIC because we may be
 * called with the lock held and interrupts off.
 */
static struct pci_io_addr_range *
eeh_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
		      unsigned long ahi, unsigned int flags)
{
	struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct pci_io_addr_range *piar;

	/* Walk tree, find a place to insert into tree */
	while (*p) {
		parent = *p;
		piar = rb_entry(parent, struct pci_io_addr_range, rb_node);
		if (ahi < piar->addr_lo) {
			p = &parent->rb_left;
		} else if (alo > piar->addr_hi) {
			p = &parent->rb_right;
		} else {
			if (dev != piar->pcidev ||
			    alo != piar->addr_lo || ahi != piar->addr_hi) {
				pr_warning("PIAR: overlapping address range\n");
			}
			return piar;
		}
	}
	piar = kzalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
	if (!piar)
		return NULL;

	piar->addr_lo = alo;
	piar->addr_hi = ahi;
	piar->edev = pci_dev_to_eeh_dev(dev);
	piar->pcidev = dev;
	piar->flags = flags;

#ifdef DEBUG
	pr_debug("PIAR: insert range=[%lx:%lx] dev=%s\n",
			  alo, ahi, pci_name(dev));
#endif

	rb_link_node(&piar->rb_node, parent, p);
	rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root);

	return piar;
}

/* Add every usable BAR of @dev to the cache; caller holds piar_lock. */
static void __eeh_addr_cache_insert_dev(struct pci_dev *dev)
{
	struct device_node *dn;
	struct eeh_dev *edev;
	int i;

	dn = pci_device_to_OF_node(dev);
	if (!dn) {
		pr_warning("PCI: no pci dn found for dev=%s\n", pci_name(dev));
		return;
	}

	edev = of_node_to_eeh_dev(dn);
	if (!edev) {
		pr_warning("PCI: no EEH dev found for dn=%s\n",
			dn->full_name);
		return;
	}

	/* Skip any devices for which EEH is not enabled. */
	if (!eeh_probe_mode_dev() && !edev->pe) {
#ifdef DEBUG
		pr_info("PCI: skip building address cache for=%s - %s\n",
			pci_name(dev), dn->full_name);
#endif
		return;
	}

	/* Walk resources on this device, poke them into the tree */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long start = pci_resource_start(dev,i);
		unsigned long end = pci_resource_end(dev,i);
		unsigned int flags = pci_resource_flags(dev,i);

		/* We are interested only bus addresses, not dma or other stuff */
		if (0 == (flags & (IORESOURCE_IO | IORESOURCE_MEM)))
			continue;
		/* Reject unset or all-ones (unassigned) resource bounds. */
		if (start == 0 || ~start == 0 || end == 0 || ~end == 0)
			 continue;
		eeh_addr_cache_insert(dev, start, end, flags);
	}
}

/**
 * eeh_addr_cache_insert_dev - Add a device to the address cache
 * @dev: PCI device whose I/O addresses we are interested in.
 *
 * In order to support the fast lookup of devices based on addresses,
 * we maintain a cache of devices that can be quickly searched.
 * This routine adds a device to that cache.
 */
void eeh_addr_cache_insert_dev(struct pci_dev *dev)
{
	unsigned long flags;

	/* Ignore PCI bridges */
	if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
		return;

	spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
	__eeh_addr_cache_insert_dev(dev);
	spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
}

/*
 * Remove every range belonging to @dev.  The walk restarts from the
 * tree root after each erase because rb_erase() invalidates the
 * iteration position.  Caller holds piar_lock.
 */
static inline void __eeh_addr_cache_rmv_dev(struct pci_dev *dev)
{
	struct rb_node *n;

restart:
	n = rb_first(&pci_io_addr_cache_root.rb_root);
	while (n) {
		struct pci_io_addr_range *piar;
		piar = rb_entry(n, struct pci_io_addr_range, rb_node);

		if (piar->pcidev == dev) {
			rb_erase(n, &pci_io_addr_cache_root.rb_root);
			kfree(piar);
			goto restart;
		}
		n = rb_next(n);
	}
}

/**
 * eeh_addr_cache_rmv_dev - remove pci device from addr cache
 * @dev: device to remove
 *
 * Remove a device from the addr-cache tree.
 * This is potentially expensive, since it will walk
 * the tree multiple times (once per resource).
 * But so what; device removal doesn't need to be that fast.
 */
void eeh_addr_cache_rmv_dev(struct pci_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
	__eeh_addr_cache_rmv_dev(dev);
	spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
}

/**
 * eeh_addr_cache_build - Build a cache of I/O addresses
 *
 * Build a cache of pci i/o addresses.  This cache will be used to
 * find the pci device that corresponds to a given address.
 * This routine scans all pci busses to build the cache.
 * Must be run late in boot process, after the pci controllers
 * have been scanned for devices (after all device resources are known).
 */
void eeh_addr_cache_build(void)
{
	struct device_node *dn;
	struct eeh_dev *edev;
	struct pci_dev *dev = NULL;

	spin_lock_init(&pci_io_addr_cache_root.piar_lock);

	for_each_pci_dev(dev) {
		dn = pci_device_to_OF_node(dev);
		if (!dn)
			continue;

		edev = of_node_to_eeh_dev(dn);
		if (!edev)
			continue;

		/* Wire up the pci_dev <-> eeh_dev back-pointers. */
		dev->dev.archdata.edev = edev;
		edev->pdev = dev;

		eeh_addr_cache_insert_dev(dev);
		eeh_sysfs_add_device(dev);
	}

#ifdef DEBUG
	/* Verify tree built up above, echo back the list of addrs. */
	eeh_addr_cache_print(&pci_io_addr_cache_root);
#endif
}
gpl-2.0
laitianli/linux-encryption-request-2.6.32
drivers/infiniband/hw/ipath/ipath_init_chip.c
579
33441
/* * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include "ipath_kernel.h" #include "ipath_common.h" /* * min buffers we want to have per port, after driver */ #define IPATH_MIN_USER_PORT_BUFCNT 7 /* * Number of ports we are configured to use (to allow for more pio * buffers per port, etc.) Zero means use chip value. 
*/ static ushort ipath_cfgports; module_param_named(cfgports, ipath_cfgports, ushort, S_IRUGO); MODULE_PARM_DESC(cfgports, "Set max number of ports to use"); /* * Number of buffers reserved for driver (verbs and layered drivers.) * Initialized based on number of PIO buffers if not set via module interface. * The problem with this is that it's global, but we'll use different * numbers for different chip types. */ static ushort ipath_kpiobufs; static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp); module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_ushort, &ipath_kpiobufs, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver"); /** * create_port0_egr - allocate the eager TID buffers * @dd: the infinipath device * * This code is now quite different for user and kernel, because * the kernel uses skb's, for the accelerated network performance. * This is the kernel (port0) version. * * Allocate the eager TID buffers and program them into infinipath. * We use the network layer alloc_skb() allocator to allocate the * memory, and either use the buffers as is for things like verbs * packets, or pass the buffers up to the ipath layered driver and * thence the network layer, replacing them as we do so (see * ipath_rcv_layer()). */ static int create_port0_egr(struct ipath_devdata *dd) { unsigned e, egrcnt; struct ipath_skbinfo *skbinfo; int ret; egrcnt = dd->ipath_p0_rcvegrcnt; skbinfo = vmalloc(sizeof(*dd->ipath_port0_skbinfo) * egrcnt); if (skbinfo == NULL) { ipath_dev_err(dd, "allocation error for eager TID " "skb array\n"); ret = -ENOMEM; goto bail; } for (e = 0; e < egrcnt; e++) { /* * This is a bit tricky in that we allocate extra * space for 2 bytes of the 14 byte ethernet header. * These two bytes are passed in the ipath header so * the rest of the data is word aligned. We allocate * 4 bytes so that the data buffer stays word aligned. * See ipath_kreceive() for more details. 
*/ skbinfo[e].skb = ipath_alloc_skb(dd, GFP_KERNEL); if (!skbinfo[e].skb) { ipath_dev_err(dd, "SKB allocation error for " "eager TID %u\n", e); while (e != 0) dev_kfree_skb(skbinfo[--e].skb); vfree(skbinfo); ret = -ENOMEM; goto bail; } } /* * After loop above, so we can test non-NULL to see if ready * to use at receive, etc. */ dd->ipath_port0_skbinfo = skbinfo; for (e = 0; e < egrcnt; e++) { dd->ipath_port0_skbinfo[e].phys = ipath_map_single(dd->pcidev, dd->ipath_port0_skbinfo[e].skb->data, dd->ipath_ibmaxlen, PCI_DMA_FROMDEVICE); dd->ipath_f_put_tid(dd, e + (u64 __iomem *) ((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase), RCVHQ_RCV_TYPE_EAGER, dd->ipath_port0_skbinfo[e].phys); } ret = 0; bail: return ret; } static int bringup_link(struct ipath_devdata *dd) { u64 val, ibc; int ret = 0; /* hold IBC in reset */ dd->ipath_control &= ~INFINIPATH_C_LINKENABLE; ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control); /* * set initial max size pkt IBC will send, including ICRC; it's the * PIO buffer size in dwords, less 1; also see ipath_set_mtu() */ val = (dd->ipath_ibmaxlen >> 2) + 1; ibc = val << dd->ibcc_mpl_shift; /* flowcontrolwatermark is in units of KBytes */ ibc |= 0x5ULL << INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT; /* * How often flowctrl sent. More or less in usecs; balance against * watermark value, so that in theory senders always get a flow * control update in time to not let the IB link go idle. */ ibc |= 0x3ULL << INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT; /* max error tolerance */ ibc |= 0xfULL << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT; /* use "real" buffer space for */ ibc |= 4ULL << INFINIPATH_IBCC_CREDITSCALE_SHIFT; /* IB credit flow control. */ ibc |= 0xfULL << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT; /* initially come up waiting for TS1, without sending anything. */ dd->ipath_ibcctrl = ibc; /* * Want to start out with both LINKCMD and LINKINITCMD in NOP * (0 and 0). Don't put linkinitcmd in ipath_ibcctrl, want that * to stay a NOP. 
Flag that we are disabled, for the (unlikely) * case that some recovery path is trying to bring the link up * before we are ready. */ ibc |= INFINIPATH_IBCC_LINKINITCMD_DISABLE << INFINIPATH_IBCC_LINKINITCMD_SHIFT; dd->ipath_flags |= IPATH_IB_LINK_DISABLED; ipath_cdbg(VERBOSE, "Writing 0x%llx to ibcctrl\n", (unsigned long long) ibc); ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, ibc); // be sure chip saw it val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); ret = dd->ipath_f_bringup_serdes(dd); if (ret) dev_info(&dd->pcidev->dev, "Could not initialize SerDes, " "not usable\n"); else { /* enable IBC */ dd->ipath_control |= INFINIPATH_C_LINKENABLE; ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control); } return ret; } static struct ipath_portdata *create_portdata0(struct ipath_devdata *dd) { struct ipath_portdata *pd = NULL; pd = kzalloc(sizeof(*pd), GFP_KERNEL); if (pd) { pd->port_dd = dd; pd->port_cnt = 1; /* The port 0 pkey table is used by the layer interface. */ pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY; pd->port_seq_cnt = 1; } return pd; } static int init_chip_first(struct ipath_devdata *dd) { struct ipath_portdata *pd; int ret = 0; u64 val; spin_lock_init(&dd->ipath_kernel_tid_lock); spin_lock_init(&dd->ipath_user_tid_lock); spin_lock_init(&dd->ipath_sendctrl_lock); spin_lock_init(&dd->ipath_uctxt_lock); spin_lock_init(&dd->ipath_sdma_lock); spin_lock_init(&dd->ipath_gpio_lock); spin_lock_init(&dd->ipath_eep_st_lock); spin_lock_init(&dd->ipath_sdepb_lock); mutex_init(&dd->ipath_eep_lock); /* * skip cfgports stuff because we are not allocating memory, * and we don't want problems if the portcnt changed due to * cfgports. We do still check and report a difference, if * not same (should be impossible). 
*/ dd->ipath_f_config_ports(dd, ipath_cfgports); if (!ipath_cfgports) dd->ipath_cfgports = dd->ipath_portcnt; else if (ipath_cfgports <= dd->ipath_portcnt) { dd->ipath_cfgports = ipath_cfgports; ipath_dbg("Configured to use %u ports out of %u in chip\n", dd->ipath_cfgports, ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt)); } else { dd->ipath_cfgports = dd->ipath_portcnt; ipath_dbg("Tried to configured to use %u ports; chip " "only supports %u\n", ipath_cfgports, ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt)); } /* * Allocate full portcnt array, rather than just cfgports, because * cleanup iterates across all possible ports. */ dd->ipath_pd = kzalloc(sizeof(*dd->ipath_pd) * dd->ipath_portcnt, GFP_KERNEL); if (!dd->ipath_pd) { ipath_dev_err(dd, "Unable to allocate portdata array, " "failing\n"); ret = -ENOMEM; goto done; } pd = create_portdata0(dd); if (!pd) { ipath_dev_err(dd, "Unable to allocate portdata for port " "0, failing\n"); ret = -ENOMEM; goto done; } dd->ipath_pd[0] = pd; dd->ipath_rcvtidcnt = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt); dd->ipath_rcvtidbase = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase); dd->ipath_rcvegrcnt = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt); dd->ipath_rcvegrbase = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase); dd->ipath_palign = ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign); dd->ipath_piobufbase = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufbase); val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize); dd->ipath_piosize2k = val & ~0U; dd->ipath_piosize4k = val >> 32; if (dd->ipath_piosize4k == 0 && ipath_mtu4096) ipath_mtu4096 = 0; /* 4KB not supported by this chip */ dd->ipath_ibmtu = ipath_mtu4096 ? 
4096 : 2048; val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt); dd->ipath_piobcnt2k = val & ~0U; dd->ipath_piobcnt4k = val >> 32; dd->ipath_pio2kbase = (u32 __iomem *) (((char __iomem *) dd->ipath_kregbase) + (dd->ipath_piobufbase & 0xffffffff)); if (dd->ipath_piobcnt4k) { dd->ipath_pio4kbase = (u32 __iomem *) (((char __iomem *) dd->ipath_kregbase) + (dd->ipath_piobufbase >> 32)); /* * 4K buffers take 2 pages; we use roundup just to be * paranoid; we calculate it once here, rather than on * ever buf allocate */ dd->ipath_4kalign = ALIGN(dd->ipath_piosize4k, dd->ipath_palign); ipath_dbg("%u 2k(%x) piobufs @ %p, %u 4k(%x) @ %p " "(%x aligned)\n", dd->ipath_piobcnt2k, dd->ipath_piosize2k, dd->ipath_pio2kbase, dd->ipath_piobcnt4k, dd->ipath_piosize4k, dd->ipath_pio4kbase, dd->ipath_4kalign); } else ipath_dbg("%u 2k piobufs @ %p\n", dd->ipath_piobcnt2k, dd->ipath_pio2kbase); done: return ret; } /** * init_chip_reset - re-initialize after a reset, or enable * @dd: the infinipath device * * sanity check at least some of the values after reset, and * ensure no receive or transmit (explictly, in case reset * failed */ static int init_chip_reset(struct ipath_devdata *dd) { u32 rtmp; int i; unsigned long flags; /* * ensure chip does no sends or receives, tail updates, or * pioavail updates while we re-initialize */ dd->ipath_rcvctrl &= ~(1ULL << dd->ipath_r_tailupd_shift); for (i = 0; i < dd->ipath_portcnt; i++) { clear_bit(dd->ipath_r_portenable_shift + i, &dd->ipath_rcvctrl); clear_bit(dd->ipath_r_intravail_shift + i, &dd->ipath_rcvctrl); } ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl); spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); dd->ipath_sendctrl = 0U; /* no sdma, etc */ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL); rtmp = 
ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt); if (rtmp != dd->ipath_rcvtidcnt) dev_info(&dd->pcidev->dev, "tidcnt was %u before " "reset, now %u, using original\n", dd->ipath_rcvtidcnt, rtmp); rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase); if (rtmp != dd->ipath_rcvtidbase) dev_info(&dd->pcidev->dev, "tidbase was %u before " "reset, now %u, using original\n", dd->ipath_rcvtidbase, rtmp); rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt); if (rtmp != dd->ipath_rcvegrcnt) dev_info(&dd->pcidev->dev, "egrcnt was %u before " "reset, now %u, using original\n", dd->ipath_rcvegrcnt, rtmp); rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase); if (rtmp != dd->ipath_rcvegrbase) dev_info(&dd->pcidev->dev, "egrbase was %u before " "reset, now %u, using original\n", dd->ipath_rcvegrbase, rtmp); return 0; } static int init_pioavailregs(struct ipath_devdata *dd) { int ret; dd->ipath_pioavailregs_dma = dma_alloc_coherent( &dd->pcidev->dev, PAGE_SIZE, &dd->ipath_pioavailregs_phys, GFP_KERNEL); if (!dd->ipath_pioavailregs_dma) { ipath_dev_err(dd, "failed to allocate PIOavail reg area " "in memory\n"); ret = -ENOMEM; goto done; } /* * we really want L2 cache aligned, but for current CPUs of * interest, they are the same. */ dd->ipath_statusp = (u64 *) ((char *)dd->ipath_pioavailregs_dma + ((2 * L1_CACHE_BYTES + dd->ipath_pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES)); /* copy the current value now that it's really allocated */ *dd->ipath_statusp = dd->_ipath_status; /* * setup buffer to hold freeze msg, accessible to apps, * following statusp */ dd->ipath_freezemsg = (char *)&dd->ipath_statusp[1]; /* and its length */ dd->ipath_freezelen = L1_CACHE_BYTES - sizeof(dd->ipath_statusp[0]); ret = 0; done: return ret; } /** * init_shadow_tids - allocate the shadow TID array * @dd: the infinipath device * * allocate the shadow TID array, so we can ipath_munlock previous * entries. 
It may make more sense to move the pageshadow to the * port data structure, so we only allocate memory for ports actually * in use, since we at 8k per port, now. */ static void init_shadow_tids(struct ipath_devdata *dd) { struct page **pages; dma_addr_t *addrs; pages = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt * sizeof(struct page *)); if (!pages) { ipath_dev_err(dd, "failed to allocate shadow page * " "array, no expected sends!\n"); dd->ipath_pageshadow = NULL; return; } addrs = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt * sizeof(dma_addr_t)); if (!addrs) { ipath_dev_err(dd, "failed to allocate shadow dma handle " "array, no expected sends!\n"); vfree(pages); dd->ipath_pageshadow = NULL; return; } memset(pages, 0, dd->ipath_cfgports * dd->ipath_rcvtidcnt * sizeof(struct page *)); dd->ipath_pageshadow = pages; dd->ipath_physshadow = addrs; } static void enable_chip(struct ipath_devdata *dd, int reinit) { u32 val; u64 rcvmask; unsigned long flags; int i; if (!reinit) init_waitqueue_head(&ipath_state_wait); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl); spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); /* Enable PIO send, and update of PIOavail regs to memory. */ dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE | INFINIPATH_S_PIOBUFAVAILUPD; /* * Set the PIO avail update threshold to host memory * on chips that support it. */ if (dd->ipath_pioupd_thresh) dd->ipath_sendctrl |= dd->ipath_pioupd_thresh << INFINIPATH_S_UPDTHRESH_SHIFT; ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); /* * Enable kernel ports' receive and receive interrupt. * Other ports done as user opens and inits them. 
*/ rcvmask = 1ULL; dd->ipath_rcvctrl |= (rcvmask << dd->ipath_r_portenable_shift) | (rcvmask << dd->ipath_r_intravail_shift); if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) dd->ipath_rcvctrl |= (1ULL << dd->ipath_r_tailupd_shift); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl); /* * now ready for use. this should be cleared whenever we * detect a reset, or initiate one. */ dd->ipath_flags |= IPATH_INITTED; /* * Init our shadow copies of head from tail values, * and write head values to match. */ val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0); ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0); /* Initialize so we interrupt on next packet received */ ipath_write_ureg(dd, ur_rcvhdrhead, dd->ipath_rhdrhead_intr_off | dd->ipath_pd[0]->port_head, 0); /* * by now pioavail updates to memory should have occurred, so * copy them into our working/shadow registers; this is in * case something went wrong with abort, but mostly to get the * initial values of the generation bit correct. */ for (i = 0; i < dd->ipath_pioavregs; i++) { __le64 pioavail; /* * Chip Errata bug 6641; even and odd qwords>3 are swapped. */ if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) pioavail = dd->ipath_pioavailregs_dma[i ^ 1]; else pioavail = dd->ipath_pioavailregs_dma[i]; /* * don't need to worry about ipath_pioavailkernel here * because we will call ipath_chg_pioavailkernel() later * in initialization, to busy out buffers as needed */ dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail); } /* can get counters, stats, etc. */ dd->ipath_flags |= IPATH_PRESENT; } static int init_housekeeping(struct ipath_devdata *dd, int reinit) { char boardn[40]; int ret = 0; /* * have to clear shadow copies of registers at init that are * not otherwise set here, or all kinds of bizarre things * happen with driver on chip reset */ dd->ipath_rcvhdrsize = 0; /* * Don't clear ipath_flags as 8bit mode was set before * entering this func. 
However, we do set the linkstate to * unknown, so we can watch for a transition. * PRESENT is set because we want register reads to work, * and the kernel infrastructure saw it in config space; * We clear it if we have failures. */ dd->ipath_flags |= IPATH_LINKUNK | IPATH_PRESENT; dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED | IPATH_LINKDOWN | IPATH_LINKINIT); ipath_cdbg(VERBOSE, "Try to read spc chip revision\n"); dd->ipath_revision = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision); /* * set up fundamental info we need to use the chip; we assume * if the revision reg and these regs are OK, we don't need to * special case the rest */ dd->ipath_sregbase = ipath_read_kreg32(dd, dd->ipath_kregs->kr_sendregbase); dd->ipath_cregbase = ipath_read_kreg32(dd, dd->ipath_kregs->kr_counterregbase); dd->ipath_uregbase = ipath_read_kreg32(dd, dd->ipath_kregs->kr_userregbase); ipath_cdbg(VERBOSE, "ipath_kregbase %p, sendbase %x usrbase %x, " "cntrbase %x\n", dd->ipath_kregbase, dd->ipath_sregbase, dd->ipath_uregbase, dd->ipath_cregbase); if ((dd->ipath_revision & 0xffffffff) == 0xffffffff || (dd->ipath_sregbase & 0xffffffff) == 0xffffffff || (dd->ipath_cregbase & 0xffffffff) == 0xffffffff || (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) { ipath_dev_err(dd, "Register read failures from chip, " "giving up initialization\n"); dd->ipath_flags &= ~IPATH_PRESENT; ret = -ENODEV; goto done; } /* clear diagctrl register, in case diags were running and crashed */ ipath_write_kreg (dd, dd->ipath_kregs->kr_hwdiagctrl, 0); /* clear the initial reset flag, in case first driver load */ ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, INFINIPATH_E_RESET); ipath_cdbg(VERBOSE, "Revision %llx (PCI %x)\n", (unsigned long long) dd->ipath_revision, dd->ipath_pcirev); if (((dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) & INFINIPATH_R_SOFTWARE_MASK) != IPATH_CHIP_SWVERSION) { ipath_dev_err(dd, "Driver only handles version %d, " "chip swversion is %d (%llx), failng\n", 
IPATH_CHIP_SWVERSION, (int)(dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) & INFINIPATH_R_SOFTWARE_MASK, (unsigned long long) dd->ipath_revision); ret = -ENOSYS; goto done; } dd->ipath_majrev = (u8) ((dd->ipath_revision >> INFINIPATH_R_CHIPREVMAJOR_SHIFT) & INFINIPATH_R_CHIPREVMAJOR_MASK); dd->ipath_minrev = (u8) ((dd->ipath_revision >> INFINIPATH_R_CHIPREVMINOR_SHIFT) & INFINIPATH_R_CHIPREVMINOR_MASK); dd->ipath_boardrev = (u8) ((dd->ipath_revision >> INFINIPATH_R_BOARDID_SHIFT) & INFINIPATH_R_BOARDID_MASK); ret = dd->ipath_f_get_boardname(dd, boardn, sizeof boardn); snprintf(dd->ipath_boardversion, sizeof(dd->ipath_boardversion), "ChipABI %u.%u, %s, InfiniPath%u %u.%u, PCI %u, " "SW Compat %u\n", IPATH_CHIP_VERS_MAJ, IPATH_CHIP_VERS_MIN, boardn, (unsigned)(dd->ipath_revision >> INFINIPATH_R_ARCH_SHIFT) & INFINIPATH_R_ARCH_MASK, dd->ipath_majrev, dd->ipath_minrev, dd->ipath_pcirev, (unsigned)(dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) & INFINIPATH_R_SOFTWARE_MASK); ipath_dbg("%s", dd->ipath_boardversion); if (ret) goto done; if (reinit) ret = init_chip_reset(dd); else ret = init_chip_first(dd); done: return ret; } static void verify_interrupt(unsigned long opaque) { struct ipath_devdata *dd = (struct ipath_devdata *) opaque; if (!dd) return; /* being torn down */ /* * If we don't have any interrupts, let the user know and * don't bother checking again. */ if (dd->ipath_int_counter == 0) { if (!dd->ipath_f_intr_fallback(dd)) dev_err(&dd->pcidev->dev, "No interrupts detected, " "not usable.\n"); else /* re-arm the timer to see if fallback works */ mod_timer(&dd->ipath_intrchk_timer, jiffies + HZ/2); } else ipath_cdbg(VERBOSE, "%u interrupts at timer check\n", dd->ipath_int_counter); } /** * ipath_init_chip - do the actual initialization sequence on the chip * @dd: the infinipath device * @reinit: reinitializing, so don't allocate new memory * * Do the actual initialization sequence on the chip. 
This is done * both from the init routine called from the PCI infrastructure, and * when we reset the chip, or detect that it was reset internally, * or it's administratively re-enabled. * * Memory allocation here and in called routines is only done in * the first case (reinit == 0). We have to be careful, because even * without memory allocation, we need to re-write all the chip registers * TIDs, etc. after the reset or enable has completed. */ int ipath_init_chip(struct ipath_devdata *dd, int reinit) { int ret = 0; u32 kpiobufs, defkbufs; u32 piobufs, uports; u64 val; struct ipath_portdata *pd; gfp_t gfp_flags = GFP_USER | __GFP_COMP; ret = init_housekeeping(dd, reinit); if (ret) goto done; /* * we ignore most issues after reporting them, but have to specially * handle hardware-disabled chips. */ if (ret == 2) { /* unique error, known to ipath_init_one */ ret = -EPERM; goto done; } /* * We could bump this to allow for full rcvegrcnt + rcvtidcnt, * but then it no longer nicely fits power of two, and since * we now use routines that backend onto __get_free_pages, the * rest would be wasted. */ dd->ipath_rcvhdrcnt = max(dd->ipath_p0_rcvegrcnt, dd->ipath_rcvegrcnt); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrcnt, dd->ipath_rcvhdrcnt); /* * Set up the shadow copies of the piobufavail registers, * which we compare against the chip registers for now, and * the in memory DMA'ed copies of the registers. This has to * be done early, before we calculate lastport, etc. */ piobufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k; /* * calc number of pioavail registers, and save it; we have 2 * bits per buffer. */ dd->ipath_pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) / (sizeof(u64) * BITS_PER_BYTE / 2); uports = dd->ipath_cfgports ? 
dd->ipath_cfgports - 1 : 0; if (piobufs > 144) defkbufs = 32 + dd->ipath_pioreserved; else defkbufs = 16 + dd->ipath_pioreserved; if (ipath_kpiobufs && (ipath_kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT)) > piobufs) { int i = (int) piobufs - (int) (uports * IPATH_MIN_USER_PORT_BUFCNT); if (i < 1) i = 1; dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of " "%d for kernel leaves too few for %d user ports " "(%d each); using %u\n", ipath_kpiobufs, piobufs, uports, IPATH_MIN_USER_PORT_BUFCNT, i); /* * shouldn't change ipath_kpiobufs, because could be * different for different devices... */ kpiobufs = i; } else if (ipath_kpiobufs) kpiobufs = ipath_kpiobufs; else kpiobufs = defkbufs; dd->ipath_lastport_piobuf = piobufs - kpiobufs; dd->ipath_pbufsport = uports ? dd->ipath_lastport_piobuf / uports : 0; /* if not an even divisor, some user ports get extra buffers */ dd->ipath_ports_extrabuf = dd->ipath_lastport_piobuf - (dd->ipath_pbufsport * uports); if (dd->ipath_ports_extrabuf) ipath_dbg("%u pbufs/port leaves some unused, add 1 buffer to " "ports <= %u\n", dd->ipath_pbufsport, dd->ipath_ports_extrabuf); dd->ipath_lastpioindex = 0; dd->ipath_lastpioindexl = dd->ipath_piobcnt2k; /* ipath_pioavailshadow initialized earlier */ ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u " "each for %u user ports\n", kpiobufs, piobufs, dd->ipath_pbufsport, uports); ret = dd->ipath_f_early_init(dd); if (ret) { ipath_dev_err(dd, "Early initialization failure\n"); goto done; } /* * Early_init sets rcvhdrentsize and rcvhdrsize, so this must be * done after early_init. 
*/ dd->ipath_hdrqlast = dd->ipath_rcvhdrentsize * (dd->ipath_rcvhdrcnt - 1); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrentsize, dd->ipath_rcvhdrentsize); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize, dd->ipath_rcvhdrsize); if (!reinit) { ret = init_pioavailregs(dd); init_shadow_tids(dd); if (ret) goto done; } ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr, dd->ipath_pioavailregs_phys); /* * this is to detect s/w errors, which the h/w works around by * ignoring the low 6 bits of address, if it wasn't aligned. */ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpioavailaddr); if (val != dd->ipath_pioavailregs_phys) { ipath_dev_err(dd, "Catastrophic software error, " "SendPIOAvailAddr written as %lx, " "read back as %llx\n", (unsigned long) dd->ipath_pioavailregs_phys, (unsigned long long) val); ret = -EINVAL; goto done; } ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvbthqp, IPATH_KD_QP); /* * make sure we are not in freeze, and PIO send enabled, so * writes to pbc happen */ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, 0ULL); ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED); ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL); /* * before error clears, since we expect serdes pll errors during * this, the first time after reset */ if (bringup_link(dd)) { dev_info(&dd->pcidev->dev, "Failed to bringup IB link\n"); ret = -ENETDOWN; goto done; } /* * clear any "expected" hwerrs from reset and/or initialization * clear any that aren't enabled (at least this once), and then * set the enable mask */ dd->ipath_f_init_hwerrors(dd); ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED); ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, dd->ipath_hwerrmask); /* clear all */ ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL); /* enable errors that are masked, at least this first time. 
*/ ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, ~dd->ipath_maskederrs); dd->ipath_maskederrs = 0; /* don't re-enable ignored in timer */ dd->ipath_errormask = ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask); /* clear any interrupts up to this point (ints still not enabled) */ ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL); dd->ipath_f_tidtemplate(dd); /* * Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing * re-init, the simplest way to handle this is to free * existing, and re-allocate. * Need to re-create rest of port 0 portdata as well. */ pd = dd->ipath_pd[0]; if (reinit) { struct ipath_portdata *npd; /* * Alloc and init new ipath_portdata for port0, * Then free old pd. Could lead to fragmentation, but also * makes later support for hot-swap easier. */ npd = create_portdata0(dd); if (npd) { ipath_free_pddata(dd, pd); dd->ipath_pd[0] = npd; pd = npd; } else { ipath_dev_err(dd, "Unable to allocate portdata" " for port 0, failing\n"); ret = -ENOMEM; goto done; } } ret = ipath_create_rcvhdrq(dd, pd); if (!ret) ret = create_port0_egr(dd); if (ret) { ipath_dev_err(dd, "failed to allocate kernel port's " "rcvhdrq and/or egr bufs\n"); goto done; } else enable_chip(dd, reinit); /* after enable_chip, so pioavailshadow setup */ ipath_chg_pioavailkernel(dd, 0, piobufs, 1); /* * Cancel any possible active sends from early driver load. * Follows early_init because some chips have to initialize * PIO buffers in early_init to avoid false parity errors. * After enable and ipath_chg_pioavailkernel so we can safely * enable pioavail updates and PIOENABLE; packets are now * ready to go out. */ ipath_cancel_sends(dd, 1); if (!reinit) { /* * Used when we close a port, for DMA already in flight * at close. 
*/ dd->ipath_dummy_hdrq = dma_alloc_coherent( &dd->pcidev->dev, dd->ipath_pd[0]->port_rcvhdrq_size, &dd->ipath_dummy_hdrq_phys, gfp_flags); if (!dd->ipath_dummy_hdrq) { dev_info(&dd->pcidev->dev, "Couldn't allocate 0x%lx bytes for dummy hdrq\n", dd->ipath_pd[0]->port_rcvhdrq_size); /* fallback to just 0'ing */ dd->ipath_dummy_hdrq_phys = 0UL; } } /* * cause retrigger of pending interrupts ignored during init, * even if we had errors */ ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL); if (!dd->ipath_stats_timer_active) { /* * first init, or after an admin disable/enable * set up stats retrieval timer, even if we had errors * in last portion of setup */ init_timer(&dd->ipath_stats_timer); dd->ipath_stats_timer.function = ipath_get_faststats; dd->ipath_stats_timer.data = (unsigned long) dd; /* every 5 seconds; */ dd->ipath_stats_timer.expires = jiffies + 5 * HZ; /* takes ~16 seconds to overflow at full IB 4x bandwdith */ add_timer(&dd->ipath_stats_timer); dd->ipath_stats_timer_active = 1; } /* Set up SendDMA if chip supports it */ if (dd->ipath_flags & IPATH_HAS_SEND_DMA) ret = setup_sdma(dd); /* Set up HoL state */ init_timer(&dd->ipath_hol_timer); dd->ipath_hol_timer.function = ipath_hol_event; dd->ipath_hol_timer.data = (unsigned long)dd; dd->ipath_hol_state = IPATH_HOL_UP; done: if (!ret) { *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT; if (!dd->ipath_f_intrsetup(dd)) { /* now we can enable all interrupts from the chip */ ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, -1LL); /* force re-interrupt of any pending interrupts. 
*/ ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL); /* chip is usable; mark it as initialized */ *dd->ipath_statusp |= IPATH_STATUS_INITTED; /* * setup to verify we get an interrupt, and fallback * to an alternate if necessary and possible */ if (!reinit) { init_timer(&dd->ipath_intrchk_timer); dd->ipath_intrchk_timer.function = verify_interrupt; dd->ipath_intrchk_timer.data = (unsigned long) dd; } dd->ipath_intrchk_timer.expires = jiffies + HZ/2; add_timer(&dd->ipath_intrchk_timer); } else ipath_dev_err(dd, "No interrupts enabled, couldn't " "setup interrupt address\n"); if (dd->ipath_cfgports > ipath_stats.sps_nports) /* * sps_nports is a global, so, we set it to * the highest number of ports of any of the * chips we find; we never decrement it, at * least for now. Since this might have changed * over disable/enable or prior to reset, always * do the check and potentially adjust. */ ipath_stats.sps_nports = dd->ipath_cfgports; } else ipath_dbg("Failed (%d) to initialize chip\n", ret); /* if ret is non-zero, we probably should do some cleanup here... */ return ret; } static int ipath_set_kpiobufs(const char *str, struct kernel_param *kp) { struct ipath_devdata *dd; unsigned long flags; unsigned short val; int ret; ret = ipath_parse_ushort(str, &val); spin_lock_irqsave(&ipath_devs_lock, flags); if (ret < 0) goto bail; if (val == 0) { ret = -EINVAL; goto bail; } list_for_each_entry(dd, &ipath_dev_list, ipath_list) { if (dd->ipath_kregbase) continue; if (val > (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT))) { ipath_dev_err( dd, "Allocating %d PIO bufs for kernel leaves " "too few for %d user ports (%d each)\n", val, dd->ipath_cfgports - 1, IPATH_MIN_USER_PORT_BUFCNT); ret = -EINVAL; goto bail; } dd->ipath_lastport_piobuf = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - val; } ipath_kpiobufs = val; ret = 0; bail: spin_unlock_irqrestore(&ipath_devs_lock, flags); return ret; }
gpl-2.0
TeamExodus/kernel_xiaomi_cancro
drivers/hwmon/ads7871.c
1603
7064
/* * ads7871 - driver for TI ADS7871 A/D converter * * Copyright (c) 2010 Paul Thomas <pthomas8589@gmail.com> * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 or * later as publishhed by the Free Software Foundation. * * You need to have something like this in struct spi_board_info * { * .modalias = "ads7871", * .max_speed_hz = 2*1000*1000, * .chip_select = 0, * .bus_num = 1, * }, */ /*From figure 18 in the datasheet*/ /*Register addresses*/ #define REG_LS_BYTE 0 /*A/D Output Data, LS Byte*/ #define REG_MS_BYTE 1 /*A/D Output Data, MS Byte*/ #define REG_PGA_VALID 2 /*PGA Valid Register*/ #define REG_AD_CONTROL 3 /*A/D Control Register*/ #define REG_GAIN_MUX 4 /*Gain/Mux Register*/ #define REG_IO_STATE 5 /*Digital I/O State Register*/ #define REG_IO_CONTROL 6 /*Digital I/O Control Register*/ #define REG_OSC_CONTROL 7 /*Rev/Oscillator Control Register*/ #define REG_SER_CONTROL 24 /*Serial Interface Control Register*/ #define REG_ID 31 /*ID Register*/ /* * From figure 17 in the datasheet * These bits get ORed with the address to form * the instruction byte */ /*Instruction Bit masks*/ #define INST_MODE_bm (1<<7) #define INST_READ_bm (1<<6) #define INST_16BIT_bm (1<<5) /*From figure 18 in the datasheet*/ /*bit masks for Rev/Oscillator Control Register*/ #define MUX_CNV_bv 7 #define MUX_CNV_bm (1<<MUX_CNV_bv) #define MUX_M3_bm (1<<3) /*M3 selects single ended*/ #define MUX_G_bv 4 /*allows for reg = (gain << MUX_G_bv) | ...*/ /*From figure 18 in the datasheet*/ /*bit masks for Rev/Oscillator Control Register*/ #define OSC_OSCR_bm (1<<5) #define OSC_OSCE_bm (1<<4) #define OSC_REFE_bm (1<<3) #define OSC_BUFE_bm (1<<2) #define OSC_R2V_bm (1<<1) 
#define OSC_RBG_bm (1<<0) #include <linux/module.h> #include <linux/init.h> #include <linux/spi/spi.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/delay.h> #define DEVICE_NAME "ads7871" struct ads7871_data { struct device *hwmon_dev; struct mutex update_lock; }; static int ads7871_read_reg8(struct spi_device *spi, int reg) { int ret; reg = reg | INST_READ_bm; ret = spi_w8r8(spi, reg); return ret; } static int ads7871_read_reg16(struct spi_device *spi, int reg) { int ret; reg = reg | INST_READ_bm | INST_16BIT_bm; ret = spi_w8r16(spi, reg); return ret; } static int ads7871_write_reg8(struct spi_device *spi, int reg, u8 val) { u8 tmp[2] = {reg, val}; return spi_write(spi, tmp, sizeof(tmp)); } static ssize_t show_voltage(struct device *dev, struct device_attribute *da, char *buf) { struct spi_device *spi = to_spi_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); int ret, val, i = 0; uint8_t channel, mux_cnv; channel = attr->index; /* * TODO: add support for conversions * other than single ended with a gain of 1 */ /*MUX_M3_bm forces single ended*/ /*This is also where the gain of the PGA would be set*/ ads7871_write_reg8(spi, REG_GAIN_MUX, (MUX_CNV_bm | MUX_M3_bm | channel)); ret = ads7871_read_reg8(spi, REG_GAIN_MUX); mux_cnv = ((ret & MUX_CNV_bm)>>MUX_CNV_bv); /* * on 400MHz arm9 platform the conversion * is already done when we do this test */ while ((i < 2) && mux_cnv) { i++; ret = ads7871_read_reg8(spi, REG_GAIN_MUX); mux_cnv = ((ret & MUX_CNV_bm)>>MUX_CNV_bv); msleep_interruptible(1); } if (mux_cnv == 0) { val = ads7871_read_reg16(spi, REG_LS_BYTE); /*result in volts*10000 = (val/8192)*2.5*10000*/ val = ((val>>2) * 25000) / 8192; return sprintf(buf, "%d\n", val); } else { return -1; } } static ssize_t ads7871_show_name(struct device *dev, struct device_attribute *devattr, char *buf) { return sprintf(buf, "%s\n", to_spi_device(dev)->modalias); } static 
SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_voltage, NULL, 0); static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 1); static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 2); static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_voltage, NULL, 3); static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_voltage, NULL, 4); static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 5); static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 6); static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 7); static DEVICE_ATTR(name, S_IRUGO, ads7871_show_name, NULL); static struct attribute *ads7871_attributes[] = { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_in7_input.dev_attr.attr, &dev_attr_name.attr, NULL }; static const struct attribute_group ads7871_group = { .attrs = ads7871_attributes, }; static int __devinit ads7871_probe(struct spi_device *spi) { int ret, err; uint8_t val; struct ads7871_data *pdata; dev_dbg(&spi->dev, "probe\n"); /* Configure the SPI bus */ spi->mode = (SPI_MODE_0); spi->bits_per_word = 8; spi_setup(spi); ads7871_write_reg8(spi, REG_SER_CONTROL, 0); ads7871_write_reg8(spi, REG_AD_CONTROL, 0); val = (OSC_OSCR_bm | OSC_OSCE_bm | OSC_REFE_bm | OSC_BUFE_bm); ads7871_write_reg8(spi, REG_OSC_CONTROL, val); ret = ads7871_read_reg8(spi, REG_OSC_CONTROL); dev_dbg(&spi->dev, "REG_OSC_CONTROL write:%x, read:%x\n", val, ret); /* * because there is no other error checking on an SPI bus * we need to make sure we really have a chip */ if (val != ret) { err = -ENODEV; goto exit; } pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL); if (!pdata) { err = -ENOMEM; goto exit; } err = sysfs_create_group(&spi->dev.kobj, &ads7871_group); if (err < 0) goto 
error_free; spi_set_drvdata(spi, pdata); pdata->hwmon_dev = hwmon_device_register(&spi->dev); if (IS_ERR(pdata->hwmon_dev)) { err = PTR_ERR(pdata->hwmon_dev); goto error_remove; } return 0; error_remove: sysfs_remove_group(&spi->dev.kobj, &ads7871_group); error_free: kfree(pdata); exit: return err; } static int __devexit ads7871_remove(struct spi_device *spi) { struct ads7871_data *pdata = spi_get_drvdata(spi); hwmon_device_unregister(pdata->hwmon_dev); sysfs_remove_group(&spi->dev.kobj, &ads7871_group); kfree(pdata); return 0; } static struct spi_driver ads7871_driver = { .driver = { .name = DEVICE_NAME, .owner = THIS_MODULE, }, .probe = ads7871_probe, .remove = __devexit_p(ads7871_remove), }; module_spi_driver(ads7871_driver); MODULE_AUTHOR("Paul Thomas <pthomas8589@gmail.com>"); MODULE_DESCRIPTION("TI ADS7871 A/D driver"); MODULE_LICENSE("GPL");
gpl-2.0
DespairFactor/N6
arch/s390/pci/pci_dma.c
1859
12616
/* * Copyright IBM Corp. 2012 * * Author(s): * Jan Glauber <jang@linux.vnet.ibm.com> */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/iommu-helper.h> #include <linux/dma-mapping.h> #include <linux/pci.h> #include <asm/pci_dma.h> static struct kmem_cache *dma_region_table_cache; static struct kmem_cache *dma_page_table_cache; static unsigned long *dma_alloc_cpu_table(void) { unsigned long *table, *entry; table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC); if (!table) return NULL; for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++) *entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED; return table; } static void dma_free_cpu_table(void *table) { kmem_cache_free(dma_region_table_cache, table); } static unsigned long *dma_alloc_page_table(void) { unsigned long *table, *entry; table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC); if (!table) return NULL; for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++) *entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED; return table; } static void dma_free_page_table(void *table) { kmem_cache_free(dma_page_table_cache, table); } static unsigned long *dma_get_seg_table_origin(unsigned long *entry) { unsigned long *sto; if (reg_entry_isvalid(*entry)) sto = get_rt_sto(*entry); else { sto = dma_alloc_cpu_table(); if (!sto) return NULL; set_rt_sto(entry, sto); validate_rt_entry(entry); entry_clr_protected(entry); } return sto; } static unsigned long *dma_get_page_table_origin(unsigned long *entry) { unsigned long *pto; if (reg_entry_isvalid(*entry)) pto = get_st_pto(*entry); else { pto = dma_alloc_page_table(); if (!pto) return NULL; set_st_pto(entry, pto); validate_st_entry(entry); entry_clr_protected(entry); } return pto; } static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr) { unsigned long *sto, *pto; unsigned int rtx, sx, px; rtx = calc_rtx(dma_addr); sto = dma_get_seg_table_origin(&rto[rtx]); if (!sto) return NULL; sx = 
calc_sx(dma_addr); pto = dma_get_page_table_origin(&sto[sx]); if (!pto) return NULL; px = calc_px(dma_addr); return &pto[px]; } static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr, dma_addr_t dma_addr, int flags) { unsigned long *entry; entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr); if (!entry) { WARN_ON_ONCE(1); return; } if (flags & ZPCI_PTE_INVALID) { invalidate_pt_entry(entry); return; } else { set_pt_pfaa(entry, page_addr); validate_pt_entry(entry); } if (flags & ZPCI_TABLE_PROTECTED) entry_set_protected(entry); else entry_clr_protected(entry); } static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa, dma_addr_t dma_addr, size_t size, int flags) { unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; u8 *page_addr = (u8 *) (pa & PAGE_MASK); dma_addr_t start_dma_addr = dma_addr; unsigned long irq_flags; int i, rc = 0; if (!nr_pages) return -EINVAL; spin_lock_irqsave(&zdev->dma_table_lock, irq_flags); if (!zdev->dma_table) { dev_err(&zdev->pdev->dev, "Missing DMA table\n"); goto no_refresh; } for (i = 0; i < nr_pages; i++) { dma_update_cpu_trans(zdev, page_addr, dma_addr, flags); page_addr += PAGE_SIZE; dma_addr += PAGE_SIZE; } /* * rpcit is not required to establish new translations when previously * invalid translation-table entries are validated, however it is * required when altering previously valid entries. */ if (!zdev->tlb_refresh && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) /* * TODO: also need to check that the old entry is indeed INVALID * and not only for one page but for the whole range... * -> now we WARN_ON in that case but with lazy unmap that * needs to be redone! 
*/ goto no_refresh; rc = s390pci_refresh_trans((u64) zdev->fh << 32, start_dma_addr, nr_pages * PAGE_SIZE); no_refresh: spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags); return rc; } static void dma_free_seg_table(unsigned long entry) { unsigned long *sto = get_rt_sto(entry); int sx; for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++) if (reg_entry_isvalid(sto[sx])) dma_free_page_table(get_st_pto(sto[sx])); dma_free_cpu_table(sto); } static void dma_cleanup_tables(struct zpci_dev *zdev) { unsigned long *table; int rtx; if (!zdev || !zdev->dma_table) return; table = zdev->dma_table; for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++) if (reg_entry_isvalid(table[rtx])) dma_free_seg_table(table[rtx]); dma_free_cpu_table(table); zdev->dma_table = NULL; } static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start, int size) { unsigned long boundary_size = 0x1000000; return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages, start, size, 0, boundary_size, 0); } static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size) { unsigned long offset, flags; spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags); offset = __dma_alloc_iommu(zdev, zdev->next_bit, size); if (offset == -1) offset = __dma_alloc_iommu(zdev, 0, size); if (offset != -1) { zdev->next_bit = offset + size; if (zdev->next_bit >= zdev->iommu_pages) zdev->next_bit = 0; } spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags); return offset; } static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size) { unsigned long flags; spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags); if (!zdev->iommu_bitmap) goto out; bitmap_clear(zdev->iommu_bitmap, offset, size); if (offset >= zdev->next_bit) zdev->next_bit = offset + size; out: spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags); } int dma_set_mask(struct device *dev, u64 mask) { if (!dev->dma_mask || !dma_supported(dev, mask)) return -EIO; *dev->dma_mask = mask; return 0; } EXPORT_SYMBOL_GPL(dma_set_mask); 
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev)); unsigned long nr_pages, iommu_page_index; unsigned long pa = page_to_phys(page) + offset; int flags = ZPCI_PTE_VALID; dma_addr_t dma_addr; /* This rounds up number of pages based on size and offset */ nr_pages = iommu_num_pages(pa, size, PAGE_SIZE); iommu_page_index = dma_alloc_iommu(zdev, nr_pages); if (iommu_page_index == -1) goto out_err; /* Use rounded up size */ size = nr_pages * PAGE_SIZE; dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE; if (dma_addr + size > zdev->end_dma) { dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n", dma_addr, size, zdev->end_dma); goto out_free; } if (direction == DMA_NONE || direction == DMA_TO_DEVICE) flags |= ZPCI_TABLE_PROTECTED; if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) { atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages); return dma_addr + (offset & ~PAGE_MASK); } out_free: dma_free_iommu(zdev, iommu_page_index, nr_pages); out_err: dev_err(dev, "Failed to map addr: %lx\n", pa); return DMA_ERROR_CODE; } static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev)); unsigned long iommu_page_index; int npages; npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); dma_addr = dma_addr & PAGE_MASK; if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE, ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr); atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages); iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT; dma_free_iommu(zdev, iommu_page_index, npages); } static void *s390_dma_alloc(struct device 
*dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) { struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev)); struct page *page; unsigned long pa; dma_addr_t map; size = PAGE_ALIGN(size); page = alloc_pages(flag, get_order(size)); if (!page) return NULL; atomic64_add(size / PAGE_SIZE, (atomic64_t *) &zdev->fmb->allocated_pages); pa = page_to_phys(page); memset((void *) pa, 0, size); map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE, size, DMA_BIDIRECTIONAL, NULL); if (dma_mapping_error(dev, map)) { free_pages(pa, get_order(size)); return NULL; } if (dma_handle) *dma_handle = map; return (void *) pa; } static void s390_dma_free(struct device *dev, size_t size, void *pa, dma_addr_t dma_handle, struct dma_attrs *attrs) { s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size), DMA_BIDIRECTIONAL, NULL); free_pages((unsigned long) pa, get_order(size)); } static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg, int nr_elements, enum dma_data_direction dir, struct dma_attrs *attrs) { int mapped_elements = 0; struct scatterlist *s; int i; for_each_sg(sg, s, nr_elements, i) { struct page *page = sg_page(s); s->dma_address = s390_dma_map_pages(dev, page, s->offset, s->length, dir, NULL); if (!dma_mapping_error(dev, s->dma_address)) { s->dma_length = s->length; mapped_elements++; } else goto unmap; } out: return mapped_elements; unmap: for_each_sg(sg, s, mapped_elements, i) { if (s->dma_address) s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL); s->dma_address = 0; s->dma_length = 0; } mapped_elements = 0; goto out; } static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nr_elements, enum dma_data_direction dir, struct dma_attrs *attrs) { struct scatterlist *s; int i; for_each_sg(sg, s, nr_elements, i) { s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL); s->dma_address = 0; s->dma_length = 0; } } int zpci_dma_init_device(struct zpci_dev *zdev) { unsigned int 
bitmap_order; int rc; spin_lock_init(&zdev->iommu_bitmap_lock); spin_lock_init(&zdev->dma_table_lock); zdev->dma_table = dma_alloc_cpu_table(); if (!zdev->dma_table) { rc = -ENOMEM; goto out_clean; } zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET; zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT; bitmap_order = get_order(zdev->iommu_pages / 8); pr_info("iommu_size: 0x%lx iommu_pages: 0x%lx bitmap_order: %i\n", zdev->iommu_size, zdev->iommu_pages, bitmap_order); zdev->iommu_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO, bitmap_order); if (!zdev->iommu_bitmap) { rc = -ENOMEM; goto out_reg; } rc = zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET, zdev->start_dma + zdev->iommu_size - 1, (u64) zdev->dma_table); if (rc) goto out_reg; return 0; out_reg: dma_free_cpu_table(zdev->dma_table); out_clean: return rc; } void zpci_dma_exit_device(struct zpci_dev *zdev) { zpci_unregister_ioat(zdev, 0); dma_cleanup_tables(zdev); free_pages((unsigned long) zdev->iommu_bitmap, get_order(zdev->iommu_pages / 8)); zdev->iommu_bitmap = NULL; zdev->next_bit = 0; } static int __init dma_alloc_cpu_table_caches(void) { dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables", ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN, 0, NULL); if (!dma_region_table_cache) return -ENOMEM; dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables", ZPCI_PT_SIZE, ZPCI_PT_ALIGN, 0, NULL); if (!dma_page_table_cache) { kmem_cache_destroy(dma_region_table_cache); return -ENOMEM; } return 0; } int __init zpci_dma_init(void) { return dma_alloc_cpu_table_caches(); } void zpci_dma_exit(void) { kmem_cache_destroy(dma_page_table_cache); kmem_cache_destroy(dma_region_table_cache); } #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) static int __init dma_debug_do_init(void) { dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); return 0; } fs_initcall(dma_debug_do_init); struct dma_map_ops s390_dma_ops = { .alloc = s390_dma_alloc, .free = s390_dma_free, .map_sg = s390_dma_map_sg, .unmap_sg = 
s390_dma_unmap_sg, .map_page = s390_dma_map_pages, .unmap_page = s390_dma_unmap_pages, /* if we support direct DMA this must be conditional */ .is_phys = 0, /* dma_supported is unconditionally true without a callback */ }; EXPORT_SYMBOL_GPL(s390_dma_ops);
gpl-2.0
pazos/linux-2.6.35.3-kobo
fs/ubifs/find.c
1859
30493
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Artem Bityutskiy (Битюцкий Артём) * Adrian Hunter */ /* * This file contains functions for finding LEBs for various purposes e.g. * garbage collection. In general, lprops category heaps and lists are used * for fast access, falling back on scanning the LPT as a last resort. */ #include <linux/sort.h> #include "ubifs.h" /** * struct scan_data - data provided to scan callback functions * @min_space: minimum number of bytes for which to scan * @pick_free: whether it is OK to scan for empty LEBs * @lnum: LEB number found is returned here * @exclude_index: whether to exclude index LEBs */ struct scan_data { int min_space; int pick_free; int lnum; int exclude_index; }; /** * valuable - determine whether LEB properties are valuable. * @c: the UBIFS file-system description object * @lprops: LEB properties * * This function return %1 if the LEB properties should be added to the LEB * properties tree in memory. Otherwise %0 is returned. 
*/ static int valuable(struct ubifs_info *c, const struct ubifs_lprops *lprops) { int n, cat = lprops->flags & LPROPS_CAT_MASK; struct ubifs_lpt_heap *heap; switch (cat) { case LPROPS_DIRTY: case LPROPS_DIRTY_IDX: case LPROPS_FREE: heap = &c->lpt_heap[cat - 1]; if (heap->cnt < heap->max_cnt) return 1; if (lprops->free + lprops->dirty >= c->dark_wm) return 1; return 0; case LPROPS_EMPTY: n = c->lst.empty_lebs + c->freeable_cnt - c->lst.taken_empty_lebs; if (n < c->lsave_cnt) return 1; return 0; case LPROPS_FREEABLE: return 1; case LPROPS_FRDI_IDX: return 1; } return 0; } /** * scan_for_dirty_cb - dirty space scan callback. * @c: the UBIFS file-system description object * @lprops: LEB properties to scan * @in_tree: whether the LEB properties are in main memory * @data: information passed to and from the caller of the scan * * This function returns a code that indicates whether the scan should continue * (%LPT_SCAN_CONTINUE), whether the LEB properties should be added to the tree * in main memory (%LPT_SCAN_ADD), or whether the scan should stop * (%LPT_SCAN_STOP). 
*/ static int scan_for_dirty_cb(struct ubifs_info *c, const struct ubifs_lprops *lprops, int in_tree, struct scan_data *data) { int ret = LPT_SCAN_CONTINUE; /* Exclude LEBs that are currently in use */ if (lprops->flags & LPROPS_TAKEN) return LPT_SCAN_CONTINUE; /* Determine whether to add these LEB properties to the tree */ if (!in_tree && valuable(c, lprops)) ret |= LPT_SCAN_ADD; /* Exclude LEBs with too little space */ if (lprops->free + lprops->dirty < data->min_space) return ret; /* If specified, exclude index LEBs */ if (data->exclude_index && lprops->flags & LPROPS_INDEX) return ret; /* If specified, exclude empty or freeable LEBs */ if (lprops->free + lprops->dirty == c->leb_size) { if (!data->pick_free) return ret; /* Exclude LEBs with too little dirty space (unless it is empty) */ } else if (lprops->dirty < c->dead_wm) return ret; /* Finally we found space */ data->lnum = lprops->lnum; return LPT_SCAN_ADD | LPT_SCAN_STOP; } /** * scan_for_dirty - find a data LEB with free space. * @c: the UBIFS file-system description object * @min_space: minimum amount free plus dirty space the returned LEB has to * have * @pick_free: if it is OK to return a free or freeable LEB * @exclude_index: whether to exclude index LEBs * * This function returns a pointer to the LEB properties found or a negative * error code. 
*/ static const struct ubifs_lprops *scan_for_dirty(struct ubifs_info *c, int min_space, int pick_free, int exclude_index) { const struct ubifs_lprops *lprops; struct ubifs_lpt_heap *heap; struct scan_data data; int err, i; /* There may be an LEB with enough dirty space on the free heap */ heap = &c->lpt_heap[LPROPS_FREE - 1]; for (i = 0; i < heap->cnt; i++) { lprops = heap->arr[i]; if (lprops->free + lprops->dirty < min_space) continue; if (lprops->dirty < c->dead_wm) continue; return lprops; } /* * A LEB may have fallen off of the bottom of the dirty heap, and ended * up as uncategorized even though it has enough dirty space for us now, * so check the uncategorized list. N.B. neither empty nor freeable LEBs * can end up as uncategorized because they are kept on lists not * finite-sized heaps. */ list_for_each_entry(lprops, &c->uncat_list, list) { if (lprops->flags & LPROPS_TAKEN) continue; if (lprops->free + lprops->dirty < min_space) continue; if (exclude_index && (lprops->flags & LPROPS_INDEX)) continue; if (lprops->dirty < c->dead_wm) continue; return lprops; } /* We have looked everywhere in main memory, now scan the flash */ if (c->pnodes_have >= c->pnode_cnt) /* All pnodes are in memory, so skip scan */ return ERR_PTR(-ENOSPC); data.min_space = min_space; data.pick_free = pick_free; data.lnum = -1; data.exclude_index = exclude_index; err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum, (ubifs_lpt_scan_callback)scan_for_dirty_cb, &data); if (err) return ERR_PTR(err); ubifs_assert(data.lnum >= c->main_first && data.lnum < c->leb_cnt); c->lscan_lnum = data.lnum; lprops = ubifs_lpt_lookup_dirty(c, data.lnum); if (IS_ERR(lprops)) return lprops; ubifs_assert(lprops->lnum == data.lnum); ubifs_assert(lprops->free + lprops->dirty >= min_space); ubifs_assert(lprops->dirty >= c->dead_wm || (pick_free && lprops->free + lprops->dirty == c->leb_size)); ubifs_assert(!(lprops->flags & LPROPS_TAKEN)); ubifs_assert(!exclude_index || !(lprops->flags & LPROPS_INDEX)); return 
lprops; } /** * ubifs_find_dirty_leb - find a dirty LEB for the Garbage Collector. * @c: the UBIFS file-system description object * @ret_lp: LEB properties are returned here on exit * @min_space: minimum amount free plus dirty space the returned LEB has to * have * @pick_free: controls whether it is OK to pick empty or index LEBs * * This function tries to find a dirty logical eraseblock which has at least * @min_space free and dirty space. It prefers to take an LEB from the dirty or * dirty index heap, and it falls-back to LPT scanning if the heaps are empty * or do not have an LEB which satisfies the @min_space criteria. * * Note, LEBs which have less than dead watermark of free + dirty space are * never picked by this function. * * The additional @pick_free argument controls if this function has to return a * free or freeable LEB if one is present. For example, GC must to set it to %1, * when called from the journal space reservation function, because the * appearance of free space may coincide with the loss of enough dirty space * for GC to succeed anyway. * * In contrast, if the Garbage Collector is called from budgeting, it should * just make free space, not return LEBs which are already free or freeable. * * In addition @pick_free is set to %2 by the recovery process in order to * recover gc_lnum in which case an index LEB must not be returned. * * This function returns zero and the LEB properties of found dirty LEB in case * of success, %-ENOSPC if no dirty LEB was found and a negative error code in * case of other failures. The returned LEB is marked as "taken". */ int ubifs_find_dirty_leb(struct ubifs_info *c, struct ubifs_lprops *ret_lp, int min_space, int pick_free) { int err = 0, sum, exclude_index = pick_free == 2 ? 
1 : 0; const struct ubifs_lprops *lp = NULL, *idx_lp = NULL; struct ubifs_lpt_heap *heap, *idx_heap; ubifs_get_lprops(c); if (pick_free) { int lebs, rsvd_idx_lebs = 0; spin_lock(&c->space_lock); lebs = c->lst.empty_lebs + c->idx_gc_cnt; lebs += c->freeable_cnt - c->lst.taken_empty_lebs; /* * Note, the index may consume more LEBs than have been reserved * for it. It is OK because it might be consolidated by GC. * But if the index takes fewer LEBs than it is reserved for it, * this function must avoid picking those reserved LEBs. */ if (c->min_idx_lebs >= c->lst.idx_lebs) { rsvd_idx_lebs = c->min_idx_lebs - c->lst.idx_lebs; exclude_index = 1; } spin_unlock(&c->space_lock); /* Check if there are enough free LEBs for the index */ if (rsvd_idx_lebs < lebs) { /* OK, try to find an empty LEB */ lp = ubifs_fast_find_empty(c); if (lp) goto found; /* Or a freeable LEB */ lp = ubifs_fast_find_freeable(c); if (lp) goto found; } else /* * We cannot pick free/freeable LEBs in the below code. */ pick_free = 0; } else { spin_lock(&c->space_lock); exclude_index = (c->min_idx_lebs >= c->lst.idx_lebs); spin_unlock(&c->space_lock); } /* Look on the dirty and dirty index heaps */ heap = &c->lpt_heap[LPROPS_DIRTY - 1]; idx_heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1]; if (idx_heap->cnt && !exclude_index) { idx_lp = idx_heap->arr[0]; sum = idx_lp->free + idx_lp->dirty; /* * Since we reserve thrice as much space for the index than it * actually takes, it does not make sense to pick indexing LEBs * with less than, say, half LEB of dirty space. May be half is * not the optimal boundary - this should be tested and * checked. This boundary should determine how much we use * in-the-gaps to consolidate the index comparing to how much * we use garbage collector to consolidate it. The "half" * criteria just feels to be fine. 
*/ if (sum < min_space || sum < c->half_leb_size) idx_lp = NULL; } if (heap->cnt) { lp = heap->arr[0]; if (lp->dirty + lp->free < min_space) lp = NULL; } /* Pick the LEB with most space */ if (idx_lp && lp) { if (idx_lp->free + idx_lp->dirty >= lp->free + lp->dirty) lp = idx_lp; } else if (idx_lp && !lp) lp = idx_lp; if (lp) { ubifs_assert(lp->free + lp->dirty >= c->dead_wm); goto found; } /* Did not find a dirty LEB on the dirty heaps, have to scan */ dbg_find("scanning LPT for a dirty LEB"); lp = scan_for_dirty(c, min_space, pick_free, exclude_index); if (IS_ERR(lp)) { err = PTR_ERR(lp); goto out; } ubifs_assert(lp->dirty >= c->dead_wm || (pick_free && lp->free + lp->dirty == c->leb_size)); found: dbg_find("found LEB %d, free %d, dirty %d, flags %#x", lp->lnum, lp->free, lp->dirty, lp->flags); lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC, lp->flags | LPROPS_TAKEN, 0); if (IS_ERR(lp)) { err = PTR_ERR(lp); goto out; } memcpy(ret_lp, lp, sizeof(struct ubifs_lprops)); out: ubifs_release_lprops(c); return err; } /** * scan_for_free_cb - free space scan callback. * @c: the UBIFS file-system description object * @lprops: LEB properties to scan * @in_tree: whether the LEB properties are in main memory * @data: information passed to and from the caller of the scan * * This function returns a code that indicates whether the scan should continue * (%LPT_SCAN_CONTINUE), whether the LEB properties should be added to the tree * in main memory (%LPT_SCAN_ADD), or whether the scan should stop * (%LPT_SCAN_STOP). 
*/ static int scan_for_free_cb(struct ubifs_info *c, const struct ubifs_lprops *lprops, int in_tree, struct scan_data *data) { int ret = LPT_SCAN_CONTINUE; /* Exclude LEBs that are currently in use */ if (lprops->flags & LPROPS_TAKEN) return LPT_SCAN_CONTINUE; /* Determine whether to add these LEB properties to the tree */ if (!in_tree && valuable(c, lprops)) ret |= LPT_SCAN_ADD; /* Exclude index LEBs */ if (lprops->flags & LPROPS_INDEX) return ret; /* Exclude LEBs with too little space */ if (lprops->free < data->min_space) return ret; /* If specified, exclude empty LEBs */ if (!data->pick_free && lprops->free == c->leb_size) return ret; /* * LEBs that have only free and dirty space must not be allocated * because they may have been unmapped already or they may have data * that is obsolete only because of nodes that are still sitting in a * wbuf. */ if (lprops->free + lprops->dirty == c->leb_size && lprops->dirty > 0) return ret; /* Finally we found space */ data->lnum = lprops->lnum; return LPT_SCAN_ADD | LPT_SCAN_STOP; } /** * do_find_free_space - find a data LEB with free space. * @c: the UBIFS file-system description object * @min_space: minimum amount of free space required * @pick_free: whether it is OK to scan for empty LEBs * @squeeze: whether to try to find space in a non-empty LEB first * * This function returns a pointer to the LEB properties found or a negative * error code. 
*/ static const struct ubifs_lprops *do_find_free_space(struct ubifs_info *c, int min_space, int pick_free, int squeeze) { const struct ubifs_lprops *lprops; struct ubifs_lpt_heap *heap; struct scan_data data; int err, i; if (squeeze) { lprops = ubifs_fast_find_free(c); if (lprops && lprops->free >= min_space) return lprops; } if (pick_free) { lprops = ubifs_fast_find_empty(c); if (lprops) return lprops; } if (!squeeze) { lprops = ubifs_fast_find_free(c); if (lprops && lprops->free >= min_space) return lprops; } /* There may be an LEB with enough free space on the dirty heap */ heap = &c->lpt_heap[LPROPS_DIRTY - 1]; for (i = 0; i < heap->cnt; i++) { lprops = heap->arr[i]; if (lprops->free >= min_space) return lprops; } /* * A LEB may have fallen off of the bottom of the free heap, and ended * up as uncategorized even though it has enough free space for us now, * so check the uncategorized list. N.B. neither empty nor freeable LEBs * can end up as uncategorized because they are kept on lists not * finite-sized heaps. 
*/ list_for_each_entry(lprops, &c->uncat_list, list) { if (lprops->flags & LPROPS_TAKEN) continue; if (lprops->flags & LPROPS_INDEX) continue; if (lprops->free >= min_space) return lprops; } /* We have looked everywhere in main memory, now scan the flash */ if (c->pnodes_have >= c->pnode_cnt) /* All pnodes are in memory, so skip scan */ return ERR_PTR(-ENOSPC); data.min_space = min_space; data.pick_free = pick_free; data.lnum = -1; err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum, (ubifs_lpt_scan_callback)scan_for_free_cb, &data); if (err) return ERR_PTR(err); ubifs_assert(data.lnum >= c->main_first && data.lnum < c->leb_cnt); c->lscan_lnum = data.lnum; lprops = ubifs_lpt_lookup_dirty(c, data.lnum); if (IS_ERR(lprops)) return lprops; ubifs_assert(lprops->lnum == data.lnum); ubifs_assert(lprops->free >= min_space); ubifs_assert(!(lprops->flags & LPROPS_TAKEN)); ubifs_assert(!(lprops->flags & LPROPS_INDEX)); return lprops; } /** * ubifs_find_free_space - find a data LEB with free space. * @c: the UBIFS file-system description object * @min_space: minimum amount of required free space * @offs: contains offset of where free space starts on exit * @squeeze: whether to try to find space in a non-empty LEB first * * This function looks for an LEB with at least @min_space bytes of free space. * It tries to find an empty LEB if possible. If no empty LEBs are available, * this function searches for a non-empty data LEB. The returned LEB is marked * as "taken". * * This function returns found LEB number in case of success, %-ENOSPC if it * failed to find a LEB with @min_space bytes of free space and other a negative * error codes in case of failure. 
*/ int ubifs_find_free_space(struct ubifs_info *c, int min_space, int *offs, int squeeze) { const struct ubifs_lprops *lprops; int lebs, rsvd_idx_lebs, pick_free = 0, err, lnum, flags; dbg_find("min_space %d", min_space); ubifs_get_lprops(c); /* Check if there are enough empty LEBs for commit */ spin_lock(&c->space_lock); if (c->min_idx_lebs > c->lst.idx_lebs) rsvd_idx_lebs = c->min_idx_lebs - c->lst.idx_lebs; else rsvd_idx_lebs = 0; lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt - c->lst.taken_empty_lebs; if (rsvd_idx_lebs < lebs) /* * OK to allocate an empty LEB, but we still don't want to go * looking for one if there aren't any. */ if (c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) { pick_free = 1; /* * Because we release the space lock, we must account * for this allocation here. After the LEB properties * flags have been updated, we subtract one. Note, the * result of this is that lprops also decreases * @taken_empty_lebs in 'ubifs_change_lp()', so it is * off by one for a short period of time which may * introduce a small disturbance to budgeting * calculations, but this is harmless because at the * worst case this would make the budgeting subsystem * be more pessimistic than needed. * * Fundamentally, this is about serialization of the * budgeting and lprops subsystems. We could make the * @space_lock a mutex and avoid dropping it before * calling 'ubifs_change_lp()', but mutex is more * heavy-weight, and we want budgeting to be as fast as * possible. 
*/ c->lst.taken_empty_lebs += 1; } spin_unlock(&c->space_lock); lprops = do_find_free_space(c, min_space, pick_free, squeeze); if (IS_ERR(lprops)) { err = PTR_ERR(lprops); goto out; } lnum = lprops->lnum; flags = lprops->flags | LPROPS_TAKEN; lprops = ubifs_change_lp(c, lprops, LPROPS_NC, LPROPS_NC, flags, 0); if (IS_ERR(lprops)) { err = PTR_ERR(lprops); goto out; } if (pick_free) { spin_lock(&c->space_lock); c->lst.taken_empty_lebs -= 1; spin_unlock(&c->space_lock); } *offs = c->leb_size - lprops->free; ubifs_release_lprops(c); if (*offs == 0) { /* * Ensure that empty LEBs have been unmapped. They may not have * been, for example, because of an unclean unmount. Also * LEBs that were freeable LEBs (free + dirty == leb_size) will * not have been unmapped. */ err = ubifs_leb_unmap(c, lnum); if (err) return err; } dbg_find("found LEB %d, free %d", lnum, c->leb_size - *offs); ubifs_assert(*offs <= c->leb_size - min_space); return lnum; out: if (pick_free) { spin_lock(&c->space_lock); c->lst.taken_empty_lebs -= 1; spin_unlock(&c->space_lock); } ubifs_release_lprops(c); return err; } /** * scan_for_idx_cb - callback used by the scan for a free LEB for the index. * @c: the UBIFS file-system description object * @lprops: LEB properties to scan * @in_tree: whether the LEB properties are in main memory * @data: information passed to and from the caller of the scan * * This function returns a code that indicates whether the scan should continue * (%LPT_SCAN_CONTINUE), whether the LEB properties should be added to the tree * in main memory (%LPT_SCAN_ADD), or whether the scan should stop * (%LPT_SCAN_STOP). 
*/ static int scan_for_idx_cb(struct ubifs_info *c, const struct ubifs_lprops *lprops, int in_tree, struct scan_data *data) { int ret = LPT_SCAN_CONTINUE; /* Exclude LEBs that are currently in use */ if (lprops->flags & LPROPS_TAKEN) return LPT_SCAN_CONTINUE; /* Determine whether to add these LEB properties to the tree */ if (!in_tree && valuable(c, lprops)) ret |= LPT_SCAN_ADD; /* Exclude index LEBS */ if (lprops->flags & LPROPS_INDEX) return ret; /* Exclude LEBs that cannot be made empty */ if (lprops->free + lprops->dirty != c->leb_size) return ret; /* * We are allocating for the index so it is safe to allocate LEBs with * only free and dirty space, because write buffers are sync'd at commit * start. */ data->lnum = lprops->lnum; return LPT_SCAN_ADD | LPT_SCAN_STOP; } /** * scan_for_leb_for_idx - scan for a free LEB for the index. * @c: the UBIFS file-system description object */ static const struct ubifs_lprops *scan_for_leb_for_idx(struct ubifs_info *c) { struct ubifs_lprops *lprops; struct scan_data data; int err; data.lnum = -1; err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum, (ubifs_lpt_scan_callback)scan_for_idx_cb, &data); if (err) return ERR_PTR(err); ubifs_assert(data.lnum >= c->main_first && data.lnum < c->leb_cnt); c->lscan_lnum = data.lnum; lprops = ubifs_lpt_lookup_dirty(c, data.lnum); if (IS_ERR(lprops)) return lprops; ubifs_assert(lprops->lnum == data.lnum); ubifs_assert(lprops->free + lprops->dirty == c->leb_size); ubifs_assert(!(lprops->flags & LPROPS_TAKEN)); ubifs_assert(!(lprops->flags & LPROPS_INDEX)); return lprops; } /** * ubifs_find_free_leb_for_idx - find a free LEB for the index. * @c: the UBIFS file-system description object * * This function looks for a free LEB and returns that LEB number. The returned * LEB is marked as "taken", "index". * * Only empty LEBs are allocated. This is for two reasons. First, the commit * calculates the number of LEBs to allocate based on the assumption that they * will be empty. 
Secondly, free space at the end of an index LEB is not * guaranteed to be empty because it may have been used by the in-the-gaps * method prior to an unclean unmount. * * If no LEB is found %-ENOSPC is returned. For other failures another negative * error code is returned. */ int ubifs_find_free_leb_for_idx(struct ubifs_info *c) { const struct ubifs_lprops *lprops; int lnum = -1, err, flags; ubifs_get_lprops(c); lprops = ubifs_fast_find_empty(c); if (!lprops) { lprops = ubifs_fast_find_freeable(c); if (!lprops) { ubifs_assert(c->freeable_cnt == 0); if (c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) { lprops = scan_for_leb_for_idx(c); if (IS_ERR(lprops)) { err = PTR_ERR(lprops); goto out; } } } } if (!lprops) { err = -ENOSPC; goto out; } lnum = lprops->lnum; dbg_find("found LEB %d, free %d, dirty %d, flags %#x", lnum, lprops->free, lprops->dirty, lprops->flags); flags = lprops->flags | LPROPS_TAKEN | LPROPS_INDEX; lprops = ubifs_change_lp(c, lprops, c->leb_size, 0, flags, 0); if (IS_ERR(lprops)) { err = PTR_ERR(lprops); goto out; } ubifs_release_lprops(c); /* * Ensure that empty LEBs have been unmapped. They may not have been, * for example, because of an unclean unmount. Also LEBs that were * freeable LEBs (free + dirty == leb_size) will not have been unmapped. */ err = ubifs_leb_unmap(c, lnum); if (err) { ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0, LPROPS_TAKEN | LPROPS_INDEX, 0); return err; } return lnum; out: ubifs_release_lprops(c); return err; } static int cmp_dirty_idx(const struct ubifs_lprops **a, const struct ubifs_lprops **b) { const struct ubifs_lprops *lpa = *a; const struct ubifs_lprops *lpb = *b; return lpa->dirty + lpa->free - lpb->dirty - lpb->free; } static void swap_dirty_idx(struct ubifs_lprops **a, struct ubifs_lprops **b, int size) { struct ubifs_lprops *t = *a; *a = *b; *b = t; } /** * ubifs_save_dirty_idx_lnums - save an array of the most dirty index LEB nos. 
* @c: the UBIFS file-system description object * * This function is called each commit to create an array of LEB numbers of * dirty index LEBs sorted in order of dirty and free space. This is used by * the in-the-gaps method of TNC commit. */ int ubifs_save_dirty_idx_lnums(struct ubifs_info *c) { int i; ubifs_get_lprops(c); /* Copy the LPROPS_DIRTY_IDX heap */ c->dirty_idx.cnt = c->lpt_heap[LPROPS_DIRTY_IDX - 1].cnt; memcpy(c->dirty_idx.arr, c->lpt_heap[LPROPS_DIRTY_IDX - 1].arr, sizeof(void *) * c->dirty_idx.cnt); /* Sort it so that the dirtiest is now at the end */ sort(c->dirty_idx.arr, c->dirty_idx.cnt, sizeof(void *), (int (*)(const void *, const void *))cmp_dirty_idx, (void (*)(void *, void *, int))swap_dirty_idx); dbg_find("found %d dirty index LEBs", c->dirty_idx.cnt); if (c->dirty_idx.cnt) dbg_find("dirtiest index LEB is %d with dirty %d and free %d", c->dirty_idx.arr[c->dirty_idx.cnt - 1]->lnum, c->dirty_idx.arr[c->dirty_idx.cnt - 1]->dirty, c->dirty_idx.arr[c->dirty_idx.cnt - 1]->free); /* Replace the lprops pointers with LEB numbers */ for (i = 0; i < c->dirty_idx.cnt; i++) c->dirty_idx.arr[i] = (void *)(size_t)c->dirty_idx.arr[i]->lnum; ubifs_release_lprops(c); return 0; } /** * scan_dirty_idx_cb - callback used by the scan for a dirty index LEB. * @c: the UBIFS file-system description object * @lprops: LEB properties to scan * @in_tree: whether the LEB properties are in main memory * @data: information passed to and from the caller of the scan * * This function returns a code that indicates whether the scan should continue * (%LPT_SCAN_CONTINUE), whether the LEB properties should be added to the tree * in main memory (%LPT_SCAN_ADD), or whether the scan should stop * (%LPT_SCAN_STOP). 
*/ static int scan_dirty_idx_cb(struct ubifs_info *c, const struct ubifs_lprops *lprops, int in_tree, struct scan_data *data) { int ret = LPT_SCAN_CONTINUE; /* Exclude LEBs that are currently in use */ if (lprops->flags & LPROPS_TAKEN) return LPT_SCAN_CONTINUE; /* Determine whether to add these LEB properties to the tree */ if (!in_tree && valuable(c, lprops)) ret |= LPT_SCAN_ADD; /* Exclude non-index LEBs */ if (!(lprops->flags & LPROPS_INDEX)) return ret; /* Exclude LEBs with too little space */ if (lprops->free + lprops->dirty < c->min_idx_node_sz) return ret; /* Finally we found space */ data->lnum = lprops->lnum; return LPT_SCAN_ADD | LPT_SCAN_STOP; } /** * find_dirty_idx_leb - find a dirty index LEB. * @c: the UBIFS file-system description object * * This function returns LEB number upon success and a negative error code upon * failure. In particular, -ENOSPC is returned if a dirty index LEB is not * found. * * Note that this function scans the entire LPT but it is called very rarely. 
*/ static int find_dirty_idx_leb(struct ubifs_info *c) { const struct ubifs_lprops *lprops; struct ubifs_lpt_heap *heap; struct scan_data data; int err, i, ret; /* Check all structures in memory first */ data.lnum = -1; heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1]; for (i = 0; i < heap->cnt; i++) { lprops = heap->arr[i]; ret = scan_dirty_idx_cb(c, lprops, 1, &data); if (ret & LPT_SCAN_STOP) goto found; } list_for_each_entry(lprops, &c->frdi_idx_list, list) { ret = scan_dirty_idx_cb(c, lprops, 1, &data); if (ret & LPT_SCAN_STOP) goto found; } list_for_each_entry(lprops, &c->uncat_list, list) { ret = scan_dirty_idx_cb(c, lprops, 1, &data); if (ret & LPT_SCAN_STOP) goto found; } if (c->pnodes_have >= c->pnode_cnt) /* All pnodes are in memory, so skip scan */ return -ENOSPC; err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum, (ubifs_lpt_scan_callback)scan_dirty_idx_cb, &data); if (err) return err; found: ubifs_assert(data.lnum >= c->main_first && data.lnum < c->leb_cnt); c->lscan_lnum = data.lnum; lprops = ubifs_lpt_lookup_dirty(c, data.lnum); if (IS_ERR(lprops)) return PTR_ERR(lprops); ubifs_assert(lprops->lnum == data.lnum); ubifs_assert(lprops->free + lprops->dirty >= c->min_idx_node_sz); ubifs_assert(!(lprops->flags & LPROPS_TAKEN)); ubifs_assert((lprops->flags & LPROPS_INDEX)); dbg_find("found dirty LEB %d, free %d, dirty %d, flags %#x", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); lprops = ubifs_change_lp(c, lprops, LPROPS_NC, LPROPS_NC, lprops->flags | LPROPS_TAKEN, 0); if (IS_ERR(lprops)) return PTR_ERR(lprops); return lprops->lnum; } /** * get_idx_gc_leb - try to get a LEB number from trivial GC. * @c: the UBIFS file-system description object */ static int get_idx_gc_leb(struct ubifs_info *c) { const struct ubifs_lprops *lp; int err, lnum; err = ubifs_get_idx_gc_leb(c); if (err < 0) return err; lnum = err; /* * The LEB was due to be unmapped after the commit but * it is needed now for this commit. 
*/ lp = ubifs_lpt_lookup_dirty(c, lnum); if (IS_ERR(lp)) return PTR_ERR(lp); lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC, lp->flags | LPROPS_INDEX, -1); if (IS_ERR(lp)) return PTR_ERR(lp); dbg_find("LEB %d, dirty %d and free %d flags %#x", lp->lnum, lp->dirty, lp->free, lp->flags); return lnum; } /** * find_dirtiest_idx_leb - find dirtiest index LEB from dirtiest array. * @c: the UBIFS file-system description object */ static int find_dirtiest_idx_leb(struct ubifs_info *c) { const struct ubifs_lprops *lp; int lnum; while (1) { if (!c->dirty_idx.cnt) return -ENOSPC; /* The lprops pointers were replaced by LEB numbers */ lnum = (size_t)c->dirty_idx.arr[--c->dirty_idx.cnt]; lp = ubifs_lpt_lookup(c, lnum); if (IS_ERR(lp)) return PTR_ERR(lp); if ((lp->flags & LPROPS_TAKEN) || !(lp->flags & LPROPS_INDEX)) continue; lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC, lp->flags | LPROPS_TAKEN, 0); if (IS_ERR(lp)) return PTR_ERR(lp); break; } dbg_find("LEB %d, dirty %d and free %d flags %#x", lp->lnum, lp->dirty, lp->free, lp->flags); ubifs_assert(lp->flags | LPROPS_TAKEN); ubifs_assert(lp->flags | LPROPS_INDEX); return lnum; } /** * ubifs_find_dirty_idx_leb - try to find dirtiest index LEB as at last commit. * @c: the UBIFS file-system description object * * This function attempts to find an untaken index LEB with the most free and * dirty space that can be used without overwriting index nodes that were in the * last index committed. */ int ubifs_find_dirty_idx_leb(struct ubifs_info *c) { int err; ubifs_get_lprops(c); /* * We made an array of the dirtiest index LEB numbers as at the start of * last commit. Try that array first. */ err = find_dirtiest_idx_leb(c); /* Next try scanning the entire LPT */ if (err == -ENOSPC) err = find_dirty_idx_leb(c); /* Finally take any index LEBs awaiting trivial GC */ if (err == -ENOSPC) err = get_idx_gc_leb(c); ubifs_release_lprops(c); return err; }
gpl-2.0
LuisCastillo98/FenomenalMod_alto45_kernel
drivers/spi/spi-altera.c
2115
7949
/* * Altera SPI driver * * Copyright (C) 2008 Thomas Chou <thomas@wytron.com.tw> * * Based on spi_s3c24xx.c, which is: * Copyright (c) 2006 Ben Dooks * Copyright (c) 2006 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <linux/io.h> #include <linux/of.h> #define DRV_NAME "spi_altera" #define ALTERA_SPI_RXDATA 0 #define ALTERA_SPI_TXDATA 4 #define ALTERA_SPI_STATUS 8 #define ALTERA_SPI_CONTROL 12 #define ALTERA_SPI_SLAVE_SEL 20 #define ALTERA_SPI_STATUS_ROE_MSK 0x8 #define ALTERA_SPI_STATUS_TOE_MSK 0x10 #define ALTERA_SPI_STATUS_TMT_MSK 0x20 #define ALTERA_SPI_STATUS_TRDY_MSK 0x40 #define ALTERA_SPI_STATUS_RRDY_MSK 0x80 #define ALTERA_SPI_STATUS_E_MSK 0x100 #define ALTERA_SPI_CONTROL_IROE_MSK 0x8 #define ALTERA_SPI_CONTROL_ITOE_MSK 0x10 #define ALTERA_SPI_CONTROL_ITRDY_MSK 0x40 #define ALTERA_SPI_CONTROL_IRRDY_MSK 0x80 #define ALTERA_SPI_CONTROL_IE_MSK 0x100 #define ALTERA_SPI_CONTROL_SSO_MSK 0x400 struct altera_spi { /* bitbang has to be first */ struct spi_bitbang bitbang; struct completion done; void __iomem *base; int irq; int len; int count; int bytes_per_word; unsigned long imr; /* data buffers */ const unsigned char *tx; unsigned char *rx; }; static inline struct altera_spi *altera_spi_to_hw(struct spi_device *sdev) { return spi_master_get_devdata(sdev->master); } static void altera_spi_chipsel(struct spi_device *spi, int value) { struct altera_spi *hw = altera_spi_to_hw(spi); if (spi->mode & SPI_CS_HIGH) { switch (value) { case BITBANG_CS_INACTIVE: writel(1 << spi->chip_select, hw->base + ALTERA_SPI_SLAVE_SEL); hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK; writel(hw->imr, 
hw->base + ALTERA_SPI_CONTROL); break; case BITBANG_CS_ACTIVE: hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK; writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); writel(0, hw->base + ALTERA_SPI_SLAVE_SEL); break; } } else { switch (value) { case BITBANG_CS_INACTIVE: hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK; writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); break; case BITBANG_CS_ACTIVE: writel(1 << spi->chip_select, hw->base + ALTERA_SPI_SLAVE_SEL); hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK; writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); break; } } } static int altera_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t) { return 0; } static int altera_spi_setup(struct spi_device *spi) { return 0; } static inline unsigned int hw_txbyte(struct altera_spi *hw, int count) { if (hw->tx) { switch (hw->bytes_per_word) { case 1: return hw->tx[count]; case 2: return (hw->tx[count * 2] | (hw->tx[count * 2 + 1] << 8)); } } return 0; } static int altera_spi_txrx(struct spi_device *spi, struct spi_transfer *t) { struct altera_spi *hw = altera_spi_to_hw(spi); hw->tx = t->tx_buf; hw->rx = t->rx_buf; hw->count = 0; hw->bytes_per_word = t->bits_per_word / 8; hw->len = t->len / hw->bytes_per_word; if (hw->irq >= 0) { /* enable receive interrupt */ hw->imr |= ALTERA_SPI_CONTROL_IRRDY_MSK; writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); /* send the first byte */ writel(hw_txbyte(hw, 0), hw->base + ALTERA_SPI_TXDATA); wait_for_completion(&hw->done); /* disable receive interrupt */ hw->imr &= ~ALTERA_SPI_CONTROL_IRRDY_MSK; writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); } else { /* send the first byte */ writel(hw_txbyte(hw, 0), hw->base + ALTERA_SPI_TXDATA); while (1) { unsigned int rxd; while (!(readl(hw->base + ALTERA_SPI_STATUS) & ALTERA_SPI_STATUS_RRDY_MSK)) cpu_relax(); rxd = readl(hw->base + ALTERA_SPI_RXDATA); if (hw->rx) { switch (hw->bytes_per_word) { case 1: hw->rx[hw->count] = rxd; break; case 2: hw->rx[hw->count * 2] = rxd; hw->rx[hw->count * 2 + 1] = rxd >> 8; break; } } 
hw->count++; if (hw->count < hw->len) writel(hw_txbyte(hw, hw->count), hw->base + ALTERA_SPI_TXDATA); else break; } } return hw->count * hw->bytes_per_word; } static irqreturn_t altera_spi_irq(int irq, void *dev) { struct altera_spi *hw = dev; unsigned int rxd; rxd = readl(hw->base + ALTERA_SPI_RXDATA); if (hw->rx) { switch (hw->bytes_per_word) { case 1: hw->rx[hw->count] = rxd; break; case 2: hw->rx[hw->count * 2] = rxd; hw->rx[hw->count * 2 + 1] = rxd >> 8; break; } } hw->count++; if (hw->count < hw->len) writel(hw_txbyte(hw, hw->count), hw->base + ALTERA_SPI_TXDATA); else complete(&hw->done); return IRQ_HANDLED; } static int altera_spi_probe(struct platform_device *pdev) { struct altera_spi_platform_data *platp = pdev->dev.platform_data; struct altera_spi *hw; struct spi_master *master; struct resource *res; int err = -ENODEV; master = spi_alloc_master(&pdev->dev, sizeof(struct altera_spi)); if (!master) return err; /* setup the master state. */ master->bus_num = pdev->id; master->num_chipselect = 16; master->mode_bits = SPI_CS_HIGH; master->setup = altera_spi_setup; hw = spi_master_get_devdata(master); platform_set_drvdata(pdev, hw); /* setup the state for the bitbang driver */ hw->bitbang.master = spi_master_get(master); if (!hw->bitbang.master) return err; hw->bitbang.setup_transfer = altera_spi_setupxfer; hw->bitbang.chipselect = altera_spi_chipsel; hw->bitbang.txrx_bufs = altera_spi_txrx; /* find and map our resources */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) goto exit_busy; if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), pdev->name)) goto exit_busy; hw->base = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); if (!hw->base) goto exit_busy; /* program defaults into the registers */ hw->imr = 0; /* disable spi interrupts */ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); writel(0, hw->base + ALTERA_SPI_STATUS); /* clear status reg */ if (readl(hw->base + ALTERA_SPI_STATUS) & 
ALTERA_SPI_STATUS_RRDY_MSK) readl(hw->base + ALTERA_SPI_RXDATA); /* flush rxdata */ /* irq is optional */ hw->irq = platform_get_irq(pdev, 0); if (hw->irq >= 0) { init_completion(&hw->done); err = devm_request_irq(&pdev->dev, hw->irq, altera_spi_irq, 0, pdev->name, hw); if (err) goto exit; } /* find platform data */ if (!platp) hw->bitbang.master->dev.of_node = pdev->dev.of_node; /* register our spi controller */ err = spi_bitbang_start(&hw->bitbang); if (err) goto exit; dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq); return 0; exit_busy: err = -EBUSY; exit: platform_set_drvdata(pdev, NULL); spi_master_put(master); return err; } static int altera_spi_remove(struct platform_device *dev) { struct altera_spi *hw = platform_get_drvdata(dev); struct spi_master *master = hw->bitbang.master; spi_bitbang_stop(&hw->bitbang); platform_set_drvdata(dev, NULL); spi_master_put(master); return 0; } #ifdef CONFIG_OF static const struct of_device_id altera_spi_match[] = { { .compatible = "ALTR,spi-1.0", }, {}, }; MODULE_DEVICE_TABLE(of, altera_spi_match); #endif /* CONFIG_OF */ static struct platform_driver altera_spi_driver = { .probe = altera_spi_probe, .remove = altera_spi_remove, .driver = { .name = DRV_NAME, .owner = THIS_MODULE, .pm = NULL, .of_match_table = of_match_ptr(altera_spi_match), }, }; module_platform_driver(altera_spi_driver); MODULE_DESCRIPTION("Altera SPI driver"); MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME);
gpl-2.0
mrg666/android_kernel_shooter
drivers/firewire/nosy.c
2627
17868
/* * nosy - Snoop mode driver for TI PCILynx 1394 controllers * Copyright (C) 2002-2007 Kristian Høgsberg * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/device.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/kref.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/pci.h> #include <linux/poll.h> #include <linux/sched.h> /* required for linux/wait.h */ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/timex.h> #include <linux/uaccess.h> #include <linux/wait.h> #include <asm/atomic.h> #include <asm/byteorder.h> #include "nosy.h" #include "nosy-user.h" #define TCODE_PHY_PACKET 0x10 #define PCI_DEVICE_ID_TI_PCILYNX 0x8000 static char driver_name[] = KBUILD_MODNAME; /* this is the physical layout of a PCL, its size is 128 bytes */ struct pcl { __le32 next; __le32 async_error_next; u32 user_data; __le32 pcl_status; __le32 remaining_transfer_count; __le32 next_data_buffer; struct { __le32 control; __le32 pointer; } buffer[13]; }; struct packet { unsigned int length; char data[0]; }; struct packet_buffer { char *data; size_t capacity; long total_packet_count, lost_packet_count; atomic_t size; struct packet *head, *tail; 
wait_queue_head_t wait; }; struct pcilynx { struct pci_dev *pci_device; __iomem char *registers; struct pcl *rcv_start_pcl, *rcv_pcl; __le32 *rcv_buffer; dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus; spinlock_t client_list_lock; struct list_head client_list; struct miscdevice misc; struct list_head link; struct kref kref; }; static inline struct pcilynx * lynx_get(struct pcilynx *lynx) { kref_get(&lynx->kref); return lynx; } static void lynx_release(struct kref *kref) { kfree(container_of(kref, struct pcilynx, kref)); } static inline void lynx_put(struct pcilynx *lynx) { kref_put(&lynx->kref, lynx_release); } struct client { struct pcilynx *lynx; u32 tcode_mask; struct packet_buffer buffer; struct list_head link; }; static DEFINE_MUTEX(card_mutex); static LIST_HEAD(card_list); static int packet_buffer_init(struct packet_buffer *buffer, size_t capacity) { buffer->data = kmalloc(capacity, GFP_KERNEL); if (buffer->data == NULL) return -ENOMEM; buffer->head = (struct packet *) buffer->data; buffer->tail = (struct packet *) buffer->data; buffer->capacity = capacity; buffer->lost_packet_count = 0; atomic_set(&buffer->size, 0); init_waitqueue_head(&buffer->wait); return 0; } static void packet_buffer_destroy(struct packet_buffer *buffer) { kfree(buffer->data); } static int packet_buffer_get(struct client *client, char __user *data, size_t user_length) { struct packet_buffer *buffer = &client->buffer; size_t length; char *end; if (wait_event_interruptible(buffer->wait, atomic_read(&buffer->size) > 0) || list_empty(&client->lynx->link)) return -ERESTARTSYS; if (atomic_read(&buffer->size) == 0) return -ENODEV; /* FIXME: Check length <= user_length. 
*/ end = buffer->data + buffer->capacity; length = buffer->head->length; if (&buffer->head->data[length] < end) { if (copy_to_user(data, buffer->head->data, length)) return -EFAULT; buffer->head = (struct packet *) &buffer->head->data[length]; } else { size_t split = end - buffer->head->data; if (copy_to_user(data, buffer->head->data, split)) return -EFAULT; if (copy_to_user(data + split, buffer->data, length - split)) return -EFAULT; buffer->head = (struct packet *) &buffer->data[length - split]; } /* * Decrease buffer->size as the last thing, since this is what * keeps the interrupt from overwriting the packet we are * retrieving from the buffer. */ atomic_sub(sizeof(struct packet) + length, &buffer->size); return length; } static void packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length) { char *end; buffer->total_packet_count++; if (buffer->capacity < atomic_read(&buffer->size) + sizeof(struct packet) + length) { buffer->lost_packet_count++; return; } end = buffer->data + buffer->capacity; buffer->tail->length = length; if (&buffer->tail->data[length] < end) { memcpy(buffer->tail->data, data, length); buffer->tail = (struct packet *) &buffer->tail->data[length]; } else { size_t split = end - buffer->tail->data; memcpy(buffer->tail->data, data, split); memcpy(buffer->data, data + split, length - split); buffer->tail = (struct packet *) &buffer->data[length - split]; } /* Finally, adjust buffer size and wake up userspace reader. 
*/ atomic_add(sizeof(struct packet) + length, &buffer->size); wake_up_interruptible(&buffer->wait); } static inline void reg_write(struct pcilynx *lynx, int offset, u32 data) { writel(data, lynx->registers + offset); } static inline u32 reg_read(struct pcilynx *lynx, int offset) { return readl(lynx->registers + offset); } static inline void reg_set_bits(struct pcilynx *lynx, int offset, u32 mask) { reg_write(lynx, offset, (reg_read(lynx, offset) | mask)); } /* * Maybe the pcl programs could be set up to just append data instead * of using a whole packet. */ static inline void run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus, int dmachan) { reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus); reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20, DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK); } static int set_phy_reg(struct pcilynx *lynx, int addr, int val) { if (addr > 15) { dev_err(&lynx->pci_device->dev, "PHY register address %d out of range\n", addr); return -1; } if (val > 0xff) { dev_err(&lynx->pci_device->dev, "PHY register value %d out of range\n", val); return -1; } reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val)); return 0; } static int nosy_open(struct inode *inode, struct file *file) { int minor = iminor(inode); struct client *client; struct pcilynx *tmp, *lynx = NULL; mutex_lock(&card_mutex); list_for_each_entry(tmp, &card_list, link) if (tmp->misc.minor == minor) { lynx = lynx_get(tmp); break; } mutex_unlock(&card_mutex); if (lynx == NULL) return -ENODEV; client = kmalloc(sizeof *client, GFP_KERNEL); if (client == NULL) goto fail; client->tcode_mask = ~0; client->lynx = lynx; INIT_LIST_HEAD(&client->link); if (packet_buffer_init(&client->buffer, 128 * 1024) < 0) goto fail; file->private_data = client; return nonseekable_open(inode, file); fail: kfree(client); lynx_put(lynx); return -ENOMEM; } static int nosy_release(struct inode *inode, struct file *file) { struct client *client = file->private_data; struct pcilynx 
*lynx = client->lynx; spin_lock_irq(&lynx->client_list_lock); list_del_init(&client->link); spin_unlock_irq(&lynx->client_list_lock); packet_buffer_destroy(&client->buffer); kfree(client); lynx_put(lynx); return 0; } static unsigned int nosy_poll(struct file *file, poll_table *pt) { struct client *client = file->private_data; unsigned int ret = 0; poll_wait(file, &client->buffer.wait, pt); if (atomic_read(&client->buffer.size) > 0) ret = POLLIN | POLLRDNORM; if (list_empty(&client->lynx->link)) ret |= POLLHUP; return ret; } static ssize_t nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset) { struct client *client = file->private_data; return packet_buffer_get(client, buffer, count); } static long nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct client *client = file->private_data; spinlock_t *client_list_lock = &client->lynx->client_list_lock; struct nosy_stats stats; switch (cmd) { case NOSY_IOC_GET_STATS: spin_lock_irq(client_list_lock); stats.total_packet_count = client->buffer.total_packet_count; stats.lost_packet_count = client->buffer.lost_packet_count; spin_unlock_irq(client_list_lock); if (copy_to_user((void __user *) arg, &stats, sizeof stats)) return -EFAULT; else return 0; case NOSY_IOC_START: spin_lock_irq(client_list_lock); list_add_tail(&client->link, &client->lynx->client_list); spin_unlock_irq(client_list_lock); return 0; case NOSY_IOC_STOP: spin_lock_irq(client_list_lock); list_del_init(&client->link); spin_unlock_irq(client_list_lock); return 0; case NOSY_IOC_FILTER: spin_lock_irq(client_list_lock); client->tcode_mask = arg; spin_unlock_irq(client_list_lock); return 0; default: return -EINVAL; /* Flush buffer, configure filter. 
*/ } } static const struct file_operations nosy_ops = { .owner = THIS_MODULE, .read = nosy_read, .unlocked_ioctl = nosy_ioctl, .poll = nosy_poll, .open = nosy_open, .release = nosy_release, }; #define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */ static void packet_irq_handler(struct pcilynx *lynx) { struct client *client; u32 tcode_mask, tcode; size_t length; struct timeval tv; /* FIXME: Also report rcv_speed. */ length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff; tcode = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf; do_gettimeofday(&tv); lynx->rcv_buffer[0] = (__force __le32)tv.tv_usec; if (length == PHY_PACKET_SIZE) tcode_mask = 1 << TCODE_PHY_PACKET; else tcode_mask = 1 << tcode; spin_lock(&lynx->client_list_lock); list_for_each_entry(client, &lynx->client_list, link) if (client->tcode_mask & tcode_mask) packet_buffer_put(&client->buffer, lynx->rcv_buffer, length + 4); spin_unlock(&lynx->client_list_lock); } static void bus_reset_irq_handler(struct pcilynx *lynx) { struct client *client; struct timeval tv; do_gettimeofday(&tv); spin_lock(&lynx->client_list_lock); list_for_each_entry(client, &lynx->client_list, link) packet_buffer_put(&client->buffer, &tv.tv_usec, 4); spin_unlock(&lynx->client_list_lock); } static irqreturn_t irq_handler(int irq, void *device) { struct pcilynx *lynx = device; u32 pci_int_status; pci_int_status = reg_read(lynx, PCI_INT_STATUS); if (pci_int_status == ~0) /* Card was ejected. */ return IRQ_NONE; if ((pci_int_status & PCI_INT_INT_PEND) == 0) /* Not our interrupt, bail out quickly. */ return IRQ_NONE; if ((pci_int_status & PCI_INT_P1394_INT) != 0) { u32 link_int_status; link_int_status = reg_read(lynx, LINK_INT_STATUS); reg_write(lynx, LINK_INT_STATUS, link_int_status); if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0) bus_reset_irq_handler(lynx); } /* Clear the PCI_INT_STATUS register only after clearing the * LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will * be set again immediately. 
*/ reg_write(lynx, PCI_INT_STATUS, pci_int_status); if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) { packet_irq_handler(lynx); run_pcl(lynx, lynx->rcv_start_pcl_bus, 0); } return IRQ_HANDLED; } static void remove_card(struct pci_dev *dev) { struct pcilynx *lynx = pci_get_drvdata(dev); struct client *client; mutex_lock(&card_mutex); list_del_init(&lynx->link); misc_deregister(&lynx->misc); mutex_unlock(&card_mutex); reg_write(lynx, PCI_INT_ENABLE, 0); free_irq(lynx->pci_device->irq, lynx); spin_lock_irq(&lynx->client_list_lock); list_for_each_entry(client, &lynx->client_list, link) wake_up_interruptible(&client->buffer.wait); spin_unlock_irq(&lynx->client_list_lock); pci_free_consistent(lynx->pci_device, sizeof(struct pcl), lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus); pci_free_consistent(lynx->pci_device, sizeof(struct pcl), lynx->rcv_pcl, lynx->rcv_pcl_bus); pci_free_consistent(lynx->pci_device, PAGE_SIZE, lynx->rcv_buffer, lynx->rcv_buffer_bus); iounmap(lynx->registers); pci_disable_device(dev); lynx_put(lynx); } #define RCV_BUFFER_SIZE (16 * 1024) static int __devinit add_card(struct pci_dev *dev, const struct pci_device_id *unused) { struct pcilynx *lynx; u32 p, end; int ret, i; if (pci_set_dma_mask(dev, 0xffffffff)) { dev_err(&dev->dev, "DMA address limits not supported for PCILynx hardware\n"); return -ENXIO; } if (pci_enable_device(dev)) { dev_err(&dev->dev, "Failed to enable PCILynx hardware\n"); return -ENXIO; } pci_set_master(dev); lynx = kzalloc(sizeof *lynx, GFP_KERNEL); if (lynx == NULL) { dev_err(&dev->dev, "Failed to allocate control structure\n"); ret = -ENOMEM; goto fail_disable; } lynx->pci_device = dev; pci_set_drvdata(dev, lynx); spin_lock_init(&lynx->client_list_lock); INIT_LIST_HEAD(&lynx->client_list); kref_init(&lynx->kref); lynx->registers = ioremap_nocache(pci_resource_start(dev, 0), PCILYNX_MAX_REGISTER); lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device, sizeof(struct pcl), &lynx->rcv_start_pcl_bus); lynx->rcv_pcl = 
pci_alloc_consistent(lynx->pci_device, sizeof(struct pcl), &lynx->rcv_pcl_bus); lynx->rcv_buffer = pci_alloc_consistent(lynx->pci_device, RCV_BUFFER_SIZE, &lynx->rcv_buffer_bus); if (lynx->rcv_start_pcl == NULL || lynx->rcv_pcl == NULL || lynx->rcv_buffer == NULL) { dev_err(&dev->dev, "Failed to allocate receive buffer\n"); ret = -ENOMEM; goto fail_deallocate; } lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus); lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID); lynx->rcv_pcl->async_error_next = cpu_to_le32(PCL_NEXT_INVALID); lynx->rcv_pcl->buffer[0].control = cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044); lynx->rcv_pcl->buffer[0].pointer = cpu_to_le32(lynx->rcv_buffer_bus + 4); p = lynx->rcv_buffer_bus + 2048; end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE; for (i = 1; p < end; i++, p += 2048) { lynx->rcv_pcl->buffer[i].control = cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048); lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p); } lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF); reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET); /* Fix buggy cards with autoboot pin not tied low: */ reg_write(lynx, DMA0_CHAN_CTRL, 0); reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24); #if 0 /* now, looking for PHY register set */ if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) { lynx->phyic.reg_1394a = 1; PRINT(KERN_INFO, lynx->id, "found 1394a conform PHY (using extended register set)"); lynx->phyic.vendor = get_phy_vendorid(lynx); lynx->phyic.product = get_phy_productid(lynx); } else { lynx->phyic.reg_1394a = 0; PRINT(KERN_INFO, lynx->id, "found old 1394 PHY"); } #endif /* Setup the general receive FIFO max size. 
*/ reg_write(lynx, FIFO_SIZES, 255); reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL); reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD | LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK | LINK_INT_AT_STUCK | LINK_INT_SNTRJ | LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW | LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW); /* Disable the L flag in self ID packets. */ set_phy_reg(lynx, 4, 0); /* Put this baby into snoop mode */ reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE); run_pcl(lynx, lynx->rcv_start_pcl_bus, 0); if (request_irq(dev->irq, irq_handler, IRQF_SHARED, driver_name, lynx)) { dev_err(&dev->dev, "Failed to allocate shared interrupt %d\n", dev->irq); ret = -EIO; goto fail_deallocate; } lynx->misc.parent = &dev->dev; lynx->misc.minor = MISC_DYNAMIC_MINOR; lynx->misc.name = "nosy"; lynx->misc.fops = &nosy_ops; mutex_lock(&card_mutex); ret = misc_register(&lynx->misc); if (ret) { dev_err(&dev->dev, "Failed to register misc char device\n"); mutex_unlock(&card_mutex); goto fail_free_irq; } list_add_tail(&lynx->link, &card_list); mutex_unlock(&card_mutex); dev_info(&dev->dev, "Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq); return 0; fail_free_irq: reg_write(lynx, PCI_INT_ENABLE, 0); free_irq(lynx->pci_device->irq, lynx); fail_deallocate: if (lynx->rcv_start_pcl) pci_free_consistent(lynx->pci_device, sizeof(struct pcl), lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus); if (lynx->rcv_pcl) pci_free_consistent(lynx->pci_device, sizeof(struct pcl), lynx->rcv_pcl, lynx->rcv_pcl_bus); if (lynx->rcv_buffer) pci_free_consistent(lynx->pci_device, PAGE_SIZE, lynx->rcv_buffer, lynx->rcv_buffer_bus); iounmap(lynx->registers); kfree(lynx); fail_disable: pci_disable_device(dev); return ret; } static struct pci_device_id pci_table[] __devinitdata = { { .vendor = PCI_VENDOR_ID_TI, .device = PCI_DEVICE_ID_TI_PCILYNX, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { } /* Terminating entry */ }; static struct pci_driver 
lynx_pci_driver = { .name = driver_name, .id_table = pci_table, .probe = add_card, .remove = remove_card, }; MODULE_AUTHOR("Kristian Hoegsberg"); MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, pci_table); static int __init nosy_init(void) { return pci_register_driver(&lynx_pci_driver); } static void __exit nosy_cleanup(void) { pci_unregister_driver(&lynx_pci_driver); pr_info("Unloaded %s\n", driver_name); } module_init(nosy_init); module_exit(nosy_cleanup);
gpl-2.0
lyapota/m8_gpe_marshmallow
drivers/net/ethernet/broadcom/tg3.c
2627
428638
/* * tg3.c: Broadcom Tigon3 ethernet driver. * * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) * Copyright (C) 2004 Sun Microsystems Inc. * Copyright (C) 2005-2012 Broadcom Corporation. * * Firmware is: * Derived from proprietary unpublished source code, * Copyright (C) 2000-2003 Broadcom Corporation. * * Permission is hereby granted for the distribution of this firmware * data in hexadecimal or equivalent format, provided this copyright * notice is accompanying it. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/stringify.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/compiler.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/in.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/mdio.h> #include <linux/mii.h> #include <linux/phy.h> #include <linux/brcmphy.h> #include <linux/if_vlan.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/workqueue.h> #include <linux/prefetch.h> #include <linux/dma-mapping.h> #include <linux/firmware.h> #include <net/checksum.h> #include <net/ip.h> #include <linux/io.h> #include <asm/byteorder.h> #include <linux/uaccess.h> #ifdef CONFIG_SPARC #include <asm/idprom.h> #include <asm/prom.h> #endif #define BAR_0 0 #define BAR_2 2 #include "tg3.h" /* Functions & macros to verify TG3_FLAGS types */ static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits) { return test_bit(flag, bits); } static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits) { set_bit(flag, bits); } static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) { clear_bit(flag, bits); } #define tg3_flag(tp, flag) \ _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags) #define 
tg3_flag_set(tp, flag) \ _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags) #define tg3_flag_clear(tp, flag) \ _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags) #define DRV_MODULE_NAME "tg3" #define TG3_MAJ_NUM 3 #define TG3_MIN_NUM 123 #define DRV_MODULE_VERSION \ __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) #define DRV_MODULE_RELDATE "March 21, 2012" #define RESET_KIND_SHUTDOWN 0 #define RESET_KIND_INIT 1 #define RESET_KIND_SUSPEND 2 #define TG3_DEF_RX_MODE 0 #define TG3_DEF_TX_MODE 0 #define TG3_DEF_MSG_ENABLE \ (NETIF_MSG_DRV | \ NETIF_MSG_PROBE | \ NETIF_MSG_LINK | \ NETIF_MSG_TIMER | \ NETIF_MSG_IFDOWN | \ NETIF_MSG_IFUP | \ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) #define TG3_GRC_LCLCTL_PWRSW_DELAY 100 /* length of time before we decide the hardware is borked, * and dev->tx_timeout() should be called to fix the problem */ #define TG3_TX_TIMEOUT (5 * HZ) /* hardware minimum and maximum for a single frame's data payload */ #define TG3_MIN_MTU 60 #define TG3_MAX_MTU(tp) \ (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500) /* These numbers seem to be hard coded in the NIC firmware somehow. * You can't change the ring sizes, but you can change where you place * them in the NIC onboard memory. */ #define TG3_RX_STD_RING_SIZE(tp) \ (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700) #define TG3_DEF_RX_RING_PENDING 200 #define TG3_RX_JMB_RING_SIZE(tp) \ (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700) #define TG3_DEF_RX_JUMBO_RING_PENDING 100 /* Do not place this n-ring entries value into the tp struct itself, * we really want to expose these constants to GCC so that modulo et * al. operations are done with shifts and masks instead of with * hw multiply/modulo instructions. Another solution would be to * replace things like '% foo' with '& (foo - 1)'. 
*/ #define TG3_TX_RING_SIZE 512 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1) #define TG3_RX_STD_RING_BYTES(tp) \ (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp)) #define TG3_RX_JMB_RING_BYTES(tp) \ (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp)) #define TG3_RX_RCB_RING_BYTES(tp) \ (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1)) #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ TG3_TX_RING_SIZE) #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) #define TG3_DMA_BYTE_ENAB 64 #define TG3_RX_STD_DMA_SZ 1536 #define TG3_RX_JMB_DMA_SZ 9046 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB) #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) #define TG3_RX_STD_BUFF_RING_SIZE(tp) \ (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp)) #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \ (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp)) /* Due to a hardware bug, the 5701 can only DMA to memory addresses * that are at least dword aligned when used in PCIX mode. The driver * works around this bug by double copying the packet. This workaround * is built into the normal double copy length check for efficiency. * * However, the double copy is only necessary on those architectures * where unaligned memory accesses are inefficient. For those architectures * where unaligned memory accesses incur little penalty, we can reintegrate * the 5701 in the normal rx path. Doing so saves a device structure * dereference by hardcoding the double copy threshold in place. 
*/ #define TG3_RX_COPY_THRESHOLD 256 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD #else #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh) #endif #if (NET_IP_ALIGN != 0) #define TG3_RX_OFFSET(tp) ((tp)->rx_offset) #else #define TG3_RX_OFFSET(tp) (NET_SKB_PAD) #endif /* minimum number of free TX descriptors required to wake up TX process */ #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) #define TG3_TX_BD_DMA_MAX_2K 2048 #define TG3_TX_BD_DMA_MAX_4K 4096 #define TG3_RAW_IP_ALIGN 2 #define TG3_FW_UPDATE_TIMEOUT_SEC 5 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2) #define FIRMWARE_TG3 "tigon/tg3.bin" #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin" #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin" static char version[] __devinitdata = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")"; MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)"); MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); MODULE_FIRMWARE(FIRMWARE_TG3); MODULE_FIRMWARE(FIRMWARE_TG3TSO); MODULE_FIRMWARE(FIRMWARE_TG3TSO5); static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */ module_param(tg3_debug, int, 0); MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)}, 
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)}, 
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)}, 
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)}, {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)}, {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)}, {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)}, {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */ {} }; MODULE_DEVICE_TABLE(pci, tg3_pci_tbl); static const struct { const char string[ETH_GSTRING_LEN]; } ethtool_stats_keys[] = { { "rx_octets" }, { "rx_fragments" }, { "rx_ucast_packets" }, { "rx_mcast_packets" }, { "rx_bcast_packets" }, { "rx_fcs_errors" }, { "rx_align_errors" }, { "rx_xon_pause_rcvd" }, { "rx_xoff_pause_rcvd" }, { "rx_mac_ctrl_rcvd" }, { "rx_xoff_entered" }, { "rx_frame_too_long_errors" }, { "rx_jabbers" }, { "rx_undersize_packets" }, { "rx_in_length_errors" }, { "rx_out_length_errors" }, { "rx_64_or_less_octet_packets" }, { "rx_65_to_127_octet_packets" }, { "rx_128_to_255_octet_packets" }, { "rx_256_to_511_octet_packets" }, { "rx_512_to_1023_octet_packets" }, { "rx_1024_to_1522_octet_packets" }, { "rx_1523_to_2047_octet_packets" }, { "rx_2048_to_4095_octet_packets" }, { "rx_4096_to_8191_octet_packets" }, { "rx_8192_to_9022_octet_packets" }, { "tx_octets" }, { "tx_collisions" }, { "tx_xon_sent" }, { "tx_xoff_sent" }, { "tx_flow_control" }, { "tx_mac_errors" }, { "tx_single_collisions" }, { "tx_mult_collisions" }, { "tx_deferred" }, { "tx_excessive_collisions" }, { "tx_late_collisions" }, { 
"tx_collide_2times" }, { "tx_collide_3times" }, { "tx_collide_4times" }, { "tx_collide_5times" }, { "tx_collide_6times" }, { "tx_collide_7times" }, { "tx_collide_8times" }, { "tx_collide_9times" }, { "tx_collide_10times" }, { "tx_collide_11times" }, { "tx_collide_12times" }, { "tx_collide_13times" }, { "tx_collide_14times" }, { "tx_collide_15times" }, { "tx_ucast_packets" }, { "tx_mcast_packets" }, { "tx_bcast_packets" }, { "tx_carrier_sense_errors" }, { "tx_discards" }, { "tx_errors" }, { "dma_writeq_full" }, { "dma_write_prioq_full" }, { "rxbds_empty" }, { "rx_discards" }, { "rx_errors" }, { "rx_threshold_hit" }, { "dma_readq_full" }, { "dma_read_prioq_full" }, { "tx_comp_queue_full" }, { "ring_set_send_prod_index" }, { "ring_status_update" }, { "nic_irqs" }, { "nic_avoided_irqs" }, { "nic_tx_threshold_hit" }, { "mbuf_lwm_thresh_hit" }, }; #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) static const struct { const char string[ETH_GSTRING_LEN]; } ethtool_test_keys[] = { { "nvram test (online) " }, { "link test (online) " }, { "register test (offline)" }, { "memory test (offline)" }, { "mac loopback test (offline)" }, { "phy loopback test (offline)" }, { "ext loopback test (offline)" }, { "interrupt test (offline)" }, }; #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) static void tg3_write32(struct tg3 *tp, u32 off, u32 val) { writel(val, tp->regs + off); } static u32 tg3_read32(struct tg3 *tp, u32 off) { return readl(tp->regs + off); } static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) { writel(val, tp->aperegs + off); } static u32 tg3_ape_read32(struct tg3 *tp, u32 off) { return readl(tp->aperegs + off); } static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) { unsigned long flags; spin_lock_irqsave(&tp->indirect_lock, flags); pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); spin_unlock_irqrestore(&tp->indirect_lock, flags); } static void 
tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) { writel(val, tp->regs + off); readl(tp->regs + off); } static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) { unsigned long flags; u32 val; spin_lock_irqsave(&tp->indirect_lock, flags); pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); spin_unlock_irqrestore(&tp->indirect_lock, flags); return val; } static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) { unsigned long flags; if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) { pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX + TG3_64BIT_REG_LOW, val); return; } if (off == TG3_RX_STD_PROD_IDX_REG) { pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + TG3_64BIT_REG_LOW, val); return; } spin_lock_irqsave(&tp->indirect_lock, flags); pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); spin_unlock_irqrestore(&tp->indirect_lock, flags); /* In indirect mode when disabling interrupts, we also need * to clear the interrupt bit in the GRC local ctrl register. */ if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) && (val == 0x1)) { pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL, tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT); } } static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off) { unsigned long flags; u32 val; spin_lock_irqsave(&tp->indirect_lock, flags); pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); spin_unlock_irqrestore(&tp->indirect_lock, flags); return val; } /* usec_wait specifies the wait time in usec when writing to certain registers * where it is unsafe to read back the register without some delay. * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power. * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed. 
*/ static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait) { if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND)) /* Non-posted methods */ tp->write32(tp, off, val); else { /* Posted method */ tg3_write32(tp, off, val); if (usec_wait) udelay(usec_wait); tp->read32(tp, off); } /* Wait again after the read for the posted method to guarantee that * the wait time is met. */ if (usec_wait) udelay(usec_wait); } static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val) { tp->write32_mbox(tp, off, val); if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND)) tp->read32_mbox(tp, off); } static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val) { void __iomem *mbox = tp->regs + off; writel(val, mbox); if (tg3_flag(tp, TXD_MBOX_HWBUG)) writel(val, mbox); if (tg3_flag(tp, MBOX_WRITE_REORDER)) readl(mbox); } static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off) { return readl(tp->regs + off + GRCMBOX_BASE); } static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val) { writel(val, tp->regs + off + GRCMBOX_BASE); } #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val) #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val)) #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val) #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val) #define tr32_mailbox(reg) tp->read32_mbox(tp, reg) #define tw32(reg, val) tp->write32(tp, reg, val) #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0) #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us)) #define tr32(reg) tp->read32(tp, reg) static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) { unsigned long flags; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) return; spin_lock_irqsave(&tp->indirect_lock, flags); if (tg3_flag(tp, SRAM_USE_CONFIG)) { pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); 
pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); /* Always leave this as zero. */ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); } else { tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); tw32_f(TG3PCI_MEM_WIN_DATA, val); /* Always leave this as zero. */ tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); } spin_unlock_irqrestore(&tp->indirect_lock, flags); } static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) { unsigned long flags; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) { *val = 0; return; } spin_lock_irqsave(&tp->indirect_lock, flags); if (tg3_flag(tp, SRAM_USE_CONFIG)) { pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); /* Always leave this as zero. */ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); } else { tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); *val = tr32(TG3PCI_MEM_WIN_DATA); /* Always leave this as zero. */ tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); } spin_unlock_irqrestore(&tp->indirect_lock, flags); } static void tg3_ape_lock_init(struct tg3 *tp) { int i; u32 regbase, bit; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) regbase = TG3_APE_LOCK_GRANT; else regbase = TG3_APE_PER_LOCK_GRANT; /* Make sure the driver hasn't any stale locks. 
 */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			/* PHY locks always use the generic driver bit. */
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			/* Other locks are granted per PCI function;
			 * function 0 uses the generic driver bit.
			 */
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

/* Acquire APE lock 'locknum' shared with the APE firmware.
 * Returns 0 on success (or when no APE is enabled), -EINVAL for an
 * unsupported lock number, and -EBUSY when the grant is not observed
 * within roughly 1 ms (the request is revoked in that case).
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock; nothing to acquire. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
		/* fall through - GPIO uses the same request bit as GRC/MEM */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses the legacy lock register block. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);

		ret = -EBUSY;
	}

	return ret;
}

/* Release an APE lock previously taken with tg3_ape_lock(); writing
 * our bit back to the grant register drops ownership.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock; nothing to release. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
		/* fall through - GPIO uses the same grant bit as GRC/MEM */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

/* Post an event to the APE firmware.  Best-effort: silently returns if
 * the APE is absent (NCSI), not signed on, or not ready.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event.
*/ for (i = 0; i < 10; i++) { if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) return; apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS); if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, event | APE_EVENT_STATUS_EVENT_PENDING); tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) break; udelay(100); } if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); } static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) { u32 event; u32 apedata; if (!tg3_flag(tp, ENABLE_APE)) return; switch (kind) { case RESET_KIND_INIT: tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, APE_HOST_SEG_SIG_MAGIC); tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, APE_HOST_SEG_LEN_MAGIC); apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM)); tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, APE_HOST_BEHAV_NO_PHYLOCK); tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, TG3_APE_HOST_DRVR_STATE_START); event = APE_EVENT_STATUS_STATE_START; break; case RESET_KIND_SHUTDOWN: /* With the interface we are currently using, * APE does not track driver state. Wiping * out the HOST SEGMENT SIGNATURE forces * the APE to assume OS absent status. 
*/ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); if (device_may_wakeup(&tp->pdev->dev) && tg3_flag(tp, WOL_ENABLE)) { tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, TG3_APE_HOST_WOL_SPEED_AUTO); apedata = TG3_APE_HOST_DRVR_STATE_WOL; } else apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD; tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata); event = APE_EVENT_STATUS_STATE_UNLOAD; break; case RESET_KIND_SUSPEND: event = APE_EVENT_STATUS_STATE_SUSPEND; break; default: return; } event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; tg3_ape_send_event(tp, event); } static void tg3_disable_ints(struct tg3 *tp) { int i; tw32(TG3PCI_MISC_HOST_CTRL, (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT)); for (i = 0; i < tp->irq_max; i++) tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001); } static void tg3_enable_ints(struct tg3 *tp) { int i; tp->irq_sync = 0; wmb(); tw32(TG3PCI_MISC_HOST_CTRL, (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE; for (i = 0; i < tp->irq_cnt; i++) { struct tg3_napi *tnapi = &tp->napi[i]; tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); if (tg3_flag(tp, 1SHOT_MSI)) tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); tp->coal_now |= tnapi->coal_now; } /* Force an initial interrupt */ if (!tg3_flag(tp, TAGGED_STATUS) && (tp->napi[0].hw_status->status & SD_STATUS_UPDATED)) tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); else tw32(HOSTCC_MODE, tp->coal_now); tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now); } static inline unsigned int tg3_has_work(struct tg3_napi *tnapi) { struct tg3 *tp = tnapi->tp; struct tg3_hw_status *sblk = tnapi->hw_status; unsigned int work_exists = 0; /* check for phy events */ if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { if (sblk->status & SD_STATUS_LINK_CHG) work_exists = 1; } /* check for TX work to do */ if (sblk->idx[0].tx_consumer != tnapi->tx_cons) work_exists = 1; /* check for RX work to do 
*/ if (tnapi->rx_rcb_prod_idx && *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) work_exists = 1; return work_exists; } /* tg3_int_reenable * similar to tg3_enable_ints, but it accurately determines whether there * is new work pending and can return without flushing the PIO write * which reenables interrupts */ static void tg3_int_reenable(struct tg3_napi *tnapi) { struct tg3 *tp = tnapi->tp; tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); mmiowb(); /* When doing tagged status, this work check is unnecessary. * The last_tag we write above tells the chip which piece of * work we've completed. */ if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi)) tw32(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | tnapi->coal_now); } static void tg3_switch_clocks(struct tg3 *tp) { u32 clock_ctrl; u32 orig_clock_ctrl; if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS)) return; clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); orig_clock_ctrl = clock_ctrl; clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN | CLOCK_CTRL_CLKRUN_OENABLE | 0x1f); tp->pci_clock_ctrl = clock_ctrl; if (tg3_flag(tp, 5705_PLUS)) { if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) { tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl | CLOCK_CTRL_625_CORE, 40); } } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) { tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl | (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK), 40); tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl | (CLOCK_CTRL_ALTCLK), 40); } tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40); } #define PHY_BUSY_LOOPS 5000 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) { u32 frame_val; unsigned int loops; int ret; if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { tw32_f(MAC_MI_MODE, (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); udelay(80); } *val = 0x0; frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & MI_COM_PHY_ADDR_MASK); frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & MI_COM_REG_ADDR_MASK); frame_val |= (MI_COM_CMD_READ | MI_COM_START); tw32_f(MAC_MI_COM, frame_val); loops 
= PHY_BUSY_LOOPS; while (loops != 0) { udelay(10); frame_val = tr32(MAC_MI_COM); if ((frame_val & MI_COM_BUSY) == 0) { udelay(5); frame_val = tr32(MAC_MI_COM); break; } loops -= 1; } ret = -EBUSY; if (loops != 0) { *val = frame_val & MI_COM_DATA_MASK; ret = 0; } if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { tw32_f(MAC_MI_MODE, tp->mi_mode); udelay(80); } return ret; } static int tg3_writephy(struct tg3 *tp, int reg, u32 val) { u32 frame_val; unsigned int loops; int ret; if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL)) return 0; if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { tw32_f(MAC_MI_MODE, (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); udelay(80); } frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & MI_COM_PHY_ADDR_MASK); frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & MI_COM_REG_ADDR_MASK); frame_val |= (val & MI_COM_DATA_MASK); frame_val |= (MI_COM_CMD_WRITE | MI_COM_START); tw32_f(MAC_MI_COM, frame_val); loops = PHY_BUSY_LOOPS; while (loops != 0) { udelay(10); frame_val = tr32(MAC_MI_COM); if ((frame_val & MI_COM_BUSY) == 0) { udelay(5); frame_val = tr32(MAC_MI_COM); break; } loops -= 1; } ret = -EBUSY; if (loops != 0) ret = 0; if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { tw32_f(MAC_MI_MODE, tp->mi_mode); udelay(80); } return ret; } static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val) { int err; err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); if (err) goto done; err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); if (err) goto done; err = tg3_writephy(tp, MII_TG3_MMD_CTRL, MII_TG3_MMD_CTRL_DATA_NOINC | devad); if (err) goto done; err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val); done: return err; } static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val) { int err; err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); if (err) goto done; err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); if (err) goto done; err = tg3_writephy(tp, MII_TG3_MMD_CTRL, 
MII_TG3_MMD_CTRL_DATA_NOINC | devad); if (err) goto done; err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val); done: return err; } static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val) { int err; err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); if (!err) err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val); return err; } static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) { int err; err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); if (!err) err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); return err; } static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val) { int err; err = tg3_writephy(tp, MII_TG3_AUX_CTRL, (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) | MII_TG3_AUXCTL_SHDWSEL_MISC); if (!err) err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val); return err; } static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set) { if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC) set |= MII_TG3_AUXCTL_MISC_WREN; return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg); } #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \ tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \ MII_TG3_AUXCTL_ACTL_TX_6DB) #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \ tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ MII_TG3_AUXCTL_ACTL_TX_6DB); static int tg3_bmcr_reset(struct tg3 *tp) { u32 phy_control; int limit, err; /* OK, reset it, and poll the BMCR_RESET bit until it * clears or we time out. 
*/ phy_control = BMCR_RESET; err = tg3_writephy(tp, MII_BMCR, phy_control); if (err != 0) return -EBUSY; limit = 5000; while (limit--) { err = tg3_readphy(tp, MII_BMCR, &phy_control); if (err != 0) return -EBUSY; if ((phy_control & BMCR_RESET) == 0) { udelay(40); break; } udelay(10); } if (limit < 0) return -EBUSY; return 0; } static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) { struct tg3 *tp = bp->priv; u32 val; spin_lock_bh(&tp->lock); if (tg3_readphy(tp, reg, &val)) val = -EIO; spin_unlock_bh(&tp->lock); return val; } static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) { struct tg3 *tp = bp->priv; u32 ret = 0; spin_lock_bh(&tp->lock); if (tg3_writephy(tp, reg, val)) ret = -EIO; spin_unlock_bh(&tp->lock); return ret; } static int tg3_mdio_reset(struct mii_bus *bp) { return 0; } static void tg3_mdio_config_5785(struct tg3 *tp) { u32 val; struct phy_device *phydev; phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { case PHY_ID_BCM50610: case PHY_ID_BCM50610M: val = MAC_PHYCFG2_50610_LED_MODES; break; case PHY_ID_BCMAC131: val = MAC_PHYCFG2_AC131_LED_MODES; break; case PHY_ID_RTL8211C: val = MAC_PHYCFG2_RTL8211C_LED_MODES; break; case PHY_ID_RTL8201E: val = MAC_PHYCFG2_RTL8201E_LED_MODES; break; default: return; } if (phydev->interface != PHY_INTERFACE_MODE_RGMII) { tw32(MAC_PHYCFG2, val); val = tr32(MAC_PHYCFG1); val &= ~(MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK); val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT; tw32(MAC_PHYCFG1, val); return; } if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) val |= MAC_PHYCFG2_EMODE_MASK_MASK | MAC_PHYCFG2_FMODE_MASK_MASK | MAC_PHYCFG2_GMODE_MASK_MASK | MAC_PHYCFG2_ACT_MASK_MASK | MAC_PHYCFG2_QUAL_MASK_MASK | MAC_PHYCFG2_INBAND_ENABLE; tw32(MAC_PHYCFG2, val); val = tr32(MAC_PHYCFG1); val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK | MAC_PHYCFG1_RGMII_EXT_RX_DEC | 
MAC_PHYCFG1_RGMII_SND_STAT_EN); if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) val |= MAC_PHYCFG1_RGMII_SND_STAT_EN; } val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV; tw32(MAC_PHYCFG1, val); val = tr32(MAC_EXT_RGMII_MODE); val &= ~(MAC_RGMII_MODE_RX_INT_B | MAC_RGMII_MODE_RX_QUALITY | MAC_RGMII_MODE_RX_ACTIVITY | MAC_RGMII_MODE_RX_ENG_DET | MAC_RGMII_MODE_TX_ENABLE | MAC_RGMII_MODE_TX_LOWPWR | MAC_RGMII_MODE_TX_RESET); if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) val |= MAC_RGMII_MODE_RX_INT_B | MAC_RGMII_MODE_RX_QUALITY | MAC_RGMII_MODE_RX_ACTIVITY | MAC_RGMII_MODE_RX_ENG_DET; if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) val |= MAC_RGMII_MODE_TX_ENABLE | MAC_RGMII_MODE_TX_LOWPWR | MAC_RGMII_MODE_TX_RESET; } tw32(MAC_EXT_RGMII_MODE, val); } static void tg3_mdio_start(struct tg3 *tp) { tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; tw32_f(MAC_MI_MODE, tp->mi_mode); udelay(80); if (tg3_flag(tp, MDIOBUS_INITED) && GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) tg3_mdio_config_5785(tp); } static int tg3_mdio_init(struct tg3 *tp) { int i; u32 reg; struct phy_device *phydev; if (tg3_flag(tp, 5717_PLUS)) { u32 is_serdes; tp->phy_addr = tp->pci_fn + 1; if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES; else is_serdes = tr32(TG3_CPMU_PHY_STRAP) & TG3_CPMU_PHY_STRAP_IS_SERDES; if (is_serdes) tp->phy_addr += 7; } else tp->phy_addr = TG3_PHY_MII_ADDR; tg3_mdio_start(tp); if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED)) return 0; tp->mdio_bus = mdiobus_alloc(); if (tp->mdio_bus == NULL) return -ENOMEM; tp->mdio_bus->name = "tg3 mdio bus"; snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", (tp->pdev->bus->number << 8) | tp->pdev->devfn); tp->mdio_bus->priv = tp; tp->mdio_bus->parent = &tp->pdev->dev; tp->mdio_bus->read 
= &tg3_mdio_read; tp->mdio_bus->write = &tg3_mdio_write; tp->mdio_bus->reset = &tg3_mdio_reset; tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR); tp->mdio_bus->irq = &tp->mdio_irq[0]; for (i = 0; i < PHY_MAX_ADDR; i++) tp->mdio_bus->irq[i] = PHY_POLL; /* The bus registration will look for all the PHYs on the mdio bus. * Unfortunately, it does not ensure the PHY is powered up before * accessing the PHY ID registers. A chip reset is the * quickest way to bring the device back to an operational state.. */ if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN)) tg3_bmcr_reset(tp); i = mdiobus_register(tp->mdio_bus); if (i) { dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i); mdiobus_free(tp->mdio_bus); return i; } phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; if (!phydev || !phydev->drv) { dev_warn(&tp->pdev->dev, "No PHY devices\n"); mdiobus_unregister(tp->mdio_bus); mdiobus_free(tp->mdio_bus); return -ENODEV; } switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { case PHY_ID_BCM57780: phydev->interface = PHY_INTERFACE_MODE_GMII; phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; break; case PHY_ID_BCM50610: case PHY_ID_BCM50610M: phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE | PHY_BRCM_RX_REFCLK_UNUSED | PHY_BRCM_DIS_TXCRXC_NOENRGY | PHY_BRCM_AUTO_PWRDWN_ENABLE; if (tg3_flag(tp, RGMII_INBAND_DISABLE)) phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; /* fallthru */ case PHY_ID_RTL8211C: phydev->interface = PHY_INTERFACE_MODE_RGMII; break; case PHY_ID_RTL8201E: case PHY_ID_BCMAC131: phydev->interface = PHY_INTERFACE_MODE_MII; phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; tp->phy_flags |= TG3_PHYFLG_IS_FET; break; } tg3_flag_set(tp, MDIOBUS_INITED); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) tg3_mdio_config_5785(tp); return 0; } static void 
tg3_mdio_fini(struct tg3 *tp)
{
	/* Tear down the mdio bus created by tg3_mdio_init(), if any. */
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
/* Ring the doorbell that tells the RX CPU firmware a driver event is
 * pending, and remember when we did it (see tg3_wait_for_event_ack()).
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC from the previous event)
 * for the RX CPU firmware to acknowledge the last driver event by
 * clearing GRC_RX_CPU_DRIVER_EVENT.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;	/* poll every 8us */

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
/* Pack four 32-bit words of PHY state (BMCR/BMSR, ADVERTISE/LPA,
 * CTRL1000/STAT1000, PHYADDR) into data[0..3] for the firmware mailbox.
 * A failed tg3_readphy() leaves that half-word as 0.
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
/* Report a link change to the management (ASF) firmware on 5780-class
 * parts via the NIC SRAM command mailbox.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
/* Ask the ASF firmware to pause (not used when the APE manages it). */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
/* Post the pre-reset driver-state signature for the firmware and, for
 * INIT/SUSPEND, notify the APE of the state change.
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
/* Post the post-reset driver-state signature; SHUTDOWN also notifies
 * the APE.
 */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
/* Legacy (pre-ASF_NEW_HANDSHAKE) driver-state signature write. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* Poll for firmware initialization to complete after a reset.
 * Returns 0 on success; -ENODEV only on 5906 VCPU init timeout.  On
 * other chips a timeout is tolerated (firmware may be absent, e.g. on
 * some Sun onboard parts) and merely logged once.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

/* Log link state transitions and forward them to the management
 * firmware via tg3_ump_link_report().
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

/* Translate FLOW_CTRL_{TX,RX} bits into the 1000BASE-X pause
 * advertisement bits (ADVERTISE_1000XPAUSE/_ASYM).
 */
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

/* Resolve negotiated 1000BASE-X pause capability from the local and
 * remote advertisements.
 */
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}

/* Program MAC RX/TX flow-control enables from either the autoneg
 * result (serdes vs copper resolution helpers) or the forced
 * link_config.flowctrl setting, writing the MAC mode registers only
 * when they actually change.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

/* phylib adjust_link callback: mirror the PHY's negotiated state
 * (port mode, duplex, pause, TX timing) into the MAC registers under
 * tp->lock, then report any change outside the lock.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

/* Reset the PHY and attach it to the MAC through phylib.  Returns 0
 * if already connected or on success, else the phy_connect() error.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features.
	 */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

/* (Re)start the attached PHY, restoring forced speed/duplex/autoneg
 * settings if we are coming out of low-power mode.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

/* Stop the attached PHY's state machine, if connected. */
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

/* Detach from the PHY and clear the connected flag. */
static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

/* Enable external loopback via the AUXCTL shadow register.  The 5401
 * cannot be read-modify-written, so it gets a fixed write instead.
 * Returns 0 on success or a PHY access error; a no-op (0) on FET PHYs.
 */
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}

/* Toggle auto power-down (APD) on FET-style PHYs via the FET shadow
 * register window.
 */
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

/* Toggle auto power-down via the MISC shadow registers (or the FET
 * path above).  Skipped on chips where APD does not apply.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

/* Enable/disable automatic MDI crossover, through the FET shadow
 * window on FET PHYs or the AUXCTL MISC shadow otherwise.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

/* Enable ethernet wirespeed (fall back to lower speeds on marginal
 * cable) via the AUXCTL MISC shadow register, unless disabled by flag.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

/* Program factory OTP calibration values (tp->phy_otp) into the PHY
 * DSP tap/adjust registers.  No-op if there is no OTP data or the
 * SMDSP cannot be enabled.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}

/* After a link change, decide whether Energy Efficient Ethernet LPI
 * can be left enabled (setlpicnt) and program the CPMU EEE registers
 * accordingly.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

/* Enable EEE LPI mode, with an extra DSP tweak for gigabit links on
 * 5717/5719/57765-class parts.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}

/* Poll (up to 100 reads) for DSP_CONTROL bit 0x1000 to clear.
 * Returns 0 when done, -EBUSY on timeout.
 */
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

/* Write DSP test patterns to all four channels and read them back.
 * On any mismatch or macro timeout, sets *resetp so the caller's
 * retry loop performs another PHY reset, and returns -EBUSY.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

/* Zero the DSP pattern registers on all four channels.
 * Returns 0 on success, -EBUSY on macro timeout.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

/* DSP test-pattern based PHY reset workaround for 5703/5704/5705.
 * Retries up to 10 times, re-resetting the PHY whenever the test
 * pattern check requests it.
 *
 * NOTE(review): if every tg3_readphy() of MII_TG3_EXT_CTRL /
 * MII_CTRL1000 fails, the loop exits via --retries with reg32 and
 * phy9_orig never assigned, yet phy9_orig is written back below —
 * potential use of uninitialized data on a fully dead PHY; confirm
 * against upstream.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access.
		 */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	/* Restore the original master/slave setting. */
	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Two BMSR reads: latched link-status bits need a double read. */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Post-reset per-errata DSP fixups, keyed on phy_flags. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}

/* Per-function GPIO handshake messages exchanged through the APE
 * GPIO_MSG register (or CPMU_DRV_STATUS), 4 bits per PCI function.
 */
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))

/* Replace this function's 4-bit status nibble in the shared GPIO
 * message word and return the updated word (shifted back down).
 */
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}

/* Switch the NIC's power source to Vmain, announcing driver presence
 * under the APE GPIO lock on 5717/5719/5720.  Returns 0, or -EIO if
 * the APE lock cannot be taken.
 */
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp,
			       TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}

/* Power-switch GPIO sequence that shuts down the aux supply while the
 * NIC stays on Vmain.  Skipped on non-NIC and 5700/5701 boards.
 */
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}

/* Drive the board GPIOs to switch the power source to Vaux, with
 * chip-specific sequences (5700/5701, 5761[S], and the generic case).
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}

/* 5717+ variant of the aux-power decision: publish this function's
 * need for Vaux through the shared GPIO message word and only the
 * last driver present actually switches the power source.
 */
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}

/* Decide between Vaux and Vmain based on WoL/ASF needs of this port
 * and (on dual-port boards) its peer.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}

/* Return 1 when the 5700 link LED polarity must be inverted for the
 * given speed, based on LED mode and PHY model.
 */
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

/* Power down the PHY for suspend/low-power, with serdes, 5906, FET
 * and generic paths; some chips must not be powered down at all.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp,
MII_TG3_EXT_CTRL, MII_TG3_EXT_CTRL_FORCE_LED_OFF); val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | MII_TG3_AUXCTL_PCTL_VREG_11V; tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val); } /* The PHY should not be powered down on some chips because * of bugs. */ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 && (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && !tp->pci_fn)) return; if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { val = tr32(TG3_CPMU_LSPD_1000MB_CLK); val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; val |= CPMU_LSPD_1000MB_MACCLK_12_5; tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); } tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); } /* tp->lock is held. */ static int tg3_nvram_lock(struct tg3 *tp) { if (tg3_flag(tp, NVRAM)) { int i; if (tp->nvram_lock_cnt == 0) { tw32(NVRAM_SWARB, SWARB_REQ_SET1); for (i = 0; i < 8000; i++) { if (tr32(NVRAM_SWARB) & SWARB_GNT1) break; udelay(20); } if (i == 8000) { tw32(NVRAM_SWARB, SWARB_REQ_CLR1); return -ENODEV; } } tp->nvram_lock_cnt++; } return 0; } /* tp->lock is held. */ static void tg3_nvram_unlock(struct tg3 *tp) { if (tg3_flag(tp, NVRAM)) { if (tp->nvram_lock_cnt > 0) tp->nvram_lock_cnt--; if (tp->nvram_lock_cnt == 0) tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); } } /* tp->lock is held. */ static void tg3_enable_nvram_access(struct tg3 *tp) { if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { u32 nvaccess = tr32(NVRAM_ACCESS); tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); } } /* tp->lock is held. 
 * Counterpart of tg3_enable_nvram_access(): revoke host access to the
 * NVRAM interface on 5750+ parts.
 */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}

/* Read one 32-bit word from a legacy SEEPROM through the GRC EEPROM
 * registers.  @offset must be dword aligned and within the address
 * mask.  Returns 0 and stores the word in *val, -EINVAL on a bad
 * offset, or -EBUSY when the EEPROM never signals completion (polled
 * up to ~1s in 1ms steps).
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Clear the address/devid/read fields, then start a read for
	 * this offset.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}

#define NVRAM_CMD_TIMEOUT 10000

/* Issue a command to the NVRAM controller and poll for NVRAM_CMD_DONE.
 * Returns 0 on completion, -EBUSY on timeout (10000 * 10us).
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}

/* Translate a linear NVRAM offset into the device's physical address
 * layout.  Only Atmel AT45DB0x1B-style buffered flash needs the
 * page/offset re-encoding; all other configurations use the address
 * unchanged.
 */
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

/* Inverse of tg3_nvram_phys_addr(): map a device physical address back
 * to the linear offset, again only for Atmel buffered flash.
 */
static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}

/* NOTE: Data read in from NVRAM is byteswapped
 * according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	/* No NVRAM controller: fall back to the legacy SEEPROM path. */
	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	/* Lock, enable access, do a single-word read, then undo both. */
	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}

/* Write @len bytes from @buf to a legacy SEEPROM, one dword at a time,
 * through the GRC EEPROM registers.  Returns 0 or -EBUSY when a word
 * write never completes.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}

/* offset and length are dword aligned */
/* Unbuffered flash write: for each page touched, read the whole page
 * into a bounce buffer, merge the new data, erase the page, then
 * program it back word by word.  Errors abort the loop; a final
 * write-disable command is always issued.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset,
					    u32 len, u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		/* Read-modify-write: fetch the full page first. */
		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Program the merged page back, flagging the first and
		 * last words of the burst.
		 */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	/* Always leave the part write-disabled, even on error. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}

/* offset and length are dword aligned */
/* Buffered flash / EEPROM write: stream words directly, letting the
 * controller buffer a page at a time.  FIRST/LAST flags mark page and
 * transfer boundaries; ST-flash parts additionally need a write-enable
 * before each page.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset,
					  u32 len, u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* 57765+ flash latches the address on FIRST only. */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom.
			 */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}

/* offset and length are dword aligned */
/* Top-level NVRAM write entry point: lifts the GPIO write protect,
 * takes the NVRAM lock, enables write mode in GRC_MODE, dispatches to
 * the buffered/unbuffered/SEEPROM helper, then restores everything.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		/* Drop the write-protect GPIO for the duration. */
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		/* Re-assert the write-protect GPIO. */
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}

#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held.
 * Halt the on-chip RX or TX CPU identified by @offset (RX_CPU_BASE or
 * TX_CPU_BASE).  5906 uses the VCPU halt bit instead; the RX CPU
 * additionally gets a final forced halt + settle delay.  Returns 0 on
 * success, -ENODEV when the CPU never reports halted.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	/* 5705+ parts have no separate TX CPU to halt. */
	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}

/* Describes one firmware image: load address, byte length, and a
 * pointer to the big-endian instruction words.
 */
struct fw_info {
	unsigned int fw_base;
	unsigned int fw_len;
	const __be32 *fw_data;
};

/* tp->lock is held.
 * Halt @cpu_base, zero its scratch memory, and copy the firmware image
 * described by @info into the scratch area.  The CPU is left halted;
 * the caller restarts it.  Returns 0 or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick the memory-write primitive appropriate for the chip. */
	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			      be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}

/* tp->lock is held.
 * Load the 5701-A0 workaround firmware into both RX and TX CPUs, then
 * start only the RX CPU and verify its program counter took the new
 * base address (5 attempts).  Returns 0 or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}

/* tp->lock is held.
 * Load the software-TSO firmware and start the CPU that will run it.
 * A no-op on chips with hardware TSO.  On 5705 the image goes into the
 * RX CPU's mbuf pool area; otherwise into the TX CPU scratch space.
 * Returns 0 or a negative errno.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}

/* tp->lock is held.
 * Program the station MAC address into all four (or, on 5703/5704,
 * sixteen) MAC address slots, and seed the TX backoff generator from
 * the address bytes.  @skip_mac_1 leaves slot 1 untouched
 * (presumably reserved for ASF/management firmware - verify).
 */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}

static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}

/* Bring the device to PCI D0 and switch the power source back to Vmain.
 * Returns the pci_set_power_state() result.
 */
static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}

static int tg3_setup_phy(struct tg3 *, int);

/* Prepare the chip for a low-power state: mask PCI interrupts,
 * reconfigure the PHY for low power / Wake-on-LAN, program the WoL
 * mailbox and MAC mode, gate clocks where the chip allows it, arbitrate
 * the aux power source, and post the shutdown signature.  Always
 * returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting.
	 */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Save the current link settings so they can be
			 * restored on power-up.
			 */
			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Certain Broadcom OUIs still need the legacy
			 * low-power PHY programming below.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait (up to ~200ms) for bootcode to signal readiness
		 * via the ASF status mailbox.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock gating: which clocks may be stopped depends heavily on
	 * the chip family and whether ASF firmware keeps running.
	 */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step clock switch; each write settles for 40us. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}

/* Prepare for low power, then arm PME wakeup and enter PCI D3hot. */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}

/* Decode the PHY AUX status register @val into *speed / *duplex.
 * FET-style PHYs encode speed/duplex in separate bits, handled in the
 * default branch; anything unrecognized yields SPEED_UNKNOWN /
 * DUPLEX_UNKNOWN.
 */
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val,
					 u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_UNKNOWN;
		*duplex = DUPLEX_UNKNOWN;
		break;
	}
}

/* Program the PHY's autonegotiation advertisement registers from the
 * ethtool-style @advertise and @flowctrl masks, including 1000BASE-T
 * control (with the 5701 A0/B0 master workaround) and, where
 * supported, the EEE advertisement via clause-45 access.  Returns 0 or
 * the first MDIO write error.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* 5701 A0/B0 must force master mode. */
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above...
			 */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}

/* Kick off copper link negotiation.  With autoneg (or in low-power
 * mode) the advertisement is programmed and autonegotiation is
 * restarted; otherwise BMCR is forced to the configured speed/duplex,
 * first waiting (in loopback) for the old link to drop.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
			/* Low power: advertise only what WoL needs. */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Park the PHY in loopback and wait for the old
			 * link to drop before forcing the new mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}

/* Program the BCM5401 PHY DSP workaround values.  Returns 0 or an
 * OR-accumulated nonzero value when any write fails.
 */
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management.
	 */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}

/* Check that the PHY's current advertisement registers match the
 * driver's configured advertisement.  Stores the local advertisement
 * in *lcladv.  Returns true when consistent, false on mismatch or MDIO
 * read failure.
 */
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
			/* Expect the forced-master bits set by
			 * tg3_phy_autoneg_cfg() on these revisions.
			 */
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}

/* Read the link partner's advertisement (base page and, when gigabit
 * capable, MII_STAT1000) into *rmtadv and cache the ethtool-encoded
 * form in tp->link_config.rmt_adv.  Returns false on MDIO failure.
 */
static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
	u32 lpeth = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 val;

		if (tg3_readphy(tp, MII_STAT1000, &val))
			return false;

		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	}

	if (tg3_readphy(tp, MII_LPA, rmtadv))
		return false;

	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
	tp->link_config.rmt_adv = lpeth;

	return true;
}

/* Full copper link bring-up/check path: applies per-chip PHY
 * workarounds, optionally resets the PHY, determines the negotiated
 * speed/duplex, programs MAC_MODE accordingly, and updates the netif
 * carrier state.  NOTE the repeated "read BMSR twice" idiom below:
 * MII latches link-down status, so the second read returns current
 * state.  Returns 0 or a negative errno.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts...
	 */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Re-read BMCR until it returns a stable, sane value. */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = 1;
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		     (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer.
		 */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 oldlnkctl, newlnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      pci_pcie_cap(tp->pdev) +
					      PCI_EXP_LNKCTL, newlnkctl);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}

/* State for the software fiber autonegotiation state machine
 * (tg3_fiber_aneg_smachine).
 */
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000

/* One tick of the software fiber autonegotiation state machine.
 * NOTE(review): this function continues past the end of the visible
 * chunk; only its opening portion is documented here.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case
ANEG_STATE_AN_ENABLE: ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); if (ap->flags & MR_AN_ENABLE) { ap->link_time = 0; ap->cur_time = 0; ap->ability_match_cfg = 0; ap->ability_match_count = 0; ap->ability_match = 0; ap->idle_match = 0; ap->ack_match = 0; ap->state = ANEG_STATE_RESTART_INIT; } else { ap->state = ANEG_STATE_DISABLE_LINK_OK; } break; case ANEG_STATE_RESTART_INIT: ap->link_time = ap->cur_time; ap->flags &= ~(MR_NP_LOADED); ap->txconfig = 0; tw32(MAC_TX_AUTO_NEG, 0); tp->mac_mode |= MAC_MODE_SEND_CONFIGS; tw32_f(MAC_MODE, tp->mac_mode); udelay(40); ret = ANEG_TIMER_ENAB; ap->state = ANEG_STATE_RESTART; /* fallthru */ case ANEG_STATE_RESTART: delta = ap->cur_time - ap->link_time; if (delta > ANEG_STATE_SETTLE_TIME) ap->state = ANEG_STATE_ABILITY_DETECT_INIT; else ret = ANEG_TIMER_ENAB; break; case ANEG_STATE_DISABLE_LINK_OK: ret = ANEG_DONE; break; case ANEG_STATE_ABILITY_DETECT_INIT: ap->flags &= ~(MR_TOGGLE_TX); ap->txconfig = ANEG_CFG_FD; flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); if (flowctrl & ADVERTISE_1000XPAUSE) ap->txconfig |= ANEG_CFG_PS1; if (flowctrl & ADVERTISE_1000XPSE_ASYM) ap->txconfig |= ANEG_CFG_PS2; tw32(MAC_TX_AUTO_NEG, ap->txconfig); tp->mac_mode |= MAC_MODE_SEND_CONFIGS; tw32_f(MAC_MODE, tp->mac_mode); udelay(40); ap->state = ANEG_STATE_ABILITY_DETECT; break; case ANEG_STATE_ABILITY_DETECT: if (ap->ability_match != 0 && ap->rxconfig != 0) ap->state = ANEG_STATE_ACK_DETECT_INIT; break; case ANEG_STATE_ACK_DETECT_INIT: ap->txconfig |= ANEG_CFG_ACK; tw32(MAC_TX_AUTO_NEG, ap->txconfig); tp->mac_mode |= MAC_MODE_SEND_CONFIGS; tw32_f(MAC_MODE, tp->mac_mode); udelay(40); ap->state = ANEG_STATE_ACK_DETECT; /* fallthru */ case ANEG_STATE_ACK_DETECT: if (ap->ack_match != 0) { if ((ap->rxconfig & ~ANEG_CFG_ACK) == (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { ap->state = ANEG_STATE_COMPLETE_ACK_INIT; } else { ap->state = ANEG_STATE_AN_ENABLE; } } else if (ap->ability_match != 0 && ap->rxconfig == 0) { ap->state = 
ANEG_STATE_AN_ENABLE; } break; case ANEG_STATE_COMPLETE_ACK_INIT: if (ap->rxconfig & ANEG_CFG_INVAL) { ret = ANEG_FAILED; break; } ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | MR_LP_ADV_HALF_DUPLEX | MR_LP_ADV_SYM_PAUSE | MR_LP_ADV_ASYM_PAUSE | MR_LP_ADV_REMOTE_FAULT1 | MR_LP_ADV_REMOTE_FAULT2 | MR_LP_ADV_NEXT_PAGE | MR_TOGGLE_RX | MR_NP_RX); if (ap->rxconfig & ANEG_CFG_FD) ap->flags |= MR_LP_ADV_FULL_DUPLEX; if (ap->rxconfig & ANEG_CFG_HD) ap->flags |= MR_LP_ADV_HALF_DUPLEX; if (ap->rxconfig & ANEG_CFG_PS1) ap->flags |= MR_LP_ADV_SYM_PAUSE; if (ap->rxconfig & ANEG_CFG_PS2) ap->flags |= MR_LP_ADV_ASYM_PAUSE; if (ap->rxconfig & ANEG_CFG_RF1) ap->flags |= MR_LP_ADV_REMOTE_FAULT1; if (ap->rxconfig & ANEG_CFG_RF2) ap->flags |= MR_LP_ADV_REMOTE_FAULT2; if (ap->rxconfig & ANEG_CFG_NP) ap->flags |= MR_LP_ADV_NEXT_PAGE; ap->link_time = ap->cur_time; ap->flags ^= (MR_TOGGLE_TX); if (ap->rxconfig & 0x0008) ap->flags |= MR_TOGGLE_RX; if (ap->rxconfig & ANEG_CFG_NP) ap->flags |= MR_NP_RX; ap->flags |= MR_PAGE_RX; ap->state = ANEG_STATE_COMPLETE_ACK; ret = ANEG_TIMER_ENAB; break; case ANEG_STATE_COMPLETE_ACK: if (ap->ability_match != 0 && ap->rxconfig == 0) { ap->state = ANEG_STATE_AN_ENABLE; break; } delta = ap->cur_time - ap->link_time; if (delta > ANEG_STATE_SETTLE_TIME) { if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { ap->state = ANEG_STATE_IDLE_DETECT_INIT; } else { if ((ap->txconfig & ANEG_CFG_NP) == 0 && !(ap->flags & MR_NP_RX)) { ap->state = ANEG_STATE_IDLE_DETECT_INIT; } else { ret = ANEG_FAILED; } } } break; case ANEG_STATE_IDLE_DETECT_INIT: ap->link_time = ap->cur_time; tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; tw32_f(MAC_MODE, tp->mac_mode); udelay(40); ap->state = ANEG_STATE_IDLE_DETECT; ret = ANEG_TIMER_ENAB; break; case ANEG_STATE_IDLE_DETECT: if (ap->ability_match != 0 && ap->rxconfig == 0) { ap->state = ANEG_STATE_AN_ENABLE; break; } delta = ap->cur_time - ap->link_time; if (delta > ANEG_STATE_SETTLE_TIME) { /* XXX another gem from the Broadcom driver :( */ ap->state = 
ANEG_STATE_LINK_OK; } break; case ANEG_STATE_LINK_OK: ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); ret = ANEG_DONE; break; case ANEG_STATE_NEXT_PAGE_WAIT_INIT: /* ??? unimplemented */ break; case ANEG_STATE_NEXT_PAGE_WAIT: /* ??? unimplemented */ break; default: ret = ANEG_FAILED; break; } return ret; } static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) { int res = 0; struct tg3_fiber_aneginfo aninfo; int status = ANEG_FAILED; unsigned int tick; u32 tmp; tw32_f(MAC_TX_AUTO_NEG, 0); tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); udelay(40); tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); udelay(40); memset(&aninfo, 0, sizeof(aninfo)); aninfo.flags |= MR_AN_ENABLE; aninfo.state = ANEG_STATE_UNKNOWN; aninfo.cur_time = 0; tick = 0; while (++tick < 195000) { status = tg3_fiber_aneg_smachine(tp, &aninfo); if (status == ANEG_DONE || status == ANEG_FAILED) break; udelay(1); } tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; tw32_f(MAC_MODE, tp->mac_mode); udelay(40); *txflags = aninfo.txconfig; *rxflags = aninfo.flags; if (status == ANEG_DONE && (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | MR_LP_ADV_FULL_DUPLEX))) res = 1; return res; } static void tg3_init_bcm8002(struct tg3 *tp) { u32 mac_status = tr32(MAC_STATUS); int i; /* Reset when initting first time or we have a link. */ if (tg3_flag(tp, INIT_COMPLETE) && !(mac_status & MAC_STATUS_PCS_SYNCED)) return; /* Set PLL lock range. */ tg3_writephy(tp, 0x16, 0x8007); /* SW reset */ tg3_writephy(tp, MII_BMCR, BMCR_RESET); /* Wait for reset to complete. */ /* XXX schedule_timeout() ... */ for (i = 0; i < 500; i++) udelay(10); /* Config mode; select PMA/Ch 1 regs. */ tg3_writephy(tp, 0x10, 0x8411); /* Enable auto-lock and comdet, select txclk for tx. */ tg3_writephy(tp, 0x11, 0x0a10); tg3_writephy(tp, 0x18, 0x00a0); tg3_writephy(tp, 0x16, 0x41ff); /* Assert and deassert POR. 
*/ tg3_writephy(tp, 0x13, 0x0400); udelay(40); tg3_writephy(tp, 0x13, 0x0000); tg3_writephy(tp, 0x11, 0x0a50); udelay(40); tg3_writephy(tp, 0x11, 0x0a10); /* Wait for signal to stabilize */ /* XXX schedule_timeout() ... */ for (i = 0; i < 15000; i++) udelay(10); /* Deselect the channel register so we can read the PHYID * later. */ tg3_writephy(tp, 0x10, 0x8011); } static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) { u16 flowctrl; u32 sg_dig_ctrl, sg_dig_status; u32 serdes_cfg, expected_sg_dig_ctrl; int workaround, port_a; int current_link_up; serdes_cfg = 0; expected_sg_dig_ctrl = 0; workaround = 0; port_a = 1; current_link_up = 0; if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 && tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) { workaround = 1; if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) port_a = 0; /* preserve bits 0-11,13,14 for signal pre-emphasis */ /* preserve bits 20-23 for voltage regulator */ serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; } sg_dig_ctrl = tr32(SG_DIG_CTRL); if (tp->link_config.autoneg != AUTONEG_ENABLE) { if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { if (workaround) { u32 val = serdes_cfg; if (port_a) val |= 0xc010000; else val |= 0x4010000; tw32_f(MAC_SERDES_CFG, val); } tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); } if (mac_status & MAC_STATUS_PCS_SYNCED) { tg3_setup_flow_control(tp, 0, 0); current_link_up = 1; } goto out; } /* Want auto-negotiation. 
*/ expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); if (flowctrl & ADVERTISE_1000XPAUSE) expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; if (flowctrl & ADVERTISE_1000XPSE_ASYM) expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; if (sg_dig_ctrl != expected_sg_dig_ctrl) { if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && tp->serdes_counter && ((mac_status & (MAC_STATUS_PCS_SYNCED | MAC_STATUS_RCVD_CFG)) == MAC_STATUS_PCS_SYNCED)) { tp->serdes_counter--; current_link_up = 1; goto out; } restart_autoneg: if (workaround) tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); udelay(5); tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } else if (mac_status & (MAC_STATUS_PCS_SYNCED | MAC_STATUS_SIGNAL_DET)) { sg_dig_status = tr32(SG_DIG_STATUS); mac_status = tr32(MAC_STATUS); if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && (mac_status & MAC_STATUS_PCS_SYNCED)) { u32 local_adv = 0, remote_adv = 0; if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) local_adv |= ADVERTISE_1000XPAUSE; if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) local_adv |= ADVERTISE_1000XPSE_ASYM; if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) remote_adv |= LPA_1000XPAUSE; if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) remote_adv |= LPA_1000XPAUSE_ASYM; tp->link_config.rmt_adv = mii_adv_to_ethtool_adv_x(remote_adv); tg3_setup_flow_control(tp, local_adv, remote_adv); current_link_up = 1; tp->serdes_counter = 0; tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { if (tp->serdes_counter) tp->serdes_counter--; else { if (workaround) { u32 val = serdes_cfg; if (port_a) val |= 0xc010000; else val |= 0x4010000; tw32_f(MAC_SERDES_CFG, val); } tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); udelay(40); /* Link parallel detection - link is up */ /* only if we have PCS_SYNC and not 
*/ /* receiving config code words */ mac_status = tr32(MAC_STATUS); if ((mac_status & MAC_STATUS_PCS_SYNCED) && !(mac_status & MAC_STATUS_RCVD_CFG)) { tg3_setup_flow_control(tp, 0, 0); current_link_up = 1; tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; tp->serdes_counter = SERDES_PARALLEL_DET_TIMEOUT; } else goto restart_autoneg; } } } else { tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } out: return current_link_up; } static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) { int current_link_up = 0; if (!(mac_status & MAC_STATUS_PCS_SYNCED)) goto out; if (tp->link_config.autoneg == AUTONEG_ENABLE) { u32 txflags, rxflags; int i; if (fiber_autoneg(tp, &txflags, &rxflags)) { u32 local_adv = 0, remote_adv = 0; if (txflags & ANEG_CFG_PS1) local_adv |= ADVERTISE_1000XPAUSE; if (txflags & ANEG_CFG_PS2) local_adv |= ADVERTISE_1000XPSE_ASYM; if (rxflags & MR_LP_ADV_SYM_PAUSE) remote_adv |= LPA_1000XPAUSE; if (rxflags & MR_LP_ADV_ASYM_PAUSE) remote_adv |= LPA_1000XPAUSE_ASYM; tp->link_config.rmt_adv = mii_adv_to_ethtool_adv_x(remote_adv); tg3_setup_flow_control(tp, local_adv, remote_adv); current_link_up = 1; } for (i = 0; i < 30; i++) { udelay(20); tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED)); udelay(40); if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED)) == 0) break; } mac_status = tr32(MAC_STATUS); if (current_link_up == 0 && (mac_status & MAC_STATUS_PCS_SYNCED) && !(mac_status & MAC_STATUS_RCVD_CFG)) current_link_up = 1; } else { tg3_setup_flow_control(tp, 0, 0); /* Forcing 1000FD link up. 
*/ current_link_up = 1; tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); udelay(40); tw32_f(MAC_MODE, tp->mac_mode); udelay(40); } out: return current_link_up; } static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) { u32 orig_pause_cfg; u16 orig_active_speed; u8 orig_active_duplex; u32 mac_status; int current_link_up; int i; orig_pause_cfg = tp->link_config.active_flowctrl; orig_active_speed = tp->link_config.active_speed; orig_active_duplex = tp->link_config.active_duplex; if (!tg3_flag(tp, HW_AUTONEG) && netif_carrier_ok(tp->dev) && tg3_flag(tp, INIT_COMPLETE)) { mac_status = tr32(MAC_STATUS); mac_status &= (MAC_STATUS_PCS_SYNCED | MAC_STATUS_SIGNAL_DET | MAC_STATUS_CFG_CHANGED | MAC_STATUS_RCVD_CFG); if (mac_status == (MAC_STATUS_PCS_SYNCED | MAC_STATUS_SIGNAL_DET)) { tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED)); return 0; } } tw32_f(MAC_TX_AUTO_NEG, 0); tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; tw32_f(MAC_MODE, tp->mac_mode); udelay(40); if (tp->phy_id == TG3_PHY_ID_BCM8002) tg3_init_bcm8002(tp); /* Enable link change event even when serdes polling. 
*/ tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); udelay(40); current_link_up = 0; tp->link_config.rmt_adv = 0; mac_status = tr32(MAC_STATUS); if (tg3_flag(tp, HW_AUTONEG)) current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); else current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); tp->napi[0].hw_status->status = (SD_STATUS_UPDATED | (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); for (i = 0; i < 100; i++) { tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED)); udelay(5); if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED | MAC_STATUS_LNKSTATE_CHANGED)) == 0) break; } mac_status = tr32(MAC_STATUS); if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { current_link_up = 0; if (tp->link_config.autoneg == AUTONEG_ENABLE && tp->serdes_counter == 0) { tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); udelay(1); tw32_f(MAC_MODE, tp->mac_mode); } } if (current_link_up == 1) { tp->link_config.active_speed = SPEED_1000; tp->link_config.active_duplex = DUPLEX_FULL; tw32(MAC_LED_CTRL, (tp->led_ctrl | LED_CTRL_LNKLED_OVERRIDE | LED_CTRL_1000MBPS_ON)); } else { tp->link_config.active_speed = SPEED_UNKNOWN; tp->link_config.active_duplex = DUPLEX_UNKNOWN; tw32(MAC_LED_CTRL, (tp->led_ctrl | LED_CTRL_LNKLED_OVERRIDE | LED_CTRL_TRAFFIC_OVERRIDE)); } if (current_link_up != netif_carrier_ok(tp->dev)) { if (current_link_up) netif_carrier_on(tp->dev); else netif_carrier_off(tp->dev); tg3_link_report(tp); } else { u32 now_pause_cfg = tp->link_config.active_flowctrl; if (orig_pause_cfg != now_pause_cfg || orig_active_speed != tp->link_config.active_speed || orig_active_duplex != tp->link_config.active_duplex) tg3_link_report(tp); } return 0; } static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) { int current_link_up, err = 0; u32 bmsr, bmcr; u16 current_speed; u8 current_duplex; u32 local_adv, remote_adv; tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; tw32_f(MAC_MODE, tp->mac_mode); udelay(40); tw32(MAC_EVENT, 
0); tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED | MAC_STATUS_MI_COMPLETION | MAC_STATUS_LNKSTATE_CHANGED)); udelay(40); if (force_reset) tg3_phy_reset(tp); current_link_up = 0; current_speed = SPEED_UNKNOWN; current_duplex = DUPLEX_UNKNOWN; tp->link_config.rmt_adv = 0; err |= tg3_readphy(tp, MII_BMSR, &bmsr); err |= tg3_readphy(tp, MII_BMSR, &bmsr); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) bmsr |= BMSR_LSTATUS; else bmsr &= ~BMSR_LSTATUS; } err |= tg3_readphy(tp, MII_BMCR, &bmcr); if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { /* do nothing, just check for link up at the end */ } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { u32 adv, newadv; err |= tg3_readphy(tp, MII_ADVERTISE, &adv); newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM | ADVERTISE_SLCT); newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising); if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) { tg3_writephy(tp, MII_ADVERTISE, newadv); bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; tg3_writephy(tp, MII_BMCR, bmcr); tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; return err; } } else { u32 new_bmcr; bmcr &= ~BMCR_SPEED1000; new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); if (tp->link_config.duplex == DUPLEX_FULL) new_bmcr |= BMCR_FULLDPLX; if (new_bmcr != bmcr) { /* BMCR_SPEED1000 is a reserved bit that needs * to be set on write. 
*/ new_bmcr |= BMCR_SPEED1000; /* Force a linkdown */ if (netif_carrier_ok(tp->dev)) { u32 adv; err |= tg3_readphy(tp, MII_ADVERTISE, &adv); adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | ADVERTISE_SLCT); tg3_writephy(tp, MII_ADVERTISE, adv); tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE); udelay(10); netif_carrier_off(tp->dev); } tg3_writephy(tp, MII_BMCR, new_bmcr); bmcr = new_bmcr; err |= tg3_readphy(tp, MII_BMSR, &bmsr); err |= tg3_readphy(tp, MII_BMSR, &bmsr); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) bmsr |= BMSR_LSTATUS; else bmsr &= ~BMSR_LSTATUS; } tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } } if (bmsr & BMSR_LSTATUS) { current_speed = SPEED_1000; current_link_up = 1; if (bmcr & BMCR_FULLDPLX) current_duplex = DUPLEX_FULL; else current_duplex = DUPLEX_HALF; local_adv = 0; remote_adv = 0; if (bmcr & BMCR_ANENABLE) { u32 common; err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); err |= tg3_readphy(tp, MII_LPA, &remote_adv); common = local_adv & remote_adv; if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) { if (common & ADVERTISE_1000XFULL) current_duplex = DUPLEX_FULL; else current_duplex = DUPLEX_HALF; tp->link_config.rmt_adv = mii_adv_to_ethtool_adv_x(remote_adv); } else if (!tg3_flag(tp, 5780_CLASS)) { /* Link is up via parallel detect */ } else { current_link_up = 0; } } } if (current_link_up == 1 && current_duplex == DUPLEX_FULL) tg3_setup_flow_control(tp, local_adv, remote_adv); tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; if (tp->link_config.active_duplex == DUPLEX_HALF) tp->mac_mode |= MAC_MODE_HALF_DUPLEX; tw32_f(MAC_MODE, tp->mac_mode); udelay(40); tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); tp->link_config.active_speed = current_speed; tp->link_config.active_duplex = current_duplex; if (current_link_up != netif_carrier_ok(tp->dev)) { if (current_link_up) netif_carrier_on(tp->dev); else { netif_carrier_off(tp->dev); tp->phy_flags &= 
~TG3_PHYFLG_PARALLEL_DETECT; } tg3_link_report(tp); } return err; } static void tg3_serdes_parallel_detect(struct tg3 *tp) { if (tp->serdes_counter) { /* Give autoneg time to complete. */ tp->serdes_counter--; return; } if (!netif_carrier_ok(tp->dev) && (tp->link_config.autoneg == AUTONEG_ENABLE)) { u32 bmcr; tg3_readphy(tp, MII_BMCR, &bmcr); if (bmcr & BMCR_ANENABLE) { u32 phy1, phy2; /* Select shadow register 0x1f */ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); /* Select expansion interrupt status register */ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, MII_TG3_DSP_EXP1_INT_STAT); tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); if ((phy1 & 0x10) && !(phy2 & 0x20)) { /* We have signal detect and not receiving * config code words, link is up by parallel * detection. */ bmcr &= ~BMCR_ANENABLE; bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; tg3_writephy(tp, MII_BMCR, bmcr); tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; } } } else if (netif_carrier_ok(tp->dev) && (tp->link_config.autoneg == AUTONEG_ENABLE) && (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { u32 phy2; /* Select expansion interrupt status register */ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, MII_TG3_DSP_EXP1_INT_STAT); tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); if (phy2 & 0x20) { u32 bmcr; /* Config code words received, turn on autoneg. 
*/ tg3_readphy(tp, MII_BMCR, &bmcr); tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } } } static int tg3_setup_phy(struct tg3 *tp, int force_reset) { u32 val; int err; if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) err = tg3_setup_fiber_phy(tp, force_reset); else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) err = tg3_setup_fiber_mii_phy(tp, force_reset); else err = tg3_setup_copper_phy(tp, force_reset); if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { u32 scale; val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) scale = 65; else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) scale = 6; else scale = 12; val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); tw32(GRC_MISC_CFG, val); } val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | (6 << TX_LENGTHS_IPG_SHIFT); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) val |= tr32(MAC_TX_LENGTHS) & (TX_LENGTHS_JMB_FRM_LEN_MSK | TX_LENGTHS_CNT_DWN_VAL_MSK); if (tp->link_config.active_speed == SPEED_1000 && tp->link_config.active_duplex == DUPLEX_HALF) tw32(MAC_TX_LENGTHS, val | (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); else tw32(MAC_TX_LENGTHS, val | (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); if (!tg3_flag(tp, 5705_PLUS)) { if (netif_carrier_ok(tp->dev)) { tw32(HOSTCC_STAT_COAL_TICKS, tp->coal.stats_block_coalesce_usecs); } else { tw32(HOSTCC_STAT_COAL_TICKS, 0); } } if (tg3_flag(tp, ASPM_WORKAROUND)) { val = tr32(PCIE_PWR_MGMT_THRESH); if (!netif_carrier_ok(tp->dev)) val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | tp->pwrmgmt_thresh; else val |= PCIE_PWR_MGMT_L1_THRESH_MSK; tw32(PCIE_PWR_MGMT_THRESH, val); } return err; } static inline int tg3_irq_sync(struct tg3 *tp) { return tp->irq_sync; } static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) { int i; dst = (u32 *)((u8 *)dst + off); for (i = 0; i < len; i += sizeof(u32)) *dst++ = tr32(off + i); } static void 
tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) { tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14); tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); if (tg3_flag(tp, SUPPORT_MSIX)) tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180); tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); if (!tg3_flag(tp, 5705_PLUS)) { tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); } tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); if (tg3_flag(tp, NVRAM)) tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); } static void tg3_dump_state(struct tg3 *tp) { int i; u32 *regs; regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); if (!regs) { netdev_err(tp->dev, "Failed allocating register 
dump buffer\n"); return; } if (tg3_flag(tp, PCI_EXPRESS)) { /* Read up to but not including private PCI registers */ for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) regs[i / sizeof(u32)] = tr32(i); } else tg3_dump_legacy_regs(tp, regs); for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { if (!regs[i + 0] && !regs[i + 1] && !regs[i + 2] && !regs[i + 3]) continue; netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", i * 4, regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); } kfree(regs); for (i = 0; i < tp->irq_cnt; i++) { struct tg3_napi *tnapi = &tp->napi[i]; /* SW status block */ netdev_err(tp->dev, "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", i, tnapi->hw_status->status, tnapi->hw_status->status_tag, tnapi->hw_status->rx_jumbo_consumer, tnapi->hw_status->rx_consumer, tnapi->hw_status->rx_mini_consumer, tnapi->hw_status->idx[0].rx_producer, tnapi->hw_status->idx[0].tx_consumer); netdev_err(tp->dev, "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", i, tnapi->last_tag, tnapi->last_irq_tag, tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, tnapi->rx_rcb_ptr, tnapi->prodring.rx_std_prod_idx, tnapi->prodring.rx_std_cons_idx, tnapi->prodring.rx_jmb_prod_idx, tnapi->prodring.rx_jmb_cons_idx); } } /* This is called whenever we suspect that the system chipset is re- * ordering the sequence of MMIO to the tx send mailbox. The symptom * is bogus tx completions. We try to recover by setting the * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later * in the workqueue. */ static void tg3_tx_recover(struct tg3 *tp) { BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || tp->write32_tx_mbox == tg3_write_indirect_mbox); netdev_warn(tp->dev, "The system may be re-ordering memory-mapped I/O " "cycles to the network device, attempting to recover. 
" "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Flag the chip for a reset; the actual recovery happens later
	 * from the workqueue under tp->lock.
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}

/* Number of free tx descriptors.  The ring size is a power of two, so
 * the masked subtraction below handles producer/consumer wrap-around.
 */
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}

/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 *
 * Reclaim tx descriptors the hardware has completed: unmap the head and
 * every fragment of each finished skb, free it, and wake the tx queue
 * if enough space opened up.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS, tx queue 0 lives on napi[1], hence the adjustment.
	 * NOTE(review): assumes this function is never called for
	 * napi[0] when TSS is enabled -- confirm against callers.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	/* Walk from our consumer index up to the hardware's. */
	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the chipset lost a
		 * mailbox write -- bail out and schedule recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip extra BDs consumed when the head had to be
		 * split across descriptors.
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* A frag slot holding an skb, or running past the
			 * hardware index, indicates corrupt completions.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	/* Report completions for byte queue limits. */
	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the tx lock to close the race with
		 * tg3_start_xmit() stopping the queue concurrently.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

/* Unmap and free one rx buffer's backing storage, if any, and clear the
 * slot so it is seen as empty.
 */
static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	kfree(ri->data);
	ri->data = NULL;
}

/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses. For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address). Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
*/ static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, u32 opaque_key, u32 dest_idx_unmasked) { struct tg3_rx_buffer_desc *desc; struct ring_info *map; u8 *data; dma_addr_t mapping; int skb_size, data_size, dest_idx; switch (opaque_key) { case RXD_OPAQUE_RING_STD: dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; desc = &tpr->rx_std[dest_idx]; map = &tpr->rx_std_buffers[dest_idx]; data_size = tp->rx_pkt_map_sz; break; case RXD_OPAQUE_RING_JUMBO: dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; desc = &tpr->rx_jmb[dest_idx].std; map = &tpr->rx_jmb_buffers[dest_idx]; data_size = TG3_RX_JMB_MAP_SZ; break; default: return -EINVAL; } /* Do not overwrite any of the map or rp information * until we are sure we can commit to a new buffer. * * Callers depend upon this behavior and assume that * we leave everything unchanged if we fail. */ skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); data = kmalloc(skb_size, GFP_ATOMIC); if (!data) return -ENOMEM; mapping = pci_map_single(tp->pdev, data + TG3_RX_OFFSET(tp), data_size, PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(tp->pdev, mapping)) { kfree(data); return -EIO; } map->data = data; dma_unmap_addr_set(map, mapping, mapping); desc->addr_hi = ((u64)mapping >> 32); desc->addr_lo = ((u64)mapping & 0xffffffff); return data_size; } /* We only need to move over in the address because the other * members of the RX descriptor are invariant. See notes above * tg3_alloc_rx_data for full details. 
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Buffers are always recycled from napi[0]'s producer ring. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}

/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
/* Service the RX return ring within the NAPI budget.  Large frames get
 * a fresh buffer posted and the old one is turned into an skb with
 * build_skb(); small frames are copied into a new skb and the original
 * buffer is recycled back to the producer ring.  Returns the number of
 * packets delivered to the stack.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;

		/* The opaque cookie identifies the producer ring and slot
		 * the buffer originally came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			/* Post a replacement buffer first; only then may
			 * the old buffer leave the ring.
			 */
			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data);
			if (!skb) {
				kfree(data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			/* Small frame: recycle the DMA buffer and copy the
			 * payload into a freshly-allocated skb.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames unless VLAN tagged. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically flush the std producer index so the chip
		 * never starves for buffers mid-loop.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, napi[1] is responsible for the refill transfer
		 * (see tg3_poll_work); kick it if we are not napi[1].
		 */
		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}

/* Check the status block for a PHY/link event and run tg3_setup_phy()
 * under tp->lock when one is pending.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}

/* Transfer recycled RX buffers from a source producer-ring set (spr) to
 * the destination set (dpr), copying both the ring_info bookkeeping and
 * the descriptor DMA addresses.  Returns 0 or -ENOSPC when a destination
 * slot is still occupied.
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Number of contiguous entries available before the source
		 * index wraps.
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Stop at the first destination slot still holding a buffer. */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Same transfer loop for the jumbo producer ring. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}

/* Common NAPI work: TX reclaim, then RX within the remaining budget,
 * then (RSS, napi[1] only) gather recycled buffers from all other
 * vectors back into napi[0]'s producer rings.  Returns updated
 * work_done.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A transfer failed (destination full); nudge coalescing
		 * so we get another interrupt and can retry.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}

/* Schedule the chip-reset workqueue item exactly once. */
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

/* Cancel any pending reset work and clear the associated flags. */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}

/* NAPI poll handler for MSI-X vectors (tagged status, no link polling). */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}

/* Inspect hardware error registers; dump state and schedule a chip reset
 * on a real error.  Runs at most once until ERROR_PROCESSED is cleared.
 */
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}

/* NAPI poll handler for the default (INTx/MSI) vector: also handles
 * chip errors and link events.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}

/* Disable NAPI on all vectors, in reverse order of enabling. */
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

/* Register NAPI contexts: vector 0 uses tg3_poll, the rest tg3_poll_msix. */
static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

/* Set irq_sync and wait for all in-flight interrupt handlers to finish. */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}

/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}

/* Shared/INTx interrupt handler. */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}

/* Shared/INTx interrupt handler for chips using tagged status blocks. */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}

/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry: drive every vector's ISR by hand. */
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif

/* TX watchdog callback: log state and schedule a chip reset. */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}

/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	/* NOTE(review): the 0xffffdcc0 threshold and the +8 slack are
	 * chip-workaround magic carried over from the original driver;
	 * verify against Broadcom errata before touching.
	 */
	return (base > 0xffffdcc0) && (base + len + 8 < base);
}

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}

/* Fill in one TX buffer descriptor. */
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}

/* Queue one mapped buffer as one or more TX BDs, splitting at
 * tp->dma_limit and avoiding the <=8 byte DMA problem.  Advances
 * *entry and decrements *budget per BD used.  Returns true when a
 * hardware-bug workaround (tigon3_dma_hwbug_workaround) is required.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32
			    vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Out of BDs: undo the fragmented mark on the
				 * last split piece and signal the workaround.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}

/* Unmap the head and frags 0..last of the skb posted at 'entry',
 * following any extra BDs marked 'fragmented' by tg3_tx_frag_set.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}

/* Workaround 4GB and 40-bit hardware DMA bugs.
 * Linearize the skb into a freshly-mapped copy and post it as a single
 * BD.  Returns 0 on success, -1 on failure; *pskb always points at the
 * skb the caller now owns (the original is freed either way).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701 needs 4-byte aligned start of packet. */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}

static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * checking tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Software-segment the skb and transmit each piece individually. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		/* TSO path: prepare IP/TCP headers and encode the header
		 * length into mss/base_flags per chip generation.
		 */
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * checking tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
*/ smp_mb(); if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) netif_tx_wake_queue(txq); } mmiowb(); return NETDEV_TX_OK; dma_error: tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; drop: dev_kfree_skb(skb); drop_nofree: tp->tx_dropped++; return NETDEV_TX_OK; } static void tg3_mac_loopback(struct tg3 *tp, bool enable) { if (enable) { tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX | MAC_MODE_PORT_MODE_MASK); tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; if (!tg3_flag(tp, 5705_PLUS)) tp->mac_mode |= MAC_MODE_LINK_POLARITY; if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) tp->mac_mode |= MAC_MODE_PORT_MODE_MII; else tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; } else { tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; if (tg3_flag(tp, 5705_PLUS) || (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; } tw32(MAC_MODE, tp->mac_mode); udelay(40); } static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) { u32 val, bmcr, mac_mode, ptest = 0; tg3_phy_toggle_apd(tp, false); tg3_phy_toggle_automdix(tp, 0); if (extlpbk && tg3_phy_set_extloopbk(tp)) return -EIO; bmcr = BMCR_FULLDPLX; switch (speed) { case SPEED_10: break; case SPEED_100: bmcr |= BMCR_SPEED100; break; case SPEED_1000: default: if (tp->phy_flags & TG3_PHYFLG_IS_FET) { speed = SPEED_100; bmcr |= BMCR_SPEED100; } else { speed = SPEED_1000; bmcr |= BMCR_SPEED1000; } } if (extlpbk) { if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { tg3_readphy(tp, MII_CTRL1000, &val); val |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; tg3_writephy(tp, MII_CTRL1000, val); } else { ptest = MII_TG3_FET_PTEST_TRIM_SEL | MII_TG3_FET_PTEST_TRIM_2; tg3_writephy(tp, MII_TG3_FET_PTEST, ptest); } } else bmcr |= BMCR_LOOPBACK; tg3_writephy(tp, MII_BMCR, bmcr); /* The write needs to be flushed for the FETs */ if (tp->phy_flags & TG3_PHYFLG_IS_FET) tg3_readphy(tp, MII_BMCR, &bmcr); udelay(40); if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && 
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { tg3_writephy(tp, MII_TG3_FET_PTEST, ptest | MII_TG3_FET_PTEST_FRC_TX_LINK | MII_TG3_FET_PTEST_FRC_TX_LOCK); /* The write needs to be flushed for the AC131 */ tg3_readphy(tp, MII_TG3_FET_PTEST, &val); } /* Reset to prevent losing 1st rx packet intermittently */ if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && tg3_flag(tp, 5780_CLASS)) { tw32_f(MAC_RX_MODE, RX_MODE_RESET); udelay(10); tw32_f(MAC_RX_MODE, tp->rx_mode); } mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); if (speed == SPEED_1000) mac_mode |= MAC_MODE_PORT_MODE_GMII; else mac_mode |= MAC_MODE_PORT_MODE_MII; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; if (masked_phy_id == TG3_PHY_ID_BCM5401) mac_mode &= ~MAC_MODE_LINK_POLARITY; else if (masked_phy_id == TG3_PHY_ID_BCM5411) mac_mode |= MAC_MODE_LINK_POLARITY; tg3_writephy(tp, MII_TG3_EXT_CTRL, MII_TG3_EXT_CTRL_LNK3_LED_MODE); } tw32(MAC_MODE, mac_mode); udelay(40); return 0; } static void tg3_set_loopback(struct net_device *dev, netdev_features_t features) { struct tg3 *tp = netdev_priv(dev); if (features & NETIF_F_LOOPBACK) { if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) return; spin_lock_bh(&tp->lock); tg3_mac_loopback(tp, true); netif_carrier_on(tp->dev); spin_unlock_bh(&tp->lock); netdev_info(dev, "Internal MAC loopback mode enabled.\n"); } else { if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) return; spin_lock_bh(&tp->lock); tg3_mac_loopback(tp, false); /* Force link status check */ tg3_setup_phy(tp, 1); spin_unlock_bh(&tp->lock); netdev_info(dev, "Internal MAC loopback mode disabled.\n"); } } static netdev_features_t tg3_fix_features(struct net_device *dev, netdev_features_t features) { struct tg3 *tp = netdev_priv(dev); if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) features &= ~NETIF_F_ALL_TSO; return features; } static int tg3_set_features(struct net_device *dev, netdev_features_t features) { 
	netdev_features_t changed = dev->features ^ features;

	/* Only the loopback feature needs immediate action, and only
	 * while the interface is running; other feature bits are
	 * consumed elsewhere.
	 */
	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}

/* Release every rx buffer currently posted in a producer ring set.
 * For a secondary (non-napi[0]) ring set only the live window between
 * the consumer and producer indices holds buffers; for the primary
 * set every slot may be populated, so the whole ring is walked.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		/* Secondary set: free only [cons, prod), wrapping with
		 * the ring mask.
		 */
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	/* Primary set: any slot may hold a buffer, free them all. */
	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}

/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Secondary ring sets share the primary set's descriptor rings;
	 * only their host-side buffer bookkeeping needs clearing.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class chips use the std ring for jumbo MTUs, so size the
	 * std-ring DMA buffers accordingly.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.
This works because the card does not * write into the rx buffer posting rings. */ for (i = 0; i <= tp->rx_std_ring_mask; i++) { struct tg3_rx_buffer_desc *rxd; rxd = &tpr->rx_std[i]; rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); rxd->opaque = (RXD_OPAQUE_RING_STD | (i << RXD_OPAQUE_INDEX_SHIFT)); } /* Now allocate fresh SKBs for each rx ring. */ for (i = 0; i < tp->rx_pending; i++) { if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { netdev_warn(tp->dev, "Using a smaller RX standard ring. Only " "%d out of %d buffers were allocated " "successfully\n", i, tp->rx_pending); if (i == 0) goto initfail; tp->rx_pending = i; break; } } if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) goto done; memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); if (!tg3_flag(tp, JUMBO_RING_ENABLE)) goto done; for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { struct tg3_rx_buffer_desc *rxd; rxd = &tpr->rx_jmb[i].std; rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | RXD_FLAG_JUMBO; rxd->opaque = (RXD_OPAQUE_RING_JUMBO | (i << RXD_OPAQUE_INDEX_SHIFT)); } for (i = 0; i < tp->rx_jumbo_pending; i++) { if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) { netdev_warn(tp->dev, "Using a smaller RX jumbo ring. 
Only %d " "out of %d buffers were allocated " "successfully\n", i, tp->rx_jumbo_pending); if (i == 0) goto initfail; tp->rx_jumbo_pending = i; break; } } done: return 0; initfail: tg3_rx_prodring_free(tp, tpr); return -ENOMEM; } static void tg3_rx_prodring_fini(struct tg3 *tp, struct tg3_rx_prodring_set *tpr) { kfree(tpr->rx_std_buffers); tpr->rx_std_buffers = NULL; kfree(tpr->rx_jmb_buffers); tpr->rx_jmb_buffers = NULL; if (tpr->rx_std) { dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), tpr->rx_std, tpr->rx_std_mapping); tpr->rx_std = NULL; } if (tpr->rx_jmb) { dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), tpr->rx_jmb, tpr->rx_jmb_mapping); tpr->rx_jmb = NULL; } } static int tg3_rx_prodring_init(struct tg3 *tp, struct tg3_rx_prodring_set *tpr) { tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp), GFP_KERNEL); if (!tpr->rx_std_buffers) return -ENOMEM; tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), &tpr->rx_std_mapping, GFP_KERNEL); if (!tpr->rx_std) goto err_out; if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), GFP_KERNEL); if (!tpr->rx_jmb_buffers) goto err_out; tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), &tpr->rx_jmb_mapping, GFP_KERNEL); if (!tpr->rx_jmb) goto err_out; } return 0; err_out: tg3_rx_prodring_fini(tp, tpr); return -ENOMEM; } /* Free up pending packets in all rx/tx rings. * * The chip has been shut down and the driver detached from * the networking, so no interrupts or new tx packets will * end up in the driver. tp->{tx,}lock is not held and we are not * in an interrupt context and thus may sleep. 
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		/* Vectors without a tx ring (e.g. when TSS routes tx
		 * elsewhere) have no tx buffers to release.
		 */
		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			/* Unmap the head plus all fragment descriptors
			 * before dropping the skb.
			 */
			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		/* NOTE(review): the two field stores below are immediately
		 * subsumed by the memset of the whole status block --
		 * apparently redundant, left as-is.
		 */
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		/* On failure, release whatever earlier vectors managed
		 * to populate.
		 */
		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	/* Release per-vector DMA rings and bookkeeping.  Idempotent:
	 * each pointer is checked before freeing and NULLed after, so
	 * a partially-failed tg3_alloc_consistent() can hand us any
	 * intermediate state.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
				tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;

		if (tnapi->rx_rcb) {
			dma_free_coherent(&tp->pdev->dev,
					  TG3_RX_RCB_RING_BYTES(tp),
					  tnapi->rx_rcb,
					  tnapi->rx_rcb_mapping);
			tnapi->rx_rcb = NULL;
		}

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	/* Chip-visible statistics block shared by all vectors. */
	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		/* Per-vector hardware status block. */
		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector TSS is enabled, vector 0 does not handle
		 * tx interrupts.  Don't allocate any resources for it.
*/ if ((!i && !tg3_flag(tp, ENABLE_TSS)) || (i && tg3_flag(tp, ENABLE_TSS))) { tnapi->tx_buffers = kzalloc( sizeof(struct tg3_tx_ring_info) * TG3_TX_RING_SIZE, GFP_KERNEL); if (!tnapi->tx_buffers) goto err_out; tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES, &tnapi->tx_desc_mapping, GFP_KERNEL); if (!tnapi->tx_ring) goto err_out; } /* * When RSS is enabled, the status block format changes * slightly. The "rx_jumbo_consumer", "reserved", * and "rx_mini_consumer" members get mapped to the * other three rx return ring producer indexes. */ switch (i) { default: if (tg3_flag(tp, ENABLE_RSS)) { tnapi->rx_rcb_prod_idx = NULL; break; } /* Fall through */ case 1: tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; break; case 2: tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer; break; case 3: tnapi->rx_rcb_prod_idx = &sblk->reserved; break; case 4: tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer; break; } /* * If multivector RSS is enabled, vector 0 does not handle * rx or tx interrupts. Don't allocate any resources for it. */ if (!i && tg3_flag(tp, ENABLE_RSS)) continue; tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, TG3_RX_RCB_RING_BYTES(tp), &tnapi->rx_rcb_mapping, GFP_KERNEL); if (!tnapi->rx_rcb) goto err_out; memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); } return 0; err_out: tg3_free_consistent(tp); return -ENOMEM; } #define MAX_WAIT_CNT 1000 /* To stop a block, clear the enable bit and poll till it * clears. tp->lock is held. */ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent) { unsigned int i; u32 val; if (tg3_flag(tp, 5705_PLUS)) { switch (ofs) { case RCVLSC_MODE: case DMAC_MODE: case MBFREE_MODE: case BUFMGR_MODE: case MEMARB_MODE: /* We can't enable/disable these bits of the * 5705/5750, just say success. 
*/ return 0; default: break; } } val = tr32(ofs); val &= ~enable_bit; tw32_f(ofs, val); for (i = 0; i < MAX_WAIT_CNT; i++) { udelay(100); val = tr32(ofs); if ((val & enable_bit) == 0) break; } if (i == MAX_WAIT_CNT && !silent) { dev_err(&tp->pdev->dev, "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", ofs, enable_bit); return -ENODEV; } return 0; } /* tp->lock is held. */ static int tg3_abort_hw(struct tg3 *tp, int silent) { int i, err; tg3_disable_ints(tp); tp->rx_mode &= ~RX_MODE_ENABLE; tw32_f(MAC_RX_MODE, tp->rx_mode); udelay(10); err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; tw32_f(MAC_MODE, tp->mac_mode); udelay(40); tp->tx_mode &= ~TX_MODE_ENABLE; tw32_f(MAC_TX_MODE, tp->tx_mode); for (i = 0; i < MAX_WAIT_CNT; i++) { udelay(100); if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) break; } if (i >= MAX_WAIT_CNT) { dev_err(&tp->pdev->dev, "%s timed out, TX_MODE_ENABLE will not clear " "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); err |= -ENODEV; } err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); err |= 
tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); tw32(FTQ_RESET, 0xffffffff); tw32(FTQ_RESET, 0x00000000); err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); for (i = 0; i < tp->irq_cnt; i++) { struct tg3_napi *tnapi = &tp->napi[i]; if (tnapi->hw_status) memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); } return err; } /* Save PCI command register before chip reset */ static void tg3_save_pci_state(struct tg3 *tp) { pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd); } /* Restore PCI state after chip reset */ static void tg3_restore_pci_state(struct tg3 *tp) { u32 val; /* Re-enable indirect register accesses. */ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); /* Set MAX PCI retry to zero. */ val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && tg3_flag(tp, PCIX_MODE)) val |= PCISTATE_RETRY_SAME_DMA; /* Allow reads and writes to the APE register and memory space. */ if (tg3_flag(tp, ENABLE_APE)) val |= PCISTATE_ALLOW_APE_CTLSPC_WR | PCISTATE_ALLOW_APE_SHMEM_WR | PCISTATE_ALLOW_APE_PSPACE_WR; pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); if (!tg3_flag(tp, PCI_EXPRESS)) { pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, tp->pci_cacheline_sz); pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, tp->pci_lat_timer); } /* Make sure PCI-X relaxed ordering bit is clear. */ if (tg3_flag(tp, PCIX_MODE)) { u16 pcix_cmd; pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, &pcix_cmd); pcix_cmd &= ~PCI_X_CMD_ERO; pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, pcix_cmd); } if (tg3_flag(tp, 5780_CLASS)) { /* Chip reset on 5780 will reset MSI enable bit, * so need to restore it. 
*/ if (tg3_flag(tp, USING_MSI)) { u16 ctrl; pci_read_config_word(tp->pdev, tp->msi_cap + PCI_MSI_FLAGS, &ctrl); pci_write_config_word(tp->pdev, tp->msi_cap + PCI_MSI_FLAGS, ctrl | PCI_MSI_FLAGS_ENABLE); val = tr32(MSGINT_MODE); tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); } } } /* tp->lock is held. */ static int tg3_chip_reset(struct tg3 *tp) { u32 val; void (*write_op)(struct tg3 *, u32, u32); int i, err; tg3_nvram_lock(tp); tg3_ape_lock(tp, TG3_APE_LOCK_GRC); /* No matching tg3_nvram_unlock() after this because * chip reset below will undo the nvram lock. */ tp->nvram_lock_cnt = 0; /* GRC_MISC_CFG core clock reset will clear the memory * enable bit in PCI register 4 and the MSI enable bit * on some chips, so we save relevant registers here. */ tg3_save_pci_state(tp); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || tg3_flag(tp, 5755_PLUS)) tw32(GRC_FASTBOOT_PC, 0); /* * We must avoid the readl() that normally takes place. * It locks machines, causes machine checks, and other * fun things. So, temporarily disable the 5701 * hardware workaround, while we do the reset. */ write_op = tp->write32; if (write_op == tg3_write_flush_reg32) tp->write32 = tg3_write32; /* Prevent the irq handler from reading or writing PCI registers * during chip reset when the memory enable bit in the PCI command * register may be cleared. The chip does not generate interrupt * at this time, but the irq handler may still be called due to irq * sharing or irqpoll. 
*/ tg3_flag_set(tp, CHIP_RESETTING); for (i = 0; i < tp->irq_cnt; i++) { struct tg3_napi *tnapi = &tp->napi[i]; if (tnapi->hw_status) { tnapi->hw_status->status = 0; tnapi->hw_status->status_tag = 0; } tnapi->last_tag = 0; tnapi->last_irq_tag = 0; } smp_mb(); for (i = 0; i < tp->irq_cnt; i++) synchronize_irq(tp->napi[i].irq_vec); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); } /* do the reset */ val = GRC_MISC_CFG_CORECLK_RESET; if (tg3_flag(tp, PCI_EXPRESS)) { /* Force PCIe 1.0a mode */ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && !tg3_flag(tp, 57765_PLUS) && tr32(TG3_PCIE_PHY_TSTCTL) == (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { tw32(GRC_MISC_CFG, (1 << 29)); val |= (1 << 29); } } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); tw32(GRC_VCPU_EXT_CTRL, tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); } /* Manage gphy power for all CPMU absent PCIe devices. */ if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT)) val |= GRC_MISC_CFG_KEEP_GPHY_POWER; tw32(GRC_MISC_CFG, val); /* restore 5701 hardware bug workaround write method */ tp->write32 = write_op; /* Unfortunately, we have to delay before the PCI read back. * Some 575X chips even will not respond to a PCI cfg access * when the reset command is given to the chip. * * How do these hardware designers expect things to work * properly if the PCI write is posted for a long period * of time? It is always necessary to have some method by * which a register read back can occur to push the write * out which does the reset. * * For most tg3 variants the trick below was working. * Ho hum... */ udelay(120); /* Flush PCI posted writes. 
The normal MMIO registers * are inaccessible at this time so this is the only * way to make this reliably (actually, this is no longer * the case, see above). I tried to use indirect * register read/write but this upset some 5701 variants. */ pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); udelay(120); if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) { u16 val16; if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { int i; u32 cfg_val; /* Wait for link training to complete. */ for (i = 0; i < 5000; i++) udelay(100); pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); pci_write_config_dword(tp->pdev, 0xc4, cfg_val | (1 << 15)); } /* Clear the "no snoop" and "relaxed ordering" bits. */ pci_read_config_word(tp->pdev, pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL, &val16); val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN); /* * Older PCIe devices only support the 128 byte * MPS setting. Enforce the restriction. */ if (!tg3_flag(tp, CPMU_PRESENT)) val16 &= ~PCI_EXP_DEVCTL_PAYLOAD; pci_write_config_word(tp->pdev, pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL, val16); /* Clear error status */ pci_write_config_word(tp->pdev, pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_CED | PCI_EXP_DEVSTA_NFED | PCI_EXP_DEVSTA_FED | PCI_EXP_DEVSTA_URD); } tg3_restore_pci_state(tp); tg3_flag_clear(tp, CHIP_RESETTING); tg3_flag_clear(tp, ERROR_PROCESSED); val = 0; if (tg3_flag(tp, 5780_CLASS)) val = tr32(MEMARB_MODE); tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) { tg3_stop_fw(tp); tw32(0x5000, 0x400); } tw32(GRC_MODE, tp->grc_mode); if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) { val = tr32(0xc4); tw32(0xc4, val | (1 << 15)); } if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; tw32(TG3PCI_CLOCK_CTRL, 
tp->pci_clock_ctrl); } if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { tp->mac_mode = MAC_MODE_PORT_MODE_TBI; val = tp->mac_mode; } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { tp->mac_mode = MAC_MODE_PORT_MODE_GMII; val = tp->mac_mode; } else val = 0; tw32_f(MAC_MODE, val); udelay(40); tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); err = tg3_poll_fw(tp); if (err) return err; tg3_mdio_start(tp); if (tg3_flag(tp, PCI_EXPRESS) && tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && !tg3_flag(tp, 57765_PLUS)) { val = tr32(0x7c00); tw32(0x7c00, val | (1 << 25)); } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { val = tr32(TG3_CPMU_CLCK_ORIDE); tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); } /* Reprobe ASF enable state. */ tg3_flag_clear(tp, ENABLE_ASF); tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); if (val == NIC_SRAM_DATA_SIG_MAGIC) { u32 nic_cfg; tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { tg3_flag_set(tp, ENABLE_ASF); tp->last_event_jiffies = jiffies; if (tg3_flag(tp, 5750_PLUS)) tg3_flag_set(tp, ASF_NEW_HANDSHAKE); } } return 0; } static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *); static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *); /* tp->lock is held. */ static int tg3_halt(struct tg3 *tp, int kind, int silent) { int err; tg3_stop_fw(tp); tg3_write_sig_pre_reset(tp, kind); tg3_abort_hw(tp, silent); err = tg3_chip_reset(tp); __tg3_set_mac_addr(tp, 0); tg3_write_sig_legacy(tp, kind); tg3_write_sig_post_reset(tp, kind); if (tp->hw_stats) { /* Save the stats across chip resets... 
 */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	if (err)
		return err;

	return 0;
}

/* ndo_set_mac_address hook: validate and install a new MAC address.
 * The address is copied into the netdev unconditionally; the MAC
 * registers are only reprogrammed while the interface is running.
 * Returns -EADDRNOTAVAIL for an invalid ethernet address, 0 otherwise.
 */
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	/* err is always 0 here; kept for symmetry with other ndo hooks. */
	return err;
}

/* tp->lock is held.
*/ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, dma_addr_t mapping, u32 maxlen_flags, u32 nic_addr) { tg3_write_mem(tp, (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), ((u64) mapping >> 32)); tg3_write_mem(tp, (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), ((u64) mapping & 0xffffffff)); tg3_write_mem(tp, (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), maxlen_flags); if (!tg3_flag(tp, 5705_PLUS)) tg3_write_mem(tp, (bdinfo_addr + TG3_BDINFO_NIC_ADDR), nic_addr); } static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) { int i; if (!tg3_flag(tp, ENABLE_TSS)) { tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); } else { tw32(HOSTCC_TXCOL_TICKS, 0); tw32(HOSTCC_TXMAX_FRAMES, 0); tw32(HOSTCC_TXCOAL_MAXF_INT, 0); } if (!tg3_flag(tp, ENABLE_RSS)) { tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); } else { tw32(HOSTCC_RXCOL_TICKS, 0); tw32(HOSTCC_RXMAX_FRAMES, 0); tw32(HOSTCC_RXCOAL_MAXF_INT, 0); } if (!tg3_flag(tp, 5705_PLUS)) { u32 val = ec->stats_block_coalesce_usecs; tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); if (!netif_carrier_ok(tp->dev)) val = 0; tw32(HOSTCC_STAT_COAL_TICKS, val); } for (i = 0; i < tp->irq_cnt - 1; i++) { u32 reg; reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; tw32(reg, ec->rx_coalesce_usecs); reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; tw32(reg, ec->rx_max_coalesced_frames); reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; tw32(reg, ec->rx_max_coalesced_frames_irq); if (tg3_flag(tp, ENABLE_TSS)) { reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; tw32(reg, ec->tx_coalesce_usecs); reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; tw32(reg, ec->tx_max_coalesced_frames); reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; 
tw32(reg, ec->tx_max_coalesced_frames_irq); } } for (; i < tp->irq_max - 1; i++) { tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); if (tg3_flag(tp, ENABLE_TSS)) { tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); } } } /* tp->lock is held. */ static void tg3_rings_reset(struct tg3 *tp) { int i; u32 stblk, txrcb, rxrcb, limit; struct tg3_napi *tnapi = &tp->napi[0]; /* Disable all transmit rings but the first. */ if (!tg3_flag(tp, 5705_PLUS)) limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; else if (tg3_flag(tp, 5717_PLUS)) limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; else if (tg3_flag(tp, 57765_CLASS)) limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; else limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; txrcb < limit; txrcb += TG3_BDINFO_SIZE) tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED); /* Disable all receive return rings but the first. */ if (tg3_flag(tp, 5717_PLUS)) limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; else if (!tg3_flag(tp, 5705_PLUS)) limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || tg3_flag(tp, 57765_CLASS)) limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; else limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED); /* Disable interrupts */ tw32_mailbox_f(tp->napi[0].int_mbox, 1); tp->napi[0].chk_msi_cnt = 0; tp->napi[0].last_rx_cons = 0; tp->napi[0].last_tx_cons = 0; /* Zero mailbox registers. 
*/ if (tg3_flag(tp, SUPPORT_MSIX)) { for (i = 1; i < tp->irq_max; i++) { tp->napi[i].tx_prod = 0; tp->napi[i].tx_cons = 0; if (tg3_flag(tp, ENABLE_TSS)) tw32_mailbox(tp->napi[i].prodmbox, 0); tw32_rx_mbox(tp->napi[i].consmbox, 0); tw32_mailbox_f(tp->napi[i].int_mbox, 1); tp->napi[i].chk_msi_cnt = 0; tp->napi[i].last_rx_cons = 0; tp->napi[i].last_tx_cons = 0; } if (!tg3_flag(tp, ENABLE_TSS)) tw32_mailbox(tp->napi[0].prodmbox, 0); } else { tp->napi[0].tx_prod = 0; tp->napi[0].tx_cons = 0; tw32_mailbox(tp->napi[0].prodmbox, 0); tw32_rx_mbox(tp->napi[0].consmbox, 0); } /* Make sure the NIC-based send BD rings are disabled. */ if (!tg3_flag(tp, 5705_PLUS)) { u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; for (i = 0; i < 16; i++) tw32_tx_mbox(mbox + i * 8, 0); } txrcb = NIC_SRAM_SEND_RCB; rxrcb = NIC_SRAM_RCV_RET_RCB; /* Clear status block in ram. */ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); /* Set status block DMA address */ tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, ((u64) tnapi->status_mapping >> 32)); tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, ((u64) tnapi->status_mapping & 0xffffffff)); if (tnapi->tx_ring) { tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT), NIC_SRAM_TX_BUFFER_DESC); txrcb += TG3_BDINFO_SIZE; } if (tnapi->rx_rcb) { tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, (tp->rx_ret_ring_mask + 1) << BDINFO_FLAGS_MAXLEN_SHIFT, 0); rxrcb += TG3_BDINFO_SIZE; } stblk = HOSTCC_STATBLCK_RING1; for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { u64 mapping = (u64)tnapi->status_mapping; tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); /* Clear status block in ram. 
 */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}

/* Program the rx buffer-descriptor replenish thresholds, scaling them
 * to the per-chip on-chip BD cache size and the configured ring depth.
 */
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	/* Select the std-ring BD cache size for this ASIC family. */
	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	/* Threshold is the tighter of the NIC-side cache limit and a
	 * host-side 1/8-of-ring heuristic (never below 1).
	 */
	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	/* No separate jumbo ring on non-jumbo or 5780-class chips. */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}

/* Bitwise CRC-32 over buf[0..len): reflected form, polynomial
 * 0xedb88320, initial value all-ones, result inverted.  Used to hash
 * multicast addresses for the MAC hash filter registers.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}

/* Program all four MAC hash filter registers to accept (all-ones) or
 * reject (all-zeros) every multicast frame.
 */
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ?
	     0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

/* Apply the net_device RX filtering flags (promiscuous, allmulti, or the
 * multicast address list) to the MAC hash registers and RX mode register.
 * NOTE(review): callers presumably hold tp->lock -- confirm at call sites
 * outside this view.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		/* Hash each address into one of the 128 filter bits:
		 * low 7 bits of the inverted CRC select a bit within
		 * four 32-bit hash registers.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch MAC_RX_MODE when the computed mode changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

/* Fill the RSS indirection table with the default round-robin spread
 * over the usable RX rings (vector 0 is excluded, hence irq_cnt - 1).
 */
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] =
			ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
}

/* Validate the RSS indirection table against the current IRQ count and
 * rebuild it from defaults if any entry points past the last usable ring.
 */
static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	/* With two vectors or fewer there is a single RX ring; zero the
	 * whole table.
	 */
	if (tp->irq_cnt <= 2) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp);
}

/* Pack the indirection table, eight 4-bit entries per 32-bit register,
 * into the MAC_RSS_INDIR_TBL_0.. register block.
 */
static void tg3_rss_write_indir_tbl(struct
				    tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}

/* Bring the chip from reset to a fully programmed, ready-to-run state:
 * chip reset, PCIe/CPMU/clock workarounds, buffer manager and ring setup,
 * DMA engines, coalescing, MAC/PHY configuration and receive rules.
 * Returns 0 or a negative errno from a failed sub-step.
 *
 * tp->lock is held.
 */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
		       TG3_CPMU_EEE_LNKIDL_UART_IDL);

		tw32_f(TG3_CPMU_EEE_CTRL,
		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
		      TG3_CPMU_EEEMD_LPI_IN_TX |
		      TG3_CPMU_EEEMD_LPI_IN_RX |
		      TG3_CPMU_EEEMD_EEE_ENABLE;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

		if (tg3_flag(tp, ENABLE_APE))
			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

		tw32_f(TG3_CPMU_EEE_MODE, val);

		tw32_f(TG3_CPMU_EEE_DBTMR1,
		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);

		tw32_f(TG3_CPMU_EEE_DBTMR2,
		       TG3_CPMU_DBTMR2_APE_TX_2047US |
		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
	}

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	/* 5784 AX: force the CPMU MAC clocks to 6.25MHz. */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	/* 57780: PCIe power management and L1 PLL workarounds. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	tw32(GRC_MODE,
	     tp->grc_mode | (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));

	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing.  */
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		/* TSO firmware shares SRAM with the mbuf pool; round its
		 * footprint up to a 128-byte boundary and carve it out.
		 */
		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	/* Poll (up to ~20ms) for the buffer manager to come up. */
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *
	 *   RCVDBDI_STD_BD:	standard eth size rx ring
	 *   RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *   RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *   TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *   TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *   TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR +
			     TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR +
			     TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS))
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, 0);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3_RDMA_RSRVCTRL_REG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(TG3_RDMA_RSRVCTRL_REG,
		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR |
	       MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		/* Limit PCI-X max read byte count on 5703/5704. */
		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tg3_flag(tp, ENABLE_RSS)) {
		tg3_rss_write_indir_tbl(tp);

		/* Setup the "secret" hash key. */
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
	}

	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V  */
			/* only if the signal pre-emphasis bit is not set  */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, 0);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	/* Intentional fallthrough: each case zeroes one rule register
	 * pair and falls into the next, disabling all remaining rules.
	 */
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
	case 15:
		tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
	case 14:
		tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
	case 13:
		tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
	case 12:
		tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
	case 11:
		tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
	case 10:
		tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
	case 9:
		tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
	case 8:
		tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
	case 7:
		tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
	case 6:
		tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
	case 5:
		tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
	case 4:
		/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
	case 2:
	case 1:
	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	/* Reset the memory window base before reprogramming the chip. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}

/* Accumulate a 32-bit hardware counter register into a 64-bit
 * (high/low pair) software counter, carrying on 32-bit wraparound.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)

/* Pull the MAC statistics registers into tp->hw_stats; no-op while the
 * carrier is down.  Called from the driver timer (see tg3_timer below).
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions,
		       MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions,
		       MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions,
		       MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd,
		       MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors,
		       MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* On these chips RX discards are derived from the mbuf
		 * low-water-mark attention bit instead of a counter
		 * register; clear the attention after counting it.
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}

/* Detect a missed MSI: if a vector still has work pending and its RX/TX
 * consumer indices have not moved since the last check, re-fire the MSI
 * handler by hand (after one grace period via chk_msi_cnt).
 */
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}

/* Driver watchdog timer: checks for missed MSIs, kicks the non-tagged
 * IRQ protocol, fetches stats, polls link state and sends the ASF
 * heartbeat, then re-arms itself.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		/* If the write DMA engine has died, schedule a full
		 * chip reset from process context.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the MAC port mode to re-sync the
				 * SERDES link before reconfiguring the PHY.
				 */
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
* * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware * to check the ring condition when the heartbeat is expiring * before doing the reset. This will prevent most unintended * resets. */ if (!--tp->asf_counter) { if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { tg3_wait_for_event_ack(tp); tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE3); tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, TG3_FW_UPDATE_TIMEOUT_SEC); tg3_generate_fw_event(tp); } tp->asf_counter = tp->asf_multiplier; } spin_unlock(&tp->lock); restart_timer: tp->timer.expires = jiffies + tp->timer_offset; add_timer(&tp->timer); } static void __devinit tg3_timer_init(struct tg3 *tp) { if (tg3_flag(tp, TAGGED_STATUS) && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && !tg3_flag(tp, 57765_CLASS)) tp->timer_offset = HZ; else tp->timer_offset = HZ / 10; BUG_ON(tp->timer_offset > HZ); tp->timer_multiplier = (HZ / tp->timer_offset); tp->asf_multiplier = (HZ / tp->timer_offset) * TG3_FW_UPDATE_FREQ_SEC; init_timer(&tp->timer); tp->timer.data = (unsigned long) tp; tp->timer.function = tg3_timer; } static void tg3_timer_start(struct tg3 *tp) { tp->asf_counter = tp->asf_multiplier; tp->timer_counter = tp->timer_multiplier; tp->timer.expires = jiffies + tp->timer_offset; add_timer(&tp->timer); } static void tg3_timer_stop(struct tg3 *tp) { del_timer_sync(&tp->timer); } /* Restart hardware after configuration changes, self-test, etc. * Invoked with tp->lock held. 
*/ static int tg3_restart_hw(struct tg3 *tp, int reset_phy) __releases(tp->lock) __acquires(tp->lock) { int err; err = tg3_init_hw(tp, reset_phy); if (err) { netdev_err(tp->dev, "Failed to re-initialize device, aborting\n"); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_full_unlock(tp); tg3_timer_stop(tp); tp->irq_sync = 0; tg3_napi_enable(tp); dev_close(tp->dev); tg3_full_lock(tp, 0); } return err; } static void tg3_reset_task(struct work_struct *work) { struct tg3 *tp = container_of(work, struct tg3, reset_task); int err; tg3_full_lock(tp, 0); if (!netif_running(tp->dev)) { tg3_flag_clear(tp, RESET_TASK_PENDING); tg3_full_unlock(tp); return; } tg3_full_unlock(tp); tg3_phy_stop(tp); tg3_netif_stop(tp); tg3_full_lock(tp, 1); if (tg3_flag(tp, TX_RECOVERY_PENDING)) { tp->write32_tx_mbox = tg3_write32_tx_mbox; tp->write32_rx_mbox = tg3_write_flush_reg32; tg3_flag_set(tp, MBOX_WRITE_REORDER); tg3_flag_clear(tp, TX_RECOVERY_PENDING); } tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); err = tg3_init_hw(tp, 1); if (err) goto out; tg3_netif_start(tp); out: tg3_full_unlock(tp); if (!err) tg3_phy_start(tp); tg3_flag_clear(tp, RESET_TASK_PENDING); } static int tg3_request_irq(struct tg3 *tp, int irq_num) { irq_handler_t fn; unsigned long flags; char *name; struct tg3_napi *tnapi = &tp->napi[irq_num]; if (tp->irq_cnt == 1) name = tp->dev->name; else { name = &tnapi->irq_lbl[0]; snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num); name[IFNAMSIZ-1] = 0; } if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { fn = tg3_msi; if (tg3_flag(tp, 1SHOT_MSI)) fn = tg3_msi_1shot; flags = 0; } else { fn = tg3_interrupt; if (tg3_flag(tp, TAGGED_STATUS)) fn = tg3_interrupt_tagged; flags = IRQF_SHARED; } return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); } static int tg3_test_interrupt(struct tg3 *tp) { struct tg3_napi *tnapi = &tp->napi[0]; struct net_device *dev = tp->dev; int err, i, intr_ok = 0; u32 val; if (!netif_running(dev)) return -ENODEV; tg3_disable_ints(tp); 
free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force a coalescing event so the chip raises an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	/* Poll up to ~50ms for evidence the interrupt was delivered. */
	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Drop the test ISR and restore the real handler. */
	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}

/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original SERR setting before acting on the result. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}

/* Load the firmware blob named by tp->fw_needed and sanity-check its
 * length header.  On success tp->fw holds the blob and fw_needed is
 * cleared; returns -ENOENT / -EINVAL on failure.
 */
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}

/* Try to bring up MSI-X with one vector per CPU (plus one for link/misc
 * when multiqueue).  Returns true on success, having populated the per-
 * vector irq_vec fields, set the real rx/tx queue counts, and enabled
 * RSS (and TSS on 5719/5720) when more than one vector was granted.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[tp->irq_max];

	tp->irq_cnt = num_online_cpus();
	if (tp->irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
	}

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Fewer vectors available than requested; retry with the
		 * count the PCI core said it can grant. */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}

/* Pick the interrupt delivery mode for this open: MSI-X if supported and
 * successfully enabled, else MSI, else legacy INTx.  Falls through to the
 * single-vector INTx configuration at defcfg when MSI-X is not in use.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		/* Single-vector fallback: one IRQ, one rx and one tx queue. */
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}

/* Undo tg3_ints_init: release MSI/MSI-X vectors and clear the related
 * capability flags.
 */
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}

/* ndo_open: bring the interface up.  Loads firmware if still needed,
 * powers up the chip, configures interrupts, allocates rings/NAPI,
 * initializes the hardware, validates MSI delivery, and starts the
 * housekeeping timer and tx queues.  Error paths unwind in reverse order.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			/* 5701 A0 cannot run without its firmware. */
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
*/
	err = tg3_alloc_consistent(tp);

	if (err)
		goto err_out1;

	tg3_napi_init(tp);
	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Unwind the IRQs already requested. */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (tg3_flag(tp, USING_MSI)) {
		/* Confirm MSI delivery actually works on this platform;
		 * tg3_test_msi() falls back to INTx internally on -EIO. */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	tg3_frob_aux_power(tp, false);
	pci_set_power_state(tp->pdev, PCI_D3hot);

	return err;
}

/* ndo_stop: tear down everything tg3_open set up, in reverse order, then
 * power the chip down.  Previous-session stats snapshots are cleared so
 * counters restart from zero across a close/open cycle.
 */
static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	tg3_reset_task_cancel(tp);

	netif_tx_stop_all_queues(dev);

	tg3_timer_stop(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}

/* Combine the split high/low halves of a hardware stat into one u64. */
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}

/* Return the running CRC error count.  On 5700/5701 copper the count is
 * read from the PHY test register (enabling the CRC counter as a side
 * effect) and accumulated in tp->phy_crc_errors; other chips report it
 * straight from the hardware stats block.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}

/* estats->X = previous-session snapshot + current hardware counter. */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)

/* Fill the ethtool stats structure: each field is the pre-reset snapshot
 * (estats_prev) plus the live hardware counter, so values are monotonic
 * across chip resets.
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}

/* Fill the standard netdev stats from the hardware stats block, adding
 * the previous-session snapshot (net_stats_prev) so counters stay
 * monotonic across resets.  Several rtnl fields are sums of multiple
 * hardware counters.
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);

	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);

	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);

	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);

	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);

	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);

	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}

/* ethtool get_regs_len: size of the register dump blob. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}

/* ethtool get_regs: dump the legacy register block, zero-filled first so
 * unreadable ranges come back as zero.  Skipped entirely in low-power
 * state (registers are not accessible).
 */
static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}

/* ethtool get_eeprom_len: NVRAM size in bytes. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}

/* ethtool get_eeprom: read an arbitrary byte range out of NVRAM.  The
 * hardware reads whole big-endian 32-bit words, so an unaligned head,
 * the aligned middle, and an unaligned tail are each handled separately.
 * eeprom->len is advanced as bytes are produced so a partial read on
 * error reports how much was copied.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}

/* ethtool set_eeprom: write a byte range into NVRAM.  Because NVRAM is
 * written in aligned 4-byte words, unaligned head/tail bytes are merged
 * with the existing NVRAM contents (read-modify-write) via a bounce
 * buffer before the single block write.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}

/* ethtool get_settings: report link parameters.  Delegates to phylib
 * when in use; otherwise builds the supported/advertised masks from the
 * PHY capability flags and reports live speed/duplex only when the
 * carrier is up.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		/* No link: speed/duplex/MDI state are unknown. */
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

/* ethtool set_settings: validate and apply link parameters, then re-run
 * PHY setup if the interface is up.  Delegates to phylib when in use.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return
phy_ethtool_sset(phydev, cmd); } if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE) return -EINVAL; if (cmd->autoneg == AUTONEG_DISABLE && cmd->duplex != DUPLEX_FULL && cmd->duplex != DUPLEX_HALF) return -EINVAL; if (cmd->autoneg == AUTONEG_ENABLE) { u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause | ADVERTISED_Asym_Pause; if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) mask |= ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full; if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) mask |= ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_TP; else mask |= ADVERTISED_FIBRE; if (cmd->advertising & ~mask) return -EINVAL; mask &= (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full); cmd->advertising &= mask; } else { if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { if (speed != SPEED_1000) return -EINVAL; if (cmd->duplex != DUPLEX_FULL) return -EINVAL; } else { if (speed != SPEED_100 && speed != SPEED_10) return -EINVAL; } } tg3_full_lock(tp, 0); tp->link_config.autoneg = cmd->autoneg; if (cmd->autoneg == AUTONEG_ENABLE) { tp->link_config.advertising = (cmd->advertising | ADVERTISED_Autoneg); tp->link_config.speed = SPEED_UNKNOWN; tp->link_config.duplex = DUPLEX_UNKNOWN; } else { tp->link_config.advertising = 0; tp->link_config.speed = speed; tp->link_config.duplex = cmd->duplex; } if (netif_running(dev)) tg3_setup_phy(tp, 1); tg3_full_unlock(tp); return 0; } static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct tg3 *tp = netdev_priv(dev); strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version)); strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info)); } static void tg3_get_wol(struct net_device *dev, 
			struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Only magic-packet wake is supported, and only when both the
	 * NIC (WOL_CAP) and the PCI device can wake the system.
	 */
	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

/* ethtool .set_wol hook: enable/disable magic-packet wake.
 * Returns -EINVAL for any wake option other than WAKE_MAGIC.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	/* Mirror the device-core wakeup state into the driver flag. */
	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}

/* ethtool .get_msglevel hook. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

/* ethtool .set_msglevel hook. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}

/* ethtool .nway_reset hook: restart autonegotiation. */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice here; the first read's
		 * result is discarded and immediately overwritten by the
		 * checked read below — possibly a deliberate dummy read,
		 * but looks redundant. Verify against upstream tg3.c.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}

/* ethtool .get_ringparam hook: report ring size limits and settings. */
static void tg3_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}

/* ethtool .set_ringparam hook: resize rings; requires a halt/restart
 * cycle when the interface is up.
 */
static int tg3_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	/* tx must stay large enough to hold a maximally-fragmented skb. */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}

/* ethtool .get_pauseparam hook: report flow-control configuration. */
static void tg3_get_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}

/* ethtool .set_pauseparam hook: apply flow-control settings, via
 * phylib when the PHY is phylib-managed, otherwise directly.
 */
static int tg3_set_pauseparam(struct net_device *dev,
			      struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Translate the requested rx/tx pause combination into
		 * flow-control flags plus Pause/Asym_Pause advertisement.
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not connected yet: just record the desired
			 * advertisement for later.
			 */
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		/* Non-phylib path: apply under the full lock and restart
		 * the hardware if the interface is running.
		 */
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}

/* ethtool .get_sset_count hook: number of self-test / stats strings. */
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

/* ethtool .get_rxnfc hook: currently only reports the rx ring count. */
static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->irq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_IRQ_MAX_VECS_RSS)
				info->data = TG3_IRQ_MAX_VECS_RSS;
		}

		/* The first interrupt vector only
		 * handles link interrupts.
		 */
		info->data -= 1;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}

/* ethtool .get_rxfh_indir_size hook: RSS indirection table size. */
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
	u32 size = 0;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, SUPPORT_MSIX))
		size = TG3_RSS_INDIR_TBL_SIZE;

	return size;
}

/* ethtool .get_rxfh_indir hook: copy out the RSS indirection table. */
static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}

/* ethtool .set_rxfh_indir hook: install a new RSS indirection table. */
static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	size_t i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}

/* ethtool .get_strings hook: copy out self-test or statistics names. */
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}

/* ethtool .set_phys_id hook: blink the port LEDs for identification. */
static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the normal LED behavior. */
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}

/* ethtool .get_ethtool_stats hook: copy hardware statistics out. */
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if
 (tp->hw_stats)
		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
	else
		/* No hardware stats block mapped yet: report zeros. */
		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}

/* Read the VPD block out of NVRAM (or via PCI config space when the
 * NVRAM copy is not available).  Returns a kmalloc'd buffer (caller
 * frees) with its length in *vpdlen, or NULL on failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Walk the NVRAM directory looking for an extended VPD
		 * entry that overrides the default VPD location.
		 */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		/* Fall back to the PCI VPD capability; retry a bounded
		 * number of times on timeout/interrupt.
		 */
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos, len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}

#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* ethtool self-test: verify NVRAM checksums.  Handles the legacy
 * EEPROM format, both self-boot formats (firmware and hardware), and
 * finally the VPD checksum.  Returns 0 on success, negative errno on
 * failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how much NVRAM to checksum from the magic/revision. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			/* Each data byte must have odd parity overall. */
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally verify the VPD read-only section checksum. */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}

#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

/* ethtool self-test: wait for link-up, bounded by a media-dependent
 * timeout (seconds).  Returns 0 when carrier comes up, -EIO otherwise.
 */
static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}

/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers.
		 */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Table terminator. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this ASIC family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}

/* Write each test pattern to every word of the given internal-memory
 * window and read it back.  Returns 0 on success, -EIO on mismatch.
 */
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}

/* ethtool self-test: run the pattern test over the memory map that
 * matches this chip generation.  Each table ends with 0xffffffff.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{
 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Pick the memory map for this chip generation. */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS))
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}

#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

/* Canned Ethernet-type + IPv4 + TCP (with options) header used as the
 * template frame for the TSO loopback test.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};

/* Send one test frame (optionally a TSO super-frame) while the MAC or
 * PHY is in loopback, then poll the rings and verify the received
 * payload byte-for-byte.  Returns 0 on success, negative errno on
 * allocation/DMA failure, -EIO on data mismatch or timeout.
 */
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	/* Destination = own MAC address, zeroed source. */
	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		/* Encode the header length into mss/base_flags in the
		 * format each hardware-TSO generation expects.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	/* Fill the payload with a known incrementing byte pattern. */
	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Sync BD data before updating mailbox */
	wmb();

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Walk every completion produced by the test frame and verify
	 * descriptor status, length, ring choice, and payload contents.
	 */
	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}

#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)

/* Run the MAC-internal, PHY-internal and (optionally) external loopback
 * tests.  Per-mode failure bits are accumulated into data[0] (MAC),
 * data[1] (internal PHY) and data[2] (external) using the
 * TG3_*_LOOPBACK_FAILED flags above.  Returns 0 if every attempted
 * loopback passed, -EIO otherwise.
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	/* EEE is disabled for the duration of the test and restored at
	 * "done:" below.
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[0] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[0] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[1] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[1] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[1] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[2] |= TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[2] |= TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[2] |= TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}

/* ethtool .self_test handler.  Fills data[0..7] with per-test pass/fail
 * results (nvram, link, registers, memory, loopback x3, interrupt) and
 * sets ETH_TEST_FL_FAILED in etest->flags on any failure.  The offline
 * portion halts and later restarts the chip.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		/* Could not power the device up: report every test failed. */
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, &data[4], doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* Interrupt test runs without the full lock held. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[7] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);
}

/* ioctl handler: MII register access (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG),
 * delegated to phylib when USE_PHYLIB is set.  Continues past this chunk.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device
				  *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

/* ethtool .get_coalesce: return a copy of the cached coalescing params. */
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}

/* ethtool .set_coalesce: range-check the requested interrupt coalescing
 * parameters (limits differ for 5705+ chips), store the relevant subset in
 * tp->coal and program the hardware if the interface is up.
 */
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	/* On 5705+ the irq-tick and stats-block knobs are unsupported, so
	 * their limits stay 0 and any nonzero request is rejected below.
	 */
	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}

/* ethtool operations table; continues in the next chunk. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
};

/* .ndo_get_stats64: copy hardware statistics under tp->lock; falls back to
 * the last snapshot when hw_stats is not (yet) mapped.
 */
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tp->hw_stats)
		return &tp->net_stats_prev;

	spin_lock_bh(&tp->lock);
	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);

	return stats;
}

/* .ndo_set_rx_mode: reprogram rx filters under the full lock. */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}

/* Record a new MTU and flip the jumbo/TSO-related flags accordingly.
 * 5780-class parts cannot do TSO with jumbo frames, so TSO_CAPABLE is
 * toggled (with a netdev feature refresh) instead of JUMBO_RING_ENABLE.
 */
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}

/* .ndo_change_mtu: validate the new MTU, then halt, reconfigure and restart
 * the chip when the interface is running.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err, reset_phy = 0;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		reset_phy = 1;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

/* net_device operations table. */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

/* Probe the size of a legacy EEPROM by reading at doubling offsets until
 * the magic signature wraps around.
 */
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}

/* Determine total NVRAM size, either from the on-chip directory entry at
 * 0xf0 or by falling back to EEPROM probing.  Continues in the next chunk.
 */
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.
			 * This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}

/* Decode NVRAM_CFG1 for the original (pre-5752) NVRAM interface: detect
 * flash vs EEPROM, and derive JEDEC vendor / page size from the vendor
 * strap bits (only meaningful on 5750 and 5780-class chips).
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* EEPROM path: force compatibility (non-bypass) mode. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}

/* Translate the 5752-style page-size strap field into a byte count. */
static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}

/* Decode NVRAM_CFG1 on 5752: vendor/buffered/flash flags and page size.
 * Bit 27 indicates TPM-protected NVRAM.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}

/* Decode NVRAM_CFG1 on 5755: vendor, page size and usable size (reduced
 * when the TPM-protection bit is set).  Continues in the next chunk.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ?
					  0x3e200 : TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			/* NOTE(review): 0x1f200 is used for both the 256KB
			 * and 128KB protected cases below — confirm against
			 * Broadcom documentation before changing.
			 */
			tp->nvram_size = (protect ?
					  0x1f200 : TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  0x1f200 : TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}

/* Decode NVRAM_CFG1 on 5787/5784/5785: vendor, buffered/flash flags and
 * page size (no size derivation here).
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}

/* Decode NVRAM_CFG1 on 5761: vendor/page size, and size either from the
 * lockout register (TPM-protected) or from the specific device strap.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}

/* 5906 always has a buffered Atmel EEPROM; no strap decoding needed. */
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}

static void __devinit
tg3_get_57780_nvram_info(struct tg3 *tp)
{
	/* Decode NVRAM_CFG1 on 57780 and 57765-class chips: vendor, flash
	 * vs EEPROM, size, page size; sets NO_NVRAM when no supported
	 * device is strapped.
	 */
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Atmel DataFlash page sizes (264/528) need address translation;
	 * everything else can be addressed linearly.
	 */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

/* Decode NVRAM_CFG1 on 5717/5719; sizes either come from the strap or are
 * left for tg3_nvram_get_size() to probe.  The closing brace of this
 * function lives at the start of the next chunk.
 */
static void __devinit tg3_get_5717_nvram_info(struct tg3
					      *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}	/* end of tg3_get_5717_nvram_info() */

/* Decode NVRAM_CFG1 on 5720: EEPROM vs Atmel/ST flash variants, with size
 * derived from the pin-strap value.
 */
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);

		/* HD = high-density part, LD = low-density. */
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	/* Reset the EEPROM state machine and program the default clock. */
	tw32_f(GRC_EEPROM_ADDR,
	       (EEPROM_ADDR_FSM_RESET |
		(EEPROM_DEFAULT_CLOCK_PERIOD <<
		 EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses.
	 */
	tw32_f(GRC_LOCAL_CTRL,
	       tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		/* Dispatch to the ASIC-specific NVRAM_CFG1 decoder. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}

/* Maps a PCI subsystem vendor/device pair to the PHY ID fitted on that
 * board (0 = serdes / no copper PHY).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

/* Linear search of the table above; NULL when the board is unknown. */
static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}

/* Read board configuration (PHY id, LED mode, WOL/ASF capabilities) from
 * the NIC's shared SRAM / shadow registers.  Continues past this chunk.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.
*/ tg3_flag_set(tp, EEPROM_WRITE_PROT); tg3_flag_set(tp, WOL_CAP); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { tg3_flag_clear(tp, EEPROM_WRITE_PROT); tg3_flag_set(tp, IS_NIC); } val = tr32(VCPU_CFGSHDW); if (val & VCPU_CFGSHDW_ASPM_DBNC) tg3_flag_set(tp, ASPM_WORKAROUND); if ((val & VCPU_CFGSHDW_WOL_ENABLE) && (val & VCPU_CFGSHDW_WOL_MAGPKT)) { tg3_flag_set(tp, WOL_ENABLE); device_set_wakeup_enable(&tp->pdev->dev, true); } goto done; } tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); if (val == NIC_SRAM_DATA_SIG_MAGIC) { u32 nic_cfg, led_cfg; u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id; int eeprom_phy_serdes = 0; tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); tp->nic_sram_data_cfg = nic_cfg; tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); ver >>= NIC_SRAM_DATA_VER_SHIFT; if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 && (ver > 0) && (ver < 0x100)) tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4); if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) eeprom_phy_serdes = 1; tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id); if (nic_phy_id != 0) { u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK; u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK; eeprom_phy_id = (id1 >> 16) << 10; eeprom_phy_id |= (id2 & 0xfc00) << 16; eeprom_phy_id |= (id2 & 0x03ff) << 0; } else eeprom_phy_id = 0; tp->phy_id = eeprom_phy_id; if (eeprom_phy_serdes) { if (!tg3_flag(tp, 5705_PLUS)) tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; else tp->phy_flags |= TG3_PHYFLG_MII_SERDES; } if (tg3_flag(tp, 5750_PLUS)) led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | SHASTA_EXT_LED_MODE_MASK); else led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK; switch (led_cfg) { default: case 
NIC_SRAM_DATA_CFG_LED_MODE_PHY_1: tp->led_ctrl = LED_CTRL_MODE_PHY_1; break; case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2: tp->led_ctrl = LED_CTRL_MODE_PHY_2; break; case NIC_SRAM_DATA_CFG_LED_MODE_MAC: tp->led_ctrl = LED_CTRL_MODE_MAC; /* Default to PHY_1_MODE if 0 (MAC_MODE) is * read on some older 5700/5701 bootcode. */ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) tp->led_ctrl = LED_CTRL_MODE_PHY_1; break; case SHASTA_EXT_LED_SHARED: tp->led_ctrl = LED_CTRL_MODE_SHARED; if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && tp->pci_chip_rev_id != CHIPREV_ID_5750_A1) tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | LED_CTRL_MODE_PHY_2); break; case SHASTA_EXT_LED_MAC: tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC; break; case SHASTA_EXT_LED_COMBO: tp->led_ctrl = LED_CTRL_MODE_COMBO; if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | LED_CTRL_MODE_PHY_2); break; } if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) && tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) tp->led_ctrl = LED_CTRL_MODE_PHY_2; if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) tp->led_ctrl = LED_CTRL_MODE_PHY_1; if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { tg3_flag_set(tp, EEPROM_WRITE_PROT); if ((tp->pdev->subsystem_vendor == PCI_VENDOR_ID_ARIMA) && (tp->pdev->subsystem_device == 0x205a || tp->pdev->subsystem_device == 0x2063)) tg3_flag_clear(tp, EEPROM_WRITE_PROT); } else { tg3_flag_clear(tp, EEPROM_WRITE_PROT); tg3_flag_set(tp, IS_NIC); } if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { tg3_flag_set(tp, ENABLE_ASF); if (tg3_flag(tp, 5750_PLUS)) tg3_flag_set(tp, ASF_NEW_HANDSHAKE); } if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && tg3_flag(tp, 5750_PLUS)) tg3_flag_set(tp, ENABLE_APE); if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) tg3_flag_clear(tp, WOL_CAP); if (tg3_flag(tp, WOL_CAP) && (nic_cfg & 
NIC_SRAM_DATA_CFG_WOL_ENABLE)) { tg3_flag_set(tp, WOL_ENABLE); device_set_wakeup_enable(&tp->pdev->dev, true); } if (cfg2 & (1 << 17)) tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; /* serdes signal pre-emphasis in register 0x590 set by */ /* bootcode if bit 18 is set */ if (cfg2 & (1 << 18)) tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; if ((tg3_flag(tp, 57765_PLUS) || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) && (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; if (tg3_flag(tp, PCI_EXPRESS) && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && !tg3_flag(tp, 57765_PLUS)) { u32 cfg3; tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE) tg3_flag_set(tp, ASPM_WORKAROUND); } if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) tg3_flag_set(tp, RGMII_INBAND_DISABLE); if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN); if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN); } done: if (tg3_flag(tp, WOL_CAP)) device_set_wakeup_enable(&tp->pdev->dev, tg3_flag(tp, WOL_ENABLE)); else device_set_wakeup_capable(&tp->pdev->dev, false); } static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd) { int i; u32 val; tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START); tw32(OTP_CTRL, cmd); /* Wait for up to 1 ms for command to execute. */ for (i = 0; i < 100; i++) { val = tr32(OTP_STATUS); if (val & OTP_STATUS_CMD_DONE) break; udelay(10); } return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY; } /* Read the gphy configuration from the OTP region of the chip. The gphy * configuration is a 32-bit value that straddles the alignment boundary. * We do two 32-bit reads and then shift and merge the results. 
*/
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	/* Top half of the straddled 32-bit gphy config word. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	/* Bottom half. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Merge: low 16 bits of the top read become the high halfword,
	 * high 16 bits of the bottom read become the low halfword.
	 * A return of 0 means "no valid OTP config".
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}

/* Initialize tp->link_config to its autonegotiate-everything default,
 * advertising only the modes the PHY flags allow (no gigabit for
 * 10/100-only PHYs, TP modes only for copper, FIBRE for serdes).
 */
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}

/* Identify the PHY attached to this MAC and record it in tp->phy_id /
 * tp->phy_flags.  Tries, in order: phylib (when USE_PHYLIB), the MII
 * PHYSID registers, the eeprom value left by tg3_get_eeprom_hw_cfg(),
 * and finally the hardcoded subsystem-ID table.  For copper PHYs not
 * managed by ASF/APE firmware it also resets the PHY and (re)starts
 * autonegotiation.  Returns 0 or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Same internal phy_id packing as tg3_get_eeprom_hw_cfg(). */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Chips/revisions on this list support Energy Efficient Ethernet
	 * (copper only).
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR is read twice before trusting link status —
		 * presumably because link-status bits are latched;
		 * NOTE(review): confirm against the MII spec.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): the second, unconditional call looks like
		 * a deliberate repeat for this PHY — do not "deduplicate".
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}

static
/* Extract the board part number (and, for Dell boards, a firmware
 * version prefix) from the device's PCI VPD.  Falls back to a part
 * number derived from the PCI device ID, or "none", when no usable
 * VPD is present.  (The "static" storage-class keyword for this
 * definition immediately precedes it in the file.)
 */
void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	/* "1028" is Dell's PCI vendor ID in ASCII; only then does the
	 * VENDOR0 keyword carry a firmware version string.
	 */
	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* No VPD part number: synthesize one from the PCI device ID. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}

/* Return 1 if the NVRAM words at 'offset' look like a valid firmware
 * image header (top 6 bits == 0x03 and the next word zero), else 0.
 */
static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}

/* Append the bootcode version from NVRAM to tp->fw_ver.  Newer images
 * store a 16-byte version string; older ones store major/minor fields
 * that are formatted as "vM.mm".
 */
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* 16-byte ASCII version string stored in the image. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;

		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}

/* Format the hardware selfboot version ("sb vM.mm") from the NVRAM
 * HWSB config word into tp->fw_ver (overwrites from the start).
 */
static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

/* Append the selfboot firmware version to tp->fw_ver.  'val' is the
 * NVRAM signature word already read by the caller; the EDH word
 * location depends on the selfboot format revision.
 */
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity-limit the decoded fields before formatting. */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* Builds 1..26 are encoded as a trailing letter 'a'..'z'. */
	if (build > 0) {
		offset = strlen(tp->fw_ver);

		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}

/* Append the management (ASF) firmware version — 16 bytes read from
 * the image located via the NVRAM directory — to tp->fw_ver as
 * ", <bytes>", truncating at TG3_VER_SIZE.
 */
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	/* Scan the NVRAM directory for the ASF init entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Partial copy of the last word if the buffer is full. */
		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}

/* Append the APE (DASH or NCSI) firmware version to tp->fw_ver, read
 * from APE shared memory.  Silently returns unless both APE and ASF
 * are enabled and the APE firmware reports itself ready.
 */
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) &
	    TG3_APE_FW_FEATURE_NCSI) {
		tg3_flag_set(tp, APE_HAS_NCSI);
		fwtype = "NCSI";
	} else {
		fwtype = "DASH";
	}

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}

/* Build the complete tp->fw_ver string: dispatch on the NVRAM magic to
 * the bootcode/selfboot/hwsb readers above, then (unless a VPD version
 * was already present) append the management or DASH/NCSI firmware
 * version.  Always NUL-terminates tp->fw_ver.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	/* Non-empty fw_ver means tg3_read_vpd() already stored a prefix. */
	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);
	else
		return;

	if (vpd_vers)
		goto done;

	if (tg3_flag(tp, ENABLE_APE)) {
		if (tg3_flag(tp, ENABLE_ASF))
			tg3_read_dash_ver(tp);
	} else if (tg3_flag(tp, ENABLE_ASF)) {
		tg3_read_mgmtfw_ver(tp);
	}

done:
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}

/* Pick the RX return ring size appropriate for this chip family. */
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

/* Host bridges known to reorder writes to the mailbox registers;
 * their presence triggers the MBOX_WRITE_REORDER workaround.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};

/* On multi-function boards, find the other tg3 function sharing this
 * device slot; returns tp->pdev itself for single-port configurations.
 * The returned pci_dev has had its refcount dropped on purpose (see
 * comment below).
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}

/* Decode the chip revision from the misc host control register (or,
 * for newer parts, the product-ID ASIC rev config register) into
 * tp->pci_chip_rev_id, and derive the chip-family flags (5705_PLUS,
 * 5750_PLUS, 5755_PLUS, 5780_CLASS, 5717_PLUS, 57765_CLASS/PLUS,
 * CPMU_PRESENT) that the rest of the driver keys off.
 */
static void __devinit tg3_detect_asic_rev(struct tg3 *tp,
					  u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}

static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver have this
	 * workaround but turns MWI off all the times so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
*/ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); pci_cmd &= ~PCI_COMMAND_INVALIDATE; pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); /* Important! -- Make sure register accesses are byteswapped * correctly. Also, for those chips that require it, make * sure that indirect register accesses are enabled before * the first operation. */ pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, &misc_ctrl_reg); tp->misc_host_ctrl |= (misc_ctrl_reg & MISC_HOST_CTRL_CHIPREV); pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); tg3_detect_asic_rev(tp, misc_ctrl_reg); /* If we have 5702/03 A1 or A2 on certain ICH chipsets, * we need to disable memory and use config. cycles * only to access all registers. The 5702/03 chips * can mistakenly decode the special cycles from the * ICH chipsets as memory write cycles, causing corruption * of register and memory space. Only certain ICH bridges * will drive special cycles with non-zero data during the * address phase which can fall within the 5703's address * range. This is not an ICH bug as the PCI spec allows * non-zero address during special cycles. However, only * these ICH bridges are known to drive non-zero addresses * during special cycles. * * Since special cycles do not cross PCI bridges, we only * enable this workaround if the 5703 is on the secondary * bus of these ICH bridges. 
*/ if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) || (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) { static struct tg3_dev_id { u32 vendor; u32 device; u32 rev; } ich_chipsets[] = { { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, PCI_ANY_ID }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, PCI_ANY_ID }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, 0xa }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, PCI_ANY_ID }, { }, }; struct tg3_dev_id *pci_id = &ich_chipsets[0]; struct pci_dev *bridge = NULL; while (pci_id->vendor != 0) { bridge = pci_get_device(pci_id->vendor, pci_id->device, bridge); if (!bridge) { pci_id++; continue; } if (pci_id->rev != PCI_ANY_ID) { if (bridge->revision > pci_id->rev) continue; } if (bridge->subordinate && (bridge->subordinate->number == tp->pdev->bus->number)) { tg3_flag_set(tp, ICH_WORKAROUND); pci_dev_put(bridge); break; } } } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { static struct tg3_dev_id { u32 vendor; u32 device; } bridge_chipsets[] = { { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 }, { }, }; struct tg3_dev_id *pci_id = &bridge_chipsets[0]; struct pci_dev *bridge = NULL; while (pci_id->vendor != 0) { bridge = pci_get_device(pci_id->vendor, pci_id->device, bridge); if (!bridge) { pci_id++; continue; } if (bridge->subordinate && (bridge->subordinate->number <= tp->pdev->bus->number) && (bridge->subordinate->subordinate >= tp->pdev->bus->number)) { tg3_flag_set(tp, 5701_DMA_BUG); pci_dev_put(bridge); break; } } } /* The EPB bridge inside 5714, 5715, and 5780 cannot support * DMA addresses > 40-bit. This bridge may have other additional * 57xx devices behind it in some 4-port NIC designs for example. * Any tg3 device found behind the bridge will also need the 40-bit * DMA workaround. 
*/ if (tg3_flag(tp, 5780_CLASS)) { tg3_flag_set(tp, 40BIT_DMA_BUG); tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); } else { struct pci_dev *bridge = NULL; do { bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_EPB, bridge); if (bridge && bridge->subordinate && (bridge->subordinate->number <= tp->pdev->bus->number) && (bridge->subordinate->subordinate >= tp->pdev->bus->number)) { tg3_flag_set(tp, 40BIT_DMA_BUG); pci_dev_put(bridge); break; } } while (bridge); } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) tp->pdev_peer = tg3_find_peer(tp); /* Determine TSO capabilities */ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) ; /* Do nothing. HW bug. */ else if (tg3_flag(tp, 57765_PLUS)) tg3_flag_set(tp, HW_TSO_3); else if (tg3_flag(tp, 5755_PLUS) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) tg3_flag_set(tp, HW_TSO_2); else if (tg3_flag(tp, 5750_PLUS)) { tg3_flag_set(tp, HW_TSO_1); tg3_flag_set(tp, TSO_BUG); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 && tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) tg3_flag_clear(tp, TSO_BUG); } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { tg3_flag_set(tp, TSO_BUG); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) tp->fw_needed = FIRMWARE_TG3TSO5; else tp->fw_needed = FIRMWARE_TG3TSO; } /* Selectively allow TSO based on operating conditions */ if (tg3_flag(tp, HW_TSO_1) || tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3) || tp->fw_needed) { /* For firmware TSO, assume ASF is disabled. * We'll disable TSO later if we discover ASF * is enabled in tg3_get_eeprom_hw_cfg(). 
*/ tg3_flag_set(tp, TSO_CAPABLE); } else { tg3_flag_clear(tp, TSO_CAPABLE); tg3_flag_clear(tp, TSO_BUG); tp->fw_needed = NULL; } if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) tp->fw_needed = FIRMWARE_TG3; tp->irq_max = 1; if (tg3_flag(tp, 5750_PLUS)) { tg3_flag_set(tp, SUPPORT_MSI); if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 && tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 && tp->pdev_peer == tp->pdev)) tg3_flag_clear(tp, SUPPORT_MSI); if (tg3_flag(tp, 5755_PLUS) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { tg3_flag_set(tp, 1SHOT_MSI); } if (tg3_flag(tp, 57765_PLUS)) { tg3_flag_set(tp, SUPPORT_MSIX); tp->irq_max = TG3_IRQ_MAX_VECS; tg3_rss_init_dflt_indir_tbl(tp); } } if (tg3_flag(tp, 5755_PLUS)) tg3_flag_set(tp, SHORT_DMA_BUG); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) tp->dma_limit = TG3_TX_BD_DMA_MAX_4K; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) tg3_flag_set(tp, LRG_PROD_RING_CAP); if (tg3_flag(tp, 57765_PLUS) && tp->pci_chip_rev_id != CHIPREV_ID_5719_A0) tg3_flag_set(tp, USE_JUMBO_BDFLAG); if (!tg3_flag(tp, 5705_PLUS) || tg3_flag(tp, 5780_CLASS) || tg3_flag(tp, USE_JUMBO_BDFLAG)) tg3_flag_set(tp, JUMBO_CAPABLE); pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &pci_state_reg); if (pci_is_pcie(tp->pdev)) { u16 lnkctl; tg3_flag_set(tp, PCI_EXPRESS); pci_read_config_word(tp->pdev, pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, &lnkctl); if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { tg3_flag_clear(tp, HW_TSO_2); tg3_flag_clear(tp, TSO_CAPABLE); } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 || tp->pci_chip_rev_id == CHIPREV_ID_57780_A1) 
tg3_flag_set(tp, CLKREQ_BUG); } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) { tg3_flag_set(tp, L1PLLPD_EN); } } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { /* BCM5785 devices are effectively PCIe devices, and should * follow PCIe codepaths, but do not have a PCIe capabilities * section. */ tg3_flag_set(tp, PCI_EXPRESS); } else if (!tg3_flag(tp, 5705_PLUS) || tg3_flag(tp, 5780_CLASS)) { tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); if (!tp->pcix_cap) { dev_err(&tp->pdev->dev, "Cannot find PCI-X capability, aborting\n"); return -EIO; } if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE)) tg3_flag_set(tp, PCIX_MODE); } /* If we have an AMD 762 or VIA K8T800 chipset, write * reordering to the mailbox registers done by the host * controller can cause major troubles. We read back from * every mailbox register write to force the writes to be * posted to the chip in order. */ if (pci_dev_present(tg3_write_reorder_chipsets) && !tg3_flag(tp, PCI_EXPRESS)) tg3_flag_set(tp, MBOX_WRITE_REORDER); pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &tp->pci_cacheline_sz); pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER, &tp->pci_lat_timer); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && tp->pci_lat_timer < 64) { tp->pci_lat_timer = 64; pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, tp->pci_lat_timer); } /* Important! -- It is critical that the PCI-X hw workaround * situation is decided before the first MMIO register access. */ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) { /* 5700 BX chips need to have their TX producer index * mailboxes written twice to workaround a bug. */ tg3_flag_set(tp, TXD_MBOX_HWBUG); /* If we are in PCI-X mode, enable register write workaround. * * The workaround is to use indirect register accesses * for all chip writes not to mailbox registers. 
*/ if (tg3_flag(tp, PCIX_MODE)) { u32 pm_reg; tg3_flag_set(tp, PCIX_TARGET_HWBUG); /* The chip can have it's power management PCI config * space registers clobbered due to this bug. * So explicitly force the chip into D0 here. */ pci_read_config_dword(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pm_reg); pm_reg &= ~PCI_PM_CTRL_STATE_MASK; pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; pci_write_config_dword(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pm_reg); /* Also, force SERR#/PERR# in PCI command. */ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); } } if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) tg3_flag_set(tp, PCI_HIGH_SPEED); if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) tg3_flag_set(tp, PCI_32BIT); /* Chip-specific fixup from Broadcom driver */ if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) && (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) { pci_state_reg |= PCISTATE_RETRY_SAME_DMA; pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); } /* Default fast path register access methods */ tp->read32 = tg3_read32; tp->write32 = tg3_write32; tp->read32_mbox = tg3_read32; tp->write32_mbox = tg3_write32; tp->write32_tx_mbox = tg3_write32; tp->write32_rx_mbox = tg3_write32; /* Various workaround register access methods */ if (tg3_flag(tp, PCIX_TARGET_HWBUG)) tp->write32 = tg3_write_indirect_reg32; else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || (tg3_flag(tp, PCI_EXPRESS) && tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) { /* * Back to back register writes can cause problems on these * chips, the workaround is to read back all reg writes * except those to mailbox regs. * * See tg3_write_indirect_reg32(). 
*/ tp->write32 = tg3_write_flush_reg32; } if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) { tp->write32_tx_mbox = tg3_write32_tx_mbox; if (tg3_flag(tp, MBOX_WRITE_REORDER)) tp->write32_rx_mbox = tg3_write_flush_reg32; } if (tg3_flag(tp, ICH_WORKAROUND)) { tp->read32 = tg3_read_indirect_reg32; tp->write32 = tg3_write_indirect_reg32; tp->read32_mbox = tg3_read_indirect_mbox; tp->write32_mbox = tg3_write_indirect_mbox; tp->write32_tx_mbox = tg3_write_indirect_mbox; tp->write32_rx_mbox = tg3_write_indirect_mbox; iounmap(tp->regs); tp->regs = NULL; pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); pci_cmd &= ~PCI_COMMAND_MEMORY; pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { tp->read32_mbox = tg3_read32_mbox_5906; tp->write32_mbox = tg3_write32_mbox_5906; tp->write32_tx_mbox = tg3_write32_mbox_5906; tp->write32_rx_mbox = tg3_write32_mbox_5906; } if (tp->write32 == tg3_write_indirect_reg32 || (tg3_flag(tp, PCIX_MODE) && (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701))) tg3_flag_set(tp, SRAM_USE_CONFIG); /* The memory arbiter has to be enabled in order for SRAM accesses * to succeed. Normally on powerup the tg3 chip firmware will make * sure it is enabled, but other entities such as system netboot * code might disable it. */ val = tr32(MEMARB_MODE); tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || tg3_flag(tp, 5780_CLASS)) { if (tg3_flag(tp, PCIX_MODE)) { pci_read_config_dword(tp->pdev, tp->pcix_cap + PCI_X_STATUS, &val); tp->pci_fn = val & 0x7; } } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val); if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) == NIC_SRAM_CPMUSTAT_SIG) { tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717; tp->pci_fn = tp->pci_fn ? 
1 : 0; } } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val); if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) == NIC_SRAM_CPMUSTAT_SIG) { tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >> TG3_CPMU_STATUS_FSHFT_5719; } } /* Get eeprom hw config before calling tg3_set_power_state(). * In particular, the TG3_FLAG_IS_NIC flag must be * determined before calling tg3_set_power_state() so that * we know whether or not to switch out of Vaux power. * When the flag is set, it means that GPIO1 is used for eeprom * write protect and also implies that it is a LOM where GPIOs * are not used to switch power. */ tg3_get_eeprom_hw_cfg(tp); if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) { tg3_flag_clear(tp, TSO_CAPABLE); tg3_flag_clear(tp, TSO_BUG); tp->fw_needed = NULL; } if (tg3_flag(tp, ENABLE_APE)) { /* Allow reads and writes to the * APE register and memory space. */ pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | PCISTATE_ALLOW_APE_SHMEM_WR | PCISTATE_ALLOW_APE_PSPACE_WR; pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); tg3_ape_lock_init(tp); } /* Set up tp->grc_local_ctrl before calling * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high * will bring 5700's external PHY out of reset. * It is also used as eeprom write protect on LOMs. */ tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || tg3_flag(tp, EEPROM_WRITE_PROT)) tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | GRC_LCLCTRL_GPIO_OUTPUT1); /* Unused GPIO3 must be driven as output on 5752 because there * are no pull-up resistors on unused GPIO pins. 
*/ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || tg3_flag(tp, 57765_CLASS)) tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { /* Turn off the debug UART. */ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; if (tg3_flag(tp, IS_NIC)) /* Keep VMain power. */ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OUTPUT0; } /* Switch out of Vaux if it is a NIC */ tg3_pwrsrc_switch_to_vmain(tp); /* Derive initial jumbo mode from MTU assigned in * ether_setup() via the alloc_etherdev() call */ if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) tg3_flag_set(tp, JUMBO_RING_ENABLE); /* Determine WakeOnLan speed to use. */ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 || tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) { tg3_flag_clear(tp, WOL_SPEED_100MB); } else { tg3_flag_set(tp, WOL_SPEED_100MB); } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) tp->phy_flags |= TG3_PHYFLG_IS_FET; /* A few boards don't want Ethernet@WireSpeed phy feature */ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || (tp->phy_flags & TG3_PHYFLG_IS_FET) || (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX || GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX) tp->phy_flags |= TG3_PHYFLG_ADC_BUG; if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; if (tg3_flag(tp, 5705_PLUS) && !(tp->phy_flags & TG3_PHYFLG_IS_FET) && 
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && !tg3_flag(tp, 57765_PLUS)) { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM; } else tp->phy_flags |= TG3_PHYFLG_BER_BUG; } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) { tp->phy_otp = tg3_read_otp_phycfg(tp); if (tp->phy_otp == 0) tp->phy_otp = TG3_OTP_DEFAULT; } if (tg3_flag(tp, CPMU_PRESENT)) tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; else tp->mi_mode = MAC_MI_MODE_BASE; tp->coalesce_mode = 0; if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) tp->coalesce_mode |= HOSTCC_MODE_32BYTE; /* Set these bits to enable statistics workaround. */ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) { tp->coalesce_mode |= HOSTCC_MODE_ATTN; tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) tg3_flag_set(tp, USE_PHYLIB); err = tg3_mdio_init(tp); if (err) return err; /* Initialize data/descriptor byte/word swapping. 
*/ val = tr32(GRC_MODE); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | GRC_MODE_WORD_SWAP_B2HRX_DATA | GRC_MODE_B2HRX_ENABLE | GRC_MODE_HTX2B_ENABLE | GRC_MODE_HOST_STACKUP); else val &= GRC_MODE_HOST_STACKUP; tw32(GRC_MODE, val | tp->grc_mode); tg3_switch_clocks(tp); /* Clear this out for sanity. */ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &pci_state_reg); if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && !tg3_flag(tp, PCIX_TARGET_HWBUG)) { u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl); if (chiprevid == CHIPREV_ID_5701_A0 || chiprevid == CHIPREV_ID_5701_B0 || chiprevid == CHIPREV_ID_5701_B2 || chiprevid == CHIPREV_ID_5701_B5) { void __iomem *sram_base; /* Write some dummy words into the SRAM status block * area, see if it reads back correctly. If the return * value is bad, force enable the PCIX workaround. */ sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; writel(0x00000000, sram_base); writel(0x00000000, sram_base + 4); writel(0xffffffff, sram_base + 4); if (readl(sram_base) != 0x00000000) tg3_flag_set(tp, PCIX_TARGET_HWBUG); } } udelay(50); tg3_nvram_init(tp); grc_misc_cfg = tr32(GRC_MISC_CFG); grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) tg3_flag_set(tp, IS_5788); if (!tg3_flag(tp, IS_5788) && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) tg3_flag_set(tp, TAGGED_STATUS); if (tg3_flag(tp, TAGGED_STATUS)) { tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | HOSTCC_MODE_CLRTICK_TXBD); tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); } /* Preserve the APE MAC_MODE bits */ if (tg3_flag(tp, ENABLE_APE)) tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; else tp->mac_mode = 0; /* these are limited to 10/100 only */ if 
((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 || tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 || tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) || (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F || tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F || tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || (tp->phy_flags & TG3_PHYFLG_IS_FET)) tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; err = tg3_phy_probe(tp); if (err) { dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err); /* ... but do not return immediately ... */ tg3_mdio_fini(tp); } tg3_read_vpd(tp); tg3_read_fw_ver(tp); if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; } else { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; else tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; } /* 5700 {AX,BX} chips have a broken status block link * change bit implementation, so we must use the * status register in those cases. */ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) tg3_flag_set(tp, USE_LINKCHG_REG); else tg3_flag_clear(tp, USE_LINKCHG_REG); /* The led_ctrl is set during tg3_phy_probe, here we might * have to force the link status polling mechanism based * upon subsystem IDs. */ if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; tg3_flag_set(tp, USE_LINKCHG_REG); } /* For all SERDES we poll the MAC status register. 
*/
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	/* Default rx offset/copybreak.  A 5701 in PCI-X mode gets no
	 * IP-alignment pad, and on platforms without efficient unaligned
	 * access the copy threshold is raised to copy every frame.
	 */
	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;	/* copy all packets */
#endif
	}

	/* Ring sizes are powers of two; size - 1 is the index mask. */
	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}

#ifdef CONFIG_SPARC
/* Fetch the MAC address from the "local-mac-address" property of the
 * device's OpenFirmware node.  Returns 0 on success, -ENODEV if the
 * property is absent or not exactly 6 bytes.
 */
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}

/* Last-resort fallback on sparc: copy the system-wide address out of
 * the IDPROM.  Always succeeds.
 */
static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif

/* Determine the device MAC address, trying in order: OpenFirmware
 * (sparc only), the SRAM MAC-address mailbox, NVRAM, and finally the
 * MAC_ADDR_0 registers themselves.  Returns 0 on success or -EINVAL
 * if no valid address was found anywhere.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* NVRAM offset of the MAC address; adjusted below for dual-MAC
	 * 5704/5780-class parts and multi-function 5717+ parts.
	 */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if
(tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is ASCII "HK", the bootcode's address-valid signature. */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >> 8) & 0xff;
		dev->dev_addr[1] = (hi >> 0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >> 8) & 0xff;
		dev->dev_addr[5] = (lo >> 0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* The low 2 bytes of hi and all 4 bytes of lo
			 * hold the big-endian address.
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs.
		 */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}
	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}

/* DMA boundary strategies chosen per-arch in tg3_calc_dma_bndry(). */
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

/* Fold the DMA read/write boundary bits into the DMA_RW_CTRL value
 * @val, based on host cache-line size and bus type, and return the
 * result.  Only 5700/5701 and PCIe parts honor these bits.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;	/* unspecified; assume the max */
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects. We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
*/
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		/* PCI-X: read and write boundaries are set together. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCIe: only the write boundary is controllable. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI. */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;

		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;

		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}

/* Run one transaction through the chip's internal DMA engine, either
 * host->device (@to_device != 0, read DMA) or device->host (write DMA),
 * by hand-building an internal buffer descriptor in SRAM via the PCI
 * memory window and kicking the matching FTQ.  Returns 0 on completion,
 * -ENODEV on poll timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the completion FIFOs, DMA status and buffer manager
	 * before starting the test.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into SRAM one 32-bit word at a time
	 * through the PCI memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the completion FIFO for our descriptor: 40 x 100us = 4ms. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}

#define TEST_BUFFER_SIZE	0x2000

/* Chipsets known to expose the 5700/5701 write DMA bug without
 * actually failing the DMA test below.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

/* Calibrate tp->dma_rwctrl: choose DMA watermarks per bus type, then
 * (5700/5701 only) run a real write/read DMA loop to detect the write
 * DMA bug, tightening the write boundary to 16 bytes if corruption is
 * seen.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev,
TEST_BUFFER_SIZE, &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Default PCI read/write command watermarks. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on. This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning. In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known pattern. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it.
		 */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First corruption seen: clamp the write
				 * boundary to 16 bytes and retry the test.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

/* Program the buffer-manager watermark defaults appropriate for this
 * chip generation, for both the standard and jumbo mbuf pools.
 */
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		/* 5906 uses its own, lower mbuf watermarks. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

/* Map the PHY ID (masked with TG3_PHY_ID_MASK) to a printable
 * chip-name string.
 */
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}

/* Format a description of the bus the device sits on ("PCI Express",
 * "PCIX:<speed>" or "PCI:<speed>", plus bus width where applicable)
 * into @str and return it.  NOTE(review): no bound is passed; the
 * caller visible later in this file uses a char str[40] buffer.
 */
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}

/* Initialize the default ethtool interrupt-coalescing parameters,
 * adjusting for CLRTICK mode and zeroing the irq-event and stats
 * coalescing fields on 5705 and newer chips.
 */
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}

static int __devinit tg3_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct
net_device *dev; struct tg3 *tp; int i, err, pm_cap; u32 sndmbx, rcvmbx, intmbx; char str[40]; u64 dma_mask, persist_dma_mask; netdev_features_t features = 0; printk_once(KERN_INFO "%s\n", version); err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); return err; } err = pci_request_regions(pdev, DRV_MODULE_NAME); if (err) { dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); goto err_out_disable_pdev; } pci_set_master(pdev); /* Find power-management capability. */ pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); if (pm_cap == 0) { dev_err(&pdev->dev, "Cannot find Power Management capability, aborting\n"); err = -EIO; goto err_out_free_res; } err = pci_set_power_state(pdev, PCI_D0); if (err) { dev_err(&pdev->dev, "Transition to D0 failed, aborting\n"); goto err_out_free_res; } dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); if (!dev) { err = -ENOMEM; goto err_out_power_down; } SET_NETDEV_DEV(dev, &pdev->dev); tp = netdev_priv(dev); tp->pdev = pdev; tp->dev = dev; tp->pm_cap = pm_cap; tp->rx_mode = TG3_DEF_RX_MODE; tp->tx_mode = TG3_DEF_TX_MODE; if (tg3_debug > 0) tp->msg_enable = tg3_debug; else tp->msg_enable = TG3_DEF_MSG_ENABLE; /* The word/byte swap controls here control register access byte * swapping. DMA data byte swapping is controlled in the GRC_MODE * setting below. */ tp->misc_host_ctrl = MISC_HOST_CTRL_MASK_PCI_INT | MISC_HOST_CTRL_WORD_SWAP | MISC_HOST_CTRL_INDIR_ACCESS | MISC_HOST_CTRL_PCISTATE_RW; /* The NONFRM (non-frame) byte/word swap controls take effect * on descriptor entries, anything which isn't packet data. * * The StrongARM chips on the board (one for tx, one for rx) * are running in big-endian mode. 
*/ tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | GRC_MODE_WSWAP_NONFRM_DATA); #ifdef __BIG_ENDIAN tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; #endif spin_lock_init(&tp->lock); spin_lock_init(&tp->indirect_lock); INIT_WORK(&tp->reset_task, tg3_reset_task); tp->regs = pci_ioremap_bar(pdev, BAR_0); if (!tp->regs) { dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); err = -ENOMEM; goto err_out_free_dev; } if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) { tg3_flag_set(tp, ENABLE_APE); tp->aperegs = pci_ioremap_bar(pdev, BAR_2); if (!tp->aperegs) { dev_err(&pdev->dev, "Cannot map APE registers, aborting\n"); err = -ENOMEM; goto err_out_iounmap; } } tp->rx_pending = TG3_DEF_RX_RING_PENDING; tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; dev->ethtool_ops = &tg3_ethtool_ops; dev->watchdog_timeo = TG3_TX_TIMEOUT; dev->netdev_ops = &tg3_netdev_ops; dev->irq = pdev->irq; err = tg3_get_invariants(tp); if (err) { dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n"); goto err_out_apeunmap; } /* The EPB bridge inside 5714, 5715, and 5780 and any * device behind the EPB cannot support DMA addresses > 40-bit. * On 64-bit systems with IOMMU, use 40-bit dma_mask. * On 64-bit systems without IOMMU, use 64-bit dma_mask and * do DMA address check in tg3_start_xmit(). */ if (tg3_flag(tp, IS_5788)) persist_dma_mask = dma_mask = DMA_BIT_MASK(32); else if (tg3_flag(tp, 40BIT_DMA_BUG)) { persist_dma_mask = dma_mask = DMA_BIT_MASK(40); #ifdef CONFIG_HIGHMEM dma_mask = DMA_BIT_MASK(64); #endif } else persist_dma_mask = dma_mask = DMA_BIT_MASK(64); /* Configure DMA attributes. 
*/ if (dma_mask > DMA_BIT_MASK(32)) { err = pci_set_dma_mask(pdev, dma_mask); if (!err) { features |= NETIF_F_HIGHDMA; err = pci_set_consistent_dma_mask(pdev, persist_dma_mask); if (err < 0) { dev_err(&pdev->dev, "Unable to obtain 64 bit " "DMA for consistent allocations\n"); goto err_out_apeunmap; } } } if (err || dma_mask == DMA_BIT_MASK(32)) { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto err_out_apeunmap; } } tg3_init_bufmgr_config(tp); features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; /* 5700 B0 chips do not support checksumming correctly due * to hardware bugs. */ if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) { features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; if (tg3_flag(tp, 5755_PLUS)) features |= NETIF_F_IPV6_CSUM; } /* TSO is on by default on chips that support hardware TSO. * Firmware TSO on older chips gives lower performance, so it * is off by default, but can be enabled using ethtool. */ if ((tg3_flag(tp, HW_TSO_1) || tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) && (features & NETIF_F_IP_CSUM)) features |= NETIF_F_TSO; if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) { if (features & NETIF_F_IPV6_CSUM) features |= NETIF_F_TSO6; if (tg3_flag(tp, HW_TSO_3) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) features |= NETIF_F_TSO_ECN; } dev->features |= features; dev->vlan_features |= features; /* * Add loopback capability only for a subset of devices that support * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY * loopback for the remaining devices. 
*/ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 && !tg3_flag(tp, CPMU_PRESENT)) /* Add the loopback capability */ features |= NETIF_F_LOOPBACK; dev->hw_features |= features; if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && !tg3_flag(tp, TSO_CAPABLE) && !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { tg3_flag_set(tp, MAX_RXPEND_64); tp->rx_pending = 63; } err = tg3_get_device_address(tp); if (err) { dev_err(&pdev->dev, "Could not obtain valid ethernet address, aborting\n"); goto err_out_apeunmap; } /* * Reset chip in case UNDI or EFI driver did not shutdown * DMA self test will enable WDMAC and we'll see (spurious) * pending DMA on the PCI bus at that point. */ if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); } err = tg3_test_dma(tp); if (err) { dev_err(&pdev->dev, "DMA engine test failed, aborting\n"); goto err_out_apeunmap; } intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; for (i = 0; i < tp->irq_max; i++) { struct tg3_napi *tnapi = &tp->napi[i]; tnapi->tp = tp; tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; tnapi->int_mbox = intmbx; if (i <= 4) intmbx += 0x8; else intmbx += 0x4; tnapi->consmbox = rcvmbx; tnapi->prodmbox = sndmbx; if (i) tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); else tnapi->coal_now = HOSTCC_MODE_NOW; if (!tg3_flag(tp, SUPPORT_MSIX)) break; /* * If we support MSIX, we'll be using RSS. If we're using * RSS, the first vector only handles link interrupts and the * remaining vectors handle rx and tx interrupts. Reuse the * mailbox values for the next iteration. The values we setup * above are still useful for the single vectored mode. 
*/ if (!i) continue; rcvmbx += 0x8; if (sndmbx & 0x4) sndmbx -= 0x4; else sndmbx += 0xc; } tg3_init_coal(tp); pci_set_drvdata(pdev, dev); if (tg3_flag(tp, 5717_PLUS)) { /* Resume a low-power mode */ tg3_frob_aux_power(tp, false); } tg3_timer_init(tp); err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "Cannot register net device, aborting\n"); goto err_out_apeunmap; } netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n", tp->board_part_number, tp->pci_chip_rev_id, tg3_bus_string(tp, str), dev->dev_addr); if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { struct phy_device *phydev; phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", phydev->drv->name, dev_name(&phydev->dev)); } else { char *ethtype; if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) ethtype = "10/100Base-TX"; else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) ethtype = "1000Base-SX"; else ethtype = "10/100/1000Base-T"; netdev_info(dev, "attached PHY is %s (%s Ethernet) " "(WireSpeed[%d], EEE[%d])\n", tg3_phy_string(tp), ethtype, (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0, (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0); } netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", (dev->features & NETIF_F_RXCSUM) != 0, tg3_flag(tp, USE_LINKCHG_REG) != 0, (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0, tg3_flag(tp, ENABLE_ASF) != 0, tg3_flag(tp, TSO_CAPABLE) != 0); netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n", tp->dma_rwctrl, pdev->dma_mask == DMA_BIT_MASK(32) ? 32 : ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 
40 : 64); pci_save_state(pdev); return 0; err_out_apeunmap: if (tp->aperegs) { iounmap(tp->aperegs); tp->aperegs = NULL; } err_out_iounmap: if (tp->regs) { iounmap(tp->regs); tp->regs = NULL; } err_out_free_dev: free_netdev(dev); err_out_power_down: pci_set_power_state(pdev, PCI_D3hot); err_out_free_res: pci_release_regions(pdev); err_out_disable_pdev: pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); return err; } static void __devexit tg3_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); if (dev) { struct tg3 *tp = netdev_priv(dev); if (tp->fw) release_firmware(tp->fw); tg3_reset_task_cancel(tp); if (tg3_flag(tp, USE_PHYLIB)) { tg3_phy_fini(tp); tg3_mdio_fini(tp); } unregister_netdev(dev); if (tp->aperegs) { iounmap(tp->aperegs); tp->aperegs = NULL; } if (tp->regs) { iounmap(tp->regs); tp->regs = NULL; } free_netdev(dev); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } } #ifdef CONFIG_PM_SLEEP static int tg3_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(dev); int err; if (!netif_running(dev)) return 0; tg3_reset_task_cancel(tp); tg3_phy_stop(tp); tg3_netif_stop(tp); tg3_timer_stop(tp); tg3_full_lock(tp, 1); tg3_disable_ints(tp); tg3_full_unlock(tp); netif_device_detach(dev); tg3_full_lock(tp, 0); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_flag_clear(tp, INIT_COMPLETE); tg3_full_unlock(tp); err = tg3_power_down_prepare(tp); if (err) { int err2; tg3_full_lock(tp, 0); tg3_flag_set(tp, INIT_COMPLETE); err2 = tg3_restart_hw(tp, 1); if (err2) goto out; tg3_timer_start(tp); netif_device_attach(dev); tg3_netif_start(tp); out: tg3_full_unlock(tp); if (!err2) tg3_phy_start(tp); } return err; } static int tg3_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(dev); int err; if (!netif_running(dev)) 
return 0; netif_device_attach(dev); tg3_full_lock(tp, 0); tg3_flag_set(tp, INIT_COMPLETE); err = tg3_restart_hw(tp, 1); if (err) goto out; tg3_timer_start(tp); tg3_netif_start(tp); out: tg3_full_unlock(tp); if (!err) tg3_phy_start(tp); return err; } static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume); #define TG3_PM_OPS (&tg3_pm_ops) #else #define TG3_PM_OPS NULL #endif /* CONFIG_PM_SLEEP */ /** * tg3_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device * @state: The current pci connection state * * This function is called after a PCI bus error affecting * this device has been detected. */ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(netdev); pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET; netdev_info(netdev, "PCI I/O error detected\n"); rtnl_lock(); if (!netif_running(netdev)) goto done; tg3_phy_stop(tp); tg3_netif_stop(tp); tg3_timer_stop(tp); /* Want to make sure that the reset task doesn't run */ tg3_reset_task_cancel(tp); netif_device_detach(netdev); /* Clean up software state, even if MMIO is blocked */ tg3_full_lock(tp, 0); tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); tg3_full_unlock(tp); done: if (state == pci_channel_io_perm_failure) err = PCI_ERS_RESULT_DISCONNECT; else pci_disable_device(pdev); rtnl_unlock(); return err; } /** * tg3_io_slot_reset - called after the pci bus has been reset. * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. * At this point, the card has exprienced a hard reset, * followed by fixups by BIOS, and has its config space * set up identically to what it was at cold boot. 
*/ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(netdev); pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; int err; rtnl_lock(); if (pci_enable_device(pdev)) { netdev_err(netdev, "Cannot re-enable PCI device after reset.\n"); goto done; } pci_set_master(pdev); pci_restore_state(pdev); pci_save_state(pdev); if (!netif_running(netdev)) { rc = PCI_ERS_RESULT_RECOVERED; goto done; } err = tg3_power_up(tp); if (err) goto done; rc = PCI_ERS_RESULT_RECOVERED; done: rtnl_unlock(); return rc; } /** * tg3_io_resume - called when traffic can start flowing again. * @pdev: Pointer to PCI device * * This callback is called when the error recovery driver tells * us that its OK to resume normal operation. */ static void tg3_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(netdev); int err; rtnl_lock(); if (!netif_running(netdev)) goto done; tg3_full_lock(tp, 0); tg3_flag_set(tp, INIT_COMPLETE); err = tg3_restart_hw(tp, 1); tg3_full_unlock(tp); if (err) { netdev_err(netdev, "Cannot restart hardware after reset.\n"); goto done; } netif_device_attach(netdev); tg3_timer_start(tp); tg3_netif_start(tp); tg3_phy_start(tp); done: rtnl_unlock(); } static struct pci_error_handlers tg3_err_handler = { .error_detected = tg3_io_error_detected, .slot_reset = tg3_io_slot_reset, .resume = tg3_io_resume }; static struct pci_driver tg3_driver = { .name = DRV_MODULE_NAME, .id_table = tg3_pci_tbl, .probe = tg3_init_one, .remove = __devexit_p(tg3_remove_one), .err_handler = &tg3_err_handler, .driver.pm = TG3_PM_OPS, }; static int __init tg3_init(void) { return pci_register_driver(&tg3_driver); } static void __exit tg3_cleanup(void) { pci_unregister_driver(&tg3_driver); } module_init(tg3_init); module_exit(tg3_cleanup);
gpl-2.0
Badadroid/android_kernel_samsung_wave
arch/x86/mm/init_32.c
2883
25398
/* * * Copyright (C) 1995 Linus Torvalds * * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 */ #include <linux/module.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/swap.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/pci.h> #include <linux/pfn.h> #include <linux/poison.h> #include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/proc_fs.h> #include <linux/memory_hotplug.h> #include <linux/initrd.h> #include <linux/cpumask.h> #include <linux/gfp.h> #include <asm/asm.h> #include <asm/bios_ebda.h> #include <asm/processor.h> #include <asm/system.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/dma.h> #include <asm/fixmap.h> #include <asm/e820.h> #include <asm/apic.h> #include <asm/bugs.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/olpc_ofw.h> #include <asm/pgalloc.h> #include <asm/sections.h> #include <asm/paravirt.h> #include <asm/setup.h> #include <asm/cacheflush.h> #include <asm/page_types.h> #include <asm/init.h> unsigned long highstart_pfn, highend_pfn; static noinline int do_test_wp_bit(void); bool __read_mostly __vmalloc_start_set = false; static __init void *alloc_low_page(void) { unsigned long pfn = pgt_buf_end++; void *adr; if (pfn >= pgt_buf_top) panic("alloc_low_page: ran out of memory"); adr = __va(pfn * PAGE_SIZE); clear_page(adr); return adr; } /* * Creates a middle page table and puts a pointer to it in the * given global directory entry. This only returns the gd entry * in non-PAE compilation mode, since the middle layer is folded. 
*/ static pmd_t * __init one_md_table_init(pgd_t *pgd) { pud_t *pud; pmd_t *pmd_table; #ifdef CONFIG_X86_PAE if (!(pgd_val(*pgd) & _PAGE_PRESENT)) { if (after_bootmem) pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE); else pmd_table = (pmd_t *)alloc_low_page(); paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT); set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); pud = pud_offset(pgd, 0); BUG_ON(pmd_table != pmd_offset(pud, 0)); return pmd_table; } #endif pud = pud_offset(pgd, 0); pmd_table = pmd_offset(pud, 0); return pmd_table; } /* * Create a page table and place a pointer to it in a middle page * directory entry: */ static pte_t * __init one_page_table_init(pmd_t *pmd) { if (!(pmd_val(*pmd) & _PAGE_PRESENT)) { pte_t *page_table = NULL; if (after_bootmem) { #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK) page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE); #endif if (!page_table) page_table = (pte_t *)alloc_bootmem_pages(PAGE_SIZE); } else page_table = (pte_t *)alloc_low_page(); paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT); set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); BUG_ON(page_table != pte_offset_kernel(pmd, 0)); } return pte_offset_kernel(pmd, 0); } pmd_t * __init populate_extra_pmd(unsigned long vaddr) { int pgd_idx = pgd_index(vaddr); int pmd_idx = pmd_index(vaddr); return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx; } pte_t * __init populate_extra_pte(unsigned long vaddr) { int pte_idx = pte_index(vaddr); pmd_t *pmd; pmd = populate_extra_pmd(vaddr); return one_page_table_init(pmd) + pte_idx; } static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd, unsigned long vaddr, pte_t *lastpte) { #ifdef CONFIG_HIGHMEM /* * Something (early fixmap) may already have put a pte * page here, which causes the page table allocation * to become nonlinear. Attempt to fix it, and if it * is still nonlinear then we have to bug. 
*/ int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT; int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT; if (pmd_idx_kmap_begin != pmd_idx_kmap_end && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end && ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start || (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) { pte_t *newpte; int i; BUG_ON(after_bootmem); newpte = alloc_low_page(); for (i = 0; i < PTRS_PER_PTE; i++) set_pte(newpte + i, pte[i]); paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT); set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE)); BUG_ON(newpte != pte_offset_kernel(pmd, 0)); __flush_tlb_all(); paravirt_release_pte(__pa(pte) >> PAGE_SHIFT); pte = newpte; } BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1) && vaddr > fix_to_virt(FIX_KMAP_END) && lastpte && lastpte + PTRS_PER_PTE != pte); #endif return pte; } /* * This function initializes a certain range of kernel virtual memory * with new bootmem page tables, everywhere page tables are missing in * the given range. * * NOTE: The pagetables are allocated contiguous on the physical space * so we can cache the place of the first one and move around without * checking the pgd every time. 
*/ static void __init page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) { int pgd_idx, pmd_idx; unsigned long vaddr; pgd_t *pgd; pmd_t *pmd; pte_t *pte = NULL; vaddr = start; pgd_idx = pgd_index(vaddr); pmd_idx = pmd_index(vaddr); pgd = pgd_base + pgd_idx; for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { pmd = one_md_table_init(pgd); pmd = pmd + pmd_index(vaddr); for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) { pte = page_table_kmap_check(one_page_table_init(pmd), pmd, vaddr, pte); vaddr += PMD_SIZE; } pmd_idx = 0; } } static inline int is_kernel_text(unsigned long addr) { if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end) return 1; return 0; } /* * This maps the physical memory to kernel virtual address space, a total * of max_low_pfn pages, by creating page tables starting from address * PAGE_OFFSET: */ unsigned long __init kernel_physical_mapping_init(unsigned long start, unsigned long end, unsigned long page_size_mask) { int use_pse = page_size_mask == (1<<PG_LEVEL_2M); unsigned long last_map_addr = end; unsigned long start_pfn, end_pfn; pgd_t *pgd_base = swapper_pg_dir; int pgd_idx, pmd_idx, pte_ofs; unsigned long pfn; pgd_t *pgd; pmd_t *pmd; pte_t *pte; unsigned pages_2m, pages_4k; int mapping_iter; start_pfn = start >> PAGE_SHIFT; end_pfn = end >> PAGE_SHIFT; /* * First iteration will setup identity mapping using large/small pages * based on use_pse, with other attributes same as set by * the early code in head_32.S * * Second iteration will setup the appropriate attributes (NX, GLOBAL..) * as desired for the kernel identity mapping. * * This two pass mechanism conforms to the TLB app note which says: * * "Software should not write to a paging-structure entry in a way * that would change, for any linear address, both the page size * and either the page frame or attributes." 
*/ mapping_iter = 1; if (!cpu_has_pse) use_pse = 0; repeat: pages_2m = pages_4k = 0; pfn = start_pfn; pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); pgd = pgd_base + pgd_idx; for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) { pmd = one_md_table_init(pgd); if (pfn >= end_pfn) continue; #ifdef CONFIG_X86_PAE pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); pmd += pmd_idx; #else pmd_idx = 0; #endif for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn; pmd++, pmd_idx++) { unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET; /* * Map with big pages if possible, otherwise * create normal page tables: */ if (use_pse) { unsigned int addr2; pgprot_t prot = PAGE_KERNEL_LARGE; /* * first pass will use the same initial * identity mapping attribute + _PAGE_PSE. */ pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR | _PAGE_PSE); addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1; if (is_kernel_text(addr) || is_kernel_text(addr2)) prot = PAGE_KERNEL_LARGE_EXEC; pages_2m++; if (mapping_iter == 1) set_pmd(pmd, pfn_pmd(pfn, init_prot)); else set_pmd(pmd, pfn_pmd(pfn, prot)); pfn += PTRS_PER_PTE; continue; } pte = one_page_table_init(pmd); pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); pte += pte_ofs; for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn; pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) { pgprot_t prot = PAGE_KERNEL; /* * first pass will use the same initial * identity mapping attribute. */ pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR); if (is_kernel_text(addr)) prot = PAGE_KERNEL_EXEC; pages_4k++; if (mapping_iter == 1) { set_pte(pte, pfn_pte(pfn, init_prot)); last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE; } else set_pte(pte, pfn_pte(pfn, prot)); } } } if (mapping_iter == 1) { /* * update direct mapping page count only in the first * iteration. */ update_page_count(PG_LEVEL_2M, pages_2m); update_page_count(PG_LEVEL_4K, pages_4k); /* * local global flush tlb, which will flush the previous * mappings present in both small and large page TLB's. 
*/ __flush_tlb_all(); /* * Second iteration will set the actual desired PTE attributes. */ mapping_iter = 2; goto repeat; } return last_map_addr; } pte_t *kmap_pte; pgprot_t kmap_prot; static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr) { return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), vaddr), vaddr); } static void __init kmap_init(void) { unsigned long kmap_vstart; /* * Cache the first kmap pte: */ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); kmap_pte = kmap_get_fixmap_pte(kmap_vstart); kmap_prot = PAGE_KERNEL; } #ifdef CONFIG_HIGHMEM static void __init permanent_kmaps_init(pgd_t *pgd_base) { unsigned long vaddr; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; vaddr = PKMAP_BASE; page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); pgd = swapper_pg_dir + pgd_index(vaddr); pud = pud_offset(pgd, vaddr); pmd = pmd_offset(pud, vaddr); pte = pte_offset_kernel(pmd, vaddr); pkmap_page_table = pte; } static void __init add_one_highpage_init(struct page *page) { ClearPageReserved(page); init_page_count(page); __free_page(page); totalhigh_pages++; } void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn, unsigned long end_pfn) { struct range *range; int nr_range; int i; nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn); for (i = 0; i < nr_range; i++) { struct page *page; int node_pfn; for (node_pfn = range[i].start; node_pfn < range[i].end; node_pfn++) { if (!pfn_valid(node_pfn)) continue; page = pfn_to_page(node_pfn); add_one_highpage_init(page); } } } #else static inline void permanent_kmaps_init(pgd_t *pgd_base) { } #endif /* CONFIG_HIGHMEM */ void __init native_pagetable_setup_start(pgd_t *base) { unsigned long pfn, va; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; /* * Remove any mappings which extend past the end of physical * memory from the boot time page table: */ for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) { va = PAGE_OFFSET + (pfn<<PAGE_SHIFT); 
pgd = base + pgd_index(va); if (!pgd_present(*pgd)) break; pud = pud_offset(pgd, va); pmd = pmd_offset(pud, va); if (!pmd_present(*pmd)) break; pte = pte_offset_kernel(pmd, va); if (!pte_present(*pte)) break; pte_clear(NULL, va, pte); } paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT); } void __init native_pagetable_setup_done(pgd_t *base) { } /* * Build a proper pagetable for the kernel mappings. Up until this * point, we've been running on some set of pagetables constructed by * the boot process. * * If we're booting on native hardware, this will be a pagetable * constructed in arch/x86/kernel/head_32.S. The root of the * pagetable will be swapper_pg_dir. * * If we're booting paravirtualized under a hypervisor, then there are * more options: we may already be running PAE, and the pagetable may * or may not be based in swapper_pg_dir. In any case, * paravirt_pagetable_setup_start() will set up swapper_pg_dir * appropriately for the rest of the initialization to work. * * In general, pagetable_init() assumes that the pagetable may already * be partially populated, and so it avoids stomping on any existing * mappings. */ void __init early_ioremap_page_table_range_init(void) { pgd_t *pgd_base = swapper_pg_dir; unsigned long vaddr, end; /* * Fixed mappings, only the page table structure has to be * created - mappings will be set by set_fixmap(): */ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK; page_table_range_init(vaddr, end, pgd_base); early_ioremap_reset(); } static void __init pagetable_init(void) { pgd_t *pgd_base = swapper_pg_dir; permanent_kmaps_init(pgd_base); } pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP); EXPORT_SYMBOL_GPL(__supported_pte_mask); /* user-defined highmem size */ static unsigned int highmem_pages = -1; /* * highmem=size forces highmem to be exactly 'size' bytes. * This works even on boxes that have no highmem otherwise. 
* This also works to reduce highmem size on bigger boxes. */ static int __init parse_highmem(char *arg) { if (!arg) return -EINVAL; highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT; return 0; } early_param("highmem", parse_highmem); #define MSG_HIGHMEM_TOO_BIG \ "highmem size (%luMB) is bigger than pages available (%luMB)!\n" #define MSG_LOWMEM_TOO_SMALL \ "highmem size (%luMB) results in <64MB lowmem, ignoring it!\n" /* * All of RAM fits into lowmem - but if user wants highmem * artificially via the highmem=x boot parameter then create * it: */ void __init lowmem_pfn_init(void) { /* max_low_pfn is 0, we already have early_res support */ max_low_pfn = max_pfn; if (highmem_pages == -1) highmem_pages = 0; #ifdef CONFIG_HIGHMEM if (highmem_pages >= max_pfn) { printk(KERN_ERR MSG_HIGHMEM_TOO_BIG, pages_to_mb(highmem_pages), pages_to_mb(max_pfn)); highmem_pages = 0; } if (highmem_pages) { if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) { printk(KERN_ERR MSG_LOWMEM_TOO_SMALL, pages_to_mb(highmem_pages)); highmem_pages = 0; } max_low_pfn -= highmem_pages; } #else if (highmem_pages) printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n"); #endif } #define MSG_HIGHMEM_TOO_SMALL \ "only %luMB highmem pages available, ignoring highmem size of %luMB!\n" #define MSG_HIGHMEM_TRIMMED \ "Warning: only 4GB will be used. 
Use a HIGHMEM64G enabled kernel!\n" /* * We have more RAM than fits into lowmem - we try to put it into * highmem, also taking the highmem=x boot parameter into account: */ void __init highmem_pfn_init(void) { max_low_pfn = MAXMEM_PFN; if (highmem_pages == -1) highmem_pages = max_pfn - MAXMEM_PFN; if (highmem_pages + MAXMEM_PFN < max_pfn) max_pfn = MAXMEM_PFN + highmem_pages; if (highmem_pages + MAXMEM_PFN > max_pfn) { printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL, pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages)); highmem_pages = 0; } #ifndef CONFIG_HIGHMEM /* Maximum memory usable is what is directly addressable */ printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20); if (max_pfn > MAX_NONPAE_PFN) printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n"); else printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n"); max_pfn = MAXMEM_PFN; #else /* !CONFIG_HIGHMEM */ #ifndef CONFIG_HIGHMEM64G if (max_pfn > MAX_NONPAE_PFN) { max_pfn = MAX_NONPAE_PFN; printk(KERN_WARNING MSG_HIGHMEM_TRIMMED); } #endif /* !CONFIG_HIGHMEM64G */ #endif /* !CONFIG_HIGHMEM */ } /* * Determine low and high memory ranges: */ void __init find_low_pfn_range(void) { /* it could update max_pfn */ if (max_pfn <= MAXMEM_PFN) lowmem_pfn_init(); else highmem_pfn_init(); } #ifndef CONFIG_NEED_MULTIPLE_NODES void __init initmem_init(void) { #ifdef CONFIG_HIGHMEM highstart_pfn = highend_pfn = max_pfn; if (max_pfn > max_low_pfn) highstart_pfn = max_low_pfn; memblock_x86_register_active_regions(0, 0, highend_pfn); sparse_memory_present_with_active_regions(0); printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", pages_to_mb(highend_pfn - highstart_pfn)); num_physpages = highend_pfn; high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; #else memblock_x86_register_active_regions(0, 0, max_low_pfn); sparse_memory_present_with_active_regions(0); num_physpages = max_low_pfn; high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; #endif #ifdef CONFIG_FLATMEM max_mapnr = 
num_physpages; #endif __vmalloc_start_set = true; printk(KERN_NOTICE "%ldMB LOWMEM available.\n", pages_to_mb(max_low_pfn)); setup_bootmem_allocator(); } #endif /* !CONFIG_NEED_MULTIPLE_NODES */ static void __init zone_sizes_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); #ifdef CONFIG_ZONE_DMA max_zone_pfns[ZONE_DMA] = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; #endif max_zone_pfns[ZONE_NORMAL] = max_low_pfn; #ifdef CONFIG_HIGHMEM max_zone_pfns[ZONE_HIGHMEM] = highend_pfn; #endif free_area_init_nodes(max_zone_pfns); } void __init setup_bootmem_allocator(void) { printk(KERN_INFO " mapped low ram: 0 - %08lx\n", max_pfn_mapped<<PAGE_SHIFT); printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT); after_bootmem = 1; } /* * paging_init() sets up the page tables - note that the first 8MB are * already mapped by head.S. * * This routines also unmaps the page at virtual kernel address 0, so * that we can trap those pesky NULL-reference errors in the kernel. */ void __init paging_init(void) { pagetable_init(); __flush_tlb_all(); kmap_init(); /* * NOTE: at this point the bootmem allocator is fully available. */ olpc_dt_build_devicetree(); sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); zone_sizes_init(); } /* * Test if the WP bit works in supervisor mode. It isn't supported on 386's * and also on some strange 486's. All 586+'s are OK. This used to involve * black magic jumps to work around some nasty CPU bugs, but fortunately the * switch to using exceptions got rid of all that. 
 */
/*
 * Runtime check of whether this CPU honours the WP bit in supervisor
 * mode (386-class CPUs do not).  The verdict is stored in
 * boot_cpu_data.wp_works_ok for later use.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		/* Kernel was built assuming a working WP bit: cannot continue. */
		panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

/*
 * Late boot memory accounting and sanity checks: hands all low memory
 * to the page allocator, counts reserved pages, prints the memory and
 * virtual-layout summary, and finally runs the WP-bit test if it has
 * not produced a verdict yet.
 */
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

	pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init();

	/* Section sizes derived from the linker-script symbols. */
	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
		"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		totalhigh_pages << (PAGE_SHIFT-10));

	printk(KERN_INFO "virtual kernel memory layout:\n"
		" fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		" pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
#endif
		" vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
		" lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
		" .init : 0x%08lx - 0x%08lx (%4ld kB)\n"
		" .data : 0x%08lx - 0x%08lx (%4ld kB)\n"
		" .text : 0x%08lx - 0x%08lx (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif
		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,
		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,
		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
	/*
	 * The temporary #defines below substitute worst-case constants so
	 * that the BUILD_BUG_ON()s can evaluate at compile time; the BUG_ON()s
	 * afterwards re-check the same invariants with the real runtime values.
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
	BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START >= VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);

	/* wp_works_ok < 0 means "not yet tested" — run the probe now. */
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Hot-add a physical memory range to node @nid.  On 32-bit x86 new
 * memory always lands in ZONE_HIGHMEM.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	/*
	 * Write through the read-only FIX_WP_TEST mapping.  If WP is
	 * honoured the store faults and the exception table entry skips
	 * the "xorl" that clears @flag, so flag stays 1 (broken path
	 * returns 0 via the xor).
	 */
	__asm__ __volatile__(
		" movb %0, %1 \n"
		"1: movb %1, %0 \n"
		" xorl %2, %2 \n"
		"2: \n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

#ifdef CONFIG_DEBUG_RODATA
/* Known value poked at by rodata_test() to verify write protection. */
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

/* Set once mark_rodata_ro() has write-protected the kernel text. */
int kernel_set_to_readonly __read_mostly;

/*
 * Temporarily make the kernel text writable (e.g. for code patching).
 * No-op until mark_rodata_ro() has run.
 */
void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, start+size);

	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
}

/*
 * Restore the kernel text to read-only after a set_kernel_text_rw()
 * window.  No-op until mark_rodata_ro() has run.
 */
void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, start+size);

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
}

/* Mark everything after the kernel text as non-executable (NX). */
static void mark_nxdata_nx(void)
{
	/*
	 * When this called, init has already been executed and released,
	 * so everything past _etext should be NX.
	 */
	unsigned long start = PFN_ALIGN(_etext);
	/*
	 * This comes from is_kernel_text upper limit. Also HPAGE where used:
	 */
	unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

	if (__supported_pte_mask & _PAGE_NX)
		printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
	set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
}

/*
 * Write-protect the kernel text and rodata sections, optionally
 * exercising the CPA machinery, then NX-protect the data area.
 */
void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

	kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
	/* Self-test: flip text rw and back to exercise change_page_attr. */
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif

	/* Now the rodata section, which follows the text. */
	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
	mark_nxdata_nx();
}
#endif
gpl-2.0
bcnice20/speedy-2.6.32.21
drivers/mfd/wm8350-gpio.c
4419
6197
/*
 * wm8350-gpio.c -- GPIO pin configuration for Wolfson WM8350
 * (header previously mis-named this file "wm8350-core.c")
 *
 * Copyright 2007, 2008 Wolfson Microelectronics PLC.
 *
 * Author: Liam Girdwood
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mfd/wm8350/core.h>
#include <linux/mfd/wm8350/gpio.h>
#include <linux/mfd/wm8350/pmic.h>

/*
 * Set the direction of one GPIO pin.  The I/O configuration register is
 * write-protected, so it is unlocked for the duration of the update.
 * Clearing the bit selects output, setting it selects input.
 */
static int gpio_set_dir(struct wm8350 *wm8350, int gpio, int dir)
{
	int ret;

	wm8350_reg_unlock(wm8350);
	if (dir == WM8350_GPIO_DIR_OUT)
		ret = wm8350_clear_bits(wm8350,
					WM8350_GPIO_CONFIGURATION_I_O,
					1 << gpio);
	else
		ret = wm8350_set_bits(wm8350,
				      WM8350_GPIO_CONFIGURATION_I_O,
				      1 << gpio);
	wm8350_reg_lock(wm8350);
	return ret;
}

/* Enable or disable input debouncing on one GPIO pin. */
static int gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db)
{
	if (db == WM8350_GPIO_DEBOUNCE_ON)
		return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE,
				       1 << gpio);
	else
		return wm8350_clear_bits(wm8350,
					 WM8350_GPIO_DEBOUNCE, 1 << gpio);
}

/*
 * Select the alternate function of one GPIO pin (GP0..GP12).  Each
 * function-select register packs four pins, 4 bits per pin, hence the
 * shift of (gpio % 4) * 4 spelled out per case below.  Returns -EINVAL
 * for pin numbers outside 0..12; register writes themselves are not
 * checked for errors here.
 */
static int gpio_set_func(struct wm8350 *wm8350, int gpio, int func)
{
	u16 reg;

	wm8350_reg_unlock(wm8350);
	switch (gpio) {
	case 0:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1)
		    & ~WM8350_GP0_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1,
				 reg | ((func & 0xf) << 0));
		break;
	case 1:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1)
		    & ~WM8350_GP1_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1,
				 reg | ((func & 0xf) << 4));
		break;
	case 2:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1)
		    & ~WM8350_GP2_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1,
				 reg | ((func & 0xf) << 8));
		break;
	case 3:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1)
		    & ~WM8350_GP3_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1,
				 reg | ((func & 0xf) << 12));
		break;
	case 4:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2)
		    & ~WM8350_GP4_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2,
				 reg | ((func & 0xf) << 0));
		break;
	case 5:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2)
		    & ~WM8350_GP5_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2,
				 reg | ((func & 0xf) << 4));
		break;
	case 6:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2)
		    & ~WM8350_GP6_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2,
				 reg | ((func & 0xf) << 8));
		break;
	case 7:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2)
		    & ~WM8350_GP7_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2,
				 reg | ((func & 0xf) << 12));
		break;
	case 8:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3)
		    & ~WM8350_GP8_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3,
				 reg | ((func & 0xf) << 0));
		break;
	case 9:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3)
		    & ~WM8350_GP9_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3,
				 reg | ((func & 0xf) << 4));
		break;
	case 10:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3)
		    & ~WM8350_GP10_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3,
				 reg | ((func & 0xf) << 8));
		break;
	case 11:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3)
		    & ~WM8350_GP11_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3,
				 reg | ((func & 0xf) << 12));
		break;
	case 12:
		reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_4)
		    & ~WM8350_GP12_FN_MASK;
		wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_4,
				 reg | ((func & 0xf) << 0));
		break;
	default:
		/* Unknown pin: re-lock before bailing out. */
		wm8350_reg_lock(wm8350);
		return -EINVAL;
	}

	wm8350_reg_lock(wm8350);
	return 0;
}

/* Enable (up != 0) or disable the internal pull-up on one GPIO pin. */
static int gpio_set_pull_up(struct wm8350 *wm8350, int gpio, int up)
{
	if (up)
		return wm8350_set_bits(wm8350,
				       WM8350_GPIO_PIN_PULL_UP_CONTROL,
				       1 << gpio);
	else
		return wm8350_clear_bits(wm8350,
					 WM8350_GPIO_PIN_PULL_UP_CONTROL,
					 1 << gpio);
}

/* Enable (down != 0) or disable the internal pull-down on one GPIO pin. */
static int gpio_set_pull_down(struct wm8350 *wm8350, int gpio, int down)
{
	if (down)
		return wm8350_set_bits(wm8350,
				       WM8350_GPIO_PULL_DOWN_CONTROL,
				       1 << gpio);
	else
		return wm8350_clear_bits(wm8350,
					 WM8350_GPIO_PULL_DOWN_CONTROL,
					 1 << gpio);
}

/* Select active-high or active-low polarity for one GPIO pin. */
static int gpio_set_polarity(struct wm8350 *wm8350, int gpio, int pol)
{
	if (pol == WM8350_GPIO_ACTIVE_HIGH)
		return wm8350_set_bits(wm8350,
				       WM8350_GPIO_PIN_POLARITY_TYPE,
				       1 << gpio);
	else
		return wm8350_clear_bits(wm8350,
					 WM8350_GPIO_PIN_POLARITY_TYPE,
					 1 << gpio);
}

/* Enable or disable interrupt-mode inversion for one GPIO pin. */
static int gpio_set_invert(struct wm8350 *wm8350, int gpio, int invert)
{
	if (invert == WM8350_GPIO_INVERT_ON)
		return wm8350_set_bits(wm8350, WM8350_GPIO_INT_MODE, 1 << gpio);
	else
		return wm8350_clear_bits(wm8350,
					 WM8350_GPIO_INT_MODE, 1 << gpio);
}

/*
 * Fully configure one WM8350 GPIO pin: pulls, invert, polarity,
 * debounce, direction and alternate function, in that order.
 * Pull-up and pull-down are sequenced so the two are never enabled
 * simultaneously.  Returns 0 on success; any intermediate failure is
 * collapsed to -EIO (the specific error code is not propagated), and a
 * failure in gpio_set_func() returns that function's own code.
 */
int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func,
		       int pol, int pull, int invert, int debounce)
{
	/* make sure we never pull up and down at the same time */
	if (pull == WM8350_GPIO_PULL_NONE) {
		if (gpio_set_pull_up(wm8350, gpio, 0))
			goto err;
		if (gpio_set_pull_down(wm8350, gpio, 0))
			goto err;
	} else if (pull == WM8350_GPIO_PULL_UP) {
		if (gpio_set_pull_down(wm8350, gpio, 0))
			goto err;
		if (gpio_set_pull_up(wm8350, gpio, 1))
			goto err;
	} else if (pull == WM8350_GPIO_PULL_DOWN) {
		if (gpio_set_pull_up(wm8350, gpio, 0))
			goto err;
		if (gpio_set_pull_down(wm8350, gpio, 1))
			goto err;
	}

	if (gpio_set_invert(wm8350, gpio, invert))
		goto err;
	if (gpio_set_polarity(wm8350, gpio, pol))
		goto err;
	if (gpio_set_debounce(wm8350, gpio, debounce))
		goto err;

	if (gpio_set_dir(wm8350, gpio, dir))
		goto err;
	return gpio_set_func(wm8350, gpio, func);

err:
	return -EIO;
}
EXPORT_SYMBOL_GPL(wm8350_gpio_config);
gpl-2.0
sndnvaps/android_kernel_lge_hammerhead
arch/blackfin/mach-bf537/boards/tcm_bf537.c
4419
17986
/*
 * Board support for the Bluetechnix TCM BF537 (Blackfin BF537 CPU
 * module): static platform-device and SPI-slave tables, registered at
 * arch_initcall time.
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *                2008-2009 Bluetechnix
 *                2005 National ICT Australia (NICTA)
 *                      Aidan Williams <aidan@nicta.com.au>
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
#include <asm/dpmc.h>
#include <linux/spi/mmc_spi.h>

/*
 * Name the Board for the /proc/cpuinfo
 */
const char bfin_board_name[] = "Bluetechnix TCM BF537";

#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* all SPI peripherals info goes here */

#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
/* Partition layout of the on-board m25p64 SPI flash. */
static struct mtd_partition bfin_spi_flash_partitions[] = {
	{
		.name = "bootloader(spi)",
		.size = 0x00020000,
		.offset = 0,
		.mask_flags = MTD_CAP_ROM
	}, {
		.name = "linux kernel(spi)",
		.size = 0xe0000,
		.offset = 0x20000
	}, {
		.name = "file system(spi)",
		.size = 0x700000,
		.offset = 0x00100000,
	}
};

static struct flash_platform_data bfin_spi_flash_data = {
	.name = "m25p80",
	.parts = bfin_spi_flash_partitions,
	.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
	.type = "m25p64",
};

/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
	.enable_dma = 0,         /* use dma transfer with this chip*/
};
#endif

#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
	.enable_dma = 0,
};
#endif

/* SPI slave devices present on this board, keyed by chip select. */
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
	{
		/* the modalias must be the same as spi device driver name */
		.modalias = "m25p80", /* Name of spi_driver for this device */
		.max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0, /* Framework bus number */
		.chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
		.platform_data = &bfin_spi_flash_data,
		.controller_data = &spi_flash_chip_info,
		.mode = SPI_MODE_3,
	},
#endif

#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
	{
		.modalias = "ad183x",
		.max_speed_hz = 3125000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 4,
	},
#endif

	/* NOTE(review): chip_select 1 is also used by m25p80 above — the two
	 * are presumably never enabled together; confirm in the defconfig. */
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
	{
		.modalias = "mmc_spi",
		.max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 1,
		.controller_data = &mmc_spi_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
};

/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
	[0] = {
		.start = SPI0_REGBASE,
		.end   = SPI0_REGBASE + 0xFF,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = CH_SPI,
		.end   = CH_SPI,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = IRQ_SPI,
		.end   = IRQ_SPI,
		.flags = IORESOURCE_IRQ,
	}
};

/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
	.num_chipselect = 8,
	.enable_dma = 1,  /* master has the ability to do dma transfer */
	.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};

static struct platform_device bfin_spi0_device = {
	.name = "bfin-spi",
	.id = 0, /* Bus number */
	.num_resources = ARRAY_SIZE(bfin_spi0_resource),
	.resource = bfin_spi0_resource,
	.dev = {
		.platform_data = &bfin_spi0_info, /* Passed to driver */
	},
};
#endif  /* spi master and devices */

#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
static struct platform_device rtc_device = {
	.name = "rtc-bfin",
	.id   = -1,
};
#endif

#if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE)
static struct platform_device hitachi_fb_device = {
	.name = "hitachi-tx09",
};
#endif

#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
#include <linux/smc91x.h>

/* SMC91x Ethernet controller on the external bus. */
static struct smc91x_platdata smc91x_info = {
	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
	.leda = RPC_LED_100_10,
	.ledb = RPC_LED_TX_RX,
};

static struct resource smc91x_resources[] = {
	{
		.start = 0x20200300,
		.end = 0x20200300 + 16,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_PF14,
		.end = IRQ_PF14,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
	},
};

static struct platform_device smc91x_device = {
	.name = "smc91x",
	.id = 0,
	.num_resources = ARRAY_SIZE(smc91x_resources),
	.resource = smc91x_resources,
	.dev = {
		.platform_data = &smc91x_info,
	},
};
#endif

#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
/* ISP1362 USB host controller: data and command register windows + IRQ. */
static struct resource isp1362_hcd_resources[] = {
	{
		.start = 0x20308000,
		.end = 0x20308000,
		.flags = IORESOURCE_MEM,
	}, {
		.start = 0x20308004,
		.end = 0x20308004,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_PG15,
		.end = IRQ_PG15,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
	},
};

static struct isp1362_platform_data isp1362_priv = {
	.sel15Kres = 1,
	.clknotstop = 0,
	.oc_enable = 0,
	.int_act_high = 0,
	.int_edge_triggered = 0,
	.remote_wakeup_connected = 0,
	.no_power_switching = 1,
	.power_switching_mode = 0,
};

static struct platform_device isp1362_hcd_device = {
	.name = "isp1362-hcd",
	.id = 0,
	.dev = {
		.platform_data = &isp1362_priv,
	},
	.num_resources = ARRAY_SIZE(isp1362_hcd_resources),
	.resource = isp1362_hcd_resources,
};
#endif

#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
/* NET2272 USB device controller on the external bus. */
static struct resource net2272_bfin_resources[] = {
	{
		.start = 0x20300000,
		.end = 0x20300000 + 0x100,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_PG13,
		.end = IRQ_PG13,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
	},
};

static struct platform_device net2272_bfin_device = {
	.name = "net2272",
	.id = -1,
	.num_resources = ARRAY_SIZE(net2272_bfin_resources),
	.resource = net2272_bfin_resources,
};
#endif

#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
/* Parallel NOR flash, addressed with two extra GPIO address lines. */
static struct mtd_partition cm_partitions[] = {
	{
		.name = "bootloader(nor)",
		.size = 0x40000,
		.offset = 0,
	}, {
		.name = "linux kernel(nor)",
		.size = 0x100000,
		.offset = MTDPART_OFS_APPEND,
	}, {
		.name = "file system(nor)",
		.size = MTDPART_SIZ_FULL,
		.offset = MTDPART_OFS_APPEND,
	}
};

static struct physmap_flash_data cm_flash_data = {
	.width = 2,
	.parts = cm_partitions,
	.nr_parts = ARRAY_SIZE(cm_partitions),
};

static unsigned cm_flash_gpios[] = { GPIO_PF4, GPIO_PF5 };

static struct resource cm_flash_resource[] = {
	{
		.name = "cfi_probe",
		.start = 0x20000000,
		.end = 0x201fffff,
		.flags = IORESOURCE_MEM,
	}, {
		/* gpio-addr-flash convention: the GPIO array pointer and its
		 * length are smuggled through an IRQ resource's start/end. */
		.start = (unsigned long)cm_flash_gpios,
		.end = ARRAY_SIZE(cm_flash_gpios),
		.flags = IORESOURCE_IRQ,
	}
};

static struct platform_device cm_flash_device = {
	.name = "gpio-addr-flash",
	.id = 0,
	.dev = {
		.platform_data = &cm_flash_data,
	},
	.num_resources = ARRAY_SIZE(cm_flash_resource),
	.resource = cm_flash_resource,
};
#endif

#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
static struct resource bfin_uart0_resources[] = {
	{
		.start = UART0_THR,
		.end = UART0_GCTL+2,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_UART0_TX,
		.end = IRQ_UART0_TX,
		.flags = IORESOURCE_IRQ,
	}, {
		.start = IRQ_UART0_RX,
		.end = IRQ_UART0_RX,
		.flags = IORESOURCE_IRQ,
	}, {
		.start = IRQ_UART0_ERROR,
		.end = IRQ_UART0_ERROR,
		.flags = IORESOURCE_IRQ,
	}, {
		.start = CH_UART0_TX,
		.end = CH_UART0_TX,
		.flags = IORESOURCE_DMA,
	}, {
		.start = CH_UART0_RX,
		.end = CH_UART0_RX,
		.flags = IORESOURCE_DMA,
	},
};

/* Zero-terminated pin-mux request list, consumed by the uart driver. */
static unsigned short bfin_uart0_peripherals[] = {
	P_UART0_TX, P_UART0_RX, 0
};

static struct platform_device bfin_uart0_device = {
	.name = "bfin-uart",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_uart0_resources),
	.resource = bfin_uart0_resources,
	.dev = {
		.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
	},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
static struct resource bfin_uart1_resources[] = {
	{
		.start = UART1_THR,
		.end = UART1_GCTL+2,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_UART1_TX,
		.end = IRQ_UART1_TX,
		.flags = IORESOURCE_IRQ,
	}, {
		.start = IRQ_UART1_RX,
		.end = IRQ_UART1_RX,
		.flags = IORESOURCE_IRQ,
	}, {
		.start = IRQ_UART1_ERROR,
		.end = IRQ_UART1_ERROR,
		.flags = IORESOURCE_IRQ,
	}, {
		.start = CH_UART1_TX,
		.end = CH_UART1_TX,
		.flags = IORESOURCE_DMA,
	}, {
		.start = CH_UART1_RX,
		.end = CH_UART1_RX,
		.flags = IORESOURCE_DMA,
	},
};

static unsigned short bfin_uart1_peripherals[] = {
	P_UART1_TX, P_UART1_RX, 0
};

static struct platform_device bfin_uart1_device = {
	.name = "bfin-uart",
	.id = 1,
	.num_resources = ARRAY_SIZE(bfin_uart1_resources),
	.resource = bfin_uart1_resources,
	.dev = {
		.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
	},
};
#endif
#endif

#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
/* IrDA (SIR) over UART0. */
static struct resource bfin_sir0_resources[] = {
	{
		.start = 0xFFC00400,
		.end = 0xFFC004FF,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_UART0_RX,
		.end = IRQ_UART0_RX+1,
		.flags = IORESOURCE_IRQ,
	}, {
		.start = CH_UART0_RX,
		.end = CH_UART0_RX+1,
		.flags = IORESOURCE_DMA,
	},
};

static struct platform_device bfin_sir0_device = {
	.name = "bfin_sir",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
	.resource = bfin_sir0_resources,
};
#endif
#ifdef CONFIG_BFIN_SIR1
/* IrDA (SIR) over UART1. */
static struct resource bfin_sir1_resources[] = {
	{
		.start = 0xFFC02000,
		.end = 0xFFC020FF,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_UART1_RX,
		.end = IRQ_UART1_RX+1,
		.flags = IORESOURCE_IRQ,
	}, {
		.start = CH_UART1_RX,
		.end = CH_UART1_RX+1,
		.flags = IORESOURCE_DMA,
	},
};

static struct platform_device bfin_sir1_device = {
	.name = "bfin_sir",
	.id = 1,
	.num_resources = ARRAY_SIZE(bfin_sir1_resources),
	.resource = bfin_sir1_resources,
};
#endif
#endif

#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
static struct resource bfin_twi0_resource[] = {
	[0] = {
		.start = TWI0_REGBASE,
		.end   = TWI0_REGBASE,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_TWI,
		.end   = IRQ_TWI,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device i2c_bfin_twi_device = {
	.name = "i2c-bfin-twi",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
	.resource = bfin_twi0_resource,
};
#endif

#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
/* SPORT0 used as an extra UART. */
static struct resource bfin_sport0_uart_resources[] = {
	{
		.start = SPORT0_TCR1,
		.end = SPORT0_MRCS3+4,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_SPORT0_RX,
		.end = IRQ_SPORT0_RX+1,
		.flags = IORESOURCE_IRQ,
	}, {
		.start = IRQ_SPORT0_ERROR,
		.end = IRQ_SPORT0_ERROR,
		.flags = IORESOURCE_IRQ,
	},
};

static unsigned short bfin_sport0_peripherals[] = {
	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};

static struct platform_device bfin_sport0_uart_device = {
	.name = "bfin-sport-uart",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
	.resource = bfin_sport0_uart_resources,
	.dev = {
		.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
	},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
/* SPORT1 used as an extra UART. */
static struct resource bfin_sport1_uart_resources[] = {
	{
		.start = SPORT1_TCR1,
		.end = SPORT1_MRCS3+4,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_SPORT1_RX,
		.end = IRQ_SPORT1_RX+1,
		.flags = IORESOURCE_IRQ,
	}, {
		.start = IRQ_SPORT1_ERROR,
		.end = IRQ_SPORT1_ERROR,
		.flags = IORESOURCE_IRQ,
	},
};

static unsigned short bfin_sport1_peripherals[] = {
	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};

static struct platform_device bfin_sport1_uart_device = {
	.name = "bfin-sport-uart",
	.id = 1,
	.num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
	.resource = bfin_sport1_uart_resources,
	.dev = {
		.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
	},
};
#endif
#endif

#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
#include <linux/bfin_mac.h>
/* On-chip Ethernet MAC with a single MII PHY at address 1. */
static const unsigned short bfin_mac_peripherals[] = P_MII0;

static struct bfin_phydev_platform_data bfin_phydev_data[] = {
	{
		.addr = 1,
		.irq = IRQ_MAC_PHYINT,
	},
};

static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
	.phydev_number = 1,
	.phydev_data = bfin_phydev_data,
	.phy_mode = PHY_INTERFACE_MODE_MII,
	.mac_peripherals = bfin_mac_peripherals,
};

static struct platform_device bfin_mii_bus = {
	.name = "bfin_mii_bus",
	.dev = {
		.platform_data = &bfin_mii_bus_data,
	}
};

static struct platform_device bfin_mac_device = {
	.name = "bfin_mac",
	.dev = {
		.platform_data = &bfin_mii_bus,
	}
};
#endif

#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
#define PATA_INT	IRQ_PF14

static struct pata_platform_info bfin_pata_platform_data = {
	.ioport_shift = 2,
	.irq_type = IRQF_TRIGGER_HIGH,
};

static struct resource bfin_pata_resources[] = {
	{
		.start = 0x2030C000,
		.end = 0x2030C01F,
		.flags = IORESOURCE_MEM,
	}, {
		.start = 0x2030D018,
		.end = 0x2030D01B,
		.flags = IORESOURCE_MEM,
	}, {
		/* NOTE(review): PATA_INT is IRQ_PF14, also claimed by the
		 * smc91x resource above — presumably not both enabled. */
		.start = PATA_INT,
		.end = PATA_INT,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device bfin_pata_device = {
	.name = "pata_platform",
	.id = -1,
	.num_resources = ARRAY_SIZE(bfin_pata_resources),
	.resource = bfin_pata_resources,
	.dev = {
		.platform_data = &bfin_pata_platform_data,
	}
};
#endif

/* Datasheet core-clock / core-voltage operating points for DPMC. */
static const unsigned int cclk_vlev_datasheet[] =
{
	VRPAIR(VLEV_085, 250000000),
	VRPAIR(VLEV_090, 376000000),
	VRPAIR(VLEV_095, 426000000),
	VRPAIR(VLEV_100, 426000000),
	VRPAIR(VLEV_105, 476000000),
	VRPAIR(VLEV_110, 476000000),
	VRPAIR(VLEV_115, 476000000),
	VRPAIR(VLEV_120, 500000000),
	VRPAIR(VLEV_125, 533000000),
	VRPAIR(VLEV_130, 600000000),
};

static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
	.tuple_tab = cclk_vlev_datasheet,
	.tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
	.vr_settling_time = 25 /* us */,
};

static struct platform_device bfin_dpmc = {
	.name = "bfin dpmc",
	.dev = {
		.platform_data = &bfin_dmpc_vreg_data,
	},
};

/* Every device registered at arch_initcall time, config permitting. */
static struct platform_device *cm_bf537_devices[] __initdata = {

	&bfin_dpmc,

#if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE)
	&hitachi_fb_device,
#endif

#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
	&rtc_device,
#endif

#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
	&bfin_uart1_device,
#endif
#endif

#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
	&bfin_sir0_device,
#endif
#ifdef CONFIG_BFIN_SIR1
	&bfin_sir1_device,
#endif
#endif

#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
	&i2c_bfin_twi_device,
#endif

#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
	&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
	&bfin_sport1_uart_device,
#endif
#endif

#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
	&isp1362_hcd_device,
#endif

#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
	&smc91x_device,
#endif

#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
	&bfin_mii_bus,
	&bfin_mac_device,
#endif

#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
	&net2272_bfin_device,
#endif

#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
	&bfin_spi0_device,
#endif

#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
	&bfin_pata_device,
#endif

#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
	&cm_flash_device,
#endif
};

/*
 * Pulse the NET2272 reset line (PG14) low then high so the USB device
 * controller starts from a known state.  Returns 0 when the controller
 * is not configured in, or the gpio_request() error otherwise.
 */
static int __init net2272_init(void)
{
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
	int ret;

	ret = gpio_request(GPIO_PG14, "net2272");
	if (ret)
		return ret;

	/* Reset USB Chip, PG14 */
	gpio_direction_output(GPIO_PG14, 0);
	mdelay(2);
	gpio_set_value(GPIO_PG14, 1);
#endif

	return 0;
}

/*
 * Board init: register all platform devices, the SPI slave table, set
 * up the PATA interrupt, and reset the NET2272.  Registration failures
 * from platform_add_devices() are not checked here.
 */
static int __init tcm_bf537_init(void)
{
	printk(KERN_INFO "%s(): registering device resources\n", __func__);
	platform_add_devices(cm_bf537_devices, ARRAY_SIZE(cm_bf537_devices));
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
	spi_register_board_info(bfin_spi_board_info,
				ARRAY_SIZE(bfin_spi_board_info));
#endif

#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
	/* Leave the PATA IRQ disabled until the driver enables it. */
	irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
#endif

	if (net2272_init())
		pr_warning("unable to configure net2272; it probably won't work\n");

	return 0;
}

arch_initcall(tcm_bf537_init);

/* Console-capable devices that must exist before the full initcall. */
static struct platform_device *cm_bf537_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
	&bfin_uart1_device,
#endif
#endif

#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
	&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
	&bfin_sport1_uart_device,
#endif
#endif
};

/* Register the early (console) devices before regular initcalls run. */
void __init native_machine_early_platform_add_devices(void)
{
	printk(KERN_INFO "register early platform devices\n");
	early_platform_add_devices(cm_bf537_early_devices,
		ARRAY_SIZE(cm_bf537_early_devices));
}

/*
 * This board has no persistent MAC address source; the non-zero return
 * presumably tells the bfin_mac driver to fall back to a generated
 * address — confirm against the driver.
 */
int bfin_get_ether_addr(char *addr)
{
	return 1;
}
EXPORT_SYMBOL(bfin_get_ether_addr);
gpl-2.0
skeevy420/android_kernel_lge_d850
arch/blackfin/mach-bf537/boards/tcm_bf537.c
4419
17986
/* * Copyright 2004-2009 Analog Devices Inc. * 2008-2009 Bluetechnix * 2005 National ICT Australia (NICTA) * Aidan Williams <aidan@nicta.com.au> * * Licensed under the GPL-2 or later. */ #include <linux/device.h> #include <linux/etherdevice.h> #include <linux/export.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) #include <linux/usb/isp1362.h> #endif #include <linux/ata_platform.h> #include <linux/irq.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> #include <asm/portmux.h> #include <asm/dpmc.h> #include <linux/spi/mmc_spi.h> /* * Name the Board for the /proc/cpuinfo */ const char bfin_board_name[] = "Bluetechnix TCM BF537"; #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) /* all SPI peripherals info goes here */ #if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) static struct mtd_partition bfin_spi_flash_partitions[] = { { .name = "bootloader(spi)", .size = 0x00020000, .offset = 0, .mask_flags = MTD_CAP_ROM }, { .name = "linux kernel(spi)", .size = 0xe0000, .offset = 0x20000 }, { .name = "file system(spi)", .size = 0x700000, .offset = 0x00100000, } }; static struct flash_platform_data bfin_spi_flash_data = { .name = "m25p80", .parts = bfin_spi_flash_partitions, .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), .type = "m25p64", }; /* SPI flash chip (m25p64) */ static struct bfin5xx_spi_chip spi_flash_chip_info = { .enable_dma = 0, /* use dma transfer with this chip*/ }; #endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) static struct bfin5xx_spi_chip mmc_spi_chip_info = { .enable_dma = 0, }; #endif static struct spi_board_info bfin_spi_board_info[] __initdata = { #if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) { /* the modalias must be the same as spi device driver name */ 
.modalias = "m25p80", /* Name of spi_driver for this device */ .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, /* Framework bus number */ .chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/ .platform_data = &bfin_spi_flash_data, .controller_data = &spi_flash_chip_info, .mode = SPI_MODE_3, }, #endif #if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) { .modalias = "ad183x", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 4, }, #endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) { .modalias = "mmc_spi", .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 1, .controller_data = &mmc_spi_chip_info, .mode = SPI_MODE_3, }, #endif }; /* SPI (0) */ static struct resource bfin_spi0_resource[] = { [0] = { .start = SPI0_REGBASE, .end = SPI0_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = CH_SPI, .end = CH_SPI, .flags = IORESOURCE_DMA, }, [2] = { .start = IRQ_SPI, .end = IRQ_SPI, .flags = IORESOURCE_IRQ, } }; /* SPI controller data */ static struct bfin5xx_spi_master bfin_spi0_info = { .num_chipselect = 8, .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0}, }; static struct platform_device bfin_spi0_device = { .name = "bfin-spi", .id = 0, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi0_resource), .resource = bfin_spi0_resource, .dev = { .platform_data = &bfin_spi0_info, /* Passed to driver */ }, }; #endif /* spi master and devices */ #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) static struct platform_device rtc_device = { .name = "rtc-bfin", .id = -1, }; #endif #if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE) static struct platform_device hitachi_fb_device = { .name = "hitachi-tx09", }; #endif #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 
#include <linux/smc91x.h> static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; static struct resource smc91x_resources[] = { { .start = 0x20200300, .end = 0x20200300 + 16, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF14, .end = IRQ_PF14, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, .dev = { .platform_data = &smc91x_info, }, }; #endif #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) static struct resource isp1362_hcd_resources[] = { { .start = 0x20308000, .end = 0x20308000, .flags = IORESOURCE_MEM, }, { .start = 0x20308004, .end = 0x20308004, .flags = IORESOURCE_MEM, }, { .start = IRQ_PG15, .end = IRQ_PG15, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE, }, }; static struct isp1362_platform_data isp1362_priv = { .sel15Kres = 1, .clknotstop = 0, .oc_enable = 0, .int_act_high = 0, .int_edge_triggered = 0, .remote_wakeup_connected = 0, .no_power_switching = 1, .power_switching_mode = 0, }; static struct platform_device isp1362_hcd_device = { .name = "isp1362-hcd", .id = 0, .dev = { .platform_data = &isp1362_priv, }, .num_resources = ARRAY_SIZE(isp1362_hcd_resources), .resource = isp1362_hcd_resources, }; #endif #if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) static struct resource net2272_bfin_resources[] = { { .start = 0x20300000, .end = 0x20300000 + 0x100, .flags = IORESOURCE_MEM, }, { .start = IRQ_PG13, .end = IRQ_PG13, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device net2272_bfin_device = { .name = "net2272", .id = -1, .num_resources = ARRAY_SIZE(net2272_bfin_resources), .resource = net2272_bfin_resources, }; #endif #if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE) static struct mtd_partition 
cm_partitions[] = { { .name = "bootloader(nor)", .size = 0x40000, .offset = 0, }, { .name = "linux kernel(nor)", .size = 0x100000, .offset = MTDPART_OFS_APPEND, }, { .name = "file system(nor)", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; static struct physmap_flash_data cm_flash_data = { .width = 2, .parts = cm_partitions, .nr_parts = ARRAY_SIZE(cm_partitions), }; static unsigned cm_flash_gpios[] = { GPIO_PF4, GPIO_PF5 }; static struct resource cm_flash_resource[] = { { .name = "cfi_probe", .start = 0x20000000, .end = 0x201fffff, .flags = IORESOURCE_MEM, }, { .start = (unsigned long)cm_flash_gpios, .end = ARRAY_SIZE(cm_flash_gpios), .flags = IORESOURCE_IRQ, } }; static struct platform_device cm_flash_device = { .name = "gpio-addr-flash", .id = 0, .dev = { .platform_data = &cm_flash_data, }, .num_resources = ARRAY_SIZE(cm_flash_resource), .resource = cm_flash_resource, }; #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 static struct resource bfin_uart0_resources[] = { { .start = UART0_THR, .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_TX, .end = IRQ_UART0_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_ERROR, .end = IRQ_UART0_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_TX, .end = CH_UART0_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART0_RX, .end = CH_UART0_RX, .flags = IORESOURCE_DMA, }, }; static unsigned short bfin_uart0_peripherals[] = { P_UART0_TX, P_UART0_RX, 0 }; static struct platform_device bfin_uart0_device = { .name = "bfin-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_uart0_resources), .resource = bfin_uart0_resources, .dev = { .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 static struct resource bfin_uart1_resources[] = { { .start = UART1_THR, .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, { 
.start = IRQ_UART1_TX, .end = IRQ_UART1_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_ERROR, .end = IRQ_UART1_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_TX, .end = CH_UART1_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART1_RX, .end = CH_UART1_RX, .flags = IORESOURCE_DMA, }, }; static unsigned short bfin_uart1_peripherals[] = { P_UART1_TX, P_UART1_RX, 0 }; static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_uart1_resources), .resource = bfin_uart1_resources, .dev = { .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ }, }; #endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 static struct resource bfin_sir0_resources[] = { { .start = 0xFFC00400, .end = 0xFFC004FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_RX, .end = CH_UART0_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir0_device = { .name = "bfin_sir", .id = 0, .num_resources = ARRAY_SIZE(bfin_sir0_resources), .resource = bfin_sir0_resources, }; #endif #ifdef CONFIG_BFIN_SIR1 static struct resource bfin_sir1_resources[] = { { .start = 0xFFC02000, .end = 0xFFC020FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_RX, .end = CH_UART1_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir1_device = { .name = "bfin_sir", .id = 1, .num_resources = ARRAY_SIZE(bfin_sir1_resources), .resource = bfin_sir1_resources, }; #endif #endif #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) static struct resource bfin_twi0_resource[] = { [0] = { .start = TWI0_REGBASE, .end = TWI0_REGBASE, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_TWI, .end = IRQ_TWI, .flags = IORESOURCE_IRQ, 
}, }; static struct platform_device i2c_bfin_twi_device = { .name = "i2c-bfin-twi", .id = 0, .num_resources = ARRAY_SIZE(bfin_twi0_resource), .resource = bfin_twi0_resource, }; #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART static struct resource bfin_sport0_uart_resources[] = { { .start = SPORT0_TCR1, .end = SPORT0_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT0_RX, .end = IRQ_SPORT0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT0_ERROR, .end = IRQ_SPORT0_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport0_peripherals[] = { P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0 }; static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), .resource = bfin_sport0_uart_resources, .dev = { .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART static struct resource bfin_sport1_uart_resources[] = { { .start = SPORT1_TCR1, .end = SPORT1_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT1_RX, .end = IRQ_SPORT1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT1_ERROR, .end = IRQ_SPORT1_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport1_peripherals[] = { P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), .resource = bfin_sport1_uart_resources, .dev = { .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ }, }; #endif #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) #include <linux/bfin_mac.h> static const unsigned short bfin_mac_peripherals[] = P_MII0; static struct bfin_phydev_platform_data 
bfin_phydev_data[] = { { .addr = 1, .irq = IRQ_MAC_PHYINT, }, }; static struct bfin_mii_bus_platform_data bfin_mii_bus_data = { .phydev_number = 1, .phydev_data = bfin_phydev_data, .phy_mode = PHY_INTERFACE_MODE_MII, .mac_peripherals = bfin_mac_peripherals, }; static struct platform_device bfin_mii_bus = { .name = "bfin_mii_bus", .dev = { .platform_data = &bfin_mii_bus_data, } }; static struct platform_device bfin_mac_device = { .name = "bfin_mac", .dev = { .platform_data = &bfin_mii_bus, } }; #endif #if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) #define PATA_INT IRQ_PF14 static struct pata_platform_info bfin_pata_platform_data = { .ioport_shift = 2, .irq_type = IRQF_TRIGGER_HIGH, }; static struct resource bfin_pata_resources[] = { { .start = 0x2030C000, .end = 0x2030C01F, .flags = IORESOURCE_MEM, }, { .start = 0x2030D018, .end = 0x2030D01B, .flags = IORESOURCE_MEM, }, { .start = PATA_INT, .end = PATA_INT, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_pata_device = { .name = "pata_platform", .id = -1, .num_resources = ARRAY_SIZE(bfin_pata_resources), .resource = bfin_pata_resources, .dev = { .platform_data = &bfin_pata_platform_data, } }; #endif static const unsigned int cclk_vlev_datasheet[] = { VRPAIR(VLEV_085, 250000000), VRPAIR(VLEV_090, 376000000), VRPAIR(VLEV_095, 426000000), VRPAIR(VLEV_100, 426000000), VRPAIR(VLEV_105, 476000000), VRPAIR(VLEV_110, 476000000), VRPAIR(VLEV_115, 476000000), VRPAIR(VLEV_120, 500000000), VRPAIR(VLEV_125, 533000000), VRPAIR(VLEV_130, 600000000), }; static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { .tuple_tab = cclk_vlev_datasheet, .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), .vr_settling_time = 25 /* us */, }; static struct platform_device bfin_dpmc = { .name = "bfin dpmc", .dev = { .platform_data = &bfin_dmpc_vreg_data, }, }; static struct platform_device *cm_bf537_devices[] __initdata = { &bfin_dpmc, #if defined(CONFIG_FB_HITACHI_TX09) || 
defined(CONFIG_FB_HITACHI_TX09_MODULE) &hitachi_fb_device, #endif #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) &rtc_device, #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 &bfin_sir0_device, #endif #ifdef CONFIG_BFIN_SIR1 &bfin_sir1_device, #endif #endif #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) &i2c_bfin_twi_device, #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) &isp1362_hcd_device, #endif #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) &smc91x_device, #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) &bfin_mii_bus, &bfin_mac_device, #endif #if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) &net2272_bfin_device, #endif #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) &bfin_spi0_device, #endif #if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) &bfin_pata_device, #endif #if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE) &cm_flash_device, #endif }; static int __init net2272_init(void) { #if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) int ret; ret = gpio_request(GPIO_PG14, "net2272"); if (ret) return ret; /* Reset USB Chip, PG14 */ gpio_direction_output(GPIO_PG14, 0); mdelay(2); gpio_set_value(GPIO_PG14, 1); #endif return 0; } static int __init tcm_bf537_init(void) { printk(KERN_INFO "%s(): registering device resources\n", __func__); 
platform_add_devices(cm_bf537_devices, ARRAY_SIZE(cm_bf537_devices)); #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); #endif #if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN); #endif if (net2272_init()) pr_warning("unable to configure net2272; it probably won't work\n"); return 0; } arch_initcall(tcm_bf537_init); static struct platform_device *cm_bf537_early_devices[] __initdata = { #if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif }; void __init native_machine_early_platform_add_devices(void) { printk(KERN_INFO "register early platform devices\n"); early_platform_add_devices(cm_bf537_early_devices, ARRAY_SIZE(cm_bf537_early_devices)); } int bfin_get_ether_addr(char *addr) { return 1; } EXPORT_SYMBOL(bfin_get_ether_addr);
gpl-2.0
shao2610/3.4-kernel
arch/blackfin/mach-bf527/boards/tll6527m.c
4419
23125
/* File: arch/blackfin/mach-bf527/boards/tll6527m.c * Based on: arch/blackfin/mach-bf527/boards/ezkit.c * Author: Ashish Gupta * * Copyright: 2010 - The Learning Labs Inc. * * Licensed under the GPL-2 or later. */ #include <linux/device.h> #include <linux/export.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/i2c.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/usb/musb.h> #include <linux/leds.h> #include <linux/input.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> #include <asm/reboot.h> #include <asm/nand.h> #include <asm/portmux.h> #include <asm/dpmc.h> #if defined(CONFIG_TOUCHSCREEN_AD7879) \ || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE) #include <linux/spi/ad7879.h> #define LCD_BACKLIGHT_GPIO 0x40 /* TLL6527M uses TLL7UIQ35 / ADI LCD EZ Extender. AD7879 AUX GPIO is used for * LCD Backlight Enable */ #endif /* * Name the Board for the /proc/cpuinfo */ const char bfin_board_name[] = "TLL6527M"; /* * Driver needs to know address, irq and flag pin. */ #if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) static struct resource musb_resources[] = { [0] = { .start = 0xffc03800, .end = 0xffc03cff, .flags = IORESOURCE_MEM, }, [1] = { /* general IRQ */ .start = IRQ_USB_INT0, .end = IRQ_USB_INT0, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, [2] = { /* DMA IRQ */ .start = IRQ_USB_DMA, .end = IRQ_USB_DMA, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct musb_hdrc_config musb_config = { .multipoint = 0, .dyn_fifo = 0, .soft_con = 1, .dma = 1, .num_eps = 8, .dma_channels = 8, /*.gpio_vrsel = GPIO_PG13,*/ /* Some custom boards need to be active low, just set it to "0" * if it is the case. 
*/ .gpio_vrsel_active = 1, }; static struct musb_hdrc_platform_data musb_plat = { #if defined(CONFIG_USB_MUSB_OTG) .mode = MUSB_OTG, #elif defined(CONFIG_USB_MUSB_HDRC_HCD) .mode = MUSB_HOST, #elif defined(CONFIG_USB_GADGET_MUSB_HDRC) .mode = MUSB_PERIPHERAL, #endif .config = &musb_config, }; static u64 musb_dmamask = ~(u32)0; static struct platform_device musb_device = { .name = "musb-blackfin", .id = 0, .dev = { .dma_mask = &musb_dmamask, .coherent_dma_mask = 0xffffffff, .platform_data = &musb_plat, }, .num_resources = ARRAY_SIZE(musb_resources), .resource = musb_resources, }; #endif #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) #include <asm/bfin-lq035q1.h> static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = { .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB, .ppi_mode = USE_RGB565_16_BIT_PPI, .use_bl = 1, .gpio_bl = LCD_BACKLIGHT_GPIO, }; static struct resource bfin_lq035q1_resources[] = { { .start = IRQ_PPI_ERROR, .end = IRQ_PPI_ERROR, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_lq035q1_device = { .name = "bfin-lq035q1", .id = -1, .num_resources = ARRAY_SIZE(bfin_lq035q1_resources), .resource = bfin_lq035q1_resources, .dev = { .platform_data = &bfin_lq035q1_data, }, }; #endif #if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE) static struct mtd_partition tll6527m_partitions[] = { { .name = "bootloader(nor)", .size = 0xA0000, .offset = 0, }, { .name = "linux kernel(nor)", .size = 0xD00000, .offset = MTDPART_OFS_APPEND, }, { .name = "file system(nor)", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; static struct physmap_flash_data tll6527m_flash_data = { .width = 2, .parts = tll6527m_partitions, .nr_parts = ARRAY_SIZE(tll6527m_partitions), }; static unsigned tll6527m_flash_gpios[] = { GPIO_PG11, GPIO_PH11, GPIO_PH12 }; static struct resource tll6527m_flash_resource[] = { { .name = "cfi_probe", .start = 0x20000000, .end = 0x201fffff, .flags = IORESOURCE_MEM, }, { 
.start = (unsigned long)tll6527m_flash_gpios, .end = ARRAY_SIZE(tll6527m_flash_gpios), .flags = IORESOURCE_IRQ, } }; static struct platform_device tll6527m_flash_device = { .name = "gpio-addr-flash", .id = 0, .dev = { .platform_data = &tll6527m_flash_data, }, .num_resources = ARRAY_SIZE(tll6527m_flash_resource), .resource = tll6527m_flash_resource, }; #endif #if defined(CONFIG_GPIO_DECODER) || defined(CONFIG_GPIO_DECODER_MODULE) /* An SN74LVC138A 3:8 decoder chip has been used to generate 7 augmented * outputs used as SPI CS lines for all SPI SLAVE devices on TLL6527v1-0. * EXP_GPIO_SPISEL_BASE is the base number for the expanded outputs being * used as SPI CS lines, this should be > MAX_BLACKFIN_GPIOS */ #include <linux/gpio-decoder.h> #define EXP_GPIO_SPISEL_BASE 0x64 static unsigned gpio_addr_inputs[] = { GPIO_PG1, GPIO_PH9, GPIO_PH10 }; static struct gpio_decoder_platform_data spi_decoded_cs = { .base = EXP_GPIO_SPISEL_BASE, .input_addrs = gpio_addr_inputs, .nr_input_addrs = ARRAY_SIZE(gpio_addr_inputs), .default_output = 0, /* .default_output = (1 << ARRAY_SIZE(gpio_addr_inputs)) - 1 */ }; static struct platform_device spi_decoded_gpio = { .name = "gpio-decoder", .id = 0, .dev = { .platform_data = &spi_decoded_cs, }, }; #else #define EXP_GPIO_SPISEL_BASE 0x0 #endif #if defined(CONFIG_INPUT_ADXL34X) || defined(CONFIG_INPUT_ADXL34X_MODULE) #include <linux/input/adxl34x.h> static const struct adxl34x_platform_data adxl345_info = { .x_axis_offset = 0, .y_axis_offset = 0, .z_axis_offset = 0, .tap_threshold = 0x31, .tap_duration = 0x10, .tap_latency = 0x60, .tap_window = 0xF0, .tap_axis_control = ADXL_TAP_X_EN | ADXL_TAP_Y_EN | ADXL_TAP_Z_EN, .act_axis_control = 0xFF, .activity_threshold = 5, .inactivity_threshold = 2, .inactivity_time = 2, .free_fall_threshold = 0x7, .free_fall_time = 0x20, .data_rate = 0x8, .data_range = ADXL_FULL_RES, .ev_type = EV_ABS, .ev_code_x = ABS_X, /* EV_REL */ .ev_code_y = ABS_Y, /* EV_REL */ .ev_code_z = ABS_Z, /* EV_REL */ .ev_code_tap 
= {BTN_TOUCH, BTN_TOUCH, BTN_TOUCH}, /* EV_KEY x,y,z */ /* .ev_code_ff = KEY_F,*/ /* EV_KEY */ .ev_code_act_inactivity = KEY_A, /* EV_KEY */ .use_int2 = 1, .power_mode = ADXL_AUTO_SLEEP | ADXL_LINK, .fifo_mode = ADXL_FIFO_STREAM, }; #endif #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) static struct platform_device rtc_device = { .name = "rtc-bfin", .id = -1, }; #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) #include <linux/bfin_mac.h> static const unsigned short bfin_mac_peripherals[] = P_RMII0; static struct bfin_phydev_platform_data bfin_phydev_data[] = { { .addr = 1, .irq = IRQ_MAC_PHYINT, }, }; static struct bfin_mii_bus_platform_data bfin_mii_bus_data = { .phydev_number = 1, .phydev_data = bfin_phydev_data, .phy_mode = PHY_INTERFACE_MODE_RMII, .mac_peripherals = bfin_mac_peripherals, }; static struct platform_device bfin_mii_bus = { .name = "bfin_mii_bus", .dev = { .platform_data = &bfin_mii_bus_data, } }; static struct platform_device bfin_mac_device = { .name = "bfin_mac", .dev = { .platform_data = &bfin_mii_bus, } }; #endif #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) static struct mtd_partition bfin_spi_flash_partitions[] = { { .name = "bootloader(spi)", .size = 0x00040000, .offset = 0, .mask_flags = MTD_CAP_ROM }, { .name = "linux kernel(spi)", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; static struct flash_platform_data bfin_spi_flash_data = { .name = "m25p80", .parts = bfin_spi_flash_partitions, .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), .type = "m25p16", }; /* SPI flash chip (m25p64) */ static struct bfin5xx_spi_chip spi_flash_chip_info = { .enable_dma = 0, /* use dma transfer with this chip*/ }; #endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) static struct bfin5xx_spi_chip mmc_spi_chip_info = { .enable_dma = 0, }; #endif #if defined(CONFIG_TOUCHSCREEN_AD7879) \ || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE) static const struct 
ad7879_platform_data bfin_ad7879_ts_info = { .model = 7879, /* Model = AD7879 */ .x_plate_ohms = 620, /* 620 Ohm from the touch datasheet */ .pressure_max = 10000, .pressure_min = 0, .first_conversion_delay = 3, /* wait 512us before do a first conversion */ .acquisition_time = 1, /* 4us acquisition time per sample */ .median = 2, /* do 8 measurements */ .averaging = 1, /* take the average of 4 middle samples */ .pen_down_acc_interval = 255, /* 9.4 ms */ .gpio_export = 1, /* configure AUX as GPIO output*/ .gpio_base = LCD_BACKLIGHT_GPIO, }; #endif #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) static struct platform_device bfin_i2s = { .name = "bfin-i2s", .id = CONFIG_SND_BF5XX_SPORT_NUM, /* TODO: add platform data here */ }; #endif #if defined(CONFIG_GPIO_MCP23S08) || defined(CONFIG_GPIO_MCP23S08_MODULE) #include <linux/spi/mcp23s08.h> static const struct mcp23s08_platform_data bfin_mcp23s08_sys_gpio_info = { .chip[0].is_present = true, .base = 0x30, }; static const struct mcp23s08_platform_data bfin_mcp23s08_usr_gpio_info = { .chip[2].is_present = true, .base = 0x38, }; #endif static struct spi_board_info bfin_spi_board_info[] __initdata = { #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) { /* the modalias must be the same as spi device driver name */ .modalias = "m25p80", /* Name of spi_driver for this device */ .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, /* Framework bus number */ .chip_select = EXP_GPIO_SPISEL_BASE + 0x04 + MAX_CTRL_CS, /* Can be connected to TLL6527M GPIO connector */ /* Either SPI_ADC or M25P80 FLASH can be installed at a time */ .platform_data = &bfin_spi_flash_data, .controller_data = &spi_flash_chip_info, .mode = SPI_MODE_3, }, #endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) { .modalias = "mmc_spi", /* * TLL6527M V1.0 does not support SD Card at SPI Clock > 10 MHz due to * SPI buffer limitations */ .max_speed_hz = 10000000, /* max spi 
clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = EXP_GPIO_SPISEL_BASE + 0x05 + MAX_CTRL_CS, .controller_data = &mmc_spi_chip_info, .mode = SPI_MODE_0, }, #endif #if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) \ || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) { .modalias = "ad7879", .platform_data = &bfin_ad7879_ts_info, .irq = IRQ_PH14, .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = EXP_GPIO_SPISEL_BASE + 0x07 + MAX_CTRL_CS, .mode = SPI_CPHA | SPI_CPOL, }, #endif #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) { .modalias = "spidev", .max_speed_hz = 10000000, /* TLL6527Mv1-0 supports max spi clock (SCK) speed = 10 MHz */ .bus_num = 0, .chip_select = EXP_GPIO_SPISEL_BASE + 0x03 + MAX_CTRL_CS, .mode = SPI_CPHA | SPI_CPOL, }, #endif #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) { .modalias = "bfin-lq035q1-spi", .max_speed_hz = 20000000, .bus_num = 0, .chip_select = EXP_GPIO_SPISEL_BASE + 0x06 + MAX_CTRL_CS, .mode = SPI_CPHA | SPI_CPOL, }, #endif #if defined(CONFIG_GPIO_MCP23S08) || defined(CONFIG_GPIO_MCP23S08_MODULE) { .modalias = "mcp23s08", .platform_data = &bfin_mcp23s08_sys_gpio_info, .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = EXP_GPIO_SPISEL_BASE + 0x01 + MAX_CTRL_CS, .mode = SPI_CPHA | SPI_CPOL, }, { .modalias = "mcp23s08", .platform_data = &bfin_mcp23s08_usr_gpio_info, .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = EXP_GPIO_SPISEL_BASE + 0x02 + MAX_CTRL_CS, .mode = SPI_CPHA | SPI_CPOL, }, #endif }; #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) /* SPI controller data */ static struct bfin5xx_spi_master bfin_spi0_info = { .num_chipselect = EXP_GPIO_SPISEL_BASE + 8 + MAX_CTRL_CS, /* EXP_GPIO_SPISEL_BASE will be > MAX_BLACKFIN_GPIOS */ .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 
0}, }; /* SPI (0) */ static struct resource bfin_spi0_resource[] = { [0] = { .start = SPI0_REGBASE, .end = SPI0_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = CH_SPI, .end = CH_SPI, .flags = IORESOURCE_DMA, }, [2] = { .start = IRQ_SPI, .end = IRQ_SPI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_spi0_device = { .name = "bfin-spi", .id = 0, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi0_resource), .resource = bfin_spi0_resource, .dev = { .platform_data = &bfin_spi0_info, /* Passed to driver */ }, }; #endif /* spi master and devices */ #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 static struct resource bfin_uart0_resources[] = { { .start = UART0_THR, .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_TX, .end = IRQ_UART0_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_ERROR, .end = IRQ_UART0_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_TX, .end = CH_UART0_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART0_RX, .end = CH_UART0_RX, .flags = IORESOURCE_DMA, }, }; static unsigned short bfin_uart0_peripherals[] = { P_UART0_TX, P_UART0_RX, 0 }; static struct platform_device bfin_uart0_device = { .name = "bfin-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_uart0_resources), .resource = bfin_uart0_resources, .dev = { .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 static struct resource bfin_uart1_resources[] = { { .start = UART1_THR, .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_TX, .end = IRQ_UART1_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_ERROR, .end = IRQ_UART1_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_TX, .end = CH_UART1_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART1_RX, .end = 
CH_UART1_RX, .flags = IORESOURCE_DMA, }, #ifdef CONFIG_BFIN_UART1_CTSRTS { /* CTS pin */ .start = GPIO_PF9, .end = GPIO_PF9, .flags = IORESOURCE_IO, }, { /* RTS pin */ .start = GPIO_PF10, .end = GPIO_PF10, .flags = IORESOURCE_IO, }, #endif }; static unsigned short bfin_uart1_peripherals[] = { P_UART1_TX, P_UART1_RX, 0 }; static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_uart1_resources), .resource = bfin_uart1_resources, .dev = { .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ }, }; #endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 static struct resource bfin_sir0_resources[] = { { .start = 0xFFC00400, .end = 0xFFC004FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_RX, .end = CH_UART0_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir0_device = { .name = "bfin_sir", .id = 0, .num_resources = ARRAY_SIZE(bfin_sir0_resources), .resource = bfin_sir0_resources, }; #endif #ifdef CONFIG_BFIN_SIR1 static struct resource bfin_sir1_resources[] = { { .start = 0xFFC02000, .end = 0xFFC020FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_RX, .end = CH_UART1_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir1_device = { .name = "bfin_sir", .id = 1, .num_resources = ARRAY_SIZE(bfin_sir1_resources), .resource = bfin_sir1_resources, }; #endif #endif #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) static struct resource bfin_twi0_resource[] = { [0] = { .start = TWI0_REGBASE, .end = TWI0_REGBASE, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_TWI, .end = IRQ_TWI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device i2c_bfin_twi_device = { .name = "i2c-bfin-twi", .id = 0, .num_resources = 
ARRAY_SIZE(bfin_twi0_resource), .resource = bfin_twi0_resource, }; #endif static struct i2c_board_info __initdata bfin_i2c_board_info[] = { #if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) { I2C_BOARD_INFO("pcf8574_lcd", 0x22), }, #endif #if defined(CONFIG_FB_BFIN_7393) || defined(CONFIG_FB_BFIN_7393_MODULE) { I2C_BOARD_INFO("bfin-adv7393", 0x2B), }, #endif #if defined(CONFIG_TOUCHSCREEN_AD7879_I2C) \ || defined(CONFIG_TOUCHSCREEN_AD7879_I2C_MODULE) { I2C_BOARD_INFO("ad7879", 0x2C), .irq = IRQ_PH14, .platform_data = (void *)&bfin_ad7879_ts_info, }, #endif #if defined(CONFIG_SND_SOC_SSM2602) || defined(CONFIG_SND_SOC_SSM2602_MODULE) { I2C_BOARD_INFO("ssm2602", 0x1b), }, #endif { I2C_BOARD_INFO("adm1192", 0x2e), }, { I2C_BOARD_INFO("ltc3576", 0x09), }, #if defined(CONFIG_INPUT_ADXL34X_I2C) \ || defined(CONFIG_INPUT_ADXL34X_I2C_MODULE) { I2C_BOARD_INFO("adxl34x", 0x53), .irq = IRQ_PH13, .platform_data = (void *)&adxl345_info, }, #endif }; #if defined(CONFIG_SERIAL_BFIN_SPORT) \ || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART static struct resource bfin_sport0_uart_resources[] = { { .start = SPORT0_TCR1, .end = SPORT0_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT0_RX, .end = IRQ_SPORT0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT0_ERROR, .end = IRQ_SPORT0_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport0_peripherals[] = { P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0 }; static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), .resource = bfin_sport0_uart_resources, .dev = { .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART static struct resource bfin_sport1_uart_resources[] = { { .start = SPORT1_TCR1, .end = SPORT1_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = 
IRQ_SPORT1_RX, .end = IRQ_SPORT1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT1_ERROR, .end = IRQ_SPORT1_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport1_peripherals[] = { P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), .resource = bfin_sport1_uart_resources, .dev = { .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ }, }; #endif #endif static const unsigned int cclk_vlev_datasheet[] = { VRPAIR(VLEV_100, 400000000), VRPAIR(VLEV_105, 426000000), VRPAIR(VLEV_110, 500000000), VRPAIR(VLEV_115, 533000000), VRPAIR(VLEV_120, 600000000), }; static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { .tuple_tab = cclk_vlev_datasheet, .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), .vr_settling_time = 25 /* us */, }; static struct platform_device bfin_dpmc = { .name = "bfin dpmc", .dev = { .platform_data = &bfin_dmpc_vreg_data, }, }; static struct platform_device *tll6527m_devices[] __initdata = { &bfin_dpmc, #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) &rtc_device, #endif #if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) &musb_device, #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) &bfin_mii_bus, &bfin_mac_device, #endif #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) &bfin_spi0_device, #endif #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) &bfin_lq035q1_device, #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 &bfin_sir0_device, #endif #ifdef CONFIG_BFIN_SIR1 
&bfin_sir1_device, #endif #endif #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) &i2c_bfin_twi_device, #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) \ || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif #if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE) &tll6527m_flash_device, #endif #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) &bfin_i2s, #endif #if defined(CONFIG_GPIO_DECODER) || defined(CONFIG_GPIO_DECODER_MODULE) &spi_decoded_gpio, #endif }; static int __init tll6527m_init(void) { printk(KERN_INFO "%s(): registering device resources\n", __func__); i2c_register_board_info(0, bfin_i2c_board_info, ARRAY_SIZE(bfin_i2c_board_info)); platform_add_devices(tll6527m_devices, ARRAY_SIZE(tll6527m_devices)); spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); return 0; } arch_initcall(tll6527m_init); static struct platform_device *tll6527m_early_devices[] __initdata = { #if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif }; void __init native_machine_early_platform_add_devices(void) { printk(KERN_INFO "register early platform devices\n"); early_platform_add_devices(tll6527m_early_devices, ARRAY_SIZE(tll6527m_early_devices)); } void native_machine_restart(char *cmd) { /* workaround reboot hang when booting from SPI */ if ((bfin_read_SYSCR() & 0x7) == 0x3) bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); } int bfin_get_ether_addr(char *addr) { /* the MAC is stored in OTP memory page 0xDF */ u32 
ret; u64 otp_mac; u32 (*otp_read)(u32 page, u32 flags, u64 *page_content) = (void *)0xEF00001A; ret = otp_read(0xDF, 0x00, &otp_mac); if (!(ret & 0x1)) { char *otp_mac_p = (char *)&otp_mac; for (ret = 0; ret < 6; ++ret) addr[ret] = otp_mac_p[5 - ret]; } return 0; } EXPORT_SYMBOL(bfin_get_ether_addr);
gpl-2.0
marc1706/desire_kernel_35
drivers/mfd/wm8350-gpio.c
4419
6197
/* * wm8350-core.c -- Device access for Wolfson WM8350 * * Copyright 2007, 2008 Wolfson Microelectronics PLC. * * Author: Liam Girdwood * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/mfd/wm8350/core.h> #include <linux/mfd/wm8350/gpio.h> #include <linux/mfd/wm8350/pmic.h> static int gpio_set_dir(struct wm8350 *wm8350, int gpio, int dir) { int ret; wm8350_reg_unlock(wm8350); if (dir == WM8350_GPIO_DIR_OUT) ret = wm8350_clear_bits(wm8350, WM8350_GPIO_CONFIGURATION_I_O, 1 << gpio); else ret = wm8350_set_bits(wm8350, WM8350_GPIO_CONFIGURATION_I_O, 1 << gpio); wm8350_reg_lock(wm8350); return ret; } static int gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db) { if (db == WM8350_GPIO_DEBOUNCE_ON) return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_DEBOUNCE, 1 << gpio); } static int gpio_set_func(struct wm8350 *wm8350, int gpio, int func) { u16 reg; wm8350_reg_unlock(wm8350); switch (gpio) { case 0: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1) & ~WM8350_GP0_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1, reg | ((func & 0xf) << 0)); break; case 1: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1) & ~WM8350_GP1_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1, reg | ((func & 0xf) << 4)); break; case 2: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1) & ~WM8350_GP2_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1, reg | ((func & 0xf) << 8)); break; case 3: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1) & ~WM8350_GP3_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1, reg | ((func & 0xf) << 12)); 
break; case 4: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2) & ~WM8350_GP4_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2, reg | ((func & 0xf) << 0)); break; case 5: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2) & ~WM8350_GP5_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2, reg | ((func & 0xf) << 4)); break; case 6: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2) & ~WM8350_GP6_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2, reg | ((func & 0xf) << 8)); break; case 7: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2) & ~WM8350_GP7_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2, reg | ((func & 0xf) << 12)); break; case 8: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3) & ~WM8350_GP8_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3, reg | ((func & 0xf) << 0)); break; case 9: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3) & ~WM8350_GP9_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3, reg | ((func & 0xf) << 4)); break; case 10: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3) & ~WM8350_GP10_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3, reg | ((func & 0xf) << 8)); break; case 11: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3) & ~WM8350_GP11_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3, reg | ((func & 0xf) << 12)); break; case 12: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_4) & ~WM8350_GP12_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_4, reg | ((func & 0xf) << 0)); break; default: wm8350_reg_lock(wm8350); return -EINVAL; } wm8350_reg_lock(wm8350); return 0; } static int gpio_set_pull_up(struct wm8350 *wm8350, int gpio, int up) { if (up) return wm8350_set_bits(wm8350, WM8350_GPIO_PIN_PULL_UP_CONTROL, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_PIN_PULL_UP_CONTROL, 1 << gpio); } static 
int gpio_set_pull_down(struct wm8350 *wm8350, int gpio, int down) { if (down) return wm8350_set_bits(wm8350, WM8350_GPIO_PULL_DOWN_CONTROL, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_PULL_DOWN_CONTROL, 1 << gpio); } static int gpio_set_polarity(struct wm8350 *wm8350, int gpio, int pol) { if (pol == WM8350_GPIO_ACTIVE_HIGH) return wm8350_set_bits(wm8350, WM8350_GPIO_PIN_POLARITY_TYPE, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_PIN_POLARITY_TYPE, 1 << gpio); } static int gpio_set_invert(struct wm8350 *wm8350, int gpio, int invert) { if (invert == WM8350_GPIO_INVERT_ON) return wm8350_set_bits(wm8350, WM8350_GPIO_INT_MODE, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_INT_MODE, 1 << gpio); } int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func, int pol, int pull, int invert, int debounce) { /* make sure we never pull up and down at the same time */ if (pull == WM8350_GPIO_PULL_NONE) { if (gpio_set_pull_up(wm8350, gpio, 0)) goto err; if (gpio_set_pull_down(wm8350, gpio, 0)) goto err; } else if (pull == WM8350_GPIO_PULL_UP) { if (gpio_set_pull_down(wm8350, gpio, 0)) goto err; if (gpio_set_pull_up(wm8350, gpio, 1)) goto err; } else if (pull == WM8350_GPIO_PULL_DOWN) { if (gpio_set_pull_up(wm8350, gpio, 0)) goto err; if (gpio_set_pull_down(wm8350, gpio, 1)) goto err; } if (gpio_set_invert(wm8350, gpio, invert)) goto err; if (gpio_set_polarity(wm8350, gpio, pol)) goto err; if (gpio_set_debounce(wm8350, gpio, debounce)) goto err; if (gpio_set_dir(wm8350, gpio, dir)) goto err; return gpio_set_func(wm8350, gpio, func); err: return -EIO; } EXPORT_SYMBOL_GPL(wm8350_gpio_config);
gpl-2.0
Nicklas373/Hana-CoreUX-Kernel_MSM8627-AOSP_7.0
drivers/macintosh/macio_asic.c
4931
20753
/* * Bus & driver management routines for devices within * a MacIO ASIC. Interface to new driver model mostly * stolen from the PCI version. * * Copyright (C) 2005 Ben. Herrenschmidt (benh@kernel.crashing.org) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * TODO: * * - Don't probe below media bay by default, but instead provide * some hooks for media bay to dynamically add/remove it's own * sub-devices. */ #include <linux/string.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <asm/machdep.h> #include <asm/macio.h> #include <asm/pmac_feature.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #undef DEBUG #define MAX_NODE_NAME_SIZE (20 - 12) static struct macio_chip *macio_on_hold; static int macio_bus_match(struct device *dev, struct device_driver *drv) { const struct of_device_id * matches = drv->of_match_table; if (!matches) return 0; return of_match_device(matches, dev) != NULL; } struct macio_dev *macio_dev_get(struct macio_dev *dev) { struct device *tmp; if (!dev) return NULL; tmp = get_device(&dev->ofdev.dev); if (tmp) return to_macio_device(tmp); else return NULL; } void macio_dev_put(struct macio_dev *dev) { if (dev) put_device(&dev->ofdev.dev); } static int macio_device_probe(struct device *dev) { int error = -ENODEV; struct macio_driver *drv; struct macio_dev *macio_dev; const struct of_device_id *match; drv = to_macio_driver(dev->driver); macio_dev = to_macio_device(dev); if (!drv->probe) return error; macio_dev_get(macio_dev); match = of_match_device(drv->driver.of_match_table, dev); if (match) error = drv->probe(macio_dev, match); if (error) macio_dev_put(macio_dev); return error; } static int macio_device_remove(struct device *dev) { 
struct macio_dev * macio_dev = to_macio_device(dev); struct macio_driver * drv = to_macio_driver(dev->driver); if (dev->driver && drv->remove) drv->remove(macio_dev); macio_dev_put(macio_dev); return 0; } static void macio_device_shutdown(struct device *dev) { struct macio_dev * macio_dev = to_macio_device(dev); struct macio_driver * drv = to_macio_driver(dev->driver); if (dev->driver && drv->shutdown) drv->shutdown(macio_dev); } static int macio_device_suspend(struct device *dev, pm_message_t state) { struct macio_dev * macio_dev = to_macio_device(dev); struct macio_driver * drv = to_macio_driver(dev->driver); if (dev->driver && drv->suspend) return drv->suspend(macio_dev, state); return 0; } static int macio_device_resume(struct device * dev) { struct macio_dev * macio_dev = to_macio_device(dev); struct macio_driver * drv = to_macio_driver(dev->driver); if (dev->driver && drv->resume) return drv->resume(macio_dev); return 0; } extern struct device_attribute macio_dev_attrs[]; struct bus_type macio_bus_type = { .name = "macio", .match = macio_bus_match, .uevent = of_device_uevent_modalias, .probe = macio_device_probe, .remove = macio_device_remove, .shutdown = macio_device_shutdown, .suspend = macio_device_suspend, .resume = macio_device_resume, .dev_attrs = macio_dev_attrs, }; static int __init macio_bus_driver_init(void) { return bus_register(&macio_bus_type); } postcore_initcall(macio_bus_driver_init); /** * macio_release_dev - free a macio device structure when all users of it are * finished. * @dev: device that's been disconnected * * Will be called only by the device core when all users of this macio device * are done. This currently means never as we don't hot remove any macio * device yet, though that will happen with mediabay based devices in a later * implementation. 
*/ static void macio_release_dev(struct device *dev) { struct macio_dev *mdev; mdev = to_macio_device(dev); kfree(mdev); } /** * macio_resource_quirks - tweak or skip some resources for a device * @np: pointer to the device node * @res: resulting resource * @index: index of resource in node * * If this routine returns non-null, then the resource is completely * skipped. */ static int macio_resource_quirks(struct device_node *np, struct resource *res, int index) { /* Only quirks for memory resources for now */ if ((res->flags & IORESOURCE_MEM) == 0) return 0; /* Grand Central has too large resource 0 on some machines */ if (index == 0 && !strcmp(np->name, "gc")) res->end = res->start + 0x1ffff; /* Airport has bogus resource 2 */ if (index >= 2 && !strcmp(np->name, "radio")) return 1; #ifndef CONFIG_PPC64 /* DBDMAs may have bogus sizes */ if ((res->start & 0x0001f000) == 0x00008000) res->end = res->start + 0xff; #endif /* CONFIG_PPC64 */ /* ESCC parent eats child resources. We could have added a * level of hierarchy, but I don't really feel the need * for it */ if (!strcmp(np->name, "escc")) return 1; /* ESCC has bogus resources >= 3 */ if (index >= 3 && !(strcmp(np->name, "ch-a") && strcmp(np->name, "ch-b"))) return 1; /* Media bay has too many resources, keep only first one */ if (index > 0 && !strcmp(np->name, "media-bay")) return 1; /* Some older IDE resources have bogus sizes */ if (!(strcmp(np->name, "IDE") && strcmp(np->name, "ATA") && strcmp(np->type, "ide") && strcmp(np->type, "ata"))) { if (index == 0 && (res->end - res->start) > 0xfff) res->end = res->start + 0xfff; if (index == 1 && (res->end - res->start) > 0xff) res->end = res->start + 0xff; } return 0; } static void macio_create_fixup_irq(struct macio_dev *dev, int index, unsigned int line) { unsigned int irq; irq = irq_create_mapping(NULL, line); if (irq != NO_IRQ) { dev->interrupt[index].start = irq; dev->interrupt[index].flags = IORESOURCE_IRQ; dev->interrupt[index].name = dev_name(&dev->ofdev.dev); 
} if (dev->n_interrupts <= index) dev->n_interrupts = index + 1; } static void macio_add_missing_resources(struct macio_dev *dev) { struct device_node *np = dev->ofdev.dev.of_node; unsigned int irq_base; /* Gatwick has some missing interrupts on child nodes */ if (dev->bus->chip->type != macio_gatwick) return; /* irq_base is always 64 on gatwick. I have no cleaner way to get * that value from here at this point */ irq_base = 64; /* Fix SCC */ if (strcmp(np->name, "ch-a") == 0) { macio_create_fixup_irq(dev, 0, 15 + irq_base); macio_create_fixup_irq(dev, 1, 4 + irq_base); macio_create_fixup_irq(dev, 2, 5 + irq_base); printk(KERN_INFO "macio: fixed SCC irqs on gatwick\n"); } /* Fix media-bay */ if (strcmp(np->name, "media-bay") == 0) { macio_create_fixup_irq(dev, 0, 29 + irq_base); printk(KERN_INFO "macio: fixed media-bay irq on gatwick\n"); } /* Fix left media bay childs */ if (dev->media_bay != NULL && strcmp(np->name, "floppy") == 0) { macio_create_fixup_irq(dev, 0, 19 + irq_base); macio_create_fixup_irq(dev, 1, 1 + irq_base); printk(KERN_INFO "macio: fixed left floppy irqs\n"); } if (dev->media_bay != NULL && strcasecmp(np->name, "ata4") == 0) { macio_create_fixup_irq(dev, 0, 14 + irq_base); macio_create_fixup_irq(dev, 0, 3 + irq_base); printk(KERN_INFO "macio: fixed left ide irqs\n"); } } static void macio_setup_interrupts(struct macio_dev *dev) { struct device_node *np = dev->ofdev.dev.of_node; unsigned int irq; int i = 0, j = 0; for (;;) { struct resource *res; if (j >= MACIO_DEV_COUNT_IRQS) break; res = &dev->interrupt[j]; irq = irq_of_parse_and_map(np, i++); if (irq == NO_IRQ) break; res->start = irq; res->flags = IORESOURCE_IRQ; res->name = dev_name(&dev->ofdev.dev); if (macio_resource_quirks(np, res, i - 1)) { memset(res, 0, sizeof(struct resource)); continue; } else j++; } dev->n_interrupts = j; } static void macio_setup_resources(struct macio_dev *dev, struct resource *parent_res) { struct device_node *np = dev->ofdev.dev.of_node; struct resource r; int 
index; for (index = 0; of_address_to_resource(np, index, &r) == 0; index++) { struct resource *res; if (index >= MACIO_DEV_COUNT_RESOURCES) break; res = &dev->resource[index]; *res = r; res->name = dev_name(&dev->ofdev.dev); if (macio_resource_quirks(np, res, index)) { memset(res, 0, sizeof(struct resource)); continue; } /* Currently, we consider failure as harmless, this may * change in the future, once I've found all the device * tree bugs in older machines & worked around them */ if (insert_resource(parent_res, res)) { printk(KERN_WARNING "Can't request resource " "%d for MacIO device %s\n", index, dev_name(&dev->ofdev.dev)); } } dev->n_resources = index; } /** * macio_add_one_device - Add one device from OF node to the device tree * @chip: pointer to the macio_chip holding the device * @np: pointer to the device node in the OF tree * @in_bay: set to 1 if device is part of a media-bay * * When media-bay is changed to hotswap drivers, this function will * be exposed to the bay driver some way... 
*/ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, struct device *parent, struct device_node *np, struct macio_dev *in_bay, struct resource *parent_res) { struct macio_dev *dev; const u32 *reg; if (np == NULL) return NULL; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return NULL; dev->bus = &chip->lbus; dev->media_bay = in_bay; dev->ofdev.dev.of_node = np; dev->ofdev.archdata.dma_mask = 0xffffffffUL; dev->ofdev.dev.dma_mask = &dev->ofdev.archdata.dma_mask; dev->ofdev.dev.parent = parent; dev->ofdev.dev.bus = &macio_bus_type; dev->ofdev.dev.release = macio_release_dev; dev->ofdev.dev.dma_parms = &dev->dma_parms; /* Standard DMA paremeters */ dma_set_max_seg_size(&dev->ofdev.dev, 65536); dma_set_seg_boundary(&dev->ofdev.dev, 0xffffffff); #ifdef CONFIG_PCI /* Set the DMA ops to the ones from the PCI device, this could be * fishy if we didn't know that on PowerMac it's always direct ops * or iommu ops that will work fine * * To get all the fields, copy all archdata */ dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata; #endif /* CONFIG_PCI */ #ifdef DEBUG printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n", dev, &dev->ofdev, &dev->ofdev.dev, &dev->ofdev.dev.kobj); #endif /* MacIO itself has a different reg, we use it's PCI base */ if (np == chip->of_node) { dev_set_name(&dev->ofdev.dev, "%1d.%08x:%.*s", chip->lbus.index, #ifdef CONFIG_PCI (unsigned int)pci_resource_start(chip->lbus.pdev, 0), #else 0, /* NuBus may want to do something better here */ #endif MAX_NODE_NAME_SIZE, np->name); } else { reg = of_get_property(np, "reg", NULL); dev_set_name(&dev->ofdev.dev, "%1d.%08x:%.*s", chip->lbus.index, reg ? 
*reg : 0, MAX_NODE_NAME_SIZE, np->name); } /* Setup interrupts & resources */ macio_setup_interrupts(dev); macio_setup_resources(dev, parent_res); macio_add_missing_resources(dev); /* Register with core */ if (of_device_register(&dev->ofdev) != 0) { printk(KERN_DEBUG"macio: device registration error for %s!\n", dev_name(&dev->ofdev.dev)); kfree(dev); return NULL; } return dev; } static int macio_skip_device(struct device_node *np) { if (strncmp(np->name, "battery", 7) == 0) return 1; if (strncmp(np->name, "escc-legacy", 11) == 0) return 1; return 0; } /** * macio_pci_add_devices - Adds sub-devices of mac-io to the device tree * @chip: pointer to the macio_chip holding the devices * * This function will do the job of extracting devices from the * Open Firmware device tree, build macio_dev structures and add * them to the Linux device tree. * * For now, childs of media-bay are added now as well. This will * change rsn though. */ static void macio_pci_add_devices(struct macio_chip *chip) { struct device_node *np, *pnode; struct macio_dev *rdev, *mdev, *mbdev = NULL, *sdev = NULL; struct device *parent = NULL; struct resource *root_res = &iomem_resource; /* Add a node for the macio bus itself */ #ifdef CONFIG_PCI if (chip->lbus.pdev) { parent = &chip->lbus.pdev->dev; root_res = &chip->lbus.pdev->resource[0]; } #endif pnode = of_node_get(chip->of_node); if (pnode == NULL) return; /* Add macio itself to hierarchy */ rdev = macio_add_one_device(chip, parent, pnode, NULL, root_res); if (rdev == NULL) return; root_res = &rdev->resource[0]; /* First scan 1st level */ for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) { if (macio_skip_device(np)) continue; of_node_get(np); mdev = macio_add_one_device(chip, &rdev->ofdev.dev, np, NULL, root_res); if (mdev == NULL) of_node_put(np); else if (strncmp(np->name, "media-bay", 9) == 0) mbdev = mdev; else if (strncmp(np->name, "escc", 4) == 0) sdev = mdev; } /* Add media bay devices if any */ if (mbdev) { pnode = 
mbdev->ofdev.dev.of_node; for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) { if (macio_skip_device(np)) continue; of_node_get(np); if (macio_add_one_device(chip, &mbdev->ofdev.dev, np, mbdev, root_res) == NULL) of_node_put(np); } } /* Add serial ports if any */ if (sdev) { pnode = sdev->ofdev.dev.of_node; for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) { if (macio_skip_device(np)) continue; of_node_get(np); if (macio_add_one_device(chip, &sdev->ofdev.dev, np, NULL, root_res) == NULL) of_node_put(np); } } } /** * macio_register_driver - Registers a new MacIO device driver * @drv: pointer to the driver definition structure */ int macio_register_driver(struct macio_driver *drv) { /* initialize common driver fields */ drv->driver.bus = &macio_bus_type; /* register with core */ return driver_register(&drv->driver); } /** * macio_unregister_driver - Unregisters a new MacIO device driver * @drv: pointer to the driver definition structure */ void macio_unregister_driver(struct macio_driver *drv) { driver_unregister(&drv->driver); } /* Managed MacIO resources */ struct macio_devres { u32 res_mask; }; static void maciom_release(struct device *gendev, void *res) { struct macio_dev *dev = to_macio_device(gendev); struct macio_devres *dr = res; int i, max; max = min(dev->n_resources, 32); for (i = 0; i < max; i++) { if (dr->res_mask & (1 << i)) macio_release_resource(dev, i); } } int macio_enable_devres(struct macio_dev *dev) { struct macio_devres *dr; dr = devres_find(&dev->ofdev.dev, maciom_release, NULL, NULL); if (!dr) { dr = devres_alloc(maciom_release, sizeof(*dr), GFP_KERNEL); if (!dr) return -ENOMEM; } return devres_get(&dev->ofdev.dev, dr, NULL, NULL) != NULL; } static struct macio_devres * find_macio_dr(struct macio_dev *dev) { return devres_find(&dev->ofdev.dev, maciom_release, NULL, NULL); } /** * macio_request_resource - Request an MMIO resource * @dev: pointer to the device holding the resource * @resource_no: resource number to request 
* @name: resource name * * Mark memory region number @resource_no associated with MacIO * device @dev as being reserved by owner @name. Do not access * any address inside the memory regions unless this call returns * successfully. * * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure. */ int macio_request_resource(struct macio_dev *dev, int resource_no, const char *name) { struct macio_devres *dr = find_macio_dr(dev); if (macio_resource_len(dev, resource_no) == 0) return 0; if (!request_mem_region(macio_resource_start(dev, resource_no), macio_resource_len(dev, resource_no), name)) goto err_out; if (dr && resource_no < 32) dr->res_mask |= 1 << resource_no; return 0; err_out: printk (KERN_WARNING "MacIO: Unable to reserve resource #%d:%lx@%lx" " for device %s\n", resource_no, macio_resource_len(dev, resource_no), macio_resource_start(dev, resource_no), dev_name(&dev->ofdev.dev)); return -EBUSY; } /** * macio_release_resource - Release an MMIO resource * @dev: pointer to the device holding the resource * @resource_no: resource number to release */ void macio_release_resource(struct macio_dev *dev, int resource_no) { struct macio_devres *dr = find_macio_dr(dev); if (macio_resource_len(dev, resource_no) == 0) return; release_mem_region(macio_resource_start(dev, resource_no), macio_resource_len(dev, resource_no)); if (dr && resource_no < 32) dr->res_mask &= ~(1 << resource_no); } /** * macio_request_resources - Reserve all memory resources * @dev: MacIO device whose resources are to be reserved * @name: Name to be associated with resource. * * Mark all memory regions associated with MacIO device @dev as * being reserved by owner @name. Do not access any address inside * the memory regions unless this call returns successfully. * * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure. 
*/ int macio_request_resources(struct macio_dev *dev, const char *name) { int i; for (i = 0; i < dev->n_resources; i++) if (macio_request_resource(dev, i, name)) goto err_out; return 0; err_out: while(--i >= 0) macio_release_resource(dev, i); return -EBUSY; } /** * macio_release_resources - Release reserved memory resources * @dev: MacIO device whose resources were previously reserved */ void macio_release_resources(struct macio_dev *dev) { int i; for (i = 0; i < dev->n_resources; i++) macio_release_resource(dev, i); } #ifdef CONFIG_PCI static int __devinit macio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device_node* np; struct macio_chip* chip; if (ent->vendor != PCI_VENDOR_ID_APPLE) return -ENODEV; /* Note regarding refcounting: We assume pci_device_to_OF_node() is * ported to new OF APIs and returns a node with refcount incremented. */ np = pci_device_to_OF_node(pdev); if (np == NULL) return -ENODEV; /* The above assumption is wrong !!! * fix that here for now until I fix the arch code */ of_node_get(np); /* We also assume that pmac_feature will have done a get() on nodes * stored in the macio chips array */ chip = macio_find(np, macio_unknown); of_node_put(np); if (chip == NULL) return -ENODEV; /* XXX Need locking ??? */ if (chip->lbus.pdev == NULL) { chip->lbus.pdev = pdev; chip->lbus.chip = chip; pci_set_drvdata(pdev, &chip->lbus); pci_set_master(pdev); } printk(KERN_INFO "MacIO PCI driver attached to %s chipset\n", chip->name); /* * HACK ALERT: The WallStreet PowerBook and some OHare based machines * have 2 macio ASICs. I must probe the "main" one first or IDE * ordering will be incorrect. 
So I put on "hold" the second one since * it seem to appear first on PCI */ if (chip->type == macio_gatwick || chip->type == macio_ohareII) if (macio_chips[0].lbus.pdev == NULL) { macio_on_hold = chip; return 0; } macio_pci_add_devices(chip); if (macio_on_hold && macio_chips[0].lbus.pdev != NULL) { macio_pci_add_devices(macio_on_hold); macio_on_hold = NULL; } return 0; } static void __devexit macio_pci_remove(struct pci_dev* pdev) { panic("removing of macio-asic not supported !\n"); } /* * MacIO is matched against any Apple ID, it's probe() function * will then decide wether it applies or not */ static const struct pci_device_id __devinitdata pci_ids [] = { { .vendor = PCI_VENDOR_ID_APPLE, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { /* end: all zeroes */ } }; MODULE_DEVICE_TABLE (pci, pci_ids); /* pci driver glue; this is a "new style" PCI driver module */ static struct pci_driver macio_pci_driver = { .name = (char *) "macio", .id_table = pci_ids, .probe = macio_pci_probe, .remove = macio_pci_remove, }; #endif /* CONFIG_PCI */ static int __init macio_module_init (void) { #ifdef CONFIG_PCI int rc; rc = pci_register_driver(&macio_pci_driver); if (rc) return rc; #endif /* CONFIG_PCI */ return 0; } module_init(macio_module_init); EXPORT_SYMBOL(macio_register_driver); EXPORT_SYMBOL(macio_unregister_driver); EXPORT_SYMBOL(macio_dev_get); EXPORT_SYMBOL(macio_dev_put); EXPORT_SYMBOL(macio_request_resource); EXPORT_SYMBOL(macio_release_resource); EXPORT_SYMBOL(macio_request_resources); EXPORT_SYMBOL(macio_release_resources); EXPORT_SYMBOL(macio_enable_devres);
gpl-2.0
DarkminecrafterHD/android_kernel_samsung_jf
sound/pci/hda/hda_generic.c
5187
28446
/* * Universal Interface for Intel High Definition Audio Codec * * Generic widget tree parser * * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de> * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/slab.h> #include <linux/export.h> #include <sound/core.h> #include "hda_codec.h" #include "hda_local.h" /* widget node for parsing */ struct hda_gnode { hda_nid_t nid; /* NID of this widget */ unsigned short nconns; /* number of input connections */ hda_nid_t *conn_list; hda_nid_t slist[2]; /* temporay list */ unsigned int wid_caps; /* widget capabilities */ unsigned char type; /* widget type */ unsigned char pin_ctl; /* pin controls */ unsigned char checked; /* the flag indicates that the node is already parsed */ unsigned int pin_caps; /* pin widget capabilities */ unsigned int def_cfg; /* default configuration */ unsigned int amp_out_caps; /* AMP out capabilities */ unsigned int amp_in_caps; /* AMP in capabilities */ struct list_head list; }; /* patch-specific record */ #define MAX_PCM_VOLS 2 struct pcm_vol { struct hda_gnode *node; /* Node for PCM volume */ unsigned int index; /* connection of PCM volume */ }; struct hda_gspec { struct hda_gnode *dac_node[2]; /* DAC node */ struct hda_gnode *out_pin_node[2]; /* Output pin (Line-Out) node */ struct pcm_vol pcm_vol[MAX_PCM_VOLS]; /* PCM volumes 
*/ unsigned int pcm_vol_nodes; /* number of PCM volumes */ struct hda_gnode *adc_node; /* ADC node */ struct hda_gnode *cap_vol_node; /* Node for capture volume */ unsigned int cur_cap_src; /* current capture source */ struct hda_input_mux input_mux; unsigned int def_amp_in_caps; unsigned int def_amp_out_caps; struct hda_pcm pcm_rec; /* PCM information */ struct list_head nid_list; /* list of widgets */ #ifdef CONFIG_SND_HDA_POWER_SAVE #define MAX_LOOPBACK_AMPS 7 struct hda_loopback_check loopback; int num_loopbacks; struct hda_amp_list loopback_list[MAX_LOOPBACK_AMPS + 1]; #endif }; /* * retrieve the default device type from the default config value */ #define defcfg_type(node) (((node)->def_cfg & AC_DEFCFG_DEVICE) >> \ AC_DEFCFG_DEVICE_SHIFT) #define defcfg_location(node) (((node)->def_cfg & AC_DEFCFG_LOCATION) >> \ AC_DEFCFG_LOCATION_SHIFT) #define defcfg_port_conn(node) (((node)->def_cfg & AC_DEFCFG_PORT_CONN) >> \ AC_DEFCFG_PORT_CONN_SHIFT) /* * destructor */ static void snd_hda_generic_free(struct hda_codec *codec) { struct hda_gspec *spec = codec->spec; struct hda_gnode *node, *n; if (! 
spec) return; /* free all widgets */ list_for_each_entry_safe(node, n, &spec->nid_list, list) { if (node->conn_list != node->slist) kfree(node->conn_list); kfree(node); } kfree(spec); } /* * add a new widget node and read its attributes */ static int add_new_node(struct hda_codec *codec, struct hda_gspec *spec, hda_nid_t nid) { struct hda_gnode *node; int nconns; hda_nid_t conn_list[HDA_MAX_CONNECTIONS]; node = kzalloc(sizeof(*node), GFP_KERNEL); if (node == NULL) return -ENOMEM; node->nid = nid; node->wid_caps = get_wcaps(codec, nid); node->type = get_wcaps_type(node->wid_caps); if (node->wid_caps & AC_WCAP_CONN_LIST) { nconns = snd_hda_get_connections(codec, nid, conn_list, HDA_MAX_CONNECTIONS); if (nconns < 0) { kfree(node); return nconns; } } else { nconns = 0; } if (nconns <= ARRAY_SIZE(node->slist)) node->conn_list = node->slist; else { node->conn_list = kmalloc(sizeof(hda_nid_t) * nconns, GFP_KERNEL); if (! node->conn_list) { snd_printk(KERN_ERR "hda-generic: cannot malloc\n"); kfree(node); return -ENOMEM; } } memcpy(node->conn_list, conn_list, nconns * sizeof(hda_nid_t)); node->nconns = nconns; if (node->type == AC_WID_PIN) { node->pin_caps = snd_hda_query_pin_caps(codec, node->nid); node->pin_ctl = snd_hda_codec_read(codec, node->nid, 0, AC_VERB_GET_PIN_WIDGET_CONTROL, 0); node->def_cfg = snd_hda_codec_get_pincfg(codec, node->nid); } if (node->wid_caps & AC_WCAP_OUT_AMP) { if (node->wid_caps & AC_WCAP_AMP_OVRD) node->amp_out_caps = snd_hda_param_read(codec, node->nid, AC_PAR_AMP_OUT_CAP); if (! node->amp_out_caps) node->amp_out_caps = spec->def_amp_out_caps; } if (node->wid_caps & AC_WCAP_IN_AMP) { if (node->wid_caps & AC_WCAP_AMP_OVRD) node->amp_in_caps = snd_hda_param_read(codec, node->nid, AC_PAR_AMP_IN_CAP); if (! 
node->amp_in_caps) node->amp_in_caps = spec->def_amp_in_caps; } list_add_tail(&node->list, &spec->nid_list); return 0; } /* * build the AFG subtree */ static int build_afg_tree(struct hda_codec *codec) { struct hda_gspec *spec = codec->spec; int i, nodes, err; hda_nid_t nid; if (snd_BUG_ON(!spec)) return -EINVAL; spec->def_amp_out_caps = snd_hda_param_read(codec, codec->afg, AC_PAR_AMP_OUT_CAP); spec->def_amp_in_caps = snd_hda_param_read(codec, codec->afg, AC_PAR_AMP_IN_CAP); nodes = snd_hda_get_sub_nodes(codec, codec->afg, &nid); if (! nid || nodes < 0) { printk(KERN_ERR "Invalid AFG subtree\n"); return -EINVAL; } /* parse all nodes belonging to the AFG */ for (i = 0; i < nodes; i++, nid++) { if ((err = add_new_node(codec, spec, nid)) < 0) return err; } return 0; } /* * look for the node record for the given NID */ /* FIXME: should avoid the braindead linear search */ static struct hda_gnode *hda_get_node(struct hda_gspec *spec, hda_nid_t nid) { struct hda_gnode *node; list_for_each_entry(node, &spec->nid_list, list) { if (node->nid == nid) return node; } return NULL; } /* * unmute (and set max vol) the output amplifier */ static int unmute_output(struct hda_codec *codec, struct hda_gnode *node) { unsigned int val, ofs; snd_printdd("UNMUTE OUT: NID=0x%x\n", node->nid); val = (node->amp_out_caps & AC_AMPCAP_NUM_STEPS) >> AC_AMPCAP_NUM_STEPS_SHIFT; ofs = (node->amp_out_caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT; if (val >= ofs) val -= ofs; snd_hda_codec_amp_stereo(codec, node->nid, HDA_OUTPUT, 0, 0xff, val); return 0; } /* * unmute (and set max vol) the input amplifier */ static int unmute_input(struct hda_codec *codec, struct hda_gnode *node, unsigned int index) { unsigned int val, ofs; snd_printdd("UNMUTE IN: NID=0x%x IDX=0x%x\n", node->nid, index); val = (node->amp_in_caps & AC_AMPCAP_NUM_STEPS) >> AC_AMPCAP_NUM_STEPS_SHIFT; ofs = (node->amp_in_caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT; if (val >= ofs) val -= ofs; snd_hda_codec_amp_stereo(codec, 
node->nid, HDA_INPUT, index, 0xff, val); return 0; } /* * select the input connection of the given node. */ static int select_input_connection(struct hda_codec *codec, struct hda_gnode *node, unsigned int index) { snd_printdd("CONNECT: NID=0x%x IDX=0x%x\n", node->nid, index); return snd_hda_codec_write_cache(codec, node->nid, 0, AC_VERB_SET_CONNECT_SEL, index); } /* * clear checked flag of each node in the node list */ static void clear_check_flags(struct hda_gspec *spec) { struct hda_gnode *node; list_for_each_entry(node, &spec->nid_list, list) { node->checked = 0; } } /* * parse the output path recursively until reach to an audio output widget * * returns 0 if not found, 1 if found, or a negative error code. */ static int parse_output_path(struct hda_codec *codec, struct hda_gspec *spec, struct hda_gnode *node, int dac_idx) { int i, err; struct hda_gnode *child; if (node->checked) return 0; node->checked = 1; if (node->type == AC_WID_AUD_OUT) { if (node->wid_caps & AC_WCAP_DIGITAL) { snd_printdd("Skip Digital OUT node %x\n", node->nid); return 0; } snd_printdd("AUD_OUT found %x\n", node->nid); if (spec->dac_node[dac_idx]) { /* already DAC node is assigned, just unmute & connect */ return node == spec->dac_node[dac_idx]; } spec->dac_node[dac_idx] = node; if ((node->wid_caps & AC_WCAP_OUT_AMP) && spec->pcm_vol_nodes < MAX_PCM_VOLS) { spec->pcm_vol[spec->pcm_vol_nodes].node = node; spec->pcm_vol[spec->pcm_vol_nodes].index = 0; spec->pcm_vol_nodes++; } return 1; /* found */ } for (i = 0; i < node->nconns; i++) { child = hda_get_node(spec, node->conn_list[i]); if (! 
child) continue; err = parse_output_path(codec, spec, child, dac_idx); if (err < 0) return err; else if (err > 0) { /* found one, * select the path, unmute both input and output */ if (node->nconns > 1) select_input_connection(codec, node, i); unmute_input(codec, node, i); unmute_output(codec, node); if (spec->dac_node[dac_idx] && spec->pcm_vol_nodes < MAX_PCM_VOLS && !(spec->dac_node[dac_idx]->wid_caps & AC_WCAP_OUT_AMP)) { if ((node->wid_caps & AC_WCAP_IN_AMP) || (node->wid_caps & AC_WCAP_OUT_AMP)) { int n = spec->pcm_vol_nodes; spec->pcm_vol[n].node = node; spec->pcm_vol[n].index = i; spec->pcm_vol_nodes++; } } return 1; } } return 0; } /* * Look for the output PIN widget with the given jack type * and parse the output path to that PIN. * * Returns the PIN node when the path to DAC is established. */ static struct hda_gnode *parse_output_jack(struct hda_codec *codec, struct hda_gspec *spec, int jack_type) { struct hda_gnode *node; int err; list_for_each_entry(node, &spec->nid_list, list) { if (node->type != AC_WID_PIN) continue; /* output capable? */ if (! (node->pin_caps & AC_PINCAP_OUT)) continue; if (defcfg_port_conn(node) == AC_JACK_PORT_NONE) continue; /* unconnected */ if (jack_type >= 0) { if (jack_type != defcfg_type(node)) continue; if (node->wid_caps & AC_WCAP_DIGITAL) continue; /* skip SPDIF */ } else { /* output as default? */ if (! (node->pin_ctl & AC_PINCTL_OUT_EN)) continue; } clear_check_flags(spec); err = parse_output_path(codec, spec, node, 0); if (err < 0) return NULL; if (! err && spec->out_pin_node[0]) { err = parse_output_path(codec, spec, node, 1); if (err < 0) return NULL; } if (err > 0) { /* unmute the PIN output */ unmute_output(codec, node); /* set PIN-Out enable */ snd_hda_codec_write_cache(codec, node->nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, AC_PINCTL_OUT_EN | ((node->pin_caps & AC_PINCAP_HP_DRV) ? 
AC_PINCTL_HP_EN : 0)); return node; } } return NULL; } /* * parse outputs */ static int parse_output(struct hda_codec *codec) { struct hda_gspec *spec = codec->spec; struct hda_gnode *node; /* * Look for the output PIN widget */ /* first, look for the line-out pin */ node = parse_output_jack(codec, spec, AC_JACK_LINE_OUT); if (node) /* found, remember the PIN node */ spec->out_pin_node[0] = node; else { /* if no line-out is found, try speaker out */ node = parse_output_jack(codec, spec, AC_JACK_SPEAKER); if (node) spec->out_pin_node[0] = node; } /* look for the HP-out pin */ node = parse_output_jack(codec, spec, AC_JACK_HP_OUT); if (node) { if (! spec->out_pin_node[0]) spec->out_pin_node[0] = node; else spec->out_pin_node[1] = node; } if (! spec->out_pin_node[0]) { /* no line-out or HP pins found, * then choose for the first output pin */ spec->out_pin_node[0] = parse_output_jack(codec, spec, -1); if (! spec->out_pin_node[0]) snd_printd("hda_generic: no proper output path found\n"); } return 0; } /* * input MUX */ /* control callbacks */ static int capture_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct hda_gspec *spec = codec->spec; return snd_hda_input_mux_info(&spec->input_mux, uinfo); } static int capture_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct hda_gspec *spec = codec->spec; ucontrol->value.enumerated.item[0] = spec->cur_cap_src; return 0; } static int capture_source_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct hda_gspec *spec = codec->spec; return snd_hda_input_mux_put(codec, &spec->input_mux, ucontrol, spec->adc_node->nid, &spec->cur_cap_src); } /* * return the string name of the given input PIN widget */ static const char *get_input_type(struct hda_gnode *node, unsigned int 
*pinctl) { unsigned int location = defcfg_location(node); switch (defcfg_type(node)) { case AC_JACK_LINE_IN: if ((location & 0x0f) == AC_JACK_LOC_FRONT) return "Front Line"; return "Line"; case AC_JACK_CD: #if 0 if (pinctl) *pinctl |= AC_PINCTL_VREF_GRD; #endif return "CD"; case AC_JACK_AUX: if ((location & 0x0f) == AC_JACK_LOC_FRONT) return "Front Aux"; return "Aux"; case AC_JACK_MIC_IN: if (pinctl && (node->pin_caps & (AC_PINCAP_VREF_80 << AC_PINCAP_VREF_SHIFT))) *pinctl |= AC_PINCTL_VREF_80; if ((location & 0x0f) == AC_JACK_LOC_FRONT) return "Front Mic"; return "Mic"; case AC_JACK_SPDIF_IN: return "SPDIF"; case AC_JACK_DIG_OTHER_IN: return "Digital"; } return NULL; } /* * parse the nodes recursively until reach to the input PIN * * returns 0 if not found, 1 if found, or a negative error code. */ static int parse_adc_sub_nodes(struct hda_codec *codec, struct hda_gspec *spec, struct hda_gnode *node, int idx) { int i, err; unsigned int pinctl; const char *type; if (node->checked) return 0; node->checked = 1; if (node->type != AC_WID_PIN) { for (i = 0; i < node->nconns; i++) { struct hda_gnode *child; child = hda_get_node(spec, node->conn_list[i]); if (! child) continue; err = parse_adc_sub_nodes(codec, spec, child, idx); if (err < 0) return err; if (err > 0) { /* found one, * select the path, unmute both input and output */ if (node->nconns > 1) select_input_connection(codec, node, i); unmute_input(codec, node, i); unmute_output(codec, node); return err; } } return 0; } /* input capable? */ if (! (node->pin_caps & AC_PINCAP_IN)) return 0; if (defcfg_port_conn(node) == AC_JACK_PORT_NONE) return 0; /* unconnected */ if (node->wid_caps & AC_WCAP_DIGITAL) return 0; /* skip SPDIF */ if (spec->input_mux.num_items >= HDA_MAX_NUM_INPUTS) { snd_printk(KERN_ERR "hda_generic: Too many items for capture\n"); return -EINVAL; } pinctl = AC_PINCTL_IN_EN; /* create a proper capture source label */ type = get_input_type(node, &pinctl); if (! type) { /* input as default? */ if (! 
(node->pin_ctl & AC_PINCTL_IN_EN)) return 0; type = "Input"; } snd_hda_add_imux_item(&spec->input_mux, type, idx, NULL); /* unmute the PIN external input */ unmute_input(codec, node, 0); /* index = 0? */ /* set PIN-In enable */ snd_hda_codec_write_cache(codec, node->nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl); return 1; /* found */ } /* * parse input */ static int parse_input_path(struct hda_codec *codec, struct hda_gnode *adc_node) { struct hda_gspec *spec = codec->spec; struct hda_gnode *node; int i, err; snd_printdd("AUD_IN = %x\n", adc_node->nid); clear_check_flags(spec); // awk added - fixed no recording due to muted widget unmute_input(codec, adc_node, 0); /* * check each connection of the ADC * if it reaches to a proper input PIN, add the path as the * input path. */ /* first, check the direct connections to PIN widgets */ for (i = 0; i < adc_node->nconns; i++) { node = hda_get_node(spec, adc_node->conn_list[i]); if (node && node->type == AC_WID_PIN) { err = parse_adc_sub_nodes(codec, spec, node, i); if (err < 0) return err; } } /* ... then check the rests, more complicated connections */ for (i = 0; i < adc_node->nconns; i++) { node = hda_get_node(spec, adc_node->conn_list[i]); if (node && node->type != AC_WID_PIN) { err = parse_adc_sub_nodes(codec, spec, node, i); if (err < 0) return err; } } if (! spec->input_mux.num_items) return 0; /* no input path found... */ snd_printdd("[Capture Source] NID=0x%x, #SRC=%d\n", adc_node->nid, spec->input_mux.num_items); for (i = 0; i < spec->input_mux.num_items; i++) snd_printdd(" [%s] IDX=0x%x\n", spec->input_mux.items[i].label, spec->input_mux.items[i].index); spec->adc_node = adc_node; return 1; } /* * parse input */ static int parse_input(struct hda_codec *codec) { struct hda_gspec *spec = codec->spec; struct hda_gnode *node; int err; /* * At first we look for an audio input widget. * If it reaches to certain input PINs, we take it as the * input path. 
*/ list_for_each_entry(node, &spec->nid_list, list) { if (node->wid_caps & AC_WCAP_DIGITAL) continue; /* skip SPDIF */ if (node->type == AC_WID_AUD_IN) { err = parse_input_path(codec, node); if (err < 0) return err; else if (err > 0) return 0; } } snd_printd("hda_generic: no proper input path found\n"); return 0; } #ifdef CONFIG_SND_HDA_POWER_SAVE static void add_input_loopback(struct hda_codec *codec, hda_nid_t nid, int dir, int idx) { struct hda_gspec *spec = codec->spec; struct hda_amp_list *p; if (spec->num_loopbacks >= MAX_LOOPBACK_AMPS) { snd_printk(KERN_ERR "hda_generic: Too many loopback ctls\n"); return; } p = &spec->loopback_list[spec->num_loopbacks++]; p->nid = nid; p->dir = dir; p->idx = idx; spec->loopback.amplist = spec->loopback_list; } #else #define add_input_loopback(codec,nid,dir,idx) #endif /* * create mixer controls if possible */ static int create_mixer(struct hda_codec *codec, struct hda_gnode *node, unsigned int index, const char *type, const char *dir_sfx, int is_loopback) { char name[32]; int err; int created = 0; struct snd_kcontrol_new knew; if (type) sprintf(name, "%s %s Switch", type, dir_sfx); else sprintf(name, "%s Switch", dir_sfx); if ((node->wid_caps & AC_WCAP_IN_AMP) && (node->amp_in_caps & AC_AMPCAP_MUTE)) { knew = (struct snd_kcontrol_new)HDA_CODEC_MUTE(name, node->nid, index, HDA_INPUT); if (is_loopback) add_input_loopback(codec, node->nid, HDA_INPUT, index); snd_printdd("[%s] NID=0x%x, DIR=IN, IDX=0x%x\n", name, node->nid, index); err = snd_hda_ctl_add(codec, node->nid, snd_ctl_new1(&knew, codec)); if (err < 0) return err; created = 1; } else if ((node->wid_caps & AC_WCAP_OUT_AMP) && (node->amp_out_caps & AC_AMPCAP_MUTE)) { knew = (struct snd_kcontrol_new)HDA_CODEC_MUTE(name, node->nid, 0, HDA_OUTPUT); if (is_loopback) add_input_loopback(codec, node->nid, HDA_OUTPUT, 0); snd_printdd("[%s] NID=0x%x, DIR=OUT\n", name, node->nid); err = snd_hda_ctl_add(codec, node->nid, snd_ctl_new1(&knew, codec)); if (err < 0) return err; 
created = 1; } if (type) sprintf(name, "%s %s Volume", type, dir_sfx); else sprintf(name, "%s Volume", dir_sfx); if ((node->wid_caps & AC_WCAP_IN_AMP) && (node->amp_in_caps & AC_AMPCAP_NUM_STEPS)) { knew = (struct snd_kcontrol_new)HDA_CODEC_VOLUME(name, node->nid, index, HDA_INPUT); snd_printdd("[%s] NID=0x%x, DIR=IN, IDX=0x%x\n", name, node->nid, index); err = snd_hda_ctl_add(codec, node->nid, snd_ctl_new1(&knew, codec)); if (err < 0) return err; created = 1; } else if ((node->wid_caps & AC_WCAP_OUT_AMP) && (node->amp_out_caps & AC_AMPCAP_NUM_STEPS)) { knew = (struct snd_kcontrol_new)HDA_CODEC_VOLUME(name, node->nid, 0, HDA_OUTPUT); snd_printdd("[%s] NID=0x%x, DIR=OUT\n", name, node->nid); err = snd_hda_ctl_add(codec, node->nid, snd_ctl_new1(&knew, codec)); if (err < 0) return err; created = 1; } return created; } /* * check whether the controls with the given name and direction suffix already exist */ static int check_existing_control(struct hda_codec *codec, const char *type, const char *dir) { struct snd_ctl_elem_id id; memset(&id, 0, sizeof(id)); sprintf(id.name, "%s %s Volume", type, dir); id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; if (snd_ctl_find_id(codec->bus->card, &id)) return 1; sprintf(id.name, "%s %s Switch", type, dir); id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; if (snd_ctl_find_id(codec->bus->card, &id)) return 1; return 0; } /* * build output mixer controls */ static int create_output_mixers(struct hda_codec *codec, const char * const *names) { struct hda_gspec *spec = codec->spec; int i, err; for (i = 0; i < spec->pcm_vol_nodes; i++) { err = create_mixer(codec, spec->pcm_vol[i].node, spec->pcm_vol[i].index, names[i], "Playback", 0); if (err < 0) return err; } return 0; } static int build_output_controls(struct hda_codec *codec) { struct hda_gspec *spec = codec->spec; static const char * const types_speaker[] = { "Speaker", "Headphone" }; static const char * const types_line[] = { "Front", "Headphone" }; switch (spec->pcm_vol_nodes) { case 1: return 
create_mixer(codec, spec->pcm_vol[0].node, spec->pcm_vol[0].index, "Master", "Playback", 0); case 2: if (defcfg_type(spec->out_pin_node[0]) == AC_JACK_SPEAKER) return create_output_mixers(codec, types_speaker); else return create_output_mixers(codec, types_line); } return 0; } /* create capture volume/switch */ static int build_input_controls(struct hda_codec *codec) { struct hda_gspec *spec = codec->spec; struct hda_gnode *adc_node = spec->adc_node; int i, err; static struct snd_kcontrol_new cap_sel = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Capture Source", .info = capture_source_info, .get = capture_source_get, .put = capture_source_put, }; if (! adc_node || ! spec->input_mux.num_items) return 0; /* not found */ spec->cur_cap_src = 0; select_input_connection(codec, adc_node, spec->input_mux.items[0].index); /* create capture volume and switch controls if the ADC has an amp */ /* do we have only a single item? */ if (spec->input_mux.num_items == 1) { err = create_mixer(codec, adc_node, spec->input_mux.items[0].index, NULL, "Capture", 0); if (err < 0) return err; return 0; } /* create input MUX if multiple sources are available */ err = snd_hda_ctl_add(codec, spec->adc_node->nid, snd_ctl_new1(&cap_sel, codec)); if (err < 0) return err; /* no volume control? */ if (! (adc_node->wid_caps & AC_WCAP_IN_AMP) || ! (adc_node->amp_in_caps & AC_AMPCAP_NUM_STEPS)) return 0; for (i = 0; i < spec->input_mux.num_items; i++) { struct snd_kcontrol_new knew; char name[32]; sprintf(name, "%s Capture Volume", spec->input_mux.items[i].label); knew = (struct snd_kcontrol_new) HDA_CODEC_VOLUME(name, adc_node->nid, spec->input_mux.items[i].index, HDA_INPUT); err = snd_hda_ctl_add(codec, adc_node->nid, snd_ctl_new1(&knew, codec)); if (err < 0) return err; } return 0; } /* * parse the nodes recursively until reach to the output PIN. 
* * returns 0 - if not found, * 1 - if found, but no mixer is created * 2 - if found and mixer was already created, (just skip) * a negative error code */ static int parse_loopback_path(struct hda_codec *codec, struct hda_gspec *spec, struct hda_gnode *node, struct hda_gnode *dest_node, const char *type) { int i, err; if (node->checked) return 0; node->checked = 1; if (node == dest_node) { /* loopback connection found */ return 1; } for (i = 0; i < node->nconns; i++) { struct hda_gnode *child = hda_get_node(spec, node->conn_list[i]); if (! child) continue; err = parse_loopback_path(codec, spec, child, dest_node, type); if (err < 0) return err; else if (err >= 1) { if (err == 1) { err = create_mixer(codec, node, i, type, "Playback", 1); if (err < 0) return err; if (err > 0) return 2; /* ok, created */ /* not created, maybe in the lower path */ err = 1; } /* connect and unmute */ if (node->nconns > 1) select_input_connection(codec, node, i); unmute_input(codec, node, i); unmute_output(codec, node); return err; } } return 0; } /* * parse the tree and build the loopback controls */ static int build_loopback_controls(struct hda_codec *codec) { struct hda_gspec *spec = codec->spec; struct hda_gnode *node; int err; const char *type; if (! spec->out_pin_node[0]) return 0; list_for_each_entry(node, &spec->nid_list, list) { if (node->type != AC_WID_PIN) continue; /* input capable? */ if (! (node->pin_caps & AC_PINCAP_IN)) return 0; type = get_input_type(node, NULL); if (type) { if (check_existing_control(codec, type, "Playback")) continue; clear_check_flags(spec); err = parse_loopback_path(codec, spec, spec->out_pin_node[0], node, type); if (err < 0) return err; if (! 
err) continue; } } return 0; } /* * build mixer controls */ static int build_generic_controls(struct hda_codec *codec) { int err; if ((err = build_input_controls(codec)) < 0 || (err = build_output_controls(codec)) < 0 || (err = build_loopback_controls(codec)) < 0) return err; return 0; } /* * PCM */ static struct hda_pcm_stream generic_pcm_playback = { .substreams = 1, .channels_min = 2, .channels_max = 2, }; static int generic_pcm2_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { struct hda_gspec *spec = codec->spec; snd_hda_codec_setup_stream(codec, hinfo->nid, stream_tag, 0, format); snd_hda_codec_setup_stream(codec, spec->dac_node[1]->nid, stream_tag, 0, format); return 0; } static int generic_pcm2_cleanup(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct hda_gspec *spec = codec->spec; snd_hda_codec_cleanup_stream(codec, hinfo->nid); snd_hda_codec_cleanup_stream(codec, spec->dac_node[1]->nid); return 0; } static int build_generic_pcms(struct hda_codec *codec) { struct hda_gspec *spec = codec->spec; struct hda_pcm *info = &spec->pcm_rec; if (! spec->dac_node[0] && ! 
spec->adc_node) { snd_printd("hda_generic: no PCM found\n"); return 0; } codec->num_pcms = 1; codec->pcm_info = info; info->name = "HDA Generic"; if (spec->dac_node[0]) { info->stream[0] = generic_pcm_playback; info->stream[0].nid = spec->dac_node[0]->nid; if (spec->dac_node[1]) { info->stream[0].ops.prepare = generic_pcm2_prepare; info->stream[0].ops.cleanup = generic_pcm2_cleanup; } } if (spec->adc_node) { info->stream[1] = generic_pcm_playback; info->stream[1].nid = spec->adc_node->nid; } return 0; } #ifdef CONFIG_SND_HDA_POWER_SAVE static int generic_check_power_status(struct hda_codec *codec, hda_nid_t nid) { struct hda_gspec *spec = codec->spec; return snd_hda_check_amp_list_power(codec, &spec->loopback, nid); } #endif /* */ static struct hda_codec_ops generic_patch_ops = { .build_controls = build_generic_controls, .build_pcms = build_generic_pcms, .free = snd_hda_generic_free, #ifdef CONFIG_SND_HDA_POWER_SAVE .check_power_status = generic_check_power_status, #endif }; /* * the generic parser */ int snd_hda_parse_generic_codec(struct hda_codec *codec) { struct hda_gspec *spec; int err; if(!codec->afg) return 0; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) { printk(KERN_ERR "hda_generic: can't allocate spec\n"); return -ENOMEM; } codec->spec = spec; INIT_LIST_HEAD(&spec->nid_list); if ((err = build_afg_tree(codec)) < 0) goto error; if ((err = parse_input(codec)) < 0 || (err = parse_output(codec)) < 0) goto error; codec->patch_ops = generic_patch_ops; return 0; error: snd_hda_generic_free(codec); return err; } EXPORT_SYMBOL(snd_hda_parse_generic_codec);
gpl-2.0
carlocaione/linux-meson
drivers/isdn/i4l/isdn_ttyfax.c
9795
25364
/* $Id: isdn_ttyfax.c,v 1.1.2.2 2004/01/12 22:37:19 keil Exp $ * * Linux ISDN subsystem, tty_fax AT-command emulator (linklevel). * * Copyright 1999 by Armin Schindler (mac@melware.de) * Copyright 1999 by Ralf Spachmann (mel@melware.de) * Copyright 1999 by Cytronics & Melware * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #undef ISDN_TTY_FAX_STAT_DEBUG #undef ISDN_TTY_FAX_CMD_DEBUG #include <linux/isdn.h> #include "isdn_common.h" #include "isdn_tty.h" #include "isdn_ttyfax.h" static char *isdn_tty_fax_revision = "$Revision: 1.1.2.2 $"; #define PARSE_ERROR1 { isdn_tty_fax_modem_result(1, info); return 1; } static char * isdn_getrev(const char *revision) { char *rev; char *p; if ((p = strchr(revision, ':'))) { rev = p + 2; p = strchr(rev, '$'); *--p = 0; } else rev = "???"; return rev; } /* * Fax Class 2 Modem results * */ static void isdn_tty_fax_modem_result(int code, modem_info *info) { atemu *m = &info->emu; T30_s *f = info->fax; char rs[50]; char rss[50]; char *rp; int i; static char *msg[] = {"OK", "ERROR", "+FCON", "+FCSI:", "+FDIS:", "+FHNG:", "+FDCS:", "CONNECT", "+FTSI:", "+FCFR", "+FPTS:", "+FET:"}; isdn_tty_at_cout("\r\n", info); isdn_tty_at_cout(msg[code], info); #ifdef ISDN_TTY_FAX_CMD_DEBUG printk(KERN_DEBUG "isdn_tty: Fax send %s on ttyI%d\n", msg[code], info->line); #endif switch (code) { case 0: /* OK */ break; case 1: /* ERROR */ break; case 2: /* +FCON */ /* Append CPN, if enabled */ if ((m->mdmreg[REG_CPNFCON] & BIT_CPNFCON) && (!(dev->usage[info->isdn_channel] & ISDN_USAGE_OUTGOING))) { sprintf(rs, "/%s", m->cpn); isdn_tty_at_cout(rs, info); } info->online = 1; f->fet = 0; if (f->phase == ISDN_FAX_PHASE_A) f->phase = ISDN_FAX_PHASE_B; break; case 3: /* +FCSI */ case 8: /* +FTSI */ sprintf(rs, "\"%s\"", f->r_id); isdn_tty_at_cout(rs, info); break; case 4: /* +FDIS */ rs[0] = 0; rp = &f->r_resolution; for (i = 0; i < 8; i++) { sprintf(rss, "%c%s", 
rp[i] + 48, (i < 7) ? "," : ""); strcat(rs, rss); } isdn_tty_at_cout(rs, info); #ifdef ISDN_TTY_FAX_CMD_DEBUG printk(KERN_DEBUG "isdn_tty: Fax DIS=%s on ttyI%d\n", rs, info->line); #endif break; case 5: /* +FHNG */ sprintf(rs, "%d", f->code); isdn_tty_at_cout(rs, info); info->faxonline = 0; break; case 6: /* +FDCS */ rs[0] = 0; rp = &f->r_resolution; for (i = 0; i < 8; i++) { sprintf(rss, "%c%s", rp[i] + 48, (i < 7) ? "," : ""); strcat(rs, rss); } isdn_tty_at_cout(rs, info); #ifdef ISDN_TTY_FAX_CMD_DEBUG printk(KERN_DEBUG "isdn_tty: Fax DCS=%s on ttyI%d\n", rs, info->line); #endif break; case 7: /* CONNECT */ info->faxonline |= 2; break; case 9: /* FCFR */ break; case 10: /* FPTS */ isdn_tty_at_cout("1", info); break; case 11: /* FET */ sprintf(rs, "%d", f->fet); isdn_tty_at_cout(rs, info); break; } isdn_tty_at_cout("\r\n", info); switch (code) { case 7: /* CONNECT */ info->online = 2; if (info->faxonline & 1) { sprintf(rs, "%c", XON); isdn_tty_at_cout(rs, info); } break; } } static int isdn_tty_fax_command1(modem_info *info, isdn_ctrl *c) { static char *msg[] = {"OK", "CONNECT", "NO CARRIER", "ERROR", "FCERROR"}; #ifdef ISDN_TTY_FAX_CMD_DEBUG printk(KERN_DEBUG "isdn_tty: FCLASS1 cmd(%d)\n", c->parm.aux.cmd); #endif if (c->parm.aux.cmd < ISDN_FAX_CLASS1_QUERY) { if (info->online) info->online = 1; isdn_tty_at_cout("\r\n", info); isdn_tty_at_cout(msg[c->parm.aux.cmd], info); isdn_tty_at_cout("\r\n", info); } switch (c->parm.aux.cmd) { case ISDN_FAX_CLASS1_CONNECT: info->online = 2; break; case ISDN_FAX_CLASS1_OK: case ISDN_FAX_CLASS1_FCERROR: case ISDN_FAX_CLASS1_ERROR: case ISDN_FAX_CLASS1_NOCARR: break; case ISDN_FAX_CLASS1_QUERY: isdn_tty_at_cout("\r\n", info); if (!c->parm.aux.para[0]) { isdn_tty_at_cout(msg[ISDN_FAX_CLASS1_ERROR], info); isdn_tty_at_cout("\r\n", info); } else { isdn_tty_at_cout(c->parm.aux.para, info); isdn_tty_at_cout("\r\nOK\r\n", info); } break; } return (0); } int isdn_tty_fax_command(modem_info *info, isdn_ctrl *c) { T30_s *f = info->fax; 
char rs[10]; if (TTY_IS_FCLASS1(info)) return (isdn_tty_fax_command1(info, c)); #ifdef ISDN_TTY_FAX_CMD_DEBUG printk(KERN_DEBUG "isdn_tty: Fax cmd %d on ttyI%d\n", f->r_code, info->line); #endif switch (f->r_code) { case ISDN_TTY_FAX_FCON: info->faxonline = 1; isdn_tty_fax_modem_result(2, info); /* +FCON */ return (0); case ISDN_TTY_FAX_FCON_I: info->faxonline = 16; isdn_tty_fax_modem_result(2, info); /* +FCON */ return (0); case ISDN_TTY_FAX_RID: if (info->faxonline & 1) isdn_tty_fax_modem_result(3, info); /* +FCSI */ if (info->faxonline & 16) isdn_tty_fax_modem_result(8, info); /* +FTSI */ return (0); case ISDN_TTY_FAX_DIS: isdn_tty_fax_modem_result(4, info); /* +FDIS */ return (0); case ISDN_TTY_FAX_HNG: if (f->phase == ISDN_FAX_PHASE_C) { if (f->direction == ISDN_TTY_FAX_CONN_IN) { sprintf(rs, "%c%c", DLE, ETX); isdn_tty_at_cout(rs, info); } else { sprintf(rs, "%c", 0x18); isdn_tty_at_cout(rs, info); } info->faxonline &= ~2; /* leave data mode */ info->online = 1; } f->phase = ISDN_FAX_PHASE_E; isdn_tty_fax_modem_result(5, info); /* +FHNG */ isdn_tty_fax_modem_result(0, info); /* OK */ return (0); case ISDN_TTY_FAX_DCS: isdn_tty_fax_modem_result(6, info); /* +FDCS */ isdn_tty_fax_modem_result(7, info); /* CONNECT */ f->phase = ISDN_FAX_PHASE_C; return (0); case ISDN_TTY_FAX_TRAIN_OK: isdn_tty_fax_modem_result(6, info); /* +FDCS */ isdn_tty_fax_modem_result(0, info); /* OK */ return (0); case ISDN_TTY_FAX_SENT: isdn_tty_fax_modem_result(0, info); /* OK */ return (0); case ISDN_TTY_FAX_CFR: isdn_tty_fax_modem_result(9, info); /* +FCFR */ return (0); case ISDN_TTY_FAX_ET: sprintf(rs, "%c%c", DLE, ETX); isdn_tty_at_cout(rs, info); isdn_tty_fax_modem_result(10, info); /* +FPTS */ isdn_tty_fax_modem_result(11, info); /* +FET */ isdn_tty_fax_modem_result(0, info); /* OK */ info->faxonline &= ~2; /* leave data mode */ info->online = 1; f->phase = ISDN_FAX_PHASE_D; return (0); case ISDN_TTY_FAX_PTS: isdn_tty_fax_modem_result(10, info); /* +FPTS */ if (f->direction == 
ISDN_TTY_FAX_CONN_OUT) { if (f->fet == 1) f->phase = ISDN_FAX_PHASE_B; if (f->fet == 0) isdn_tty_fax_modem_result(0, info); /* OK */ } return (0); case ISDN_TTY_FAX_EOP: info->faxonline &= ~2; /* leave data mode */ info->online = 1; f->phase = ISDN_FAX_PHASE_D; return (0); } return (-1); } void isdn_tty_fax_bitorder(modem_info *info, struct sk_buff *skb) { __u8 LeftMask; __u8 RightMask; __u8 fBit; __u8 Data; int i; if (!info->fax->bor) { for (i = 0; i < skb->len; i++) { Data = skb->data[i]; for ( LeftMask = 0x80, RightMask = 0x01; LeftMask > RightMask; LeftMask >>= 1, RightMask <<= 1 ) { fBit = (Data & LeftMask); if (Data & RightMask) Data |= LeftMask; else Data &= ~LeftMask; if (fBit) Data |= RightMask; else Data &= ~RightMask; } skb->data[i] = Data; } } } /* * Parse AT+F.. FAX class 1 commands */ static int isdn_tty_cmd_FCLASS1(char **p, modem_info *info) { static char *cmd[] = {"AE", "TS", "RS", "TM", "RM", "TH", "RH"}; isdn_ctrl c; int par, i; u_long flags; for (c.parm.aux.cmd = 0; c.parm.aux.cmd < 7; c.parm.aux.cmd++) if (!strncmp(p[0], cmd[c.parm.aux.cmd], 2)) break; #ifdef ISDN_TTY_FAX_CMD_DEBUG printk(KERN_DEBUG "isdn_tty_cmd_FCLASS1 (%s,%d)\n", p[0], c.parm.aux.cmd); #endif if (c.parm.aux.cmd == 7) PARSE_ERROR1; p[0] += 2; switch (*p[0]) { case '?': p[0]++; c.parm.aux.subcmd = AT_QUERY; break; case '=': p[0]++; if (*p[0] == '?') { p[0]++; c.parm.aux.subcmd = AT_EQ_QUERY; } else { par = isdn_getnum(p); if ((par < 0) || (par > 255)) PARSE_ERROR1; c.parm.aux.subcmd = AT_EQ_VALUE; c.parm.aux.para[0] = par; } break; case 0: c.parm.aux.subcmd = AT_COMMAND; break; default: PARSE_ERROR1; } c.command = ISDN_CMD_FAXCMD; #ifdef ISDN_TTY_FAX_CMD_DEBUG printk(KERN_DEBUG "isdn_tty_cmd_FCLASS1 %d/%d/%d)\n", c.parm.aux.cmd, c.parm.aux.subcmd, c.parm.aux.para[0]); #endif if (info->isdn_driver < 0) { if ((c.parm.aux.subcmd == AT_EQ_VALUE) || (c.parm.aux.subcmd == AT_COMMAND)) { PARSE_ERROR1; } spin_lock_irqsave(&dev->lock, flags); /* get a temporary connection to the first 
free fax driver */ i = isdn_get_free_channel(ISDN_USAGE_FAX, ISDN_PROTO_L2_FAX, ISDN_PROTO_L3_FCLASS1, -1, -1, "00"); if (i < 0) { spin_unlock_irqrestore(&dev->lock, flags); PARSE_ERROR1; } info->isdn_driver = dev->drvmap[i]; info->isdn_channel = dev->chanmap[i]; info->drv_index = i; dev->m_idx[i] = info->line; spin_unlock_irqrestore(&dev->lock, flags); c.driver = info->isdn_driver; c.arg = info->isdn_channel; isdn_command(&c); spin_lock_irqsave(&dev->lock, flags); isdn_free_channel(info->isdn_driver, info->isdn_channel, ISDN_USAGE_FAX); info->isdn_driver = -1; info->isdn_channel = -1; if (info->drv_index >= 0) { dev->m_idx[info->drv_index] = -1; info->drv_index = -1; } spin_unlock_irqrestore(&dev->lock, flags); } else { c.driver = info->isdn_driver; c.arg = info->isdn_channel; isdn_command(&c); } return 1; } /* * Parse AT+F.. FAX class 2 commands */ static int isdn_tty_cmd_FCLASS2(char **p, modem_info *info) { atemu *m = &info->emu; T30_s *f = info->fax; isdn_ctrl cmd; int par; char rs[50]; char rss[50]; int maxdccval[] = {1, 5, 2, 2, 3, 2, 0, 7}; /* FAA still unchanged */ if (!strncmp(p[0], "AA", 2)) { /* TODO */ p[0] += 2; switch (*p[0]) { case '?': p[0]++; sprintf(rs, "\r\n%d", 0); isdn_tty_at_cout(rs, info); break; case '=': p[0]++; par = isdn_getnum(p); if ((par < 0) || (par > 255)) PARSE_ERROR1; break; default: PARSE_ERROR1; } return 0; } /* BADLIN=value - dummy 0=disable errorchk disabled, 1-255 nr. 
of lines for making page bad */ if (!strncmp(p[0], "BADLIN", 6)) { p[0] += 6; switch (*p[0]) { case '?': p[0]++; sprintf(rs, "\r\n%d", f->badlin); isdn_tty_at_cout(rs, info); break; case '=': p[0]++; if (*p[0] == '?') { p[0]++; sprintf(rs, "\r\n0-255"); isdn_tty_at_cout(rs, info); } else { par = isdn_getnum(p); if ((par < 0) || (par > 255)) PARSE_ERROR1; f->badlin = par; #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: Fax FBADLIN=%d\n", par); #endif } break; default: PARSE_ERROR1; } return 0; } /* BADMUL=value - dummy 0=disable errorchk disabled (threshold multiplier) */ if (!strncmp(p[0], "BADMUL", 6)) { p[0] += 6; switch (*p[0]) { case '?': p[0]++; sprintf(rs, "\r\n%d", f->badmul); isdn_tty_at_cout(rs, info); break; case '=': p[0]++; if (*p[0] == '?') { p[0]++; sprintf(rs, "\r\n0-255"); isdn_tty_at_cout(rs, info); } else { par = isdn_getnum(p); if ((par < 0) || (par > 255)) PARSE_ERROR1; f->badmul = par; #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: Fax FBADMUL=%d\n", par); #endif } break; default: PARSE_ERROR1; } return 0; } /* BOR=n - Phase C bit order, 0=direct, 1=reverse */ if (!strncmp(p[0], "BOR", 3)) { p[0] += 3; switch (*p[0]) { case '?': p[0]++; sprintf(rs, "\r\n%d", f->bor); isdn_tty_at_cout(rs, info); break; case '=': p[0]++; if (*p[0] == '?') { p[0]++; sprintf(rs, "\r\n0,1"); isdn_tty_at_cout(rs, info); } else { par = isdn_getnum(p); if ((par < 0) || (par > 1)) PARSE_ERROR1; f->bor = par; #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: Fax FBOR=%d\n", par); #endif } break; default: PARSE_ERROR1; } return 0; } /* NBC=n - No Best Capabilities */ if (!strncmp(p[0], "NBC", 3)) { p[0] += 3; switch (*p[0]) { case '?': p[0]++; sprintf(rs, "\r\n%d", f->nbc); isdn_tty_at_cout(rs, info); break; case '=': p[0]++; if (*p[0] == '?') { p[0]++; sprintf(rs, "\r\n0,1"); isdn_tty_at_cout(rs, info); } else { par = isdn_getnum(p); if ((par < 0) || (par > 1)) PARSE_ERROR1; f->nbc = par; #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG 
"isdn_tty: Fax FNBC=%d\n", par); #endif } break; default: PARSE_ERROR1; } return 0; } /* BUF? - Readonly buffersize readout */ if (!strncmp(p[0], "BUF?", 4)) { p[0] += 4; #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: Fax FBUF? (%d) \n", (16 * m->mdmreg[REG_PSIZE])); #endif p[0]++; sprintf(rs, "\r\n %d ", (16 * m->mdmreg[REG_PSIZE])); isdn_tty_at_cout(rs, info); return 0; } /* CIG=string - local fax station id string for polling rx */ if (!strncmp(p[0], "CIG", 3)) { int i, r; p[0] += 3; switch (*p[0]) { case '?': p[0]++; sprintf(rs, "\r\n\"%s\"", f->pollid); isdn_tty_at_cout(rs, info); break; case '=': p[0]++; if (*p[0] == '?') { p[0]++; sprintf(rs, "\r\n\"STRING\""); isdn_tty_at_cout(rs, info); } else { if (*p[0] == '"') p[0]++; for (i = 0; (*p[0]) && i < (FAXIDLEN - 1) && (*p[0] != '"'); i++) { f->pollid[i] = *p[0]++; } if (*p[0] == '"') p[0]++; for (r = i; r < FAXIDLEN; r++) { f->pollid[r] = 32; } f->pollid[FAXIDLEN - 1] = 0; #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: Fax local poll ID rx \"%s\"\n", f->pollid); #endif } break; default: PARSE_ERROR1; } return 0; } /* CQ=n - copy qlty chk, 0= no chk, 1=only 1D chk, 2=1D+2D chk */ if (!strncmp(p[0], "CQ", 2)) { p[0] += 2; switch (*p[0]) { case '?': p[0]++; sprintf(rs, "\r\n%d", f->cq); isdn_tty_at_cout(rs, info); break; case '=': p[0]++; if (*p[0] == '?') { p[0]++; sprintf(rs, "\r\n0,1,2"); isdn_tty_at_cout(rs, info); } else { par = isdn_getnum(p); if ((par < 0) || (par > 2)) PARSE_ERROR1; f->cq = par; #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: Fax FCQ=%d\n", par); #endif } break; default: PARSE_ERROR1; } return 0; } /* CR=n - can receive? 
0= no data rx or poll remote dev, 1=do receive data or poll remote dev */ if (!strncmp(p[0], "CR", 2)) { p[0] += 2; switch (*p[0]) { case '?': p[0]++; sprintf(rs, "\r\n%d", f->cr); /* read actual value from struct and print */ isdn_tty_at_cout(rs, info); break; case '=': p[0]++; if (*p[0] == '?') { p[0]++; sprintf(rs, "\r\n0,1"); /* display online help */ isdn_tty_at_cout(rs, info); } else { par = isdn_getnum(p); if ((par < 0) || (par > 1)) PARSE_ERROR1; f->cr = par; #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: Fax FCR=%d\n", par); #endif } break; default: PARSE_ERROR1; } return 0; } /* CTCRTY=value - ECM retry count */ if (!strncmp(p[0], "CTCRTY", 6)) { p[0] += 6; switch (*p[0]) { case '?': p[0]++; sprintf(rs, "\r\n%d", f->ctcrty); isdn_tty_at_cout(rs, info); break; case '=': p[0]++; if (*p[0] == '?') { p[0]++; sprintf(rs, "\r\n0-255"); isdn_tty_at_cout(rs, info); } else { par = isdn_getnum(p); if ((par < 0) || (par > 255)) PARSE_ERROR1; f->ctcrty = par; #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: Fax FCTCRTY=%d\n", par); #endif } break; default: PARSE_ERROR1; } return 0; } /* DCC=vr,br,wd,ln,df,ec,bf,st - DCE capabilities parms */ if (!strncmp(p[0], "DCC", 3)) { char *rp = &f->resolution; int i; p[0] += 3; switch (*p[0]) { case '?': p[0]++; strcpy(rs, "\r\n"); for (i = 0; i < 8; i++) { sprintf(rss, "%c%s", rp[i] + 48, (i < 7) ? 
"," : ""); strcat(rs, rss); } isdn_tty_at_cout(rs, info); break; case '=': p[0]++; if (*p[0] == '?') { isdn_tty_at_cout("\r\n(0,1),(0-5),(0-2),(0-2),(0-3),(0-2),(0),(0-7)", info); p[0]++; } else { for (i = 0; (((*p[0] >= '0') && (*p[0] <= '9')) || (*p[0] == ',')) && (i < 8); i++) { if (*p[0] != ',') { if ((*p[0] - 48) > maxdccval[i]) { PARSE_ERROR1; } rp[i] = *p[0] - 48; p[0]++; if (*p[0] == ',') p[0]++; } else p[0]++; } #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: Fax FDCC capabilities DCE=%d,%d,%d,%d,%d,%d,%d,%d\n", rp[0], rp[1], rp[2], rp[3], rp[4], rp[5], rp[6], rp[7]); #endif } break; default: PARSE_ERROR1; } return 0; } /* DIS=vr,br,wd,ln,df,ec,bf,st - current session parms */ if (!strncmp(p[0], "DIS", 3)) { char *rp = &f->resolution; int i; p[0] += 3; switch (*p[0]) { case '?': p[0]++; strcpy(rs, "\r\n"); for (i = 0; i < 8; i++) { sprintf(rss, "%c%s", rp[i] + 48, (i < 7) ? "," : ""); strcat(rs, rss); } isdn_tty_at_cout(rs, info); break; case '=': p[0]++; if (*p[0] == '?') { isdn_tty_at_cout("\r\n(0,1),(0-5),(0-2),(0-2),(0-3),(0-2),(0),(0-7)", info); p[0]++; } else { for (i = 0; (((*p[0] >= '0') && (*p[0] <= '9')) || (*p[0] == ',')) && (i < 8); i++) { if (*p[0] != ',') { if ((*p[0] - 48) > maxdccval[i]) { PARSE_ERROR1; } rp[i] = *p[0] - 48; p[0]++; if (*p[0] == ',') p[0]++; } else p[0]++; } #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: Fax FDIS session parms=%d,%d,%d,%d,%d,%d,%d,%d\n", rp[0], rp[1], rp[2], rp[3], rp[4], rp[5], rp[6], rp[7]); #endif } break; default: PARSE_ERROR1; } return 0; } /* DR - Receive Phase C data command, initiates document reception */ if (!strncmp(p[0], "DR", 2)) { p[0] += 2; if ((info->faxonline & 16) && /* incoming connection */ ((f->phase == ISDN_FAX_PHASE_B) || (f->phase == ISDN_FAX_PHASE_D))) { #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: Fax FDR\n"); #endif f->code = ISDN_TTY_FAX_DR; cmd.driver = info->isdn_driver; cmd.arg = info->isdn_channel; cmd.command = ISDN_CMD_FAXCMD; 
isdn_command(&cmd); if (f->phase == ISDN_FAX_PHASE_B) { f->phase = ISDN_FAX_PHASE_C; } else if (f->phase == ISDN_FAX_PHASE_D) { switch (f->fet) { case 0: /* next page will be received */ f->phase = ISDN_FAX_PHASE_C; isdn_tty_fax_modem_result(7, info); /* CONNECT */ break; case 1: /* next doc will be received */ f->phase = ISDN_FAX_PHASE_B; break; case 2: /* fax session is terminating */ f->phase = ISDN_FAX_PHASE_E; break; default: PARSE_ERROR1; } } } else { PARSE_ERROR1; } return 1; } /* DT=df,vr,wd,ln - TX phase C data command (release DCE to proceed with negotiation) */ if (!strncmp(p[0], "DT", 2)) { int i, val[] = {4, 0, 2, 3}; char *rp = &f->resolution; p[0] += 2; if (!(info->faxonline & 1)) /* not outgoing connection */ PARSE_ERROR1; for (i = 0; (((*p[0] >= '0') && (*p[0] <= '9')) || (*p[0] == ',')) && (i < 4); i++) { if (*p[0] != ',') { if ((*p[0] - 48) > maxdccval[val[i]]) { PARSE_ERROR1; } rp[val[i]] = *p[0] - 48; p[0]++; if (*p[0] == ',') p[0]++; } else p[0]++; } #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: Fax FDT tx data command parms=%d,%d,%d,%d\n", rp[4], rp[0], rp[2], rp[3]); #endif if ((f->phase == ISDN_FAX_PHASE_B) || (f->phase == ISDN_FAX_PHASE_D)) { f->code = ISDN_TTY_FAX_DT; cmd.driver = info->isdn_driver; cmd.arg = info->isdn_channel; cmd.command = ISDN_CMD_FAXCMD; isdn_command(&cmd); if (f->phase == ISDN_FAX_PHASE_D) { f->phase = ISDN_FAX_PHASE_C; isdn_tty_fax_modem_result(7, info); /* CONNECT */ } } else { PARSE_ERROR1; } return 1; } /* ECM=n - Error mode control 0=disabled, 2=enabled, handled by DCE alone incl. 
buff of partial pages */ if (!strncmp(p[0], "ECM", 3)) { p[0] += 3; switch (*p[0]) { case '?': p[0]++; sprintf(rs, "\r\n%d", f->ecm); isdn_tty_at_cout(rs, info); break; case '=': p[0]++; if (*p[0] == '?') { p[0]++; sprintf(rs, "\r\n0,2"); isdn_tty_at_cout(rs, info); } else { par = isdn_getnum(p); if ((par != 0) && (par != 2)) PARSE_ERROR1; f->ecm = par; #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: Fax FECM=%d\n", par); #endif } break; default: PARSE_ERROR1; } return 0; } /* ET=n - End of page or document */ if (!strncmp(p[0], "ET=", 3)) { p[0] += 3; if (*p[0] == '?') { p[0]++; sprintf(rs, "\r\n0-2"); isdn_tty_at_cout(rs, info); } else { if ((f->phase != ISDN_FAX_PHASE_D) || (!(info->faxonline & 1))) PARSE_ERROR1; par = isdn_getnum(p); if ((par < 0) || (par > 2)) PARSE_ERROR1; f->fet = par; f->code = ISDN_TTY_FAX_ET; cmd.driver = info->isdn_driver; cmd.arg = info->isdn_channel; cmd.command = ISDN_CMD_FAXCMD; isdn_command(&cmd); #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: Fax FET=%d\n", par); #endif return 1; } return 0; } /* K - terminate */ if (!strncmp(p[0], "K", 1)) { p[0] += 1; if ((f->phase == ISDN_FAX_PHASE_IDLE) || (f->phase == ISDN_FAX_PHASE_E)) PARSE_ERROR1; isdn_tty_modem_hup(info, 1); return 1; } /* LID=string - local fax ID */ if (!strncmp(p[0], "LID", 3)) { int i, r; p[0] += 3; switch (*p[0]) { case '?': p[0]++; sprintf(rs, "\r\n\"%s\"", f->id); isdn_tty_at_cout(rs, info); break; case '=': p[0]++; if (*p[0] == '?') { p[0]++; sprintf(rs, "\r\n\"STRING\""); isdn_tty_at_cout(rs, info); } else { if (*p[0] == '"') p[0]++; for (i = 0; (*p[0]) && i < (FAXIDLEN - 1) && (*p[0] != '"'); i++) { f->id[i] = *p[0]++; } if (*p[0] == '"') p[0]++; for (r = i; r < FAXIDLEN; r++) { f->id[r] = 32; } f->id[FAXIDLEN - 1] = 0; #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: Fax local ID \"%s\"\n", f->id); #endif } break; default: PARSE_ERROR1; } return 0; } /* MDL? 
- DCE Model */ if (!strncmp(p[0], "MDL?", 4)) { p[0] += 4; #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: FMDL?\n"); #endif isdn_tty_at_cout("\r\nisdn4linux", info); return 0; } /* MFR? - DCE Manufacturer */ if (!strncmp(p[0], "MFR?", 4)) { p[0] += 4; #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: FMFR?\n"); #endif isdn_tty_at_cout("\r\nisdn4linux", info); return 0; } /* MINSP=n - Minimum Speed for Phase C */ if (!strncmp(p[0], "MINSP", 5)) { p[0] += 5; switch (*p[0]) { case '?': p[0]++; sprintf(rs, "\r\n%d", f->minsp); isdn_tty_at_cout(rs, info); break; case '=': p[0]++; if (*p[0] == '?') { p[0]++; sprintf(rs, "\r\n0-5"); isdn_tty_at_cout(rs, info); } else { par = isdn_getnum(p); if ((par < 0) || (par > 5)) PARSE_ERROR1; f->minsp = par; #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: Fax FMINSP=%d\n", par); #endif } break; default: PARSE_ERROR1; } return 0; } /* PHCTO=value - DTE phase C timeout */ if (!strncmp(p[0], "PHCTO", 5)) { p[0] += 5; switch (*p[0]) { case '?': p[0]++; sprintf(rs, "\r\n%d", f->phcto); isdn_tty_at_cout(rs, info); break; case '=': p[0]++; if (*p[0] == '?') { p[0]++; sprintf(rs, "\r\n0-255"); isdn_tty_at_cout(rs, info); } else { par = isdn_getnum(p); if ((par < 0) || (par > 255)) PARSE_ERROR1; f->phcto = par; #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: Fax FPHCTO=%d\n", par); #endif } break; default: PARSE_ERROR1; } return 0; } /* REL=n - Phase C received EOL alignment */ if (!strncmp(p[0], "REL", 3)) { p[0] += 3; switch (*p[0]) { case '?': p[0]++; sprintf(rs, "\r\n%d", f->rel); isdn_tty_at_cout(rs, info); break; case '=': p[0]++; if (*p[0] == '?') { p[0]++; sprintf(rs, "\r\n0,1"); isdn_tty_at_cout(rs, info); } else { par = isdn_getnum(p); if ((par < 0) || (par > 1)) PARSE_ERROR1; f->rel = par; #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: Fax FREL=%d\n", par); #endif } break; default: PARSE_ERROR1; } return 0; } /* REV? 
- DCE Revision */ if (!strncmp(p[0], "REV?", 4)) { p[0] += 4; #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: FREV?\n"); #endif strcpy(rss, isdn_tty_fax_revision); sprintf(rs, "\r\nRev: %s", isdn_getrev(rss)); isdn_tty_at_cout(rs, info); return 0; } /* Phase C Transmit Data Block Size */ if (!strncmp(p[0], "TBC=", 4)) { /* dummy, not used */ p[0] += 4; #ifdef ISDN_TTY_FAX_STAT_DEBUG printk(KERN_DEBUG "isdn_tty: Fax FTBC=%c\n", *p[0]); #endif switch (*p[0]) { case '0': p[0]++; break; default: PARSE_ERROR1; } return 0; } printk(KERN_DEBUG "isdn_tty: unknown token=>AT+F%s<\n", p[0]); PARSE_ERROR1; } int isdn_tty_cmd_PLUSF_FAX(char **p, modem_info *info) { if (TTY_IS_FCLASS2(info)) return (isdn_tty_cmd_FCLASS2(p, info)); else if (TTY_IS_FCLASS1(info)) return (isdn_tty_cmd_FCLASS1(p, info)); PARSE_ERROR1; }
gpl-2.0
uoaerg/linux-dccp
net/bluetooth/ecdh_helper.c
68
5545
/* * ECDH helper functions - KPP wrappings * * Copyright (C) 2017 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation; * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. * IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY * CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, * COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS * SOFTWARE IS DISCLAIMED. 
*/ #include "ecdh_helper.h" #include <linux/scatterlist.h> #include <crypto/kpp.h> #include <crypto/ecdh.h> struct ecdh_completion { struct completion completion; int err; }; static void ecdh_complete(struct crypto_async_request *req, int err) { struct ecdh_completion *res = req->data; if (err == -EINPROGRESS) return; res->err = err; complete(&res->completion); } static inline void swap_digits(u64 *in, u64 *out, unsigned int ndigits) { int i; for (i = 0; i < ndigits; i++) out[i] = __swab64(in[ndigits - 1 - i]); } bool compute_ecdh_secret(const u8 public_key[64], const u8 private_key[32], u8 secret[32]) { struct crypto_kpp *tfm; struct kpp_request *req; struct ecdh p; struct ecdh_completion result; struct scatterlist src, dst; u8 *tmp, *buf; unsigned int buf_len; int err = -ENOMEM; tmp = kmalloc(64, GFP_KERNEL); if (!tmp) return false; tfm = crypto_alloc_kpp("ecdh", CRYPTO_ALG_INTERNAL, 0); if (IS_ERR(tfm)) { pr_err("alg: kpp: Failed to load tfm for kpp: %ld\n", PTR_ERR(tfm)); goto free_tmp; } req = kpp_request_alloc(tfm, GFP_KERNEL); if (!req) goto free_kpp; init_completion(&result.completion); /* Security Manager Protocol holds digits in litte-endian order * while ECC API expect big-endian data */ swap_digits((u64 *)private_key, (u64 *)tmp, 4); p.key = (char *)tmp; p.key_size = 32; /* Set curve_id */ p.curve_id = ECC_CURVE_NIST_P256; buf_len = crypto_ecdh_key_len(&p); buf = kmalloc(buf_len, GFP_KERNEL); if (!buf) goto free_req; crypto_ecdh_encode_key(buf, buf_len, &p); /* Set A private Key */ err = crypto_kpp_set_secret(tfm, (void *)buf, buf_len); if (err) goto free_all; swap_digits((u64 *)public_key, (u64 *)tmp, 4); /* x */ swap_digits((u64 *)&public_key[32], (u64 *)&tmp[32], 4); /* y */ sg_init_one(&src, tmp, 64); sg_init_one(&dst, secret, 32); kpp_request_set_input(req, &src, 64); kpp_request_set_output(req, &dst, 32); kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, ecdh_complete, &result); err = crypto_kpp_compute_shared_secret(req); if (err == 
-EINPROGRESS) { wait_for_completion(&result.completion); err = result.err; } if (err < 0) { pr_err("alg: ecdh: compute shared secret failed. err %d\n", err); goto free_all; } swap_digits((u64 *)secret, (u64 *)tmp, 4); memcpy(secret, tmp, 32); free_all: kzfree(buf); free_req: kpp_request_free(req); free_kpp: crypto_free_kpp(tfm); free_tmp: kfree(tmp); return (err == 0); } bool generate_ecdh_keys(u8 public_key[64], u8 private_key[32]) { struct crypto_kpp *tfm; struct kpp_request *req; struct ecdh p; struct ecdh_completion result; struct scatterlist dst; u8 *tmp, *buf; unsigned int buf_len; int err = -ENOMEM; const unsigned short max_tries = 16; unsigned short tries = 0; tmp = kmalloc(64, GFP_KERNEL); if (!tmp) return false; tfm = crypto_alloc_kpp("ecdh", CRYPTO_ALG_INTERNAL, 0); if (IS_ERR(tfm)) { pr_err("alg: kpp: Failed to load tfm for kpp: %ld\n", PTR_ERR(tfm)); goto free_tmp; } req = kpp_request_alloc(tfm, GFP_KERNEL); if (!req) goto free_kpp; init_completion(&result.completion); /* Set curve_id */ p.curve_id = ECC_CURVE_NIST_P256; p.key_size = 32; buf_len = crypto_ecdh_key_len(&p); buf = kmalloc(buf_len, GFP_KERNEL); if (!buf) goto free_req; do { if (tries++ >= max_tries) goto free_all; /* Set private Key */ p.key = (char *)private_key; crypto_ecdh_encode_key(buf, buf_len, &p); err = crypto_kpp_set_secret(tfm, buf, buf_len); if (err) goto free_all; sg_init_one(&dst, tmp, 64); kpp_request_set_input(req, NULL, 0); kpp_request_set_output(req, &dst, 64); kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, ecdh_complete, &result); err = crypto_kpp_generate_public_key(req); if (err == -EINPROGRESS) { wait_for_completion(&result.completion); err = result.err; } /* Private key is not valid. 
Regenerate */ if (err == -EINVAL) continue; if (err < 0) goto free_all; else break; } while (true); /* Keys are handed back in little endian as expected by Security * Manager Protocol */ swap_digits((u64 *)tmp, (u64 *)public_key, 4); /* x */ swap_digits((u64 *)&tmp[32], (u64 *)&public_key[32], 4); /* y */ swap_digits((u64 *)private_key, (u64 *)tmp, 4); memcpy(private_key, tmp, 32); free_all: kzfree(buf); free_req: kpp_request_free(req); free_kpp: crypto_free_kpp(tfm); free_tmp: kfree(tmp); return (err == 0); }
gpl-2.0
verybadsoldier/xbmc
lib/libmicrohttpd/src/daemon/https/tls/gnutls_cert.c
68
11976
/* * Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation * * Author: Nikos Mavrogiannopoulos * * This file is part of GNUTLS. * * The GNUTLS library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA * */ /* Some of the stuff needed for Certificate authentication is contained * in this file. */ #include <gnutls_int.h> #include <gnutls_errors.h> #include <auth_cert.h> #include <gnutls_cert.h> #include <gnutls_datum.h> #include <gnutls_mpi.h> #include <gnutls_global.h> #include <gnutls_algorithms.h> #include <gnutls_dh.h> #include <gnutls_str.h> #include <gnutls_state.h> #include <gnutls_auth_int.h> #include <gnutls_x509.h> /* x509 */ #include "x509.h" #include "mpi.h" /** * MHD__gnutls_certificate_free_keys - Used to free all the keys from a MHD_gtls_cert_credentials_t structure * @sc: is an #MHD_gtls_cert_credentials_t structure. * * This function will delete all the keys and the certificates associated * with the given credentials. This function must not be called when a * TLS negotiation that uses the credentials is in progress. 
* **/ void MHD__gnutls_certificate_free_keys (MHD_gtls_cert_credentials_t sc) { unsigned i, j; for (i = 0; i < sc->ncerts; i++) { for (j = 0; j < sc->cert_list_length[i]; j++) { MHD_gtls_gcert_deinit (&sc->cert_list[i][j]); } MHD_gnutls_free (sc->cert_list[i]); } MHD_gnutls_free (sc->cert_list_length); sc->cert_list_length = NULL; MHD_gnutls_free (sc->cert_list); sc->cert_list = NULL; for (i = 0; i < sc->ncerts; i++) { MHD_gtls_gkey_deinit (&sc->pkey[i]); } MHD_gnutls_free (sc->pkey); sc->pkey = NULL; sc->ncerts = 0; } /** * MHD__gnutls_certificate_free_cas - Used to free all the CAs from a MHD_gtls_cert_credentials_t structure * @sc: is an #MHD_gtls_cert_credentials_t structure. * * This function will delete all the CAs associated * with the given credentials. Servers that do not use * MHD_gtls_certificate_verify_peers2() may call this to * save some memory. * **/ void MHD__gnutls_certificate_free_cas (MHD_gtls_cert_credentials_t sc) { unsigned j; for (j = 0; j < sc->x509_ncas; j++) { MHD_gnutls_x509_crt_deinit (sc->x509_ca_list[j]); } sc->x509_ncas = 0; MHD_gnutls_free (sc->x509_ca_list); sc->x509_ca_list = NULL; } /** * MHD__gnutls_certificate_free_ca_names - Used to free all the CA names from a MHD_gtls_cert_credentials_t structure * @sc: is an #MHD_gtls_cert_credentials_t structure. * * This function will delete all the CA name in the * given credentials. Clients may call this to save some memory * since in client side the CA names are not used. * * CA names are used by servers to advertize the CAs they * support to clients. * **/ void MHD__gnutls_certificate_free_ca_names (MHD_gtls_cert_credentials_t sc) { MHD__gnutls_free_datum (&sc->x509_rdn_sequence); } /*- * MHD_gtls_certificate_get_rsa_params - Returns the RSA parameters pointer * @rsa_params: holds the RSA parameters or NULL. * @func: function to retrieve the parameters or NULL. * @session: The session. * * This function will return the rsa parameters pointer. 
* -*/ MHD_gtls_rsa_params_t MHD_gtls_certificate_get_rsa_params (MHD_gtls_rsa_params_t rsa_params, MHD_gnutls_params_function * func, MHD_gtls_session_t session) { MHD_gnutls_params_st params; int ret; if (session->internals.params.rsa_params) { return session->internals.params.rsa_params; } if (rsa_params) { session->internals.params.rsa_params = rsa_params; } else if (func) { ret = func (session, GNUTLS_PARAMS_RSA_EXPORT, &params); if (ret == 0 && params.type == GNUTLS_PARAMS_RSA_EXPORT) { session->internals.params.rsa_params = params.params.rsa_export; session->internals.params.free_rsa_params = params.deinit; } } return session->internals.params.rsa_params; } /** * MHD__gnutls_certificate_free_credentials - Used to free an allocated MHD_gtls_cert_credentials_t structure * @sc: is an #MHD_gtls_cert_credentials_t structure. * * This structure is complex enough to manipulate directly thus * this helper function is provided in order to free (deallocate) it. * * This function does not free any temporary parameters associated * with this structure (ie RSA and DH parameters are not freed by * this function). **/ void MHD__gnutls_certificate_free_credentials (MHD_gtls_cert_credentials_t sc) { MHD__gnutls_certificate_free_keys (sc); MHD__gnutls_certificate_free_cas (sc); MHD__gnutls_certificate_free_ca_names (sc); #ifdef KEYRING_HACK MHD__gnutls_free_datum (&sc->keyring); #endif MHD_gnutls_free (sc); } /** * MHD__gnutls_certificate_allocate_credentials - Used to allocate a MHD_gtls_cert_credentials_t structure * @res: is a pointer to an #MHD_gtls_cert_credentials_t structure. * * This structure is complex enough to manipulate directly thus this * helper function is provided in order to allocate it. * * Returns: %GNUTLS_E_SUCCESS on success, or an error code. 
**/ int MHD__gnutls_certificate_allocate_credentials (MHD_gtls_cert_credentials_t * res) { *res = MHD_gnutls_calloc (1, sizeof (MHD_gtls_cert_credentials_st)); if (*res == NULL) return GNUTLS_E_MEMORY_ERROR; (*res)->verify_bits = DEFAULT_VERIFY_BITS; (*res)->verify_depth = DEFAULT_VERIFY_DEPTH; return 0; } /* returns the KX algorithms that are supported by a * certificate. (Eg a certificate with RSA params, supports * GNUTLS_KX_RSA algorithm). * This function also uses the KeyUsage field of the certificate * extensions in order to disable unneded algorithms. */ int MHD_gtls_selected_cert_supported_kx (MHD_gtls_session_t session, enum MHD_GNUTLS_KeyExchangeAlgorithm **alg, int *alg_size) { enum MHD_GNUTLS_KeyExchangeAlgorithm kx; enum MHD_GNUTLS_PublicKeyAlgorithm pk; enum MHD_GNUTLS_KeyExchangeAlgorithm kxlist[MAX_ALGOS]; MHD_gnutls_cert *cert; int i; if (session->internals.selected_cert_list_length == 0) { *alg_size = 0; *alg = NULL; return 0; } cert = &session->internals.selected_cert_list[0]; i = 0; for (kx = 0; kx < MAX_ALGOS; kx++) { pk = MHD_gtls_map_pk_get_pk (kx); if (pk == cert->subject_pk_algorithm) { /* then check key usage */ if (MHD__gnutls_check_key_usage (cert, kx) == 0) { kxlist[i] = kx; i++; } } } if (i == 0) { MHD_gnutls_assert (); return GNUTLS_E_INVALID_REQUEST; } *alg = MHD_gnutls_calloc (1, sizeof (enum MHD_GNUTLS_KeyExchangeAlgorithm) * i); if (*alg == NULL) return GNUTLS_E_MEMORY_ERROR; *alg_size = i; memcpy (*alg, kxlist, i * sizeof (enum MHD_GNUTLS_KeyExchangeAlgorithm)); return 0; } int MHD_gtls_raw_cert_to_gcert (MHD_gnutls_cert * gcert, enum MHD_GNUTLS_CertificateType type, const MHD_gnutls_datum_t * raw_cert, int flags /* OR of ConvFlags */ ) { switch (type) { case MHD_GNUTLS_CRT_X509: return MHD_gtls_x509_raw_cert_to_gcert (gcert, raw_cert, flags); default: MHD_gnutls_assert (); return GNUTLS_E_INTERNAL_ERROR; } } /* This function will convert a der certificate to a format * (structure) that gnutls can understand and use. 
Actually the * important thing on this function is that it extracts the * certificate's (public key) parameters. * * The noext flag is used to complete the handshake even if the * extensions found in the certificate are unsupported and critical. * The critical extensions will be catched by the verification functions. */ int MHD_gtls_x509_raw_cert_to_gcert (MHD_gnutls_cert * gcert, const MHD_gnutls_datum_t * derCert, int flags /* OR of ConvFlags */ ) { int ret; MHD_gnutls_x509_crt_t cert; ret = MHD_gnutls_x509_crt_init (&cert); if (ret < 0) { MHD_gnutls_assert (); return ret; } ret = MHD_gnutls_x509_crt_import (cert, derCert, GNUTLS_X509_FMT_DER); if (ret < 0) { MHD_gnutls_assert (); MHD_gnutls_x509_crt_deinit (cert); return ret; } ret = MHD_gtls_x509_crt_to_gcert (gcert, cert, flags); MHD_gnutls_x509_crt_deinit (cert); return ret; } /* Like above but it accepts a parsed certificate instead. */ int MHD_gtls_x509_crt_to_gcert (MHD_gnutls_cert * gcert, MHD_gnutls_x509_crt_t cert, unsigned int flags) { int ret = 0; memset (gcert, 0, sizeof (MHD_gnutls_cert)); gcert->cert_type = MHD_GNUTLS_CRT_X509; if (!(flags & CERT_NO_COPY)) { #define SMALL_DER 512 opaque *der; size_t der_size = SMALL_DER; /* initially allocate a bogus size, just in case the certificate * fits in it. That way we minimize the DER encodings performed. 
*/ der = MHD_gnutls_malloc (SMALL_DER); if (der == NULL) { MHD_gnutls_assert (); return GNUTLS_E_MEMORY_ERROR; } ret = MHD_gnutls_x509_crt_export (cert, GNUTLS_X509_FMT_DER, der, &der_size); if (ret < 0 && ret != GNUTLS_E_SHORT_MEMORY_BUFFER) { MHD_gnutls_assert (); MHD_gnutls_free (der); return ret; } if (ret == GNUTLS_E_SHORT_MEMORY_BUFFER) { der = MHD_gnutls_realloc (der, der_size); if (der == NULL) { MHD_gnutls_assert (); return GNUTLS_E_MEMORY_ERROR; } ret = MHD_gnutls_x509_crt_export (cert, GNUTLS_X509_FMT_DER, der, &der_size); if (ret < 0) { MHD_gnutls_assert (); MHD_gnutls_free (der); return ret; } } gcert->raw.data = der; gcert->raw.size = der_size; } else /* now we have 0 or a bitwise or of things to decode */ flags ^= CERT_NO_COPY; if (flags & CERT_ONLY_EXTENSIONS || flags == 0) { MHD_gnutls_x509_crt_get_key_usage (cert, &gcert->key_usage, NULL); gcert->version = MHD_gnutls_x509_crt_get_version (cert); } gcert->subject_pk_algorithm = MHD_gnutls_x509_crt_get_pk_algorithm (cert, NULL); if (flags & CERT_ONLY_PUBKEY || flags == 0) { gcert->params_size = MAX_PUBLIC_PARAMS_SIZE; ret = MHD__gnutls_x509_crt_get_mpis (cert, gcert->params, &gcert->params_size); if (ret < 0) { MHD_gnutls_assert (); return ret; } } return 0; } void MHD_gtls_gcert_deinit (MHD_gnutls_cert * cert) { int i; if (cert == NULL) return; for (i = 0; i < cert->params_size; i++) { MHD_gtls_mpi_release (&cert->params[i]); } MHD__gnutls_free_datum (&cert->raw); }
gpl-2.0
networkosnet/linux
arch/alpha/kernel/sys_marvel.c
1092
11090
/* * linux/arch/alpha/kernel/sys_marvel.c * * Marvel / IO7 support */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/core_marvel.h> #include <asm/hwrpb.h> #include <asm/tlbflush.h> #include <asm/vga.h> #include "proto.h" #include "err_impl.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" #if NR_IRQS < MARVEL_NR_IRQS # error NR_IRQS < MARVEL_NR_IRQS !!! #endif /* * Interrupt handling. */ static void io7_device_interrupt(unsigned long vector) { unsigned int pid; unsigned int irq; /* * Vector is 0x800 + (interrupt) * * where (interrupt) is: * * ...16|15 14|13 4|3 0 * -----+-----+--------+--- * PE | 0 | irq | 0 * * where (irq) is * * 0x0800 - 0x0ff0 - 0x0800 + (LSI id << 4) * 0x1000 - 0x2ff0 - 0x1000 + (MSI_DAT<8:0> << 4) */ pid = vector >> 16; irq = ((vector & 0xffff) - 0x800) >> 4; irq += 16; /* offset for legacy */ irq &= MARVEL_IRQ_VEC_IRQ_MASK; /* not too many bits */ irq |= pid << MARVEL_IRQ_VEC_PE_SHIFT; /* merge the pid */ handle_irq(irq); } static volatile unsigned long * io7_get_irq_ctl(unsigned int irq, struct io7 **pio7) { volatile unsigned long *ctl; unsigned int pid; struct io7 *io7; pid = irq >> MARVEL_IRQ_VEC_PE_SHIFT; if (!(io7 = marvel_find_io7(pid))) { printk(KERN_ERR "%s for nonexistent io7 -- vec %x, pid %d\n", __func__, irq, pid); return NULL; } irq &= MARVEL_IRQ_VEC_IRQ_MASK; /* isolate the vector */ irq -= 16; /* subtract legacy bias */ if (irq >= 0x180) { printk(KERN_ERR "%s for invalid irq -- pid %d adjusted irq %x\n", __func__, pid, irq); return NULL; } ctl = &io7->csrs->PO7_LSI_CTL[irq & 0xff].csr; /* assume LSI */ if (irq >= 0x80) /* MSI */ ctl = &io7->csrs->PO7_MSI_CTL[((irq - 0x80) >> 5) & 0x0f].csr; if (pio7) *pio7 = io7; return ctl; } 
static void io7_enable_irq(struct irq_data *d) { volatile unsigned long *ctl; unsigned int irq = d->irq; struct io7 *io7; ctl = io7_get_irq_ctl(irq, &io7); if (!ctl || !io7) { printk(KERN_ERR "%s: get_ctl failed for irq %x\n", __func__, irq); return; } spin_lock(&io7->irq_lock); *ctl |= 1UL << 24; mb(); *ctl; spin_unlock(&io7->irq_lock); } static void io7_disable_irq(struct irq_data *d) { volatile unsigned long *ctl; unsigned int irq = d->irq; struct io7 *io7; ctl = io7_get_irq_ctl(irq, &io7); if (!ctl || !io7) { printk(KERN_ERR "%s: get_ctl failed for irq %x\n", __func__, irq); return; } spin_lock(&io7->irq_lock); *ctl &= ~(1UL << 24); mb(); *ctl; spin_unlock(&io7->irq_lock); } static void marvel_irq_noop(struct irq_data *d) { return; } static struct irq_chip marvel_legacy_irq_type = { .name = "LEGACY", .irq_mask = marvel_irq_noop, .irq_unmask = marvel_irq_noop, }; static struct irq_chip io7_lsi_irq_type = { .name = "LSI", .irq_unmask = io7_enable_irq, .irq_mask = io7_disable_irq, .irq_mask_ack = io7_disable_irq, }; static struct irq_chip io7_msi_irq_type = { .name = "MSI", .irq_unmask = io7_enable_irq, .irq_mask = io7_disable_irq, .irq_ack = marvel_irq_noop, }; static void io7_redirect_irq(struct io7 *io7, volatile unsigned long *csr, unsigned int where) { unsigned long val; val = *csr; val &= ~(0x1ffUL << 24); /* clear the target pid */ val |= ((unsigned long)where << 24); /* set the new target pid */ *csr = val; mb(); *csr; } static void io7_redirect_one_lsi(struct io7 *io7, unsigned int which, unsigned int where) { unsigned long val; /* * LSI_CTL has target PID @ 14 */ val = io7->csrs->PO7_LSI_CTL[which].csr; val &= ~(0x1ffUL << 14); /* clear the target pid */ val |= ((unsigned long)where << 14); /* set the new target pid */ io7->csrs->PO7_LSI_CTL[which].csr = val; mb(); io7->csrs->PO7_LSI_CTL[which].csr; } static void io7_redirect_one_msi(struct io7 *io7, unsigned int which, unsigned int where) { unsigned long val; /* * MSI_CTL has target PID @ 14 */ val = 
io7->csrs->PO7_MSI_CTL[which].csr; val &= ~(0x1ffUL << 14); /* clear the target pid */ val |= ((unsigned long)where << 14); /* set the new target pid */ io7->csrs->PO7_MSI_CTL[which].csr = val; mb(); io7->csrs->PO7_MSI_CTL[which].csr; } static void __init init_one_io7_lsi(struct io7 *io7, unsigned int which, unsigned int where) { /* * LSI_CTL has target PID @ 14 */ io7->csrs->PO7_LSI_CTL[which].csr = ((unsigned long)where << 14); mb(); io7->csrs->PO7_LSI_CTL[which].csr; } static void __init init_one_io7_msi(struct io7 *io7, unsigned int which, unsigned int where) { /* * MSI_CTL has target PID @ 14 */ io7->csrs->PO7_MSI_CTL[which].csr = ((unsigned long)where << 14); mb(); io7->csrs->PO7_MSI_CTL[which].csr; } static void __init init_io7_irqs(struct io7 *io7, struct irq_chip *lsi_ops, struct irq_chip *msi_ops) { long base = (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT) + 16; long i; printk("Initializing interrupts for IO7 at PE %u - base %lx\n", io7->pe, base); /* * Where should interrupts from this IO7 go? * * They really should be sent to the local CPU to avoid having to * traverse the mesh, but if it's not an SMP kernel, they have to * go to the boot CPU. Send them all to the boot CPU for now, * as each secondary starts, it can redirect it's local device * interrupts. */ printk(" Interrupts reported to CPU at PE %u\n", boot_cpuid); spin_lock(&io7->irq_lock); /* set up the error irqs */ io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, boot_cpuid); io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, boot_cpuid); io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, boot_cpuid); io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, boot_cpuid); io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, boot_cpuid); /* Set up the lsi irqs. */ for (i = 0; i < 128; ++i) { irq_set_chip_and_handler(base + i, lsi_ops, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } /* Disable the implemented irqs in hardware. 
*/ for (i = 0; i < 0x60; ++i) init_one_io7_lsi(io7, i, boot_cpuid); init_one_io7_lsi(io7, 0x74, boot_cpuid); init_one_io7_lsi(io7, 0x75, boot_cpuid); /* Set up the msi irqs. */ for (i = 128; i < (128 + 512); ++i) { irq_set_chip_and_handler(base + i, msi_ops, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } for (i = 0; i < 16; ++i) init_one_io7_msi(io7, i, boot_cpuid); spin_unlock(&io7->irq_lock); } static void __init marvel_init_irq(void) { int i; struct io7 *io7 = NULL; /* Reserve the legacy irqs. */ for (i = 0; i < 16; ++i) { irq_set_chip_and_handler(i, &marvel_legacy_irq_type, handle_level_irq); } /* Init the io7 irqs. */ for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; ) init_io7_irqs(io7, &io7_lsi_irq_type, &io7_msi_irq_type); } static int marvel_map_irq(const struct pci_dev *cdev, u8 slot, u8 pin) { struct pci_dev *dev = (struct pci_dev *)cdev; struct pci_controller *hose = dev->sysdata; struct io7_port *io7_port = hose->sysdata; struct io7 *io7 = io7_port->io7; int msi_loc, msi_data_off; u16 msg_ctl; u16 msg_dat; u8 intline; int irq; pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline); irq = intline; msi_loc = dev->msi_cap; msg_ctl = 0; if (msi_loc) pci_read_config_word(dev, msi_loc + PCI_MSI_FLAGS, &msg_ctl); if (msg_ctl & PCI_MSI_FLAGS_ENABLE) { msi_data_off = PCI_MSI_DATA_32; if (msg_ctl & PCI_MSI_FLAGS_64BIT) msi_data_off = PCI_MSI_DATA_64; pci_read_config_word(dev, msi_loc + msi_data_off, &msg_dat); irq = msg_dat & 0x1ff; /* we use msg_data<8:0> */ irq += 0x80; /* offset for lsi */ #if 1 printk("PCI:%d:%d:%d (hose %d) is using MSI\n", dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), hose->index); printk(" %d message(s) from 0x%04x\n", 1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4), msg_dat); printk(" reporting on %d IRQ(s) from %d (0x%x)\n", 1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4), (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT), (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT)); #endif #if 0 pci_write_config_word(dev, 
msi_loc + PCI_MSI_FLAGS, msg_ctl & ~PCI_MSI_FLAGS_ENABLE); pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline); irq = intline; printk(" forcing LSI interrupt on irq %d [0x%x]\n", irq, irq); #endif } irq += 16; /* offset for legacy */ irq |= io7->pe << MARVEL_IRQ_VEC_PE_SHIFT; /* merge the pid */ return irq; } static void __init marvel_init_pci(void) { struct io7 *io7; marvel_register_error_handlers(); /* Indicate that we trust the console to configure things properly */ pci_set_flags(PCI_PROBE_ONLY); common_init_pci(); locate_and_init_vga(NULL); /* Clear any io7 errors. */ for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; ) io7_clear_errors(io7); } static void __init marvel_init_rtc(void) { init_rtc_irq(); } static void marvel_smp_callin(void) { int cpuid = hard_smp_processor_id(); struct io7 *io7 = marvel_find_io7(cpuid); unsigned int i; if (!io7) return; /* * There is a local IO7 - redirect all of its interrupts here. */ printk("Redirecting IO7 interrupts to local CPU at PE %u\n", cpuid); /* Redirect the error IRQS here. */ io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, cpuid); io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, cpuid); io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, cpuid); io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, cpuid); io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, cpuid); /* Redirect the implemented LSIs here. */ for (i = 0; i < 0x60; ++i) io7_redirect_one_lsi(io7, i, cpuid); io7_redirect_one_lsi(io7, 0x74, cpuid); io7_redirect_one_lsi(io7, 0x75, cpuid); /* Redirect the MSIs here. 
*/ for (i = 0; i < 16; ++i) io7_redirect_one_msi(io7, i, cpuid); } /* * System Vectors */ struct alpha_machine_vector marvel_ev7_mv __initmv = { .vector_name = "MARVEL/EV7", DO_EV7_MMU, .rtc_port = 0x70, .rtc_boot_cpu_only = 1, DO_MARVEL_IO, .machine_check = marvel_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .pci_dac_offset = IO7_DAC_OFFSET, .nr_irqs = MARVEL_NR_IRQS, .device_interrupt = io7_device_interrupt, .agp_info = marvel_agp_info, .smp_callin = marvel_smp_callin, .init_arch = marvel_init_arch, .init_irq = marvel_init_irq, .init_rtc = marvel_init_rtc, .init_pci = marvel_init_pci, .kill_arch = marvel_kill_arch, .pci_map_irq = marvel_map_irq, .pci_swizzle = common_swizzle, .pa_to_nid = marvel_pa_to_nid, .cpuid_to_nid = marvel_cpuid_to_nid, .node_mem_start = marvel_node_mem_start, .node_mem_size = marvel_node_mem_size, }; ALIAS_MV(marvel_ev7)
gpl-2.0
ptmr3/android_kernel_lge_g3
arch/arm/mach-msm/pil-gss.c
1860
15473
/* * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/err.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/smp.h> #include <linux/miscdevice.h> #include <linux/reboot.h> #include <linux/interrupt.h> #include <mach/msm_xo.h> #include <mach/socinfo.h> #include <mach/msm_bus_board.h> #include <mach/msm_bus.h> #include <mach/subsystem_restart.h> #include <mach/ramdump.h> #include <mach/msm_smem.h> #include "peripheral-loader.h" #include "scm-pas.h" #include "smd_private.h" #define GSS_CSR_AHB_CLK_SEL 0x0 #define GSS_CSR_RESET 0x4 #define GSS_CSR_CLK_BLK_CONFIG 0x8 #define GSS_CSR_CLK_ENABLE 0xC #define GSS_CSR_BOOT_REMAP 0x14 #define GSS_CSR_POWER_UP_DOWN 0x18 #define GSS_CSR_CFG_HID 0x2C #define GSS_SLP_CLK_CTL 0x2C60 #define GSS_RESET 0x2C64 #define GSS_CLAMP_ENA 0x2C68 #define GSS_CXO_SRC_CTL 0x2C74 #define PLL5_STATUS 0x30F8 #define PLL_ENA_GSS 0x3480 #define PLL5_VOTE BIT(5) #define PLL_STATUS BIT(16) #define REMAP_ENABLE BIT(16) #define A5_POWER_STATUS BIT(4) #define A5_POWER_ENA BIT(0) #define NAV_POWER_ENA BIT(1) #define XO_CLK_BRANCH_ENA BIT(0) #define SLP_CLK_BRANCH_ENA BIT(4) #define A5_RESET BIT(0) struct gss_data { void __iomem *base; void __iomem *qgic2_base; void __iomem *cbase; struct clk *xo; struct pil_desc pil_desc; struct miscdevice misc_dev; struct subsys_device *subsys; struct subsys_desc subsys_desc; int crash_shutdown; 
int irq; void *subsys_handle; struct ramdump_device *ramdump_dev; struct ramdump_device *smem_ramdump_dev; }; static int make_gss_proxy_votes(struct pil_desc *pil) { int ret; struct gss_data *drv = dev_get_drvdata(pil->dev); ret = clk_prepare_enable(drv->xo); if (ret) { dev_err(pil->dev, "Failed to enable XO\n"); return ret; } return 0; } static void remove_gss_proxy_votes(struct pil_desc *pil) { struct gss_data *drv = dev_get_drvdata(pil->dev); clk_disable_unprepare(drv->xo); } static void gss_init(struct gss_data *drv) { void __iomem *base = drv->base; void __iomem *cbase = drv->cbase; /* Supply clocks to GSS. */ writel_relaxed(XO_CLK_BRANCH_ENA, cbase + GSS_CXO_SRC_CTL); writel_relaxed(SLP_CLK_BRANCH_ENA, cbase + GSS_SLP_CLK_CTL); /* Deassert GSS reset and clamps. */ writel_relaxed(0x0, cbase + GSS_RESET); writel_relaxed(0x0, cbase + GSS_CLAMP_ENA); mb(); /* * Configure clock source and dividers for 288MHz core, 144MHz AXI and * 72MHz AHB, all derived from the 288MHz PLL. */ writel_relaxed(0x341, base + GSS_CSR_CLK_BLK_CONFIG); writel_relaxed(0x1, base + GSS_CSR_AHB_CLK_SEL); /* Assert all GSS resets. */ writel_relaxed(0x7F, base + GSS_CSR_RESET); /* Enable all bus clocks and wait for resets to propagate. */ writel_relaxed(0x1F, base + GSS_CSR_CLK_ENABLE); mb(); udelay(1); /* Release subsystem from reset, but leave A5 in reset. */ writel_relaxed(A5_RESET, base + GSS_CSR_RESET); } static void cfg_qgic2_bus_access(void *data) { struct gss_data *drv = data; int i; /* * Apply a 8064 v1.0 workaround to configure QGIC bus access. * This must be done from Krait 0 to configure the Master ID * correctly. 
*/ writel_relaxed(0x2, drv->base + GSS_CSR_CFG_HID); for (i = 0; i <= 3; i++) readl_relaxed(drv->qgic2_base); } static int pil_gss_shutdown(struct pil_desc *pil) { struct gss_data *drv = dev_get_drvdata(pil->dev); void __iomem *base = drv->base; void __iomem *cbase = drv->cbase; u32 regval; int ret; ret = clk_prepare_enable(drv->xo); if (ret) { dev_err(pil->dev, "Failed to enable XO\n"); return ret; } /* Make sure bus port is halted. */ msm_bus_axi_porthalt(MSM_BUS_MASTER_GSS_NAV); /* * Vote PLL on in GSS's voting register and wait for it to enable. * The PLL must be enable to switch the GFMUX to a low-power source. */ writel_relaxed(PLL5_VOTE, cbase + PLL_ENA_GSS); while ((readl_relaxed(cbase + PLL5_STATUS) & PLL_STATUS) == 0) cpu_relax(); /* Perform one-time GSS initialization. */ gss_init(drv); /* Assert A5 reset. */ regval = readl_relaxed(base + GSS_CSR_RESET); regval |= A5_RESET; writel_relaxed(regval, base + GSS_CSR_RESET); /* Power down A5 and NAV. */ regval = readl_relaxed(base + GSS_CSR_POWER_UP_DOWN); regval &= ~(A5_POWER_ENA|NAV_POWER_ENA); writel_relaxed(regval, base + GSS_CSR_POWER_UP_DOWN); /* Select XO clock source and increase dividers to save power. */ regval = readl_relaxed(base + GSS_CSR_CLK_BLK_CONFIG); regval |= 0x3FF; writel_relaxed(regval, base + GSS_CSR_CLK_BLK_CONFIG); /* Disable bus clocks. */ writel_relaxed(0x1F, base + GSS_CSR_CLK_ENABLE); /* Clear GSS PLL votes. */ writel_relaxed(0, cbase + PLL_ENA_GSS); mb(); clk_disable_unprepare(drv->xo); return 0; } static int pil_gss_reset(struct pil_desc *pil) { struct gss_data *drv = dev_get_drvdata(pil->dev); void __iomem *base = drv->base; phys_addr_t start_addr = pil_get_entry_addr(pil); void __iomem *cbase = drv->cbase; int ret; /* Unhalt bus port. */ ret = msm_bus_axi_portunhalt(MSM_BUS_MASTER_GSS_NAV); if (ret) { dev_err(pil->dev, "Failed to unhalt bus port\n"); return ret; } /* Vote PLL on in GSS's voting register and wait for it to enable. 
*/ writel_relaxed(PLL5_VOTE, cbase + PLL_ENA_GSS); while ((readl_relaxed(cbase + PLL5_STATUS) & PLL_STATUS) == 0) cpu_relax(); /* Perform GSS initialization. */ gss_init(drv); /* Configure boot address and enable remap. */ writel_relaxed(REMAP_ENABLE | (start_addr >> 16), base + GSS_CSR_BOOT_REMAP); /* Power up A5 core. */ writel_relaxed(A5_POWER_ENA, base + GSS_CSR_POWER_UP_DOWN); while (!(readl_relaxed(base + GSS_CSR_POWER_UP_DOWN) & A5_POWER_STATUS)) cpu_relax(); if (cpu_is_apq8064() && ((SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 1) && (SOCINFO_VERSION_MINOR(socinfo_get_version()) == 0))) { ret = smp_call_function_single(0, cfg_qgic2_bus_access, drv, 1); if (ret) { pr_err("Failed to configure QGIC2 bus access\n"); pil_gss_shutdown(pil); return ret; } } /* Release A5 from reset. */ writel_relaxed(0x0, base + GSS_CSR_RESET); return 0; } static struct pil_reset_ops pil_gss_ops = { .auth_and_reset = pil_gss_reset, .shutdown = pil_gss_shutdown, .proxy_vote = make_gss_proxy_votes, .proxy_unvote = remove_gss_proxy_votes, }; static int pil_gss_init_image_trusted(struct pil_desc *pil, const u8 *metadata, size_t size) { return pas_init_image(PAS_GSS, metadata, size); } static int pil_gss_shutdown_trusted(struct pil_desc *pil) { struct gss_data *drv = dev_get_drvdata(pil->dev); int ret; /* * CXO is used in the secure shutdown code to configure the processor * for low power mode. 
*/ ret = clk_prepare_enable(drv->xo); if (ret) { dev_err(pil->dev, "Failed to enable XO\n"); return ret; } msm_bus_axi_porthalt(MSM_BUS_MASTER_GSS_NAV); ret = pas_shutdown(PAS_GSS); clk_disable_unprepare(drv->xo); return ret; } static int pil_gss_reset_trusted(struct pil_desc *pil) { int err; err = msm_bus_axi_portunhalt(MSM_BUS_MASTER_GSS_NAV); if (err) { dev_err(pil->dev, "Failed to unhalt bus port\n"); goto out; } err = pas_auth_and_reset(PAS_GSS); if (err) goto halt_port; return 0; halt_port: msm_bus_axi_porthalt(MSM_BUS_MASTER_GSS_NAV); out: return err; } static struct pil_reset_ops pil_gss_ops_trusted = { .init_image = pil_gss_init_image_trusted, .auth_and_reset = pil_gss_reset_trusted, .shutdown = pil_gss_shutdown_trusted, .proxy_vote = make_gss_proxy_votes, .proxy_unvote = remove_gss_proxy_votes, }; #define MAX_SSR_REASON_LEN 81U static void log_gss_sfr(void) { u32 size; char *smem_reason, reason[MAX_SSR_REASON_LEN]; smem_reason = smem_get_entry(SMEM_SSR_REASON_MSS0, &size); if (!smem_reason || !size) { pr_err("GSS subsystem failure reason: (unknown, smem_get_entry failed).\n"); return; } if (!smem_reason[0]) { pr_err("GSS subsystem failure reason: (unknown, init string found).\n"); return; } size = min(size, MAX_SSR_REASON_LEN-1); memcpy(reason, smem_reason, size); reason[size] = '\0'; pr_err("GSS subsystem failure reason: %s.\n", reason); smem_reason[0] = '\0'; wmb(); } static void restart_gss(struct gss_data *drv) { log_gss_sfr(); subsystem_restart_dev(drv->subsys); } static void smsm_state_cb(void *data, uint32_t old_state, uint32_t new_state) { struct gss_data *drv = data; /* Ignore if we're the one that set SMSM_RESET */ if (drv->crash_shutdown) return; if (new_state & SMSM_RESET) { pr_err("GSS SMSM state changed to SMSM_RESET.\n" "Probable err_fatal on the GSS. 
" "Calling subsystem restart...\n"); restart_gss(drv); } } static int gss_start(const struct subsys_desc *desc) { struct gss_data *drv; drv = container_of(desc, struct gss_data, subsys_desc); return pil_boot(&drv->pil_desc); } static void gss_stop(const struct subsys_desc *desc) { struct gss_data *drv; drv = container_of(desc, struct gss_data, subsys_desc); pil_shutdown(&drv->pil_desc); } static int gss_shutdown(const struct subsys_desc *desc) { struct gss_data *drv = container_of(desc, struct gss_data, subsys_desc); pil_shutdown(&drv->pil_desc); disable_irq_nosync(drv->irq); return 0; } static int gss_powerup(const struct subsys_desc *desc) { struct gss_data *drv = container_of(desc, struct gss_data, subsys_desc); pil_boot(&drv->pil_desc); enable_irq(drv->irq); return 0; } void gss_crash_shutdown(const struct subsys_desc *desc) { struct gss_data *drv = container_of(desc, struct gss_data, subsys_desc); drv->crash_shutdown = 1; smsm_reset_modem(SMSM_RESET); } static struct ramdump_segment smem_segments[] = { {0x80000000, 0x00200000}, }; static int gss_ramdump(int enable, const struct subsys_desc *desc) { int ret; struct gss_data *drv = container_of(desc, struct gss_data, subsys_desc); if (!enable) return 0; ret = pil_do_ramdump(&drv->pil_desc, drv->ramdump_dev); if (ret < 0) { pr_err("Unable to dump gss memory\n"); return ret; } ret = do_elf_ramdump(drv->smem_ramdump_dev, smem_segments, ARRAY_SIZE(smem_segments)); if (ret < 0) { pr_err("Unable to dump smem memory (rc = %d).\n", ret); return ret; } return 0; } static irqreturn_t gss_wdog_bite_irq(int irq, void *dev_id) { struct gss_data *drv = dev_id; pr_err("Watchdog bite received from GSS!\n"); restart_gss(drv); return IRQ_HANDLED; } static int gss_open(struct inode *inode, struct file *filp) { struct miscdevice *c = filp->private_data; struct gss_data *drv = container_of(c, struct gss_data, misc_dev); drv->subsys_handle = subsystem_get("gss"); if (IS_ERR(drv->subsys_handle)) { pr_debug("%s - subsystem_get returned 
error\n", __func__); return PTR_ERR(drv->subsys_handle); } return 0; } static int gss_release(struct inode *inode, struct file *filp) { struct miscdevice *c = filp->private_data; struct gss_data *drv = container_of(c, struct gss_data, misc_dev); subsystem_put(drv->subsys_handle); pr_debug("%s subsystem_put called on GSS\n", __func__); return 0; } const struct file_operations gss_file_ops = { .open = gss_open, .release = gss_release, .owner = THIS_MODULE, }; static int __devinit pil_gss_probe(struct platform_device *pdev) { struct gss_data *drv; struct resource *res; struct pil_desc *desc; int ret; drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL); if (!drv) return -ENOMEM; platform_set_drvdata(pdev, drv); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); drv->base = devm_request_and_ioremap(&pdev->dev, res); if (!drv->base) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 1); drv->qgic2_base = devm_request_and_ioremap(&pdev->dev, res); if (!drv->qgic2_base) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 2); if (!res) return -EINVAL; drv->cbase = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!drv->cbase) return -ENOMEM; drv->xo = devm_clk_get(&pdev->dev, "xo"); if (IS_ERR(drv->xo)) return PTR_ERR(drv->xo); drv->irq = platform_get_irq(pdev, 0); if (drv->irq < 0) return drv->irq; desc = &drv->pil_desc; desc->name = "gss"; desc->dev = &pdev->dev; desc->owner = THIS_MODULE; desc->proxy_timeout = 10000; if (pas_supported(PAS_GSS) > 0) { desc->ops = &pil_gss_ops_trusted; dev_info(&pdev->dev, "using secure boot\n"); } else { desc->ops = &pil_gss_ops; dev_info(&pdev->dev, "using non-secure boot\n"); } ret = pil_desc_init(desc); if (ret) return ret; /* Force into low power mode because hardware doesn't do this */ desc->ops->shutdown(desc); ret = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_RESET, smsm_state_cb, drv); if (ret < 0) dev_warn(&pdev->dev, "Unable to register SMSM callback\n"); drv->subsys_desc.name = 
"gss"; drv->subsys_desc.dev = &pdev->dev; drv->subsys_desc.owner = THIS_MODULE; drv->subsys_desc.start = gss_start; drv->subsys_desc.stop = gss_stop; drv->subsys_desc.shutdown = gss_shutdown; drv->subsys_desc.powerup = gss_powerup; drv->subsys_desc.ramdump = gss_ramdump; drv->subsys_desc.crash_shutdown = gss_crash_shutdown; drv->subsys = subsys_register(&drv->subsys_desc); if (IS_ERR(drv->subsys)) { ret = PTR_ERR(drv->subsys); goto err_subsys; } drv->misc_dev.minor = MISC_DYNAMIC_MINOR; drv->misc_dev.name = "gss"; drv->misc_dev.fops = &gss_file_ops; ret = misc_register(&drv->misc_dev); if (ret) goto err_misc; drv->ramdump_dev = create_ramdump_device("gss", &pdev->dev); if (!drv->ramdump_dev) { ret = -ENOMEM; goto err_ramdump; } drv->smem_ramdump_dev = create_ramdump_device("smem-gss", &pdev->dev); if (!drv->smem_ramdump_dev) { ret = -ENOMEM; goto err_smem; } scm_pas_init(MSM_BUS_MASTER_SPS); ret = devm_request_irq(&pdev->dev, drv->irq, gss_wdog_bite_irq, IRQF_TRIGGER_RISING, "gss_a5_wdog", drv); if (ret < 0) goto err; return 0; err: destroy_ramdump_device(drv->smem_ramdump_dev); err_smem: destroy_ramdump_device(drv->ramdump_dev); err_ramdump: misc_deregister(&drv->misc_dev); err_misc: subsys_unregister(drv->subsys); err_subsys: pil_desc_release(desc); return ret; } static int __devexit pil_gss_remove(struct platform_device *pdev) { struct gss_data *drv = platform_get_drvdata(pdev); destroy_ramdump_device(drv->smem_ramdump_dev); destroy_ramdump_device(drv->ramdump_dev); misc_deregister(&drv->misc_dev); subsys_unregister(drv->subsys); pil_desc_release(&drv->pil_desc); return 0; } static struct platform_driver pil_gss_driver = { .probe = pil_gss_probe, .remove = __devexit_p(pil_gss_remove), .driver = { .name = "pil_gss", .owner = THIS_MODULE, }, }; static int __init pil_gss_init(void) { return platform_driver_register(&pil_gss_driver); } module_init(pil_gss_init); static void __exit pil_gss_exit(void) { platform_driver_unregister(&pil_gss_driver); } 
module_exit(pil_gss_exit); MODULE_DESCRIPTION("Support for booting the GSS processor"); MODULE_LICENSE("GPL v2");
gpl-2.0
farchanrifai/Foxy
arch/arm/mach-msm/smd_rpc_sym.c
3652
6155
/* Autogenerated by mkrpcsym.pl. Do not edit */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/debugfs.h> #include <linux/module.h> struct sym { const char *str; }; const char *smd_rpc_syms[] = { "CB CM_FUSION", /*0x30010000*/ "CB DB", /*0x30000001*/ "CB SND", /*0x30000002*/ "CB WMS_FUSION", /*0x30010003*/ "CB PDSM", /*0x30000004*/ "CB MISC_MODEM_APIS", /*0x30000005*/ "CB MISC_APPS_APIS", /*0x30000006*/ "CB JOYST", /*0x30000007*/ "CB UNDEFINED", "CB UNDEFINED", "CB ADSPRTOSATOM", /*0x3000000A*/ "CB ADSPRTOSMTOA", /*0x3000000B*/ "CB I2C", /*0x3000000C*/ "CB TIME_REMOTE", /*0x3000000D*/ "CB NV_FUSION", /*0x3001000E*/ "CB CLKRGM_SEC_FUSION", /*0x3001000F*/ "CB RDEVMAP", /*0x30000010*/ "CB UNDEFINED", "CB PBMLIB_FUSION", /*0x30010012*/ "CB AUDMGR", /*0x30000013*/ "CB MVS", /*0x30000014*/ "CB DOG_KEEPALIVE", /*0x30000015*/ "CB GSDI_EXP_FUSION", /*0x30010016*/ "CB AUTH", /*0x30000017*/ "CB NVRUIMI", /*0x30000018*/ "CB MMGSDILIB_FUSION", /*0x30010019*/ "CB CHARGER", /*0x3000001A*/ "CB UIM_FUSION", /*0x3001001B*/ "CB UNDEFINED", "CB PDSM_ATL", /*0x3000001D*/ "CB FS_XMOUNT", /*0x3000001E*/ "CB SECUTIL", /*0x3000001F*/ "CB MCCMEID", /*0x30000020*/ "CB PM_STROBE_FLASH", /*0x30000021*/ "CB UNDEFINED", "CB SMD_BRIDGE", /*0x30000023*/ "CB SMD_PORT_MGR_FUSION", /*0x30010024*/ "CB BUS_PERF", /*0x30000025*/ "CB BUS_MON_REMOTE", /*0x30000026*/ "CB MC", /*0x30000027*/ "CB MCCAP", /*0x30000028*/ "CB MCCDMA", /*0x30000029*/ "CB MCCDS", /*0x3000002A*/ "CB MCCSCH", /*0x3000002B*/ "CB MCCSRID", /*0x3000002C*/ "CB SNM", /*0x3000002D*/ "CB MCCSYOBJ", /*0x3000002E*/ "CB UNDEFINED", "CB UNDEFINED", "CB DSRLP_APIS", /*0x30000031*/ "CB RLP_APIS", /*0x30000032*/ "CB DS_MP_SHIM_MODEM", /*0x30000033*/ "CB UNDEFINED", "CB DSHDR_MDM_APIS", /*0x30000035*/ "CB DS_MP_SHIM_APPS", /*0x30000036*/ "CB HDRMC_APIS", /*0x30000037*/ "CB UNDEFINED", "CB UNDEFINED", "CB PMAPP_OTG", /*0x3000003A*/ "CB DIAG", /*0x3000003B*/ "CB GSTK_EXP_FUSION", /*0x3001003C*/ "CB DSBC_MDM_APIS", /*0x3000003D*/ 
"CB HDRMRLP_MDM_APIS", /*0x3000003E*/ "CB UNDEFINED", "CB HDRMC_MRLP_APIS", /*0x30000040*/ "CB PDCOMM_APP_API", /*0x30000041*/ "CB DSAT_APIS", /*0x30000042*/ "CB RFM", /*0x30000043*/ "CB CMIPAPP", /*0x30000044*/ "CB DSMP_UMTS_MODEM_APIS", /*0x30000045*/ "CB UNDEFINED", "CB DSUCSDMPSHIM", /*0x30000047*/ "CB TIME_REMOTE_ATOM", /*0x30000048*/ "CB UNDEFINED", "CB SD", /*0x3000004A*/ "CB MMOC", /*0x3000004B*/ "CB UNDEFINED", "CB WLAN_CP_CM", /*0x3000004D*/ "CB FTM_WLAN", /*0x3000004E*/ "CB UNDEFINED", "CB CPRMINTERFACE", /*0x30000050*/ "CB DATA_ON_MODEM_MTOA_APIS", /*0x30000051*/ "CB UNDEFINED", "CB MISC_MODEM_APIS_NONWINMOB", /*0x30000053*/ "CB MISC_APPS_APIS_NONWINMOB", /*0x30000054*/ "CB PMEM_REMOTE", /*0x30000055*/ "CB TCXOMGR", /*0x30000056*/ "CB UNDEFINED", "CB BT", /*0x30000058*/ "CB PD_COMMS_API", /*0x30000059*/ "CB PD_COMMS_CLIENT_API", /*0x3000005A*/ "CB PDAPI", /*0x3000005B*/ "CB UNDEFINED", "CB TIME_REMOTE_MTOA", /*0x3000005D*/ "CB FTM_BT", /*0x3000005E*/ "CB DSUCSDAPPIF_APIS", /*0x3000005F*/ "CB PMAPP_GEN", /*0x30000060*/ "CB PM_LIB_FUSION", /*0x30010061*/ "CB UNDEFINED", "CB HSU_APP_APIS", /*0x30000063*/ "CB HSU_MDM_APIS", /*0x30000064*/ "CB ADIE_ADC_REMOTE_ATOM", /*0x30000065*/ "CB TLMM_REMOTE_ATOM", /*0x30000066*/ "CB UI_CALLCTRL", /*0x30000067*/ "CB UIUTILS", /*0x30000068*/ "CB PRL", /*0x30000069*/ "CB HW", /*0x3000006A*/ "CB OEM_RAPI_FUSION", /*0x3001006B*/ "CB WMSPM", /*0x3000006C*/ "CB BTPF", /*0x3000006D*/ "CB UNDEFINED", "CB USB_APPS_RPC", /*0x3000006F*/ "CB USB_MODEM_RPC", /*0x30000070*/ "CB ADC", /*0x30000071*/ "CB CAMERAREMOTED", /*0x30000072*/ "CB SECAPIREMOTED", /*0x30000073*/ "CB DSATAPI", /*0x30000074*/ "CB CLKCTL_RPC", /*0x30000075*/ "CB BREWAPPCOORD", /*0x30000076*/ "CB UNDEFINED", "CB WLAN_TRP_UTILS", /*0x30000078*/ "CB GPIO_RPC", /*0x30000079*/ "CB UNDEFINED", "CB UNDEFINED", "CB L1_DS", /*0x3000007C*/ "CB UNDEFINED", "CB UNDEFINED", "CB OSS_RRCASN_REMOTE", /*0x3000007F*/ "CB PMAPP_OTG_REMOTE", /*0x30000080*/ "CB PING_LTE_RPC", 
/*0x30010081*/ "CB UNDEFINED", "CB UNDEFINED", "CB UNDEFINED", "CB UNDEFINED", "CB UNDEFINED", "CB UKCC_IPC_APIS", /*0x30000087*/ "CB UNDEFINED", "CB VBATT_REMOTE", /*0x30000089*/ "CB MFPAL_FPS", /*0x3000008A*/ "CB DSUMTSPDPREG", /*0x3000008B*/ "CB LOC_API", /*0x3000008C*/ "CB UNDEFINED", "CB CMGAN", /*0x3000008E*/ "CB ISENSE", /*0x3000008F*/ "CB TIME_SECURE", /*0x30000090*/ "CB HS_REM", /*0x30000091*/ "CB ACDB", /*0x30000092*/ "CB NET", /*0x30000093*/ "CB LED", /*0x30000094*/ "CB DSPAE", /*0x30000095*/ "CB MFKAL", /*0x30000096*/ "CB UNDEFINED", "CB UNDEFINED", "CB UNDEFINED", "CB UNDEFINED", "CB TEST_API", /*0x3000009B*/ "CB REMOTEFS_SRV_API_FUSION", /*0x3001009C*/ "CB ISI_TRANSPORT", /*0x3000009D*/ "CB OEM_FTM", /*0x3000009E*/ "CB TOUCH_SCREEN_ADC", /*0x3000009F*/ "CB SMD_BRIDGE_APPS_FUSION", /*0x300100A0*/ "CB SMD_BRIDGE_MODEM_FUSION", /*0x300100A1*/ "CB DOG_KEEPALIVE_MODEM", /*0x300000A2*/ "CB VOEM_IF", /*0x300000A3*/ "CB NPA_REMOTE", /*0x300000A4*/ "CB MMGSDISESSIONLIB_FUSION", /*0x300100A5*/ "CB IFTA_REMOTE", /*0x300000A6*/ "CB REMOTE_STORAGE", /*0x300000A7*/ "CB MF_REMOTE_FILE", /*0x300000A8*/ "CB MFSC_CHUNKED_TRANSPORT", /*0x300000A9*/ "CB MFIM3", /*0x300000AA*/ "CB FM_WAN_API", /*0x300000AB*/ "CB WLAN_RAPI", /*0x300000AC*/ "CB DSMGR_APIS", /*0x300000AD*/ "CB CM_MM_FUSION", /*0x300100AE*/ }; static struct sym_tbl { const char **data; int size; } tbl = { smd_rpc_syms, ARRAY_SIZE(smd_rpc_syms)}; const char *smd_rpc_get_sym(uint32_t val) { int idx = val & 0xFFFF; if (idx < tbl.size) { if (val & 0x01000000) return tbl.data[idx]; else return tbl.data[idx] + 3; } return 0; } EXPORT_SYMBOL(smd_rpc_get_sym);
gpl-2.0
Vegaviet-DevTeam/kernel-stock-4.4.2-ef63slk
arch/cris/arch-v10/drivers/pcf8563.c
4420
8766
/* * PCF8563 RTC * * From Phillips' datasheet: * * The PCF8563 is a CMOS real-time clock/calendar optimized for low power * consumption. A programmable clock output, interrupt output and voltage * low detector are also provided. All address and data are transferred * serially via two-line bidirectional I2C-bus. Maximum bus speed is * 400 kbits/s. The built-in word address register is incremented * automatically after each written or read byte. * * Copyright (c) 2002-2007, Axis Communications AB * All rights reserved. * * Author: Tobias Anderberg <tobiasa@axis.com>. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/ioctl.h> #include <linux/delay.h> #include <linux/bcd.h> #include <linux/mutex.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/rtc.h> #include "i2c.h" #define PCF8563_MAJOR 121 /* Local major number. */ #define DEVICE_NAME "rtc" /* Name which is registered in /proc/devices. */ #define PCF8563_NAME "PCF8563" #define DRIVER_VERSION "$Revision: 1.24 $" /* I2C bus slave registers. */ #define RTC_I2C_READ 0xa3 #define RTC_I2C_WRITE 0xa2 /* Two simple wrapper macros, saves a few keystrokes. */ #define rtc_read(x) i2c_readreg(RTC_I2C_READ, x) #define rtc_write(x,y) i2c_writereg(RTC_I2C_WRITE, x, y) static DEFINE_MUTEX(pcf8563_mutex); static DEFINE_MUTEX(rtc_lock); /* Protect state etc */ static const unsigned char days_in_month[] = { 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; static long pcf8563_unlocked_ioctl(struct file *, unsigned int, unsigned long); /* Cache VL bit value read at driver init since writing the RTC_SECOND * register clears the VL status. 
*/ static int voltage_low; static const struct file_operations pcf8563_fops = { .owner = THIS_MODULE, .unlocked_ioctl = pcf8563_unlocked_ioctl, .llseek = noop_llseek, }; unsigned char pcf8563_readreg(int reg) { unsigned char res = rtc_read(reg); /* The PCF8563 does not return 0 for unimplemented bits. */ switch (reg) { case RTC_SECONDS: case RTC_MINUTES: res &= 0x7F; break; case RTC_HOURS: case RTC_DAY_OF_MONTH: res &= 0x3F; break; case RTC_WEEKDAY: res &= 0x07; break; case RTC_MONTH: res &= 0x1F; break; case RTC_CONTROL1: res &= 0xA8; break; case RTC_CONTROL2: res &= 0x1F; break; case RTC_CLOCKOUT_FREQ: case RTC_TIMER_CONTROL: res &= 0x83; break; } return res; } void pcf8563_writereg(int reg, unsigned char val) { rtc_write(reg, val); } void get_rtc_time(struct rtc_time *tm) { tm->tm_sec = rtc_read(RTC_SECONDS); tm->tm_min = rtc_read(RTC_MINUTES); tm->tm_hour = rtc_read(RTC_HOURS); tm->tm_mday = rtc_read(RTC_DAY_OF_MONTH); tm->tm_wday = rtc_read(RTC_WEEKDAY); tm->tm_mon = rtc_read(RTC_MONTH); tm->tm_year = rtc_read(RTC_YEAR); if (tm->tm_sec & 0x80) { printk(KERN_ERR "%s: RTC Voltage Low - reliable date/time " "information is no longer guaranteed!\n", PCF8563_NAME); } tm->tm_year = bcd2bin(tm->tm_year) + ((tm->tm_mon & 0x80) ? 100 : 0); tm->tm_sec &= 0x7F; tm->tm_min &= 0x7F; tm->tm_hour &= 0x3F; tm->tm_mday &= 0x3F; tm->tm_wday &= 0x07; /* Not coded in BCD. */ tm->tm_mon &= 0x1F; tm->tm_sec = bcd2bin(tm->tm_sec); tm->tm_min = bcd2bin(tm->tm_min); tm->tm_hour = bcd2bin(tm->tm_hour); tm->tm_mday = bcd2bin(tm->tm_mday); tm->tm_mon = bcd2bin(tm->tm_mon); tm->tm_mon--; /* Month is 1..12 in RTC but 0..11 in linux */ } int __init pcf8563_init(void) { static int res; static int first = 1; if (!first) return res; first = 0; /* Initiate the i2c protocol. */ res = i2c_init(); if (res < 0) { printk(KERN_CRIT "pcf8563_init: Failed to init i2c.\n"); return res; } /* * First of all we need to reset the chip. 
This is done by * clearing control1, control2 and clk freq and resetting * all alarms. */ if (rtc_write(RTC_CONTROL1, 0x00) < 0) goto err; if (rtc_write(RTC_CONTROL2, 0x00) < 0) goto err; if (rtc_write(RTC_CLOCKOUT_FREQ, 0x00) < 0) goto err; if (rtc_write(RTC_TIMER_CONTROL, 0x03) < 0) goto err; /* Reset the alarms. */ if (rtc_write(RTC_MINUTE_ALARM, 0x80) < 0) goto err; if (rtc_write(RTC_HOUR_ALARM, 0x80) < 0) goto err; if (rtc_write(RTC_DAY_ALARM, 0x80) < 0) goto err; if (rtc_write(RTC_WEEKDAY_ALARM, 0x80) < 0) goto err; /* Check for low voltage, and warn about it. */ if (rtc_read(RTC_SECONDS) & 0x80) { voltage_low = 1; printk(KERN_WARNING "%s: RTC Voltage Low - reliable " "date/time information is no longer guaranteed!\n", PCF8563_NAME); } return res; err: printk(KERN_INFO "%s: Error initializing chip.\n", PCF8563_NAME); res = -1; return res; } void __exit pcf8563_exit(void) { unregister_chrdev(PCF8563_MAJOR, DEVICE_NAME); } /* * ioctl calls for this driver. Why return -ENOTTY upon error? Because * POSIX says so! */ static int pcf8563_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { /* Some sanity checks. */ if (_IOC_TYPE(cmd) != RTC_MAGIC) return -ENOTTY; if (_IOC_NR(cmd) > RTC_MAX_IOCTL) return -ENOTTY; switch (cmd) { case RTC_RD_TIME: { struct rtc_time tm; mutex_lock(&rtc_lock); memset(&tm, 0, sizeof tm); get_rtc_time(&tm); if (copy_to_user((struct rtc_time *) arg, &tm, sizeof tm)) { mutex_unlock(&rtc_lock); return -EFAULT; } mutex_unlock(&rtc_lock); return 0; } case RTC_SET_TIME: { int leap; int year; int century; struct rtc_time tm; memset(&tm, 0, sizeof tm); if (!capable(CAP_SYS_TIME)) return -EPERM; if (copy_from_user(&tm, (struct rtc_time *) arg, sizeof tm)) return -EFAULT; /* Convert from struct tm to struct rtc_time. */ tm.tm_year += 1900; tm.tm_mon += 1; /* * Check if tm.tm_year is a leap year. A year is a leap * year if it is divisible by 4 but not 100, except * that years divisible by 400 _are_ leap years. 
*/ year = tm.tm_year; leap = (tm.tm_mon == 2) && ((year % 4 == 0 && year % 100 != 0) || year % 400 == 0); /* Perform some sanity checks. */ if ((tm.tm_year < 1970) || (tm.tm_mon > 12) || (tm.tm_mday == 0) || (tm.tm_mday > days_in_month[tm.tm_mon] + leap) || (tm.tm_wday >= 7) || (tm.tm_hour >= 24) || (tm.tm_min >= 60) || (tm.tm_sec >= 60)) return -EINVAL; century = (tm.tm_year >= 2000) ? 0x80 : 0; tm.tm_year = tm.tm_year % 100; tm.tm_year = bin2bcd(tm.tm_year); tm.tm_mon = bin2bcd(tm.tm_mon); tm.tm_mday = bin2bcd(tm.tm_mday); tm.tm_hour = bin2bcd(tm.tm_hour); tm.tm_min = bin2bcd(tm.tm_min); tm.tm_sec = bin2bcd(tm.tm_sec); tm.tm_mon |= century; mutex_lock(&rtc_lock); rtc_write(RTC_YEAR, tm.tm_year); rtc_write(RTC_MONTH, tm.tm_mon); rtc_write(RTC_WEEKDAY, tm.tm_wday); /* Not coded in BCD. */ rtc_write(RTC_DAY_OF_MONTH, tm.tm_mday); rtc_write(RTC_HOURS, tm.tm_hour); rtc_write(RTC_MINUTES, tm.tm_min); rtc_write(RTC_SECONDS, tm.tm_sec); mutex_unlock(&rtc_lock); return 0; } case RTC_VL_READ: if (voltage_low) { printk(KERN_ERR "%s: RTC Voltage Low - " "reliable date/time information is no " "longer guaranteed!\n", PCF8563_NAME); } if (copy_to_user((int *) arg, &voltage_low, sizeof(int))) return -EFAULT; return 0; case RTC_VL_CLR: { /* Clear the VL bit in the seconds register in case * the time has not been set already (which would * have cleared it). This does not really matter * because of the cached voltage_low value but do it * anyway for consistency. */ int ret = rtc_read(RTC_SECONDS); rtc_write(RTC_SECONDS, (ret & 0x7F)); /* Clear the cached value. 
*/ voltage_low = 0; return 0; } default: return -ENOTTY; } return 0; } static long pcf8563_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int ret; mutex_lock(&pcf8563_mutex); ret = pcf8563_ioctl(filp, cmd, arg); mutex_unlock(&pcf8563_mutex); return ret; } static int __init pcf8563_register(void) { if (pcf8563_init() < 0) { printk(KERN_INFO "%s: Unable to initialize Real-Time Clock " "Driver, %s\n", PCF8563_NAME, DRIVER_VERSION); return -1; } if (register_chrdev(PCF8563_MAJOR, DEVICE_NAME, &pcf8563_fops) < 0) { printk(KERN_INFO "%s: Unable to get major number %d for RTC device.\n", PCF8563_NAME, PCF8563_MAJOR); return -1; } printk(KERN_INFO "%s Real-Time Clock Driver, %s\n", PCF8563_NAME, DRIVER_VERSION); /* Check for low voltage, and warn about it. */ if (voltage_low) { printk(KERN_WARNING "%s: RTC Voltage Low - reliable date/time " "information is no longer guaranteed!\n", PCF8563_NAME); } return 0; } module_init(pcf8563_register); module_exit(pcf8563_exit);
gpl-2.0
kamma/android_kernel_huawei_angler
drivers/watchdog/iTCO_vendor_support.c
4932
11089
/*
 * intel TCO vendor specific watchdog driver support
 *
 * (c) Copyright 2006-2009 Wim Van Sebroeck <wim@iguana.be>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Neither Wim Van Sebroeck nor Iguana vzw. admit liability nor
 * provide warranty for any of this software. This material is
 * provided "AS-IS" and at no charge.
 */

/*
 * Includes, defines, variables, module parameters, ...
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* Module and version information */
#define DRV_NAME	"iTCO_vendor_support"
#define DRV_VERSION	"1.04"

/* Includes */
#include <linux/module.h>		/* For module specific items */
#include <linux/moduleparam.h>		/* For new moduleparam's */
#include <linux/types.h>		/* For standard types (like size_t) */
#include <linux/errno.h>		/* For the -ENODEV/... values */
#include <linux/kernel.h>		/* For printk/panic/... */
#include <linux/init.h>			/* For __init/__exit/... */
#include <linux/ioport.h>		/* For io-port access */
#include <linux/io.h>			/* For inb/outb/... */

#include "iTCO_vendor.h"

/* List of vendor support modes */
/* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */
#define SUPERMICRO_OLD_BOARD	1
/* SuperMicro Pentium 4 / Xeon 4 / EMT64T Era Systems */
#define SUPERMICRO_NEW_BOARD	2
/* Broken BIOS */
#define BROKEN_BIOS		911

/* Selected support mode; 0 (none) unless set via the module parameter. */
static int vendorsupport;
module_param(vendorsupport, int, 0);
MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default="
		"0 (none), 1=SuperMicro Pent3, 2=SuperMicro Pent4+, "
		"911=Broken SMI BIOS");

/*
 * Vendor Specific Support
 */

/*
 * Vendor Support: 1
 * Board: Super Micro Computer Inc. 370SSE+-OEM1/P3TSSE
 * iTCO chipset: ICH2
 *
 * Code contributed by: R. Seretny <lkpatches@paypc.com>
 * Documentation obtained by R. Seretny from SuperMicro Technical Support
 *
 * To enable Watchdog function:
 *   BIOS setup -> Power -> TCO Logic SMI Enable -> Within5Minutes
 * This setting enables SMI to clear the watchdog expired flag.
 * If BIOS or CPU fail which may cause SMI hang, then system will
 * reboot. When application starts to use watchdog function,
 * application has to take over the control from SMI.
 *
 * For P3TSSE, J36 jumper needs to be removed to enable the Watchdog
 * function.
 *
 * Note: The system will reboot when Expire Flag is set TWICE.
 * So, if the watchdog timer is 20 seconds, then the maximum hang
 * time is about 40 seconds, and the minimum hang time is about
 * 20.6 seconds.
 */

static void supermicro_old_pre_start(struct resource *smires)
{
	unsigned long val32;

	/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
	val32 = inl(smires->start);
	val32 &= 0xffffdfff;	/* Turn off SMI clearing watchdog */
	outl(val32, smires->start);	/* Needed to activate watchdog */
}

static void supermicro_old_pre_stop(struct resource *smires)
{
	unsigned long val32;

	/* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
	val32 = inl(smires->start);
	val32 |= 0x00002000;	/* Turn on SMI clearing watchdog */
	outl(val32, smires->start);	/* Needed to deactivate watchdog */
}

/*
 * Vendor Support: 2
 * Board: Super Micro Computer Inc. P4SBx, P4DPx
 * iTCO chipset: ICH4
 *
 * Code contributed by: R. Seretny <lkpatches@paypc.com>
 * Documentation obtained by R. Seretny from SuperMicro Technical Support
 *
 * To enable Watchdog function:
 *  1. BIOS
 *     For P4SBx:
 *       BIOS setup -> Advanced -> Integrated Peripherals -> Watch Dog Feature
 *     For P4DPx:
 *       BIOS setup -> Advanced -> I/O Device Configuration -> Watch Dog
 *     This setting enables or disables Watchdog function. When enabled, the
 *     default watchdog timer is set to be 5 minutes (about 4m35s). It is
 *     enough to load and run the OS. The application (service or driver) has
 *     to take over the control once OS is running up and before watchdog
 *     expires.
 *
 *  2. JUMPER
 *     For P4SBx: JP39
 *     For P4DPx: JP37
 *     This jumper is used for safety. Closed is enabled. This jumper
 *     prevents user enables watchdog in BIOS by accident.
 *
 * To enable Watch Dog function, both BIOS and JUMPER must be enabled.
 *
 * The documentation lists motherboards P4SBx and P4DPx series as of
 * 20-March-2002. However, this code works flawlessly with much newer
 * motherboards, such as my X6DHR-8G2 (SuperServer 6014H-82).
 *
 * The original iTCO driver as written does not actually reset the
 * watchdog timer on these machines, as a result they reboot after five
 * minutes.
 *
 * NOTE: You may leave the Watchdog function disabled in the SuperMicro
 * BIOS to avoid a "boot-race"... This driver will enable watchdog
 * functionality even if it's disabled in the BIOS once the /dev/watchdog
 * file is opened.
 */

/* I/O Port's */
#define SM_REGINDEX	0x2e	/* SuperMicro ICH4+ Register Index */
#define SM_DATAIO	0x2f	/* SuperMicro ICH4+ Register Data I/O */

/* Control Register's */
#define SM_CTLPAGESW	0x07	/* SuperMicro ICH4+ Control Page Switch */
#define SM_CTLPAGE	0x08	/* SuperMicro ICH4+ Control Page Num */

#define SM_WATCHENABLE	0x30	/* Watchdog enable: Bit 0: 0=off, 1=on */

#define SM_WATCHPAGE	0x87	/* Watchdog unlock control page */

#define SM_ENDWATCH	0xAA	/* Watchdog lock control page */

#define SM_COUNTMODE	0xf5	/* Watchdog count mode select */
				/* (Bit 3: 0 = seconds, 1 = minutes */

#define SM_WATCHTIMER	0xf6	/* 8-bits, Watchdog timer counter (RW) */

#define SM_RESETCONTROL	0xf7	/* Watchdog reset control */
				/* Bit 6: timer is reset by kbd interrupt */
				/* Bit 7: timer is reset by mouse interrupt */

/* Magic unlock sequence: the Super-I/O chip exposes the watchdog page
 * only after this exact write sequence to the index port. */
static void supermicro_new_unlock_watchdog(void)
{
	/* Write 0x87 to port 0x2e twice */
	outb(SM_WATCHPAGE, SM_REGINDEX);
	outb(SM_WATCHPAGE, SM_REGINDEX);
	/* Switch to watchdog control page */
	outb(SM_CTLPAGESW, SM_REGINDEX);
	outb(SM_CTLPAGE, SM_DATAIO);
}

/* Re-lock the Super-I/O watchdog control page. */
static void supermicro_new_lock_watchdog(void)
{
	outb(SM_ENDWATCH, SM_REGINDEX);
}

static void supermicro_new_pre_start(unsigned int heartbeat)
{
	unsigned int val;

	supermicro_new_unlock_watchdog();

	/* Watchdog timer setting needs to be in seconds*/
	outb(SM_COUNTMODE, SM_REGINDEX);
	val = inb(SM_DATAIO);
	val &= 0xF7;
	outb(val, SM_DATAIO);

	/* Write heartbeat interval to WDOG */
	outb(SM_WATCHTIMER, SM_REGINDEX);
	outb((heartbeat & 255), SM_DATAIO);

	/* Make sure keyboard/mouse interrupts don't interfere */
	outb(SM_RESETCONTROL, SM_REGINDEX);
	val = inb(SM_DATAIO);
	val &= 0x3f;
	outb(val, SM_DATAIO);

	/* enable watchdog by setting bit 0 of Watchdog Enable to 1 */
	outb(SM_WATCHENABLE, SM_REGINDEX);
	val = inb(SM_DATAIO);
	val |= 0x01;
	outb(val, SM_DATAIO);

	supermicro_new_lock_watchdog();
}

static void supermicro_new_pre_stop(void)
{
	unsigned int val;

	supermicro_new_unlock_watchdog();

	/* disable watchdog by setting bit 0 of Watchdog Enable to 0 */
	outb(SM_WATCHENABLE, SM_REGINDEX);
	val = inb(SM_DATAIO);
	val &= 0xFE;
	outb(val, SM_DATAIO);

	supermicro_new_lock_watchdog();
}

static void supermicro_new_pre_set_heartbeat(unsigned int heartbeat)
{
	supermicro_new_unlock_watchdog();

	/* reset watchdog timeout to heartbeat value */
	outb(SM_WATCHTIMER, SM_REGINDEX);
	outb((heartbeat & 255), SM_DATAIO);

	supermicro_new_lock_watchdog();
}

/*
 * Vendor Support: 911
 * Board: Some Intel ICHx based motherboards
 * iTCO chipset: ICH7+
 *
 * Some Intel motherboards have a broken BIOS implementation: i.e.
 * the SMI handler clear's the TIMEOUT bit in the TC01_STS register
 * and does not reload the time. Thus the TCO watchdog does not reboot
 * the system.
 *
 * These are the conclusions of Andriy Gapon <avg@icyb.net.ua> after
 * debugging: the SMI handler is quite simple - it tests value in
 * TCO1_CNT against 0x800, i.e. checks TCO_TMR_HLT. If the bit is set
 * the handler goes into an infinite loop, apparently to allow the
 * second timeout and reboot. Otherwise it simply clears TIMEOUT bit
 * in TCO1_STS and that's it.
 * So the logic seems to be reversed, because it is hard to see how
 * TIMEOUT can get set to 1 and SMI generated when TCO_TMR_HLT is set
 * (other than a transitional effect).
 *
 * The only fix found to get the motherboard(s) to reboot is to put
 * the glb_smi_en bit to 0. This is a dirty hack that bypasses the
 * broken code by disabling Global SMI.
 *
 * WARNING: globally disabling SMI could possibly lead to dramatic
 * problems, especially on laptops! I.e. various ACPI things where
 * SMI is used for communication between OS and firmware.
 *
 * Don't use this fix if you don't need to!!!
 */

static void broken_bios_start(struct resource *smires)
{
	unsigned long val32;

	val32 = inl(smires->start);
	/* Bit 13: TCO_EN     -> 0 = Disables TCO logic generating an SMI#
	   Bit  0: GBL_SMI_EN -> 0 = No SMI# will be generated by ICH. */
	val32 &= 0xffffdffe;
	outl(val32, smires->start);
}

static void broken_bios_stop(struct resource *smires)
{
	unsigned long val32;

	val32 = inl(smires->start);
	/* Bit 13: TCO_EN     -> 1 = Enables TCO logic generating an SMI#
	   Bit  0: GBL_SMI_EN -> 1 = Turn global SMI on again. */
	val32 |= 0x00002001;
	outl(val32, smires->start);
}

/*
 * Generic Support Functions
 */

/* Dispatch the vendor-specific pre-start hook based on vendorsupport. */
void iTCO_vendor_pre_start(struct resource *smires,
			   unsigned int heartbeat)
{
	switch (vendorsupport) {
	case SUPERMICRO_OLD_BOARD:
		supermicro_old_pre_start(smires);
		break;
	case SUPERMICRO_NEW_BOARD:
		supermicro_new_pre_start(heartbeat);
		break;
	case BROKEN_BIOS:
		broken_bios_start(smires);
		break;
	}
}
EXPORT_SYMBOL(iTCO_vendor_pre_start);

/* Dispatch the vendor-specific pre-stop hook based on vendorsupport. */
void iTCO_vendor_pre_stop(struct resource *smires)
{
	switch (vendorsupport) {
	case SUPERMICRO_OLD_BOARD:
		supermicro_old_pre_stop(smires);
		break;
	case SUPERMICRO_NEW_BOARD:
		supermicro_new_pre_stop();
		break;
	case BROKEN_BIOS:
		broken_bios_stop(smires);
		break;
	}
}
EXPORT_SYMBOL(iTCO_vendor_pre_stop);

/* Only the new SuperMicro boards need a vendor hook on keepalive. */
void iTCO_vendor_pre_keepalive(struct resource *smires, unsigned int heartbeat)
{
	if (vendorsupport == SUPERMICRO_NEW_BOARD)
		supermicro_new_pre_set_heartbeat(heartbeat);
}
EXPORT_SYMBOL(iTCO_vendor_pre_keepalive);

/* Only the new SuperMicro boards need a vendor hook on timeout change. */
void iTCO_vendor_pre_set_heartbeat(unsigned int heartbeat)
{
	if (vendorsupport == SUPERMICRO_NEW_BOARD)
		supermicro_new_pre_set_heartbeat(heartbeat);
}
EXPORT_SYMBOL(iTCO_vendor_pre_set_heartbeat);

/* Old SuperMicro boards must not have NO_REBOOT checked by the core. */
int iTCO_vendor_check_noreboot_on(void)
{
	switch (vendorsupport) {
	case SUPERMICRO_OLD_BOARD:
		return 0;
	default:
		return 1;
	}
}
EXPORT_SYMBOL(iTCO_vendor_check_noreboot_on);

static int __init iTCO_vendor_init_module(void)
{
	pr_info("vendor-support=%d\n", vendorsupport);
	return 0;
}

static void __exit iTCO_vendor_exit_module(void)
{
	pr_info("Module Unloaded\n");
}

module_init(iTCO_vendor_init_module);
module_exit(iTCO_vendor_exit_module);

MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>, "
		"R. Seretny <lkpatches@paypc.com>");
MODULE_DESCRIPTION("Intel TCO Vendor Specific WatchDog Timer Driver Support");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
gpl-2.0
MoKee/android_kernel_asus_grouper
arch/ia64/mm/hugetlbpage.c
7748
4885
/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
EXPORT_SYMBOL(hpage_shift);

/*
 * Allocate (walking pgd->pud->pmd, allocating intermediate levels as
 * needed) the pte for a huge page at @addr.  The huge-region address is
 * first scaled to a normal page address via htlbpage_to_page().
 * Returns NULL if an intermediate allocation fails.
 */
pte_t *
huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	pud = pud_alloc(mm, pgd, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, NULL, pmd, taddr);
	}
	return pte;
}

/*
 * Look up (without allocating) the pte for a huge page at @addr.
 * Returns NULL if any intermediate level is not present.
 */
pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}

	return pte;
}

/* IA-64 does not share huge-page pmds; always report "not unshared". */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * Don't actually need to do any preparation, but need to make sure
 * the address is in the right region.
 */
int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;

	return 0;
}

/*
 * Resolve @addr in the huge-page region to its struct page.
 * Returns -EINVAL for addresses outside RGN_HPAGE and NULL when no
 * huge pte is mapped there.
 */
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	/* Step to the subpage corresponding to addr's offset in the huge page. */
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}

/* Huge pages are mapped at pte level on IA-64, never at pmd/pud level. */
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
	return NULL;
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */

	addr = htlbpage_to_page(addr);
	end  = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}

/*
 * Find an unmapped, HPAGE_SIZE-aligned range of @len bytes in the
 * huge-page region (linear first-fit scan of the vma list).
 * Returns the address or -ENOMEM/-EINVAL.
 */
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;
	else
		addr = ALIGN(addr, HPAGE_SIZE);
	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
			return -ENOMEM;
		if (!vmm || (addr + len) <= vmm->vm_start)
			return addr;
		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
	}
}

/*
 * Parse the "hugepagesz=" boot parameter: validate the requested size
 * against the sizes supported by PAL and reprogram the huge region's
 * region register with the new page shift.
 */
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * shouldn't happen, but just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * boot cpu already executed ia64_mmu_init, and has HPAGE_SHIFT_DEFAULT
	 * override here with new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 0;
}
early_param("hugepagesz", hugetlb_setup_sz);
gpl-2.0
nbars/Custom-Kernel-SM-P600
kernel-src/drivers/net/wan/sdla.c
7748
38854
/* * SDLA An implementation of a driver for the Sangoma S502/S508 series * multi-protocol PC interface card. Initial offering is with * the DLCI driver, providing Frame Relay support for linux. * * Global definitions for the Frame relay interface. * * Version: @(#)sdla.c 0.30 12 Sep 1996 * * Credits: Sangoma Technologies, for the use of 2 cards for an extended * period of time. * David Mandelstam <dm@sangoma.com> for getting me started on * this project, and incentive to complete it. * Gene Kozen <74604.152@compuserve.com> for providing me with * important information about the cards. * * Author: Mike McLagan <mike.mclagan@linux.org> * * Changes: * 0.15 Mike McLagan Improved error handling, packet dropping * 0.20 Mike McLagan New transmit/receive flags for config * If in FR mode, don't accept packets from * non DLCI devices. * 0.25 Mike McLagan Fixed problem with rejecting packets * from non DLCI devices. * 0.30 Mike McLagan Fixed kernel panic when used with modified * ifconfig * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/if_frad.h> #include <linux/sdla.h> #include <linux/bitops.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/uaccess.h> static const char* version = "SDLA driver v0.30, 12 Sep 1996, mike.mclagan@linux.org"; static unsigned int valid_port[] = { 0x250, 0x270, 0x280, 0x300, 0x350, 0x360, 0x380, 0x390}; static unsigned int valid_mem[] = { 0xA0000, 0xA2000, 0xA4000, 0xA6000, 0xA8000, 0xAA000, 0xAC000, 0xAE000, 0xB0000, 0xB2000, 0xB4000, 0xB6000, 0xB8000, 0xBA000, 0xBC000, 0xBE000, 0xC0000, 0xC2000, 0xC4000, 0xC6000, 0xC8000, 0xCA000, 0xCC000, 0xCE000, 0xD0000, 0xD2000, 0xD4000, 0xD6000, 0xD8000, 0xDA000, 0xDC000, 0xDE000, 0xE0000, 0xE2000, 0xE4000, 0xE6000, 0xE8000, 0xEA000, 0xEC000, 0xEE000}; static DEFINE_SPINLOCK(sdla_lock); /********************************************************* * * these are the core routines that access the card itself * *********************************************************/ #define SDLA_WINDOW(dev,addr) outb((((addr) >> 13) & 0x1F), (dev)->base_addr + SDLA_REG_Z80_WINDOW) static void __sdla_read(struct net_device *dev, int addr, void *buf, short len) { char *temp; const void *base; int offset, bytes; temp = buf; while(len) { offset = addr & SDLA_ADDR_MASK; bytes = offset + len > SDLA_WINDOW_SIZE ? 
SDLA_WINDOW_SIZE - offset : len; base = (const void *) (dev->mem_start + offset); SDLA_WINDOW(dev, addr); memcpy(temp, base, bytes); addr += bytes; temp += bytes; len -= bytes; } } static void sdla_read(struct net_device *dev, int addr, void *buf, short len) { unsigned long flags; spin_lock_irqsave(&sdla_lock, flags); __sdla_read(dev, addr, buf, len); spin_unlock_irqrestore(&sdla_lock, flags); } static void __sdla_write(struct net_device *dev, int addr, const void *buf, short len) { const char *temp; void *base; int offset, bytes; temp = buf; while(len) { offset = addr & SDLA_ADDR_MASK; bytes = offset + len > SDLA_WINDOW_SIZE ? SDLA_WINDOW_SIZE - offset : len; base = (void *) (dev->mem_start + offset); SDLA_WINDOW(dev, addr); memcpy(base, temp, bytes); addr += bytes; temp += bytes; len -= bytes; } } static void sdla_write(struct net_device *dev, int addr, const void *buf, short len) { unsigned long flags; spin_lock_irqsave(&sdla_lock, flags); __sdla_write(dev, addr, buf, len); spin_unlock_irqrestore(&sdla_lock, flags); } static void sdla_clear(struct net_device *dev) { unsigned long flags; char *base; int len, addr, bytes; len = 65536; addr = 0; bytes = SDLA_WINDOW_SIZE; base = (void *) dev->mem_start; spin_lock_irqsave(&sdla_lock, flags); while(len) { SDLA_WINDOW(dev, addr); memset(base, 0, bytes); addr += bytes; len -= bytes; } spin_unlock_irqrestore(&sdla_lock, flags); } static char sdla_byte(struct net_device *dev, int addr) { unsigned long flags; char byte, *temp; temp = (void *) (dev->mem_start + (addr & SDLA_ADDR_MASK)); spin_lock_irqsave(&sdla_lock, flags); SDLA_WINDOW(dev, addr); byte = *temp; spin_unlock_irqrestore(&sdla_lock, flags); return byte; } static void sdla_stop(struct net_device *dev) { struct frad_local *flp; flp = netdev_priv(dev); switch(flp->type) { case SDLA_S502A: outb(SDLA_S502A_HALT, dev->base_addr + SDLA_REG_CONTROL); flp->state = SDLA_HALT; break; case SDLA_S502E: outb(SDLA_HALT, dev->base_addr + SDLA_REG_Z80_CONTROL); 
outb(SDLA_S502E_ENABLE, dev->base_addr + SDLA_REG_CONTROL); flp->state = SDLA_S502E_ENABLE; break; case SDLA_S507: flp->state &= ~SDLA_CPUEN; outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); break; case SDLA_S508: flp->state &= ~SDLA_CPUEN; outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); break; } } static void sdla_start(struct net_device *dev) { struct frad_local *flp; flp = netdev_priv(dev); switch(flp->type) { case SDLA_S502A: outb(SDLA_S502A_NMI, dev->base_addr + SDLA_REG_CONTROL); outb(SDLA_S502A_START, dev->base_addr + SDLA_REG_CONTROL); flp->state = SDLA_S502A_START; break; case SDLA_S502E: outb(SDLA_S502E_CPUEN, dev->base_addr + SDLA_REG_Z80_CONTROL); outb(0x00, dev->base_addr + SDLA_REG_CONTROL); flp->state = 0; break; case SDLA_S507: flp->state |= SDLA_CPUEN; outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); break; case SDLA_S508: flp->state |= SDLA_CPUEN; outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); break; } } /**************************************************** * * this is used for the S502A/E cards to determine * the speed of the onboard CPU. Calibration is * necessary for the Frame Relay code uploaded * later. Incorrect results cause timing problems * with link checks & status messages * ***************************************************/ static int sdla_z80_poll(struct net_device *dev, int z80_addr, int jiffs, char resp1, char resp2) { unsigned long start, done, now; char resp, *temp; start = now = jiffies; done = jiffies + jiffs; temp = (void *)dev->mem_start; temp += z80_addr & SDLA_ADDR_MASK; resp = ~resp1; while (time_before(jiffies, done) && (resp != resp1) && (!resp2 || (resp != resp2))) { if (jiffies != now) { SDLA_WINDOW(dev, z80_addr); now = jiffies; resp = *temp; } } return time_before(jiffies, done) ? 
jiffies - start : -1; } /* constants for Z80 CPU speed */ #define Z80_READY '1' /* Z80 is ready to begin */ #define LOADER_READY '2' /* driver is ready to begin */ #define Z80_SCC_OK '3' /* SCC is on board */ #define Z80_SCC_BAD '4' /* SCC was not found */ static int sdla_cpuspeed(struct net_device *dev, struct ifreq *ifr) { int jiffs; char data; sdla_start(dev); if (sdla_z80_poll(dev, 0, 3*HZ, Z80_READY, 0) < 0) return -EIO; data = LOADER_READY; sdla_write(dev, 0, &data, 1); if ((jiffs = sdla_z80_poll(dev, 0, 8*HZ, Z80_SCC_OK, Z80_SCC_BAD)) < 0) return -EIO; sdla_stop(dev); sdla_read(dev, 0, &data, 1); if (data == Z80_SCC_BAD) { printk("%s: SCC bad\n", dev->name); return -EIO; } if (data != Z80_SCC_OK) return -EINVAL; if (jiffs < 165) ifr->ifr_mtu = SDLA_CPU_16M; else if (jiffs < 220) ifr->ifr_mtu = SDLA_CPU_10M; else if (jiffs < 258) ifr->ifr_mtu = SDLA_CPU_8M; else if (jiffs < 357) ifr->ifr_mtu = SDLA_CPU_7M; else if (jiffs < 467) ifr->ifr_mtu = SDLA_CPU_5M; else ifr->ifr_mtu = SDLA_CPU_3M; return 0; } /************************************************ * * Direct interaction with the Frame Relay code * starts here. * ************************************************/ struct _dlci_stat { short dlci; char flags; } __packed; struct _frad_stat { char flags; struct _dlci_stat dlcis[SDLA_MAX_DLCI]; }; static void sdla_errors(struct net_device *dev, int cmd, int dlci, int ret, int len, void *data) { struct _dlci_stat *pstatus; short *pdlci; int i; char *state, line[30]; switch (ret) { case SDLA_RET_MODEM: state = data; if (*state & SDLA_MODEM_DCD_LOW) netdev_info(dev, "Modem DCD unexpectedly low!\n"); if (*state & SDLA_MODEM_CTS_LOW) netdev_info(dev, "Modem CTS unexpectedly low!\n"); /* I should probably do something about this! 
*/ break; case SDLA_RET_CHANNEL_OFF: netdev_info(dev, "Channel became inoperative!\n"); /* same here */ break; case SDLA_RET_CHANNEL_ON: netdev_info(dev, "Channel became operative!\n"); /* same here */ break; case SDLA_RET_DLCI_STATUS: netdev_info(dev, "Status change reported by Access Node\n"); len /= sizeof(struct _dlci_stat); for(pstatus = data, i=0;i < len;i++,pstatus++) { if (pstatus->flags & SDLA_DLCI_NEW) state = "new"; else if (pstatus->flags & SDLA_DLCI_DELETED) state = "deleted"; else if (pstatus->flags & SDLA_DLCI_ACTIVE) state = "active"; else { sprintf(line, "unknown status: %02X", pstatus->flags); state = line; } netdev_info(dev, "DLCI %i: %s\n", pstatus->dlci, state); /* same here */ } break; case SDLA_RET_DLCI_UNKNOWN: netdev_info(dev, "Received unknown DLCIs:"); len /= sizeof(short); for(pdlci = data,i=0;i < len;i++,pdlci++) pr_cont(" %i", *pdlci); pr_cont("\n"); break; case SDLA_RET_TIMEOUT: netdev_err(dev, "Command timed out!\n"); break; case SDLA_RET_BUF_OVERSIZE: netdev_info(dev, "Bc/CIR overflow, acceptable size is %i\n", len); break; case SDLA_RET_BUF_TOO_BIG: netdev_info(dev, "Buffer size over specified max of %i\n", len); break; case SDLA_RET_CHANNEL_INACTIVE: case SDLA_RET_DLCI_INACTIVE: case SDLA_RET_CIR_OVERFLOW: case SDLA_RET_NO_BUFS: if (cmd == SDLA_INFORMATION_WRITE) break; default: netdev_dbg(dev, "Cmd 0x%02X generated return code 0x%02X\n", cmd, ret); /* Further processing could be done here */ break; } } static int sdla_cmd(struct net_device *dev, int cmd, short dlci, short flags, void *inbuf, short inlen, void *outbuf, short *outlen) { static struct _frad_stat status; struct frad_local *flp; struct sdla_cmd *cmd_buf; unsigned long pflags; unsigned long jiffs; int ret, waiting, len; long window; flp = netdev_priv(dev); window = flp->type == SDLA_S508 ? 
SDLA_508_CMD_BUF : SDLA_502_CMD_BUF; cmd_buf = (struct sdla_cmd *)(dev->mem_start + (window & SDLA_ADDR_MASK)); ret = 0; len = 0; jiffs = jiffies + HZ; /* 1 second is plenty */ spin_lock_irqsave(&sdla_lock, pflags); SDLA_WINDOW(dev, window); cmd_buf->cmd = cmd; cmd_buf->dlci = dlci; cmd_buf->flags = flags; if (inbuf) memcpy(cmd_buf->data, inbuf, inlen); cmd_buf->length = inlen; cmd_buf->opp_flag = 1; spin_unlock_irqrestore(&sdla_lock, pflags); waiting = 1; len = 0; while (waiting && time_before_eq(jiffies, jiffs)) { if (waiting++ % 3) { spin_lock_irqsave(&sdla_lock, pflags); SDLA_WINDOW(dev, window); waiting = ((volatile int)(cmd_buf->opp_flag)); spin_unlock_irqrestore(&sdla_lock, pflags); } } if (!waiting) { spin_lock_irqsave(&sdla_lock, pflags); SDLA_WINDOW(dev, window); ret = cmd_buf->retval; len = cmd_buf->length; if (outbuf && outlen) { *outlen = *outlen >= len ? len : *outlen; if (*outlen) memcpy(outbuf, cmd_buf->data, *outlen); } /* This is a local copy that's used for error handling */ if (ret) memcpy(&status, cmd_buf->data, len > sizeof(status) ? 
sizeof(status) : len); spin_unlock_irqrestore(&sdla_lock, pflags); } else ret = SDLA_RET_TIMEOUT; if (ret != SDLA_RET_OK) sdla_errors(dev, cmd, dlci, ret, len, &status); return ret; } /*********************************************** * * these functions are called by the DLCI driver * ***********************************************/ static int sdla_reconfig(struct net_device *dev); static int sdla_activate(struct net_device *slave, struct net_device *master) { struct frad_local *flp; int i; flp = netdev_priv(slave); for(i=0;i<CONFIG_DLCI_MAX;i++) if (flp->master[i] == master) break; if (i == CONFIG_DLCI_MAX) return -ENODEV; flp->dlci[i] = abs(flp->dlci[i]); if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE)) sdla_cmd(slave, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL); return 0; } static int sdla_deactivate(struct net_device *slave, struct net_device *master) { struct frad_local *flp; int i; flp = netdev_priv(slave); for(i=0;i<CONFIG_DLCI_MAX;i++) if (flp->master[i] == master) break; if (i == CONFIG_DLCI_MAX) return -ENODEV; flp->dlci[i] = -abs(flp->dlci[i]); if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE)) sdla_cmd(slave, SDLA_DEACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL); return 0; } static int sdla_assoc(struct net_device *slave, struct net_device *master) { struct frad_local *flp; int i; if (master->type != ARPHRD_DLCI) return -EINVAL; flp = netdev_priv(slave); for(i=0;i<CONFIG_DLCI_MAX;i++) { if (!flp->master[i]) break; if (abs(flp->dlci[i]) == *(short *)(master->dev_addr)) return -EADDRINUSE; } if (i == CONFIG_DLCI_MAX) return -EMLINK; /* #### Alan: Comments on this ?? 
*/ flp->master[i] = master; flp->dlci[i] = -*(short *)(master->dev_addr); master->mtu = slave->mtu; if (netif_running(slave)) { if (flp->config.station == FRAD_STATION_CPE) sdla_reconfig(slave); else sdla_cmd(slave, SDLA_ADD_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL); } return 0; } static int sdla_deassoc(struct net_device *slave, struct net_device *master) { struct frad_local *flp; int i; flp = netdev_priv(slave); for(i=0;i<CONFIG_DLCI_MAX;i++) if (flp->master[i] == master) break; if (i == CONFIG_DLCI_MAX) return -ENODEV; flp->master[i] = NULL; flp->dlci[i] = 0; if (netif_running(slave)) { if (flp->config.station == FRAD_STATION_CPE) sdla_reconfig(slave); else sdla_cmd(slave, SDLA_DELETE_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL); } return 0; } static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get) { struct frad_local *flp; struct dlci_local *dlp; int i; short len, ret; flp = netdev_priv(slave); for(i=0;i<CONFIG_DLCI_MAX;i++) if (flp->master[i] == master) break; if (i == CONFIG_DLCI_MAX) return -ENODEV; dlp = netdev_priv(master); ret = SDLA_RET_OK; len = sizeof(struct dlci_conf); if (netif_running(slave)) { if (get) ret = sdla_cmd(slave, SDLA_READ_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0, NULL, 0, &dlp->config, &len); else ret = sdla_cmd(slave, SDLA_SET_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0, &dlp->config, sizeof(struct dlci_conf) - 4 * sizeof(short), NULL, NULL); } return ret == SDLA_RET_OK ? 0 : -EIO; } /************************** * * now for the Linux driver * **************************/ /* NOTE: the DLCI driver deals with freeing the SKB!! 
*/ static netdev_tx_t sdla_transmit(struct sk_buff *skb, struct net_device *dev) { struct frad_local *flp; int ret, addr, accept, i; short size; unsigned long flags; struct buf_entry *pbuf; flp = netdev_priv(dev); ret = 0; accept = 1; netif_stop_queue(dev); /* * stupid GateD insists on setting up the multicast router thru us * and we're ill equipped to handle a non Frame Relay packet at this * time! */ accept = 1; switch (dev->type) { case ARPHRD_FRAD: if (skb->dev->type != ARPHRD_DLCI) { netdev_warn(dev, "Non DLCI device, type %i, tried to send on FRAD module\n", skb->dev->type); accept = 0; } break; default: netdev_warn(dev, "unknown firmware type 0x%04X\n", dev->type); accept = 0; break; } if (accept) { /* this is frame specific, but till there's a PPP module, it's the default */ switch (flp->type) { case SDLA_S502A: case SDLA_S502E: ret = sdla_cmd(dev, SDLA_INFORMATION_WRITE, *(short *)(skb->dev->dev_addr), 0, skb->data, skb->len, NULL, NULL); break; case SDLA_S508: size = sizeof(addr); ret = sdla_cmd(dev, SDLA_INFORMATION_WRITE, *(short *)(skb->dev->dev_addr), 0, NULL, skb->len, &addr, &size); if (ret == SDLA_RET_OK) { spin_lock_irqsave(&sdla_lock, flags); SDLA_WINDOW(dev, addr); pbuf = (void *)(((int) dev->mem_start) + (addr & SDLA_ADDR_MASK)); __sdla_write(dev, pbuf->buf_addr, skb->data, skb->len); SDLA_WINDOW(dev, addr); pbuf->opp_flag = 1; spin_unlock_irqrestore(&sdla_lock, flags); } break; } switch (ret) { case SDLA_RET_OK: dev->stats.tx_packets++; break; case SDLA_RET_CIR_OVERFLOW: case SDLA_RET_BUF_OVERSIZE: case SDLA_RET_NO_BUFS: dev->stats.tx_dropped++; break; default: dev->stats.tx_errors++; break; } } netif_wake_queue(dev); for(i=0;i<CONFIG_DLCI_MAX;i++) { if(flp->master[i]!=NULL) netif_wake_queue(flp->master[i]); } dev_kfree_skb(skb); return NETDEV_TX_OK; } static void sdla_receive(struct net_device *dev) { struct net_device *master; struct frad_local *flp; struct dlci_local *dlp; struct sk_buff *skb; struct sdla_cmd *cmd; struct buf_info *pbufi; 
struct buf_entry *pbuf; unsigned long flags; int i=0, received, success, addr, buf_base, buf_top; short dlci, len, len2, split; flp = netdev_priv(dev); success = 1; received = addr = buf_top = buf_base = 0; len = dlci = 0; skb = NULL; master = NULL; cmd = NULL; pbufi = NULL; pbuf = NULL; spin_lock_irqsave(&sdla_lock, flags); switch (flp->type) { case SDLA_S502A: case SDLA_S502E: cmd = (void *) (dev->mem_start + (SDLA_502_RCV_BUF & SDLA_ADDR_MASK)); SDLA_WINDOW(dev, SDLA_502_RCV_BUF); success = cmd->opp_flag; if (!success) break; dlci = cmd->dlci; len = cmd->length; break; case SDLA_S508: pbufi = (void *) (dev->mem_start + (SDLA_508_RXBUF_INFO & SDLA_ADDR_MASK)); SDLA_WINDOW(dev, SDLA_508_RXBUF_INFO); pbuf = (void *) (dev->mem_start + ((pbufi->rse_base + flp->buffer * sizeof(struct buf_entry)) & SDLA_ADDR_MASK)); success = pbuf->opp_flag; if (!success) break; buf_top = pbufi->buf_top; buf_base = pbufi->buf_base; dlci = pbuf->dlci; len = pbuf->length; addr = pbuf->buf_addr; break; } /* common code, find the DLCI and get the SKB */ if (success) { for (i=0;i<CONFIG_DLCI_MAX;i++) if (flp->dlci[i] == dlci) break; if (i == CONFIG_DLCI_MAX) { netdev_notice(dev, "Received packet from invalid DLCI %i, ignoring\n", dlci); dev->stats.rx_errors++; success = 0; } } if (success) { master = flp->master[i]; skb = dev_alloc_skb(len + sizeof(struct frhdr)); if (skb == NULL) { netdev_notice(dev, "Memory squeeze, dropping packet\n"); dev->stats.rx_dropped++; success = 0; } else skb_reserve(skb, sizeof(struct frhdr)); } /* pick up the data */ switch (flp->type) { case SDLA_S502A: case SDLA_S502E: if (success) __sdla_read(dev, SDLA_502_RCV_BUF + SDLA_502_DATA_OFS, skb_put(skb,len), len); SDLA_WINDOW(dev, SDLA_502_RCV_BUF); cmd->opp_flag = 0; break; case SDLA_S508: if (success) { /* is this buffer split off the end of the internal ring buffer */ split = addr + len > buf_top + 1 ? 
len - (buf_top - addr + 1) : 0; len2 = len - split; __sdla_read(dev, addr, skb_put(skb, len2), len2); if (split) __sdla_read(dev, buf_base, skb_put(skb, split), split); } /* increment the buffer we're looking at */ SDLA_WINDOW(dev, SDLA_508_RXBUF_INFO); flp->buffer = (flp->buffer + 1) % pbufi->rse_num; pbuf->opp_flag = 0; break; } if (success) { dev->stats.rx_packets++; dlp = netdev_priv(master); (*dlp->receive)(skb, master); } spin_unlock_irqrestore(&sdla_lock, flags); } static irqreturn_t sdla_isr(int dummy, void *dev_id) { struct net_device *dev; struct frad_local *flp; char byte; dev = dev_id; flp = netdev_priv(dev); if (!flp->initialized) { netdev_warn(dev, "irq %d for uninitialized device\n", dev->irq); return IRQ_NONE; } byte = sdla_byte(dev, flp->type == SDLA_S508 ? SDLA_508_IRQ_INTERFACE : SDLA_502_IRQ_INTERFACE); switch (byte) { case SDLA_INTR_RX: sdla_receive(dev); break; /* the command will get an error return, which is processed above */ case SDLA_INTR_MODEM: case SDLA_INTR_STATUS: sdla_cmd(dev, SDLA_READ_DLC_STATUS, 0, 0, NULL, 0, NULL, NULL); break; case SDLA_INTR_TX: case SDLA_INTR_COMPLETE: case SDLA_INTR_TIMER: netdev_warn(dev, "invalid irq flag 0x%02X\n", byte); break; } /* the S502E requires a manual acknowledgement of the interrupt */ if (flp->type == SDLA_S502E) { flp->state &= ~SDLA_S502E_INTACK; outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); flp->state |= SDLA_S502E_INTACK; outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); } /* this clears the byte, informing the Z80 we're done */ byte = 0; sdla_write(dev, flp->type == SDLA_S508 ? 
SDLA_508_IRQ_INTERFACE : SDLA_502_IRQ_INTERFACE, &byte, sizeof(byte)); return IRQ_HANDLED; } static void sdla_poll(unsigned long device) { struct net_device *dev; struct frad_local *flp; dev = (struct net_device *) device; flp = netdev_priv(dev); if (sdla_byte(dev, SDLA_502_RCV_BUF)) sdla_receive(dev); flp->timer.expires = 1; add_timer(&flp->timer); } static int sdla_close(struct net_device *dev) { struct frad_local *flp; struct intr_info intr; int len, i; short dlcis[CONFIG_DLCI_MAX]; flp = netdev_priv(dev); len = 0; for(i=0;i<CONFIG_DLCI_MAX;i++) if (flp->dlci[i]) dlcis[len++] = abs(flp->dlci[i]); len *= 2; if (flp->config.station == FRAD_STATION_NODE) { for(i=0;i<CONFIG_DLCI_MAX;i++) if (flp->dlci[i] > 0) sdla_cmd(dev, SDLA_DEACTIVATE_DLCI, 0, 0, dlcis, len, NULL, NULL); sdla_cmd(dev, SDLA_DELETE_DLCI, 0, 0, &flp->dlci[i], sizeof(flp->dlci[i]), NULL, NULL); } memset(&intr, 0, sizeof(intr)); /* let's start up the reception */ switch(flp->type) { case SDLA_S502A: del_timer(&flp->timer); break; case SDLA_S502E: sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(char) + sizeof(short), NULL, NULL); flp->state &= ~SDLA_S502E_INTACK; outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); break; case SDLA_S507: break; case SDLA_S508: sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(struct intr_info), NULL, NULL); flp->state &= ~SDLA_S508_INTEN; outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); break; } sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL); netif_stop_queue(dev); return 0; } struct conf_data { struct frad_conf config; short dlci[CONFIG_DLCI_MAX]; }; static int sdla_open(struct net_device *dev) { struct frad_local *flp; struct dlci_local *dlp; struct conf_data data; struct intr_info intr; int len, i; char byte; flp = netdev_priv(dev); if (!flp->initialized) return -EPERM; if (!flp->configured) return -EPERM; /* time to send in the configuration */ len = 0; for(i=0;i<CONFIG_DLCI_MAX;i++) if (flp->dlci[i]) data.dlci[len++] = 
abs(flp->dlci[i]); len *= 2; memcpy(&data.config, &flp->config, sizeof(struct frad_conf)); len += sizeof(struct frad_conf); sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL); sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, 0, 0, &data, len, NULL, NULL); if (flp->type == SDLA_S508) flp->buffer = 0; sdla_cmd(dev, SDLA_ENABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL); /* let's start up the reception */ memset(&intr, 0, sizeof(intr)); switch(flp->type) { case SDLA_S502A: flp->timer.expires = 1; add_timer(&flp->timer); break; case SDLA_S502E: flp->state |= SDLA_S502E_ENABLE; outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); flp->state |= SDLA_S502E_INTACK; outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); byte = 0; sdla_write(dev, SDLA_502_IRQ_INTERFACE, &byte, sizeof(byte)); intr.flags = SDLA_INTR_RX | SDLA_INTR_STATUS | SDLA_INTR_MODEM; sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(char) + sizeof(short), NULL, NULL); break; case SDLA_S507: break; case SDLA_S508: flp->state |= SDLA_S508_INTEN; outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); byte = 0; sdla_write(dev, SDLA_508_IRQ_INTERFACE, &byte, sizeof(byte)); intr.flags = SDLA_INTR_RX | SDLA_INTR_STATUS | SDLA_INTR_MODEM; intr.irq = dev->irq; sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(struct intr_info), NULL, NULL); break; } if (flp->config.station == FRAD_STATION_CPE) { byte = SDLA_ICS_STATUS_ENQ; sdla_cmd(dev, SDLA_ISSUE_IN_CHANNEL_SIGNAL, 0, 0, &byte, sizeof(byte), NULL, NULL); } else { sdla_cmd(dev, SDLA_ADD_DLCI, 0, 0, data.dlci, len - sizeof(struct frad_conf), NULL, NULL); for(i=0;i<CONFIG_DLCI_MAX;i++) if (flp->dlci[i] > 0) sdla_cmd(dev, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], 2*sizeof(flp->dlci[i]), NULL, NULL); } /* configure any specific DLCI settings */ for(i=0;i<CONFIG_DLCI_MAX;i++) if (flp->dlci[i]) { dlp = netdev_priv(flp->master[i]); if (dlp->configured) sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0, &dlp->config, sizeof(struct 
dlci_conf), NULL, NULL); } netif_start_queue(dev); return 0; } static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, int get) { struct frad_local *flp; struct conf_data data; int i; short size; if (dev->type == 0xFFFF) return -EUNATCH; flp = netdev_priv(dev); if (!get) { if (netif_running(dev)) return -EBUSY; if(copy_from_user(&data.config, conf, sizeof(struct frad_conf))) return -EFAULT; if (data.config.station & ~FRAD_STATION_NODE) return -EINVAL; if (data.config.flags & ~FRAD_VALID_FLAGS) return -EINVAL; if ((data.config.kbaud < 0) || ((data.config.kbaud > 128) && (flp->type != SDLA_S508))) return -EINVAL; if (data.config.clocking & ~(FRAD_CLOCK_INT | SDLA_S508_PORT_RS232)) return -EINVAL; if ((data.config.mtu < 0) || (data.config.mtu > SDLA_MAX_MTU)) return -EINVAL; if ((data.config.T391 < 5) || (data.config.T391 > 30)) return -EINVAL; if ((data.config.T392 < 5) || (data.config.T392 > 30)) return -EINVAL; if ((data.config.N391 < 1) || (data.config.N391 > 255)) return -EINVAL; if ((data.config.N392 < 1) || (data.config.N392 > 10)) return -EINVAL; if ((data.config.N393 < 1) || (data.config.N393 > 10)) return -EINVAL; memcpy(&flp->config, &data.config, sizeof(struct frad_conf)); flp->config.flags |= SDLA_DIRECT_RECV; if (flp->type == SDLA_S508) flp->config.flags |= SDLA_TX70_RX30; if (dev->mtu != flp->config.mtu) { /* this is required to change the MTU */ dev->mtu = flp->config.mtu; for(i=0;i<CONFIG_DLCI_MAX;i++) if (flp->master[i]) flp->master[i]->mtu = flp->config.mtu; } flp->config.mtu += sizeof(struct frhdr); /* off to the races! 
*/ if (!flp->configured) sdla_start(dev); flp->configured = 1; } else { /* no sense reading if the CPU isn't started */ if (netif_running(dev)) { size = sizeof(data); if (sdla_cmd(dev, SDLA_READ_DLCI_CONFIGURATION, 0, 0, NULL, 0, &data, &size) != SDLA_RET_OK) return -EIO; } else if (flp->configured) memcpy(&data.config, &flp->config, sizeof(struct frad_conf)); else memset(&data.config, 0, sizeof(struct frad_conf)); memcpy(&flp->config, &data.config, sizeof(struct frad_conf)); data.config.flags &= FRAD_VALID_FLAGS; data.config.mtu -= data.config.mtu > sizeof(struct frhdr) ? sizeof(struct frhdr) : data.config.mtu; return copy_to_user(conf, &data.config, sizeof(struct frad_conf))?-EFAULT:0; } return 0; } static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int read) { struct sdla_mem mem; char *temp; if(copy_from_user(&mem, info, sizeof(mem))) return -EFAULT; if (read) { temp = kzalloc(mem.len, GFP_KERNEL); if (!temp) return -ENOMEM; sdla_read(dev, mem.addr, temp, mem.len); if(copy_to_user(mem.data, temp, mem.len)) { kfree(temp); return -EFAULT; } kfree(temp); } else { temp = memdup_user(mem.data, mem.len); if (IS_ERR(temp)) return PTR_ERR(temp); sdla_write(dev, mem.addr, temp, mem.len); kfree(temp); } return 0; } static int sdla_reconfig(struct net_device *dev) { struct frad_local *flp; struct conf_data data; int i, len; flp = netdev_priv(dev); len = 0; for(i=0;i<CONFIG_DLCI_MAX;i++) if (flp->dlci[i]) data.dlci[len++] = flp->dlci[i]; len *= 2; memcpy(&data, &flp->config, sizeof(struct frad_conf)); len += sizeof(struct frad_conf); sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL); sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, 0, 0, &data, len, NULL, NULL); sdla_cmd(dev, SDLA_ENABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL); return 0; } static int sdla_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct frad_local *flp; if(!capable(CAP_NET_ADMIN)) return -EPERM; flp = netdev_priv(dev); if (!flp->initialized) return 
-EINVAL; switch (cmd) { case FRAD_GET_CONF: case FRAD_SET_CONF: return sdla_config(dev, ifr->ifr_data, cmd == FRAD_GET_CONF); case SDLA_IDENTIFY: ifr->ifr_flags = flp->type; break; case SDLA_CPUSPEED: return sdla_cpuspeed(dev, ifr); /* ========================================================== NOTE: This is rather a useless action right now, as the current driver does not support protocols other than FR. However, Sangoma has modules for a number of other protocols in the works. ============================================================*/ case SDLA_PROTOCOL: if (flp->configured) return -EALREADY; switch (ifr->ifr_flags) { case ARPHRD_FRAD: dev->type = ifr->ifr_flags; break; default: return -ENOPROTOOPT; } break; case SDLA_CLEARMEM: sdla_clear(dev); break; case SDLA_WRITEMEM: case SDLA_READMEM: if(!capable(CAP_SYS_RAWIO)) return -EPERM; return sdla_xfer(dev, ifr->ifr_data, cmd == SDLA_READMEM); case SDLA_START: sdla_start(dev); break; case SDLA_STOP: sdla_stop(dev); break; default: return -EOPNOTSUPP; } return 0; } static int sdla_change_mtu(struct net_device *dev, int new_mtu) { struct frad_local *flp; flp = netdev_priv(dev); if (netif_running(dev)) return -EBUSY; /* for now, you can't change the MTU! 
*/ return -EOPNOTSUPP; } static int sdla_set_config(struct net_device *dev, struct ifmap *map) { struct frad_local *flp; int i; char byte; unsigned base; int err = -EINVAL; flp = netdev_priv(dev); if (flp->initialized) return -EINVAL; for(i=0; i < ARRAY_SIZE(valid_port); i++) if (valid_port[i] == map->base_addr) break; if (i == ARRAY_SIZE(valid_port)) return -EINVAL; if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){ pr_warn("io-port 0x%04lx in use\n", dev->base_addr); return -EINVAL; } base = map->base_addr; /* test for card types, S502A, S502E, S507, S508 */ /* these tests shut down the card completely, so clear the state */ flp->type = SDLA_UNKNOWN; flp->state = 0; for(i=1;i<SDLA_IO_EXTENTS;i++) if (inb(base + i) != 0xFF) break; if (i == SDLA_IO_EXTENTS) { outb(SDLA_HALT, base + SDLA_REG_Z80_CONTROL); if ((inb(base + SDLA_S502_STS) & 0x0F) == 0x08) { outb(SDLA_S502E_INTACK, base + SDLA_REG_CONTROL); if ((inb(base + SDLA_S502_STS) & 0x0F) == 0x0C) { outb(SDLA_HALT, base + SDLA_REG_CONTROL); flp->type = SDLA_S502E; goto got_type; } } } for(byte=inb(base),i=0;i<SDLA_IO_EXTENTS;i++) if (inb(base + i) != byte) break; if (i == SDLA_IO_EXTENTS) { outb(SDLA_HALT, base + SDLA_REG_CONTROL); if ((inb(base + SDLA_S502_STS) & 0x7E) == 0x30) { outb(SDLA_S507_ENABLE, base + SDLA_REG_CONTROL); if ((inb(base + SDLA_S502_STS) & 0x7E) == 0x32) { outb(SDLA_HALT, base + SDLA_REG_CONTROL); flp->type = SDLA_S507; goto got_type; } } } outb(SDLA_HALT, base + SDLA_REG_CONTROL); if ((inb(base + SDLA_S508_STS) & 0x3F) == 0x00) { outb(SDLA_S508_INTEN, base + SDLA_REG_CONTROL); if ((inb(base + SDLA_S508_STS) & 0x3F) == 0x10) { outb(SDLA_HALT, base + SDLA_REG_CONTROL); flp->type = SDLA_S508; goto got_type; } } outb(SDLA_S502A_HALT, base + SDLA_REG_CONTROL); if (inb(base + SDLA_S502_STS) == 0x40) { outb(SDLA_S502A_START, base + SDLA_REG_CONTROL); if (inb(base + SDLA_S502_STS) == 0x40) { outb(SDLA_S502A_INTEN, base + SDLA_REG_CONTROL); if (inb(base + SDLA_S502_STS) == 0x44) { 
outb(SDLA_S502A_START, base + SDLA_REG_CONTROL); flp->type = SDLA_S502A; goto got_type; } } } netdev_notice(dev, "Unknown card type\n"); err = -ENODEV; goto fail; got_type: switch(base) { case 0x270: case 0x280: case 0x380: case 0x390: if (flp->type != SDLA_S508 && flp->type != SDLA_S507) goto fail; } switch (map->irq) { case 2: if (flp->type != SDLA_S502E) goto fail; break; case 10: case 11: case 12: case 15: case 4: if (flp->type != SDLA_S508 && flp->type != SDLA_S507) goto fail; break; case 3: case 5: case 7: if (flp->type == SDLA_S502A) goto fail; break; default: goto fail; } err = -EAGAIN; if (request_irq(dev->irq, sdla_isr, 0, dev->name, dev)) goto fail; if (flp->type == SDLA_S507) { switch(dev->irq) { case 3: flp->state = SDLA_S507_IRQ3; break; case 4: flp->state = SDLA_S507_IRQ4; break; case 5: flp->state = SDLA_S507_IRQ5; break; case 7: flp->state = SDLA_S507_IRQ7; break; case 10: flp->state = SDLA_S507_IRQ10; break; case 11: flp->state = SDLA_S507_IRQ11; break; case 12: flp->state = SDLA_S507_IRQ12; break; case 15: flp->state = SDLA_S507_IRQ15; break; } } for(i=0; i < ARRAY_SIZE(valid_mem); i++) if (valid_mem[i] == map->mem_start) break; err = -EINVAL; if (i == ARRAY_SIZE(valid_mem)) goto fail2; if (flp->type == SDLA_S502A && (map->mem_start & 0xF000) >> 12 == 0x0E) goto fail2; if (flp->type != SDLA_S507 && map->mem_start >> 16 == 0x0B) goto fail2; if (flp->type == SDLA_S507 && map->mem_start >> 16 == 0x0D) goto fail2; byte = flp->type != SDLA_S508 ? SDLA_8K_WINDOW : 0; byte |= (map->mem_start & 0xF000) >> (12 + (flp->type == SDLA_S508 ? 
1 : 0)); switch(flp->type) { case SDLA_S502A: case SDLA_S502E: switch (map->mem_start >> 16) { case 0x0A: byte |= SDLA_S502_SEG_A; break; case 0x0C: byte |= SDLA_S502_SEG_C; break; case 0x0D: byte |= SDLA_S502_SEG_D; break; case 0x0E: byte |= SDLA_S502_SEG_E; break; } break; case SDLA_S507: switch (map->mem_start >> 16) { case 0x0A: byte |= SDLA_S507_SEG_A; break; case 0x0B: byte |= SDLA_S507_SEG_B; break; case 0x0C: byte |= SDLA_S507_SEG_C; break; case 0x0E: byte |= SDLA_S507_SEG_E; break; } break; case SDLA_S508: switch (map->mem_start >> 16) { case 0x0A: byte |= SDLA_S508_SEG_A; break; case 0x0C: byte |= SDLA_S508_SEG_C; break; case 0x0D: byte |= SDLA_S508_SEG_D; break; case 0x0E: byte |= SDLA_S508_SEG_E; break; } break; } /* set the memory bits, and enable access */ outb(byte, base + SDLA_REG_PC_WINDOW); switch(flp->type) { case SDLA_S502E: flp->state = SDLA_S502E_ENABLE; break; case SDLA_S507: flp->state |= SDLA_MEMEN; break; case SDLA_S508: flp->state = SDLA_MEMEN; break; } outb(flp->state, base + SDLA_REG_CONTROL); dev->irq = map->irq; dev->base_addr = base; dev->mem_start = map->mem_start; dev->mem_end = dev->mem_start + 0x2000; flp->initialized = 1; return 0; fail2: free_irq(map->irq, dev); fail: release_region(base, SDLA_IO_EXTENTS); return err; } static const struct net_device_ops sdla_netdev_ops = { .ndo_open = sdla_open, .ndo_stop = sdla_close, .ndo_do_ioctl = sdla_ioctl, .ndo_set_config = sdla_set_config, .ndo_start_xmit = sdla_transmit, .ndo_change_mtu = sdla_change_mtu, }; static void setup_sdla(struct net_device *dev) { struct frad_local *flp = netdev_priv(dev); netdev_boot_setup_check(dev); dev->netdev_ops = &sdla_netdev_ops; dev->flags = 0; dev->type = 0xFFFF; dev->hard_header_len = 0; dev->addr_len = 0; dev->mtu = SDLA_MAX_MTU; flp->activate = sdla_activate; flp->deactivate = sdla_deactivate; flp->assoc = sdla_assoc; flp->deassoc = sdla_deassoc; flp->dlci_conf = sdla_dlci_conf; init_timer(&flp->timer); flp->timer.expires = 1; flp->timer.data = 
(unsigned long) dev; flp->timer.function = sdla_poll; } static struct net_device *sdla; static int __init init_sdla(void) { int err; printk("%s.\n", version); sdla = alloc_netdev(sizeof(struct frad_local), "sdla0", setup_sdla); if (!sdla) return -ENOMEM; err = register_netdev(sdla); if (err) free_netdev(sdla); return err; } static void __exit exit_sdla(void) { struct frad_local *flp = netdev_priv(sdla); unregister_netdev(sdla); if (flp->initialized) { free_irq(sdla->irq, sdla); release_region(sdla->base_addr, SDLA_IO_EXTENTS); } del_timer_sync(&flp->timer); free_netdev(sdla); } MODULE_LICENSE("GPL"); module_init(init_sdla); module_exit(exit_sdla);
gpl-2.0
corcor67/SMPL_M8_SENSE
arch/m32r/platforms/oaks32r/setup.c
9028
2713
/* * linux/arch/m32r/platforms/oaks32r/setup.c * * Setup routines for OAKS32R Board * * Copyright (c) 2002-2005 Hiroyuki Kondo, Hirokazu Takata, * Hitoshi Yamamoto, Mamoru Sakugawa */ #include <linux/irq.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/m32r.h> #include <asm/io.h> #define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) icu_data_t icu_data[NR_IRQS]; static void disable_oaks32r_irq(unsigned int irq) { unsigned long port, data; port = irq2port(irq); data = icu_data[irq].icucr|M32R_ICUCR_ILEVEL7; outl(data, port); } static void enable_oaks32r_irq(unsigned int irq) { unsigned long port, data; port = irq2port(irq); data = icu_data[irq].icucr|M32R_ICUCR_IEN|M32R_ICUCR_ILEVEL6; outl(data, port); } static void mask_oaks32r(struct irq_data *data) { disable_oaks32r_irq(data->irq); } static void unmask_oaks32r(struct irq_data *data) { enable_oaks32r_irq(data->irq); } static void shutdown_oaks32r(struct irq_data *data) { unsigned long port; port = irq2port(data->irq); outl(M32R_ICUCR_ILEVEL7, port); } static struct irq_chip oaks32r_irq_type = { .name = "OAKS32R-IRQ", .irq_shutdown = shutdown_oaks32r, .irq_mask = mask_oaks32r, .irq_unmask = unmask_oaks32r, }; void __init init_IRQ(void) { static int once = 0; if (once) return; else once++; #ifdef CONFIG_NE2000 /* INT3 : LAN controller (RTL8019AS) */ irq_set_chip_and_handler(M32R_IRQ_INT3, &oaks32r_irq_type, handle_level_irq); icu_data[M32R_IRQ_INT3].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10; disable_oaks32r_irq(M32R_IRQ_INT3); #endif /* CONFIG_M32R_NE2000 */ /* MFT2 : system timer */ irq_set_chip_and_handler(M32R_IRQ_MFT2, &oaks32r_irq_type, handle_level_irq); icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN; disable_oaks32r_irq(M32R_IRQ_MFT2); #ifdef CONFIG_SERIAL_M32R_SIO /* SIO0_R : uart receive data */ irq_set_chip_and_handler(M32R_IRQ_SIO0_R, &oaks32r_irq_type, handle_level_irq); icu_data[M32R_IRQ_SIO0_R].icucr = 0; disable_oaks32r_irq(M32R_IRQ_SIO0_R); /* SIO0_S : uart 
send data */ irq_set_chip_and_handler(M32R_IRQ_SIO0_S, &oaks32r_irq_type, handle_level_irq); icu_data[M32R_IRQ_SIO0_S].icucr = 0; disable_oaks32r_irq(M32R_IRQ_SIO0_S); /* SIO1_R : uart receive data */ irq_set_chip_and_handler(M32R_IRQ_SIO1_R, &oaks32r_irq_type, handle_level_irq); icu_data[M32R_IRQ_SIO1_R].icucr = 0; disable_oaks32r_irq(M32R_IRQ_SIO1_R); /* SIO1_S : uart send data */ irq_set_chip_and_handler(M32R_IRQ_SIO1_S, &oaks32r_irq_type, handle_level_irq); icu_data[M32R_IRQ_SIO1_S].icucr = 0; disable_oaks32r_irq(M32R_IRQ_SIO1_S); #endif /* CONFIG_SERIAL_M32R_SIO */ }
gpl-2.0
oppo-source/Find7-5.0-kernel-source
drivers/video/console/font_sun8x16.c
10052
22584
#include <linux/font.h> #define FONTDATAMAX 4096 static const unsigned char fontdata_sun8x16[FONTDATAMAX] = { /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x7e,0xff,0xdb,0xff,0xff,0xc3,0xe7,0xff,0xff,0x7e,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x6c,0xfe,0xfe,0xfe,0xfe,0x7c,0x38,0x10,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x10,0x38,0x7c,0xfe,0x7c,0x38,0x10,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x18,0x3c,0x3c,0xe7,0xe7,0xe7,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x18,0x3c,0x7e,0xff,0xff,0x7e,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x3c,0x3c,0x18,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0xff,0xff,0xff,0xff,0xff,0xff,0xe7,0xc3,0xc3,0xe7,0xff,0xff,0xff,0xff,0xff,0xff, /* */ 0x00,0x00,0x00,0x00,0x00,0x3c,0x66,0x42,0x42,0x66,0x3c,0x00,0x00,0x00,0x00,0x00, /* */ 0xff,0xff,0xff,0xff,0xff,0xc3,0x99,0xbd,0xbd,0x99,0xc3,0xff,0xff,0xff,0xff,0xff, /* */ 0x00,0x00,0x1e,0x0e,0x1a,0x32,0x78,0xcc,0xcc,0xcc,0xcc,0x78,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x3c,0x66,0x66,0x66,0x66,0x3c,0x18,0x7e,0x18,0x18,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x3f,0x33,0x3f,0x30,0x30,0x30,0x30,0x70,0xf0,0xe0,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x7f,0x63,0x7f,0x63,0x63,0x63,0x63,0x67,0xe7,0xe6,0xc0,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x18,0x18,0xdb,0x3c,0xe7,0x3c,0xdb,0x18,0x18,0x00,0x00,0x00,0x00, /* */ 0x00,0x80,0xc0,0xe0,0xf0,0xf8,0xfe,0xf8,0xf0,0xe0,0xc0,0x80,0x00,0x00,0x00,0x00, /* */ 0x00,0x02,0x06,0x0e,0x1e,0x3e,0xfe,0x3e,0x1e,0x0e,0x06,0x02,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x18,0x3c,0x7e,0x18,0x18,0x18,0x7e,0x3c,0x18,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x66,0x66,0x66,0x66,0x66,0x66,0x66,0x00,0x66,0x66,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x7f,0xdb,0xdb,0xdb,0x7b,0x1b,0x1b,0x1b,0x1b,0x1b,0x00,0x00,0x00,0x00, /* */ 
0x00,0x7c,0xc6,0x60,0x38,0x6c,0xc6,0xc6,0x6c,0x38,0x0c,0xc6,0x7c,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0xfe,0xfe,0xfe,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x18,0x3c,0x7e,0x18,0x18,0x18,0x7e,0x3c,0x18,0x7e,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x18,0x3c,0x7e,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x7e,0x3c,0x18,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x18,0x0c,0xfe,0x0c,0x18,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x30,0x60,0xfe,0x60,0x30,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0xc0,0xc0,0xc0,0xfe,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x24,0x66,0xff,0x66,0x24,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x10,0x38,0x38,0x7c,0x7c,0xfe,0xfe,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0xfe,0xfe,0x7c,0x7c,0x38,0x38,0x10,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*!*/ 0x00,0x00,0x18,0x3c,0x3c,0x3c,0x18,0x18,0x18,0x00,0x18,0x18,0x00,0x00,0x00,0x00, /*"*/ 0x00,0x66,0x66,0x66,0x24,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*#*/ 0x00,0x00,0x00,0x6c,0x6c,0xfe,0x6c,0x6c,0x6c,0xfe,0x6c,0x6c,0x00,0x00,0x00,0x00, /*$*/ 0x18,0x18,0x7c,0xc6,0xc2,0xc0,0x7c,0x06,0x06,0x86,0xc6,0x7c,0x18,0x18,0x00,0x00, /*%*/ 0x00,0x00,0x00,0x00,0xc2,0xc6,0x0c,0x18,0x30,0x60,0xc6,0x86,0x00,0x00,0x00,0x00, /*&*/ 0x00,0x00,0x38,0x6c,0x6c,0x38,0x76,0xdc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /*'*/ 0x00,0x30,0x30,0x30,0x60,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*(*/ 0x00,0x00,0x0c,0x18,0x30,0x30,0x30,0x30,0x30,0x30,0x18,0x0c,0x00,0x00,0x00,0x00, /*)*/ 0x00,0x00,0x30,0x18,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x18,0x30,0x00,0x00,0x00,0x00, /***/ 0x00,0x00,0x00,0x00,0x00,0x66,0x3c,0xff,0x3c,0x66,0x00,0x00,0x00,0x00,0x00,0x00, /*+*/ 0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x7e,0x18,0x18,0x00,0x00,0x00,0x00,0x00,0x00, 
/*,*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x18,0x30,0x00,0x00,0x00, /*-*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*.*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x02,0x06,0x0c,0x18,0x30,0x60,0xc0,0x80,0x00,0x00,0x00,0x00, /*0*/ 0x00,0x00,0x7c,0xc6,0xc6,0xce,0xde,0xf6,0xe6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /*1*/ 0x00,0x00,0x18,0x38,0x78,0x18,0x18,0x18,0x18,0x18,0x18,0x7e,0x00,0x00,0x00,0x00, /*2*/ 0x00,0x00,0x7c,0xc6,0x06,0x0c,0x18,0x30,0x60,0xc0,0xc6,0xfe,0x00,0x00,0x00,0x00, /*3*/ 0x00,0x00,0x7c,0xc6,0x06,0x06,0x3c,0x06,0x06,0x06,0xc6,0x7c,0x00,0x00,0x00,0x00, /*4*/ 0x00,0x00,0x0c,0x1c,0x3c,0x6c,0xcc,0xfe,0x0c,0x0c,0x0c,0x1e,0x00,0x00,0x00,0x00, /*5*/ 0x00,0x00,0xfe,0xc0,0xc0,0xc0,0xfc,0x06,0x06,0x06,0xc6,0x7c,0x00,0x00,0x00,0x00, /*6*/ 0x00,0x00,0x38,0x60,0xc0,0xc0,0xfc,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /*7*/ 0x00,0x00,0xfe,0xc6,0x06,0x06,0x0c,0x18,0x30,0x30,0x30,0x30,0x00,0x00,0x00,0x00, /*8*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0x7c,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /*9*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0x7e,0x06,0x06,0x06,0x0c,0x78,0x00,0x00,0x00,0x00, /*:*/ 0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00, /*;*/ 0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x18,0x18,0x30,0x00,0x00,0x00,0x00, /*<*/ 0x00,0x00,0x00,0x06,0x0c,0x18,0x30,0x60,0x30,0x18,0x0c,0x06,0x00,0x00,0x00,0x00, /*=*/ 0x00,0x00,0x00,0x00,0x00,0x7e,0x00,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*>*/ 0x00,0x00,0x00,0x60,0x30,0x18,0x0c,0x06,0x0c,0x18,0x30,0x60,0x00,0x00,0x00,0x00, /*?*/ 0x00,0x00,0x7c,0xc6,0xc6,0x0c,0x18,0x18,0x18,0x00,0x18,0x18,0x00,0x00,0x00,0x00, /*@*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xde,0xde,0xde,0xdc,0xc0,0x7c,0x00,0x00,0x00,0x00, /*A*/ 0x00,0x00,0x10,0x38,0x6c,0xc6,0xc6,0xfe,0xc6,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00, /*B*/ 
0x00,0x00,0xfc,0x66,0x66,0x66,0x7c,0x66,0x66,0x66,0x66,0xfc,0x00,0x00,0x00,0x00, /*C*/ 0x00,0x00,0x3c,0x66,0xc2,0xc0,0xc0,0xc0,0xc0,0xc2,0x66,0x3c,0x00,0x00,0x00,0x00, /*D*/ 0x00,0x00,0xf8,0x6c,0x66,0x66,0x66,0x66,0x66,0x66,0x6c,0xf8,0x00,0x00,0x00,0x00, /*E*/ 0x00,0x00,0xfe,0x66,0x62,0x68,0x78,0x68,0x60,0x62,0x66,0xfe,0x00,0x00,0x00,0x00, /*F*/ 0x00,0x00,0xfe,0x66,0x62,0x68,0x78,0x68,0x60,0x60,0x60,0xf0,0x00,0x00,0x00,0x00, /*G*/ 0x00,0x00,0x3c,0x66,0xc2,0xc0,0xc0,0xde,0xc6,0xc6,0x66,0x3a,0x00,0x00,0x00,0x00, /*H*/ 0x00,0x00,0xc6,0xc6,0xc6,0xc6,0xfe,0xc6,0xc6,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00, /*I*/ 0x00,0x00,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /*J*/ 0x00,0x00,0x1e,0x0c,0x0c,0x0c,0x0c,0x0c,0xcc,0xcc,0xcc,0x78,0x00,0x00,0x00,0x00, /*K*/ 0x00,0x00,0xe6,0x66,0x66,0x6c,0x78,0x78,0x6c,0x66,0x66,0xe6,0x00,0x00,0x00,0x00, /*L*/ 0x00,0x00,0xf0,0x60,0x60,0x60,0x60,0x60,0x60,0x62,0x66,0xfe,0x00,0x00,0x00,0x00, /*M*/ 0x00,0x00,0xc3,0xe7,0xff,0xff,0xdb,0xc3,0xc3,0xc3,0xc3,0xc3,0x00,0x00,0x00,0x00, /*N*/ 0x00,0x00,0xc6,0xe6,0xf6,0xfe,0xde,0xce,0xc6,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00, /*O*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /*P*/ 0x00,0x00,0xfc,0x66,0x66,0x66,0x7c,0x60,0x60,0x60,0x60,0xf0,0x00,0x00,0x00,0x00, /*Q*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xd6,0xde,0x7c,0x0c,0x0e,0x00,0x00, /*R*/ 0x00,0x00,0xfc,0x66,0x66,0x66,0x7c,0x6c,0x66,0x66,0x66,0xe6,0x00,0x00,0x00,0x00, /*S*/ 0x00,0x00,0x7c,0xc6,0xc6,0x60,0x38,0x0c,0x06,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /*T*/ 0x00,0x00,0xff,0xdb,0x99,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /*U*/ 0x00,0x00,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /*V*/ 0x00,0x00,0xc3,0xc3,0xc3,0xc3,0xc3,0xc3,0xc3,0x66,0x3c,0x18,0x00,0x00,0x00,0x00, /*W*/ 0x00,0x00,0xc3,0xc3,0xc3,0xc3,0xc3,0xdb,0xdb,0xff,0x66,0x66,0x00,0x00,0x00,0x00, /*X*/ 0x00,0x00,0xc3,0xc3,0x66,0x3c,0x18,0x18,0x3c,0x66,0xc3,0xc3,0x00,0x00,0x00,0x00, 
/*Y*/ 0x00,0x00,0xc3,0xc3,0xc3,0x66,0x3c,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /*Z*/ 0x00,0x00,0xff,0xc3,0x86,0x0c,0x18,0x30,0x60,0xc1,0xc3,0xff,0x00,0x00,0x00,0x00, /*[*/ 0x00,0x00,0x3c,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x3c,0x00,0x00,0x00,0x00, /*\*/ 0x00,0x00,0x00,0x80,0xc0,0xe0,0x70,0x38,0x1c,0x0e,0x06,0x02,0x00,0x00,0x00,0x00, /*]*/ 0x00,0x00,0x3c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x3c,0x00,0x00,0x00,0x00, /*^*/ 0x10,0x38,0x6c,0xc6,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*_*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x00,0x00, /*`*/ 0x30,0x30,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*a*/ 0x00,0x00,0x00,0x00,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /*b*/ 0x00,0x00,0xe0,0x60,0x60,0x78,0x6c,0x66,0x66,0x66,0x66,0x7c,0x00,0x00,0x00,0x00, /*c*/ 0x00,0x00,0x00,0x00,0x00,0x7c,0xc6,0xc0,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00, /*d*/ 0x00,0x00,0x1c,0x0c,0x0c,0x3c,0x6c,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /*e*/ 0x00,0x00,0x00,0x00,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00, /*f*/ 0x00,0x00,0x38,0x6c,0x64,0x60,0xf0,0x60,0x60,0x60,0x60,0xf0,0x00,0x00,0x00,0x00, /*g*/ 0x00,0x00,0x00,0x00,0x00,0x76,0xcc,0xcc,0xcc,0xcc,0xcc,0x7c,0x0c,0xcc,0x78,0x00, /*h*/ 0x00,0x00,0xe0,0x60,0x60,0x6c,0x76,0x66,0x66,0x66,0x66,0xe6,0x00,0x00,0x00,0x00, /*i*/ 0x00,0x00,0x18,0x18,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /*j*/ 0x00,0x00,0x06,0x06,0x00,0x0e,0x06,0x06,0x06,0x06,0x06,0x06,0x66,0x66,0x3c,0x00, /*k*/ 0x00,0x00,0xe0,0x60,0x60,0x66,0x6c,0x78,0x78,0x6c,0x66,0xe6,0x00,0x00,0x00,0x00, /*l*/ 0x00,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /*m*/ 0x00,0x00,0x00,0x00,0x00,0xe6,0xff,0xdb,0xdb,0xdb,0xdb,0xdb,0x00,0x00,0x00,0x00, /*n*/ 0x00,0x00,0x00,0x00,0x00,0xdc,0x66,0x66,0x66,0x66,0x66,0x66,0x00,0x00,0x00,0x00, /*o*/ 
0x00,0x00,0x00,0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /*p*/ 0x00,0x00,0x00,0x00,0x00,0xdc,0x66,0x66,0x66,0x66,0x66,0x7c,0x60,0x60,0xf0,0x00, /*q*/ 0x00,0x00,0x00,0x00,0x00,0x76,0xcc,0xcc,0xcc,0xcc,0xcc,0x7c,0x0c,0x0c,0x1e,0x00, /*r*/ 0x00,0x00,0x00,0x00,0x00,0xdc,0x76,0x66,0x60,0x60,0x60,0xf0,0x00,0x00,0x00,0x00, /*s*/ 0x00,0x00,0x00,0x00,0x00,0x7c,0xc6,0x60,0x38,0x0c,0xc6,0x7c,0x00,0x00,0x00,0x00, /*t*/ 0x00,0x00,0x10,0x30,0x30,0xfc,0x30,0x30,0x30,0x30,0x36,0x1c,0x00,0x00,0x00,0x00, /*u*/ 0x00,0x00,0x00,0x00,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /*v*/ 0x00,0x00,0x00,0x00,0x00,0xc3,0xc3,0xc3,0xc3,0x66,0x3c,0x18,0x00,0x00,0x00,0x00, /*w*/ 0x00,0x00,0x00,0x00,0x00,0xc3,0xc3,0xc3,0xdb,0xdb,0xff,0x66,0x00,0x00,0x00,0x00, /*x*/ 0x00,0x00,0x00,0x00,0x00,0xc3,0x66,0x3c,0x18,0x3c,0x66,0xc3,0x00,0x00,0x00,0x00, /*y*/ 0x00,0x00,0x00,0x00,0x00,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7e,0x06,0x0c,0xf8,0x00, /*z*/ 0x00,0x00,0x00,0x00,0x00,0xfe,0xcc,0x18,0x30,0x60,0xc6,0xfe,0x00,0x00,0x00,0x00, /*{*/ 0x00,0x00,0x0e,0x18,0x18,0x18,0x70,0x18,0x18,0x18,0x18,0x0e,0x00,0x00,0x00,0x00, /*|*/ 0x00,0x00,0x18,0x18,0x18,0x18,0x00,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, /*}*/ 0x00,0x00,0x70,0x18,0x18,0x18,0x0e,0x18,0x18,0x18,0x18,0x70,0x00,0x00,0x00,0x00, /*~*/ 0x00,0x00,0x76,0xdc,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x10,0x38,0x6c,0xc6,0xc6,0xc6,0xfe,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x3c,0x66,0xc2,0xc0,0xc0,0xc0,0xc2,0x66,0x3c,0x0c,0x06,0x7c,0x00,0x00, /* */ 0x00,0x00,0xcc,0x00,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /* */ 0x00,0x0c,0x18,0x30,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x10,0x38,0x6c,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0xcc,0x00,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /* */ 0x00,0x60,0x30,0x18,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /* 
*/ 0x00,0x38,0x6c,0x38,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x3c,0x66,0x60,0x60,0x66,0x3c,0x0c,0x06,0x3c,0x00,0x00,0x00, /* */ 0x00,0x10,0x38,0x6c,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0xc6,0x00,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x60,0x30,0x18,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x66,0x00,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /* */ 0x00,0x18,0x3c,0x66,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /* */ 0x00,0x60,0x30,0x18,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /* */ 0x00,0xc6,0x00,0x10,0x38,0x6c,0xc6,0xc6,0xfe,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00, /* */ 0x38,0x6c,0x38,0x00,0x38,0x6c,0xc6,0xc6,0xfe,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00, /* */ 0x18,0x30,0x60,0x00,0xfe,0x66,0x60,0x7c,0x60,0x60,0x66,0xfe,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x6e,0x3b,0x1b,0x7e,0xd8,0xdc,0x77,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x3e,0x6c,0xcc,0xcc,0xfe,0xcc,0xcc,0xcc,0xcc,0xce,0x00,0x00,0x00,0x00, /* */ 0x00,0x10,0x38,0x6c,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0xc6,0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x60,0x30,0x18,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x30,0x78,0xcc,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /* */ 0x00,0x60,0x30,0x18,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0xc6,0x00,0x00,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7e,0x06,0x0c,0x78,0x00, /* */ 0x00,0xc6,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0xc6,0x00,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x18,0x18,0x7e,0xc3,0xc0,0xc0,0xc0,0xc3,0x7e,0x18,0x18,0x00,0x00,0x00,0x00, /* */ 0x00,0x38,0x6c,0x64,0x60,0xf0,0x60,0x60,0x60,0x60,0xe6,0xfc,0x00,0x00,0x00,0x00, 
/* */ 0x00,0x00,0xc3,0x66,0x3c,0x18,0xff,0x18,0xff,0x18,0x18,0x18,0x00,0x00,0x00,0x00, /* */ 0x00,0xfc,0x66,0x66,0x7c,0x62,0x66,0x6f,0x66,0x66,0x66,0xf3,0x00,0x00,0x00,0x00, /* */ 0x00,0x0e,0x1b,0x18,0x18,0x18,0x7e,0x18,0x18,0x18,0x18,0x18,0xd8,0x70,0x00,0x00, /* */ 0x00,0x18,0x30,0x60,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /* */ 0x00,0x0c,0x18,0x30,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /* */ 0x00,0x18,0x30,0x60,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x18,0x30,0x60,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x76,0xdc,0x00,0xdc,0x66,0x66,0x66,0x66,0x66,0x66,0x00,0x00,0x00,0x00, /* */ 0x76,0xdc,0x00,0xc6,0xe6,0xf6,0xfe,0xde,0xce,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00, /* */ 0x00,0x3c,0x6c,0x6c,0x3e,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x38,0x6c,0x6c,0x38,0x00,0x7c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x30,0x30,0x00,0x30,0x30,0x60,0xc0,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0xc0,0xc0,0xc0,0xc0,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0x06,0x06,0x06,0x06,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0xc0,0xc0,0xc2,0xc6,0xcc,0x18,0x30,0x60,0xce,0x9b,0x06,0x0c,0x1f,0x00,0x00, /* */ 0x00,0xc0,0xc0,0xc2,0xc6,0xcc,0x18,0x30,0x66,0xce,0x96,0x3e,0x06,0x06,0x00,0x00, /* */ 0x00,0x00,0x18,0x18,0x00,0x18,0x18,0x18,0x3c,0x3c,0x3c,0x18,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x36,0x6c,0xd8,0x6c,0x36,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0xd8,0x6c,0x36,0x6c,0xd8,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44, /* */ 0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa, /* */ 0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77, /* */ 
0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xf8,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x18,0x18,0x18,0x18,0x18,0xf8,0x18,0xf8,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0xf6,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x00,0x00,0x00,0x00,0x00,0xf8,0x18,0xf8,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x36,0x36,0x36,0x36,0x36,0xf6,0x06,0xf6,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x00,0x00,0x00,0x00,0x00,0xfe,0x06,0xf6,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x36,0x36,0x36,0x36,0x36,0xf6,0x06,0xfe,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0xfe,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x18,0x18,0x18,0x18,0x18,0xf8,0x18,0xf8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xf8,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x1f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x1f,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xff,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x18,0x18,0x18,0x18,0x18,0x1f,0x18,0x1f,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x37,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x36,0x36,0x36,0x36,0x36,0x37,0x30,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x3f,0x30,0x37,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* 
*/ 0x36,0x36,0x36,0x36,0x36,0xf7,0x00,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0xff,0x00,0xf7,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x36,0x36,0x36,0x36,0x36,0x37,0x30,0x37,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x00,0x00,0x00,0x00,0x00,0xff,0x00,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x36,0x36,0x36,0x36,0x36,0xf7,0x00,0xf7,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x18,0x18,0x18,0x18,0x18,0xff,0x00,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0xff,0x00,0xff,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x18,0x18,0x18,0x18,0x18,0x1f,0x18,0x1f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x1f,0x18,0x1f,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x3f,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0xff,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x18,0x18,0x18,0x18,0x18,0xff,0x18,0xff,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xf8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x1f,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, /* */ 0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0, /* */ 0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f, /* */ 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x76,0xdc,0xd8,0xd8,0xd8,0xdc,0x76,0x00,0x00,0x00,0x00, 
/* */ 0x00,0x00,0x78,0xcc,0xcc,0xcc,0xd8,0xcc,0xc6,0xc6,0xc6,0xcc,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0xfe,0xc6,0xc6,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0xfe,0x6c,0x6c,0x6c,0x6c,0x6c,0x6c,0x6c,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0xfe,0xc6,0x60,0x30,0x18,0x30,0x60,0xc6,0xfe,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x7e,0xd8,0xd8,0xd8,0xd8,0xd8,0x70,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x66,0x66,0x66,0x66,0x66,0x7c,0x60,0x60,0xc0,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x76,0xdc,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x7e,0x18,0x3c,0x66,0x66,0x66,0x3c,0x18,0x7e,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x38,0x6c,0xc6,0xc6,0xfe,0xc6,0xc6,0x6c,0x38,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x38,0x6c,0xc6,0xc6,0xc6,0x6c,0x6c,0x6c,0x6c,0xee,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x1e,0x30,0x18,0x0c,0x3e,0x66,0x66,0x66,0x66,0x3c,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x7e,0xdb,0xdb,0xdb,0x7e,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x03,0x06,0x7e,0xdb,0xdb,0xf3,0x7e,0x60,0xc0,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x1c,0x30,0x60,0x60,0x7c,0x60,0x60,0x60,0x30,0x1c,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0xfe,0x00,0x00,0xfe,0x00,0x00,0xfe,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x18,0x18,0x7e,0x18,0x18,0x00,0x00,0xff,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x30,0x18,0x0c,0x06,0x0c,0x18,0x30,0x00,0x7e,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x0c,0x18,0x30,0x60,0x30,0x18,0x0c,0x00,0x7e,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x0e,0x1b,0x1b,0x1b,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xd8,0xd8,0xd8,0x70,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x7e,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00, /* */ 
0x00,0x00,0x00,0x00,0x00,0x76,0xdc,0x00,0x76,0xdc,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x38,0x6c,0x6c,0x38,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x0f,0x0c,0x0c,0x0c,0x0c,0x0c,0xec,0x6c,0x6c,0x3c,0x1c,0x00,0x00,0x00,0x00, /* */ 0x00,0xd8,0x6c,0x6c,0x6c,0x6c,0x6c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x70,0xd8,0x30,0x60,0xc8,0xf8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, }; const struct font_desc font_sun_8x16 = { .idx = SUN8x16_IDX, .name = "SUN8x16", .width = 8, .height = 16, .data = fontdata_sun8x16, #ifdef __sparc__ .pref = 10, #else .pref = -1, #endif };
gpl-2.0
PatrikKT/android_kernel_htc_a31ul
drivers/misc/sgi-xp/xpc_sn2.c
13892
69998
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved. */ /* * Cross Partition Communication (XPC) sn2-based functions. * * Architecture specific implementation of common functions. * */ #include <linux/delay.h> #include <linux/slab.h> #include <asm/uncached.h> #include <asm/sn/mspec.h> #include <asm/sn/sn_sal.h> #include "xpc.h" /* * Define the number of u64s required to represent all the C-brick nasids * as a bitmap. The cross-partition kernel modules deal only with * C-brick nasids, thus the need for bitmaps which don't account for * odd-numbered (non C-brick) nasids. */ #define XPC_MAX_PHYSNODES_SN2 (MAX_NUMALINK_NODES / 2) #define XP_NASID_MASK_BYTES_SN2 ((XPC_MAX_PHYSNODES_SN2 + 7) / 8) #define XP_NASID_MASK_WORDS_SN2 ((XPC_MAX_PHYSNODES_SN2 + 63) / 64) /* * Memory for XPC's amo variables is allocated by the MSPEC driver. These * pages are located in the lowest granule. The lowest granule uses 4k pages * for cached references and an alternate TLB handler to never provide a * cacheable mapping for the entire region. This will prevent speculative * reading of cached copies of our lines from being issued which will cause * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64 * amo variables (based on XP_MAX_NPARTITIONS_SN2) to identify the senders of * NOTIFY IRQs, 128 amo variables (based on XP_NASID_MASK_WORDS_SN2) to identify * the senders of ACTIVATE IRQs, 1 amo variable to identify which remote * partitions (i.e., XPCs) consider themselves currently engaged with the * local XPC and 1 amo variable to request partition deactivation. 
*/ #define XPC_NOTIFY_IRQ_AMOS_SN2 0 #define XPC_ACTIVATE_IRQ_AMOS_SN2 (XPC_NOTIFY_IRQ_AMOS_SN2 + \ XP_MAX_NPARTITIONS_SN2) #define XPC_ENGAGED_PARTITIONS_AMO_SN2 (XPC_ACTIVATE_IRQ_AMOS_SN2 + \ XP_NASID_MASK_WORDS_SN2) #define XPC_DEACTIVATE_REQUEST_AMO_SN2 (XPC_ENGAGED_PARTITIONS_AMO_SN2 + 1) /* * Buffer used to store a local copy of portions of a remote partition's * reserved page (either its header and part_nasids mask, or its vars). */ static void *xpc_remote_copy_buffer_base_sn2; static char *xpc_remote_copy_buffer_sn2; static struct xpc_vars_sn2 *xpc_vars_sn2; static struct xpc_vars_part_sn2 *xpc_vars_part_sn2; static int xpc_setup_partitions_sn2(void) { /* nothing needs to be done */ return 0; } static void xpc_teardown_partitions_sn2(void) { /* nothing needs to be done */ } /* SH_IPI_ACCESS shub register value on startup */ static u64 xpc_sh1_IPI_access_sn2; static u64 xpc_sh2_IPI_access0_sn2; static u64 xpc_sh2_IPI_access1_sn2; static u64 xpc_sh2_IPI_access2_sn2; static u64 xpc_sh2_IPI_access3_sn2; /* * Change protections to allow IPI operations. */ static void xpc_allow_IPI_ops_sn2(void) { int node; int nasid; /* !!! The following should get moved into SAL. 
*/ if (is_shub2()) { xpc_sh2_IPI_access0_sn2 = (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0)); xpc_sh2_IPI_access1_sn2 = (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1)); xpc_sh2_IPI_access2_sn2 = (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2)); xpc_sh2_IPI_access3_sn2 = (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3)); for_each_online_node(node) { nasid = cnodeid_to_nasid(node); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), -1UL); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), -1UL); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), -1UL); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), -1UL); } } else { xpc_sh1_IPI_access_sn2 = (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS)); for_each_online_node(node) { nasid = cnodeid_to_nasid(node); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), -1UL); } } } /* * Restrict protections to disallow IPI operations. */ static void xpc_disallow_IPI_ops_sn2(void) { int node; int nasid; /* !!! The following should get moved into SAL. */ if (is_shub2()) { for_each_online_node(node) { nasid = cnodeid_to_nasid(node); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), xpc_sh2_IPI_access0_sn2); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), xpc_sh2_IPI_access1_sn2); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), xpc_sh2_IPI_access2_sn2); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), xpc_sh2_IPI_access3_sn2); } } else { for_each_online_node(node) { nasid = cnodeid_to_nasid(node); HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), xpc_sh1_IPI_access_sn2); } } } /* * The following set of functions are used for the sending and receiving of * IRQs (also known as IPIs). There are two flavors of IRQs, one that is * associated with partition activity (SGI_XPC_ACTIVATE) and the other that * is associated with channel activity (SGI_XPC_NOTIFY). 
*/ static u64 xpc_receive_IRQ_amo_sn2(struct amo *amo) { return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR); } static enum xp_retval xpc_send_IRQ_sn2(struct amo *amo, u64 flag, int nasid, int phys_cpuid, int vector) { int ret = 0; unsigned long irq_flags; local_irq_save(irq_flags); FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag); sn_send_IPI_phys(nasid, phys_cpuid, vector, 0); /* * We must always use the nofault function regardless of whether we * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we * didn't, we'd never know that the other partition is down and would * keep sending IRQs and amos to it until the heartbeat times out. */ ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable), xp_nofault_PIOR_target)); local_irq_restore(irq_flags); return (ret == 0) ? xpSuccess : xpPioReadError; } static struct amo * xpc_init_IRQ_amo_sn2(int index) { struct amo *amo = xpc_vars_sn2->amos_page + index; (void)xpc_receive_IRQ_amo_sn2(amo); /* clear amo variable */ return amo; } /* * Functions associated with SGI_XPC_ACTIVATE IRQ. */ /* * Notify the heartbeat check thread that an activate IRQ has been received. */ static irqreturn_t xpc_handle_activate_IRQ_sn2(int irq, void *dev_id) { unsigned long irq_flags; spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); xpc_activate_IRQ_rcvd++; spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); wake_up_interruptible(&xpc_activate_IRQ_wq); return IRQ_HANDLED; } /* * Flag the appropriate amo variable and send an IRQ to the specified node. 
*/ static void xpc_send_activate_IRQ_sn2(unsigned long amos_page_pa, int from_nasid, int to_nasid, int to_phys_cpuid) { struct amo *amos = (struct amo *)__va(amos_page_pa + (XPC_ACTIVATE_IRQ_AMOS_SN2 * sizeof(struct amo))); (void)xpc_send_IRQ_sn2(&amos[BIT_WORD(from_nasid / 2)], BIT_MASK(from_nasid / 2), to_nasid, to_phys_cpuid, SGI_XPC_ACTIVATE); } static void xpc_send_local_activate_IRQ_sn2(int from_nasid) { unsigned long irq_flags; struct amo *amos = (struct amo *)__va(xpc_vars_sn2->amos_page_pa + (XPC_ACTIVATE_IRQ_AMOS_SN2 * sizeof(struct amo))); /* fake the sending and receipt of an activate IRQ from remote nasid */ FETCHOP_STORE_OP(TO_AMO((u64)&amos[BIT_WORD(from_nasid / 2)].variable), FETCHOP_OR, BIT_MASK(from_nasid / 2)); spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); xpc_activate_IRQ_rcvd++; spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); wake_up_interruptible(&xpc_activate_IRQ_wq); } /* * Functions associated with SGI_XPC_NOTIFY IRQ. */ /* * Check to see if any chctl flags were sent from the specified partition. */ static void xpc_check_for_sent_chctl_flags_sn2(struct xpc_partition *part) { union xpc_channel_ctl_flags chctl; unsigned long irq_flags; chctl.all_flags = xpc_receive_IRQ_amo_sn2(part->sn.sn2. local_chctl_amo_va); if (chctl.all_flags == 0) return; spin_lock_irqsave(&part->chctl_lock, irq_flags); part->chctl.all_flags |= chctl.all_flags; spin_unlock_irqrestore(&part->chctl_lock, irq_flags); dev_dbg(xpc_chan, "received notify IRQ from partid=%d, chctl.all_flags=" "0x%llx\n", XPC_PARTID(part), chctl.all_flags); xpc_wakeup_channel_mgr(part); } /* * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more * than one partition, we use an amo structure per partition to indicate * whether a partition has sent an IRQ or not. If it has, then wake up the * associated kthread to handle it. 
* * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IRQs sent by XPC * running on other partitions. * * Noteworthy Arguments: * * irq - Interrupt ReQuest number. NOT USED. * * dev_id - partid of IRQ's potential sender. */ static irqreturn_t xpc_handle_notify_IRQ_sn2(int irq, void *dev_id) { short partid = (short)(u64)dev_id; struct xpc_partition *part = &xpc_partitions[partid]; DBUG_ON(partid < 0 || partid >= XP_MAX_NPARTITIONS_SN2); if (xpc_part_ref(part)) { xpc_check_for_sent_chctl_flags_sn2(part); xpc_part_deref(part); } return IRQ_HANDLED; } /* * Check to see if xpc_handle_notify_IRQ_sn2() dropped any IRQs on the floor * because the write to their associated amo variable completed after the IRQ * was received. */ static void xpc_check_for_dropped_notify_IRQ_sn2(struct xpc_partition *part) { struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; if (xpc_part_ref(part)) { xpc_check_for_sent_chctl_flags_sn2(part); part_sn2->dropped_notify_IRQ_timer.expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL; add_timer(&part_sn2->dropped_notify_IRQ_timer); xpc_part_deref(part); } } /* * Send a notify IRQ to the remote partition that is associated with the * specified channel. 
 */
static void
xpc_send_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
			char *chctl_flag_string, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	union xpc_channel_ctl_flags chctl = { 0 };
	enum xp_retval ret;

	/* don't bother signalling a partition that is going away */
	if (likely(part->act_state != XPC_P_AS_DEACTIVATING)) {
		chctl.flags[ch->number] = chctl_flag;
		ret = xpc_send_IRQ_sn2(part_sn2->remote_chctl_amo_va,
				       chctl.all_flags,
				       part_sn2->notify_IRQ_nasid,
				       part_sn2->notify_IRQ_phys_cpuid,
				       SGI_XPC_NOTIFY);
		dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
			chctl_flag_string, ch->partid, ch->number, ret);
		if (unlikely(ret != xpSuccess)) {
			/*
			 * irq_flags != NULL means the caller holds ch->lock;
			 * it is dropped around the deactivation and retaken.
			 */
			if (irq_flags != NULL)
				spin_unlock_irqrestore(&ch->lock, *irq_flags);
			XPC_DEACTIVATE_PARTITION(part, ret);
			if (irq_flags != NULL)
				spin_lock_irqsave(&ch->lock, *irq_flags);
		}
	}
}

/* stringify the flag name so it can appear in the debug message */
#define XPC_SEND_NOTIFY_IRQ_SN2(_ch, _ipi_f, _irq_f) \
	xpc_send_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f, _irq_f)

/*
 * Make it look like the remote partition, which is associated with the
 * specified channel, sent us a notify IRQ. This faked IRQ will be handled
 * by xpc_check_for_dropped_notify_IRQ_sn2().
 */
static void
xpc_send_local_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
			      char *chctl_flag_string)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	union xpc_channel_ctl_flags chctl = { 0 };

	/* set the flag in our own local chctl amo, as if remotely sent */
	chctl.flags[ch->number] = chctl_flag;
	FETCHOP_STORE_OP(TO_AMO((u64)&part->sn.sn2.local_chctl_amo_va->
				variable), FETCHOP_OR, chctl.all_flags);
	dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
		chctl_flag_string, ch->partid, ch->number);
}

#define XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(_ch, _ipi_f) \
	xpc_send_local_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f)

/* ask the remote side to close the channel; pass along our reason */
static void
xpc_send_chctl_closerequest_sn2(struct xpc_channel *ch,
				unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;

	args->reason = ch->reason;
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREQUEST, irq_flags);
}

static void
xpc_send_chctl_closereply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
{
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREPLY, irq_flags);
}

/* advertise our entry size and local queue depth in the open request */
static void
xpc_send_chctl_openrequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;

	args->entry_size = ch->entry_size;
	args->local_nentries = ch->local_nentries;
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREQUEST, irq_flags);
}

/* reply with our queue depths and the phys addr of our message queue */
static void
xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;

	args->remote_nentries = ch->remote_nentries;
	args->local_nentries = ch->local_nentries;
	args->local_msgqueue_pa = xp_pa(ch->sn.sn2.local_msgqueue);
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREPLY, irq_flags);
}

static void
xpc_send_chctl_opencomplete_sn2(struct xpc_channel *ch,
				unsigned long *irq_flags)
{
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENCOMPLETE, irq_flags);
}

static void
xpc_send_chctl_msgrequest_sn2(struct xpc_channel *ch)
{
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST, NULL);
}

static void
xpc_send_chctl_local_msgrequest_sn2(struct xpc_channel *ch)
{
	XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST);
}

/* record the physical address of the remote partition's message queue */
static enum xp_retval
xpc_save_remote_msgqueue_pa_sn2(struct xpc_channel *ch,
				unsigned long msgqueue_pa)
{
	ch->sn.sn2.remote_msgqueue_pa = msgqueue_pa;
	return xpSuccess;
}

/*
 * This next set of functions are used to keep track of when a partition is
 * potentially engaged in accessing memory belonging to another partition.
 */

static void
xpc_indicate_partition_engaged_sn2(struct xpc_partition *part)
{
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
					     (XPC_ENGAGED_PARTITIONS_AMO_SN2 *
					     sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* set bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
			 BIT(sn_partition_id));

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);
}

static void
xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
					     (XPC_ENGAGED_PARTITIONS_AMO_SN2 *
					     sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* clear bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
			 ~BIT(sn_partition_id));

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);

	/*
	 * Send activate IRQ to get other side to see that we've cleared our
	 * bit in their engaged partitions amo.
	 */
	xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
				  cnodeid_to_nasid(0),
				  part_sn2->activate_IRQ_nasid,
				  part_sn2->activate_IRQ_phys_cpuid);
}

/* unilaterally mark the given partition as no longer engaged with us */
static void
xpc_assume_partition_disengaged_sn2(short partid)
{
	struct amo *amo = xpc_vars_sn2->amos_page +
			  XPC_ENGAGED_PARTITIONS_AMO_SN2;

	/* clear bit(s) based on partid mask in our partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
			 ~BIT(partid));
}

static int
xpc_partition_engaged_sn2(short partid)
{
	struct amo *amo = xpc_vars_sn2->amos_page +
			  XPC_ENGAGED_PARTITIONS_AMO_SN2;

	/* our partition's amo variable ANDed with partid mask */
	return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
		BIT(partid)) != 0;
}

static int
xpc_any_partition_engaged_sn2(void)
{
	struct amo *amo = xpc_vars_sn2->amos_page +
			  XPC_ENGAGED_PARTITIONS_AMO_SN2;

	/* our partition's amo variable */
	return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) != 0;
}

/* original protection values for each node */
static u64 xpc_prot_vec_sn2[MAX_NUMNODES];

/*
 * Change protections to allow amo operations on non-Shub 1.1 systems.
 */
static enum xp_retval
xpc_allow_amo_ops_sn2(struct amo *amos_page)
{
	enum xp_retval ret = xpSuccess;

	/*
	 * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST
	 * collides with memory operations. On those systems we call
	 * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead.
	 */
	if (!enable_shub_wars_1_1())
		ret = xp_expand_memprotect(ia64_tpa((u64)amos_page),
					   PAGE_SIZE);
	return ret;
}

/*
 * Change protections to allow amo operations on Shub 1.1 systems.
 */
static void
xpc_allow_amo_ops_shub_wars_1_1_sn2(void)
{
	int node;
	int nasid;

	if (!enable_shub_wars_1_1())
		return;

	for_each_online_node(node) {
		nasid = cnodeid_to_nasid(node);
		/* save current protection values */
		xpc_prot_vec_sn2[node] =
		    (u64)HUB_L((u64 *)GLOBAL_MMR_ADDR(nasid,
						  SH1_MD_DQLP_MMR_DIR_PRIVEC0));
		/* open up everything */
		HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
					     SH1_MD_DQLP_MMR_DIR_PRIVEC0),
		      -1UL);
		HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
					     SH1_MD_DQRP_MMR_DIR_PRIVEC0),
		      -1UL);
	}
}

/* translate the SAL status from sn_partition_reserved_page_pa() */
static enum xp_retval
xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie,
				   unsigned long *rp_pa, size_t *len)
{
	s64 status;
	enum xp_retval ret;

	status = sn_partition_reserved_page_pa((u64)buf, cookie,
					       (u64 *)rp_pa, (u64 *)len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

	return ret;
}

/*
 * Set up the sn2-specific portion of the reserved page: the amos page,
 * the cross-partition variables and the per-partition variables array.
 * Returns 0 on success or a negative errno.
 */
static int
xpc_setup_rsvd_page_sn2(struct xpc_rsvd_page *rp)
{
	struct amo *amos_page;
	int i;
	int ret;

	xpc_vars_sn2 = XPC_RP_VARS(rp);

	rp->sn.sn2.vars_pa = xp_pa(xpc_vars_sn2);

	/* vars_part array follows immediately after vars */
	xpc_vars_part_sn2 = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) +
							 XPC_RP_VARS_SIZE);

	/*
	 * Before clearing xpc_vars_sn2, see if a page of amos had been
	 * previously allocated. If not we'll need to allocate one and set
	 * permissions so that cross-partition amos are allowed.
	 *
	 * The allocated amo page needs MCA reporting to remain disabled after
	 * XPC has unloaded. To make this work, we keep a copy of the pointer
	 * to this page (i.e., amos_page) in the struct xpc_vars_sn2 structure,
	 * which is pointed to by the reserved page, and re-use that saved copy
	 * on subsequent loads of XPC. This amo page is never freed, and its
	 * memory protections are never restricted.
	 */
	amos_page = xpc_vars_sn2->amos_page;
	if (amos_page == NULL) {
		amos_page = (struct amo *)TO_AMO(uncached_alloc_page(0, 1));
		if (amos_page == NULL) {
			dev_err(xpc_part, "can't allocate page of amos\n");
			return -ENOMEM;
		}

		/*
		 * Open up amo-R/W to cpu. This is done on Shub 1.1 systems
		 * when xpc_allow_amo_ops_shub_wars_1_1_sn2() is called.
		 */
		ret = xpc_allow_amo_ops_sn2(amos_page);
		if (ret != xpSuccess) {
			dev_err(xpc_part, "can't allow amo operations\n");
			uncached_free_page(__IA64_UNCACHED_OFFSET |
					   TO_PHYS((u64)amos_page), 1);
			return -EPERM;
		}
	}

	/* clear xpc_vars_sn2 */
	memset(xpc_vars_sn2, 0, sizeof(struct xpc_vars_sn2));

	xpc_vars_sn2->version = XPC_V_VERSION;
	xpc_vars_sn2->activate_IRQ_nasid = cpuid_to_nasid(0);
	xpc_vars_sn2->activate_IRQ_phys_cpuid = cpu_physical_id(0);
	xpc_vars_sn2->vars_part_pa = xp_pa(xpc_vars_part_sn2);
	xpc_vars_sn2->amos_page_pa = ia64_tpa((u64)amos_page);
	xpc_vars_sn2->amos_page = amos_page;	/* save for next load of XPC */

	/* clear xpc_vars_part_sn2 */
	memset((u64 *)xpc_vars_part_sn2, 0, sizeof(struct xpc_vars_part_sn2) *
	       XP_MAX_NPARTITIONS_SN2);

	/* initialize the activate IRQ related amo variables */
	for (i = 0; i < xpc_nasid_mask_nlongs; i++)
		(void)xpc_init_IRQ_amo_sn2(XPC_ACTIVATE_IRQ_AMOS_SN2 + i);

	/* initialize the engaged remote partitions related amo variables */
	(void)xpc_init_IRQ_amo_sn2(XPC_ENGAGED_PARTITIONS_AMO_SN2);
	(void)xpc_init_IRQ_amo_sn2(XPC_DEACTIVATE_REQUEST_AMO_SN2);

	return 0;
}

/* is the given partition in the set we are heartbeating to? */
static int
xpc_hb_allowed_sn2(short partid, void *heartbeating_to_mask)
{
	return test_bit(partid, heartbeating_to_mask);
}

static void
xpc_allow_hb_sn2(short partid)
{
	DBUG_ON(xpc_vars_sn2 == NULL);
	set_bit(partid, xpc_vars_sn2->heartbeating_to_mask);
}

static void
xpc_disallow_hb_sn2(short partid)
{
	DBUG_ON(xpc_vars_sn2 == NULL);
	clear_bit(partid, xpc_vars_sn2->heartbeating_to_mask);
}

static void
xpc_disallow_all_hbs_sn2(void)
{
	DBUG_ON(xpc_vars_sn2 == NULL);
	bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, xp_max_npartitions);
}

static void
xpc_increment_heartbeat_sn2(void)
{
	xpc_vars_sn2->heartbeat++;
}

/* bump the heartbeat one last time, then mark it offline */
static void
xpc_offline_heartbeat_sn2(void)
{
	xpc_increment_heartbeat_sn2();
	xpc_vars_sn2->heartbeat_offline = 1;
}

static void
xpc_online_heartbeat_sn2(void)
{
	xpc_increment_heartbeat_sn2();
	xpc_vars_sn2->heartbeat_offline = 0;
}

static void
xpc_heartbeat_init_sn2(void)
{
	DBUG_ON(xpc_vars_sn2 == NULL);

	bitmap_zero(xpc_vars_sn2->heartbeating_to_mask,
		    XP_MAX_NPARTITIONS_SN2);
	xpc_online_heartbeat_sn2();
}

static void
xpc_heartbeat_exit_sn2(void)
{
	xpc_offline_heartbeat_sn2();
}

/* pull the remote vars and decide whether its heartbeat is still alive */
static enum xp_retval
xpc_get_remote_heartbeat_sn2(struct xpc_partition *part)
{
	struct xpc_vars_sn2 *remote_vars;
	enum xp_retval ret;

	remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2;

	/* pull the remote vars structure that contains the heartbeat */
	ret = xp_remote_memcpy(xp_pa(remote_vars),
			       part->sn.sn2.remote_vars_pa,
			       XPC_RP_VARS_SIZE);
	if (ret != xpSuccess)
		return ret;

	dev_dbg(xpc_part, "partid=%d, heartbeat=%lld, last_heartbeat=%lld, "
		"heartbeat_offline=%lld, HB_mask[0]=0x%lx\n", XPC_PARTID(part),
		remote_vars->heartbeat, part->last_heartbeat,
		remote_vars->heartbeat_offline,
		remote_vars->heartbeating_to_mask[0]);

	/*
	 * No heartbeat if the count hasn't advanced (and the remote side
	 * isn't marked offline), or if it isn't heartbeating to us at all.
	 */
	if ((remote_vars->heartbeat == part->last_heartbeat &&
	     !remote_vars->heartbeat_offline) ||
	    !xpc_hb_allowed_sn2(sn_partition_id,
				remote_vars->heartbeating_to_mask)) {
		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = remote_vars->heartbeat;
	}

	return ret;
}

/*
 * Get a copy of the remote partition's XPC variables from the reserved page.
 *
 * remote_vars points to a buffer that is cacheline aligned for BTE copies and
 * assumed to be of size XPC_RP_VARS_SIZE.
 */
static enum xp_retval
xpc_get_remote_vars_sn2(unsigned long remote_vars_pa,
			struct xpc_vars_sn2 *remote_vars)
{
	enum xp_retval ret;

	if (remote_vars_pa == 0)
		return xpVarsNotSet;

	/* pull over the cross partition variables */
	ret = xp_remote_memcpy(xp_pa(remote_vars), remote_vars_pa,
			       XPC_RP_VARS_SIZE);
	if (ret != xpSuccess)
		return ret;

	/* a major version mismatch means we can't interpret the contents */
	if (XPC_VERSION_MAJOR(remote_vars->version) !=
	    XPC_VERSION_MAJOR(XPC_V_VERSION)) {
		return xpBadVersion;
	}

	return xpSuccess;
}

/* activation is requested by faking an activate IRQ from the nasid */
static void
xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp,
				     unsigned long remote_rp_pa, int nasid)
{
	xpc_send_local_activate_IRQ_sn2(nasid);
}

static void
xpc_request_partition_reactivation_sn2(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_sn2(part->sn.sn2.activate_IRQ_nasid);
}

static void
xpc_request_partition_deactivation_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
					     (XPC_DEACTIVATE_REQUEST_AMO_SN2 *
					     sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* set bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
			 BIT(sn_partition_id));

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);

	/*
	 * Send activate IRQ to get other side to see that we've set our
	 * bit in their deactivate request amo.
	 */
	xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
				  cnodeid_to_nasid(0),
				  part_sn2->activate_IRQ_nasid,
				  part_sn2->activate_IRQ_phys_cpuid);
}

static void
xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part)
{
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
					     (XPC_DEACTIVATE_REQUEST_AMO_SN2 *
					     sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* clear bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
			 ~BIT(sn_partition_id));

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);
}

static int
xpc_partition_deactivation_requested_sn2(short partid)
{
	struct amo *amo = xpc_vars_sn2->amos_page +
			  XPC_DEACTIVATE_REQUEST_AMO_SN2;

	/* our partition's amo variable ANDed with partid mask */
	return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
		BIT(partid)) != 0;
}

/*
 * Update the remote partition's info.
 */
static void
xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
			      unsigned long *remote_rp_ts_jiffies,
			      unsigned long remote_rp_pa,
			      unsigned long remote_vars_pa,
			      struct xpc_vars_sn2 *remote_vars)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;

	part->remote_rp_version = remote_rp_version;
	dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n",
		part->remote_rp_version);

	part->remote_rp_ts_jiffies = *remote_rp_ts_jiffies;
	dev_dbg(xpc_part, " remote_rp_ts_jiffies = 0x%016lx\n",
		part->remote_rp_ts_jiffies);

	part->remote_rp_pa = remote_rp_pa;
	dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);

	part_sn2->remote_vars_pa = remote_vars_pa;
	dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n",
		part_sn2->remote_vars_pa);

	/* minus one so the next observed heartbeat counts as progress */
	part->last_heartbeat = remote_vars->heartbeat - 1;
	dev_dbg(xpc_part, " last_heartbeat = 0x%016llx\n",
		part->last_heartbeat);

	part_sn2->remote_vars_part_pa = remote_vars->vars_part_pa;
	dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n",
		part_sn2->remote_vars_part_pa);

	part_sn2->activate_IRQ_nasid = remote_vars->activate_IRQ_nasid;
	dev_dbg(xpc_part, " activate_IRQ_nasid = 0x%x\n",
		part_sn2->activate_IRQ_nasid);

	part_sn2->activate_IRQ_phys_cpuid =
	    remote_vars->activate_IRQ_phys_cpuid;
	dev_dbg(xpc_part, " activate_IRQ_phys_cpuid = 0x%x\n",
		part_sn2->activate_IRQ_phys_cpuid);

	part_sn2->remote_amos_page_pa = remote_vars->amos_page_pa;
	dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n",
		part_sn2->remote_amos_page_pa);

	part_sn2->remote_vars_version = remote_vars->version;
	dev_dbg(xpc_part, " remote_vars_version = 0x%x\n",
		part_sn2->remote_vars_version);
}

/*
 * Prior code has determined the nasid which generated a activate IRQ.
 * Inspect that nasid to determine if its partition needs to be activated
 * or deactivated.
 *
 * A partition is considered "awaiting activation" if our partition
 * flags indicate it is not active and it has a heartbeat.
  A
 * partition is considered "awaiting deactivation" if our partition
 * flags indicate it is active but it has no heartbeat or it is not
 * sending its heartbeat to us.
 *
 * To determine the heartbeat, the remote nasid must have a properly
 * initialized reserved page.
 */
static void
xpc_identify_activate_IRQ_req_sn2(int nasid)
{
	struct xpc_rsvd_page *remote_rp;
	struct xpc_vars_sn2 *remote_vars;
	unsigned long remote_rp_pa;
	unsigned long remote_vars_pa;
	int remote_rp_version;
	int reactivate = 0;
	unsigned long remote_rp_ts_jiffies = 0;
	short partid;
	struct xpc_partition *part;
	struct xpc_partition_sn2 *part_sn2;
	enum xp_retval ret;

	/* pull over the reserved page structure */

	remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer_sn2;

	ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
	if (ret != xpSuccess) {
		dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
			 "which sent interrupt, reason=%d\n", nasid, ret);
		return;
	}

	remote_vars_pa = remote_rp->sn.sn2.vars_pa;
	remote_rp_version = remote_rp->version;
	remote_rp_ts_jiffies = remote_rp->ts_jiffies;

	partid = remote_rp->SAL_partid;
	part = &xpc_partitions[partid];
	part_sn2 = &part->sn.sn2;

	/* pull over the cross partition variables */

	/* NOTE: remote_vars reuses the same copy buffer as remote_rp above */
	remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2;

	ret = xpc_get_remote_vars_sn2(remote_vars_pa, remote_vars);
	if (ret != xpSuccess) {
		dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
			 "which sent interrupt, reason=%d\n", nasid, ret);

		XPC_DEACTIVATE_PARTITION(part, ret);
		return;
	}

	part->activate_IRQ_rcvd++;

	dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
		"%lld:0x%lx\n", (int)nasid, (int)partid,
		part->activate_IRQ_rcvd,
		remote_vars->heartbeat, remote_vars->heartbeating_to_mask[0]);

	if (xpc_partition_disengaged(part) &&
	    part->act_state == XPC_P_AS_INACTIVE) {

		xpc_update_partition_info_sn2(part, remote_rp_version,
					      &remote_rp_ts_jiffies,
					      remote_rp_pa, remote_vars_pa,
					      remote_vars);

		if (xpc_partition_deactivation_requested_sn2(partid)) {
			/*
			 * Other side is waiting on us to deactivate even though
			 * we already have.
			 */
			return;
		}

		xpc_activate_partition(part);
		return;
	}

	DBUG_ON(part->remote_rp_version == 0);
	DBUG_ON(part_sn2->remote_vars_version == 0);

	if (remote_rp_ts_jiffies != part->remote_rp_ts_jiffies) {

		/* the other side rebooted */

		DBUG_ON(xpc_partition_engaged_sn2(partid));
		DBUG_ON(xpc_partition_deactivation_requested_sn2(partid));

		xpc_update_partition_info_sn2(part, remote_rp_version,
					      &remote_rp_ts_jiffies,
					      remote_rp_pa, remote_vars_pa,
					      remote_vars);
		reactivate = 1;
	}

	if (part->disengage_timeout > 0 && !xpc_partition_disengaged(part)) {
		/* still waiting on other side to disengage from us */
		return;
	}

	if (reactivate)
		XPC_DEACTIVATE_PARTITION(part, xpReactivating);
	else if (xpc_partition_deactivation_requested_sn2(partid))
		XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown);
}

/*
 * Loop through the activation amo variables and process any bits
 * which are set. Each bit indicates a nasid sending a partition
 * activation or deactivation request.
 *
 * Return #of IRQs detected.
 */
int
xpc_identify_activate_IRQ_sender_sn2(void)
{
	int l;
	int b;
	unsigned long nasid_mask_long;
	u64 nasid;		/* remote nasid */
	int n_IRQs_detected = 0;
	struct amo *act_amos;

	act_amos = xpc_vars_sn2->amos_page + XPC_ACTIVATE_IRQ_AMOS_SN2;

	/* scan through activate amo variables looking for non-zero entries */
	for (l = 0; l < xpc_nasid_mask_nlongs; l++) {

		if (xpc_exiting)
			break;

		nasid_mask_long = xpc_receive_IRQ_amo_sn2(&act_amos[l]);

		b = find_first_bit(&nasid_mask_long, BITS_PER_LONG);
		if (b >= BITS_PER_LONG) {
			/* no IRQs from nasids in this amo variable */
			continue;
		}

		dev_dbg(xpc_part, "amo[%d] gave back 0x%lx\n", l,
			nasid_mask_long);

		/*
		 * If this nasid has been added to the machine since
		 * our partition was reset, this will retain the
		 * remote nasid in our reserved pages machine mask.
		 * This is used in the event of module reload.
		 */
		xpc_mach_nasids[l] |= nasid_mask_long;

		/* locate the nasid(s) which sent interrupts */

		do {
			n_IRQs_detected++;
			/* bit index -> nasid: the * 2 mirrors the / 2 used
			 * when the sender set the bit */
			nasid = (l * BITS_PER_LONG + b) * 2;
			dev_dbg(xpc_part, "interrupt from nasid %lld\n", nasid);
			xpc_identify_activate_IRQ_req_sn2(nasid);

			b = find_next_bit(&nasid_mask_long, BITS_PER_LONG,
					  b + 1);
		} while (b < BITS_PER_LONG);
	}

	return n_IRQs_detected;
}

/* compare IRQs detected against the count bumped by the IRQ handler */
static void
xpc_process_activate_IRQ_rcvd_sn2(void)
{
	unsigned long irq_flags;
	int n_IRQs_expected;
	int n_IRQs_detected;

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	n_IRQs_expected = xpc_activate_IRQ_rcvd;
	xpc_activate_IRQ_rcvd = 0;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	n_IRQs_detected = xpc_identify_activate_IRQ_sender_sn2();
	if (n_IRQs_detected < n_IRQs_expected) {
		/* retry once to help avoid missing amo */
		(void)xpc_identify_activate_IRQ_sender_sn2();
	}
}

/*
 * Setup the channel structures that are sn2 specific.
 */
static enum xp_retval
xpc_setup_ch_structures_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	struct xpc_channel_sn2 *ch_sn2;
	enum xp_retval retval;
	int ret;
	int cpuid;
	int ch_number;
	struct timer_list *timer;
	short partid = XPC_PARTID(part);

	/* allocate all the required GET/PUT values */

	part_sn2->local_GPs =
	    xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL,
					  &part_sn2->local_GPs_base);
	if (part_sn2->local_GPs == NULL) {
		dev_err(xpc_chan, "can't get memory for local get/put "
			"values\n");
		return xpNoMemory;
	}

	part_sn2->remote_GPs =
	    xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL,
					  &part_sn2->remote_GPs_base);
	if (part_sn2->remote_GPs == NULL) {
		dev_err(xpc_chan, "can't get memory for remote get/put "
			"values\n");
		retval = xpNoMemory;
		goto out_1;
	}

	part_sn2->remote_GPs_pa = 0;

	/* allocate all the required open and close args */

	part_sn2->local_openclose_args =
	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
					  &part_sn2->
					  local_openclose_args_base);
	if (part_sn2->local_openclose_args == NULL) {
		dev_err(xpc_chan, "can't get memory for local connect args\n");
		retval = xpNoMemory;
		goto out_2;
	}

	part_sn2->remote_openclose_args_pa = 0;

	part_sn2->local_chctl_amo_va = xpc_init_IRQ_amo_sn2(partid);

	part_sn2->notify_IRQ_nasid = 0;
	part_sn2->notify_IRQ_phys_cpuid = 0;
	part_sn2->remote_chctl_amo_va = NULL;

	/* SGI_XPC_NOTIFY is shared; partid is passed as the dev_id cookie */
	sprintf(part_sn2->notify_IRQ_owner, "xpc%02d", partid);
	ret = request_irq(SGI_XPC_NOTIFY, xpc_handle_notify_IRQ_sn2,
			  IRQF_SHARED, part_sn2->notify_IRQ_owner,
			  (void *)(u64)partid);
	if (ret != 0) {
		dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
			"errno=%d\n", -ret);
		retval = xpLackOfResources;
		goto out_3;
	}

	/* Setup a timer to check for dropped notify IRQs */
	timer = &part_sn2->dropped_notify_IRQ_timer;
	init_timer(timer);
	timer->function =
	    (void (*)(unsigned long))xpc_check_for_dropped_notify_IRQ_sn2;
	timer->data = (unsigned long)part;
	timer->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
	add_timer(timer);

	/* wire each channel's sn2 fields to its slots in the arrays above */
	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_sn2 = &part->channels[ch_number].sn.sn2;

		ch_sn2->local_GP = &part_sn2->local_GPs[ch_number];
		ch_sn2->local_openclose_args =
		    &part_sn2->local_openclose_args[ch_number];

		mutex_init(&ch_sn2->msg_to_pull_mutex);
	}

	/*
	 * Setup the per partition specific variables required by the
	 * remote partition to establish channel connections with us.
	 *
	 * The setting of the magic # indicates that these per partition
	 * specific variables are ready to be used.
	 */
	xpc_vars_part_sn2[partid].GPs_pa = xp_pa(part_sn2->local_GPs);
	xpc_vars_part_sn2[partid].openclose_args_pa =
	    xp_pa(part_sn2->local_openclose_args);
	xpc_vars_part_sn2[partid].chctl_amo_pa =
	    xp_pa(part_sn2->local_chctl_amo_va);
	cpuid = raw_smp_processor_id();	/* any CPU in this partition will do */
	xpc_vars_part_sn2[partid].notify_IRQ_nasid = cpuid_to_nasid(cpuid);
	xpc_vars_part_sn2[partid].notify_IRQ_phys_cpuid =
	    cpu_physical_id(cpuid);
	xpc_vars_part_sn2[partid].nchannels = part->nchannels;
	xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC1_SN2;

	return xpSuccess;

	/* setup of ch structures failed */
out_3:
	kfree(part_sn2->local_openclose_args_base);
	part_sn2->local_openclose_args = NULL;
out_2:
	kfree(part_sn2->remote_GPs_base);
	part_sn2->remote_GPs = NULL;
out_1:
	kfree(part_sn2->local_GPs_base);
	part_sn2->local_GPs = NULL;
	return retval;
}

/*
 * Teardown the channel structures that are sn2 specific.
 */
static void
xpc_teardown_ch_structures_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	short partid = XPC_PARTID(part);

	/*
	 * Indicate that the variables specific to the remote partition are no
	 * longer available for its use.
	 */
	xpc_vars_part_sn2[partid].magic = 0;

	/* in case we've still got outstanding timers registered... */
	del_timer_sync(&part_sn2->dropped_notify_IRQ_timer);
	free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);

	/* free in reverse order of allocation; bases hold the raw pointers */
	kfree(part_sn2->local_openclose_args_base);
	part_sn2->local_openclose_args = NULL;
	kfree(part_sn2->remote_GPs_base);
	part_sn2->remote_GPs = NULL;
	kfree(part_sn2->local_GPs_base);
	part_sn2->local_GPs = NULL;
	part_sn2->local_chctl_amo_va = NULL;
}

/*
 * Create a wrapper that hides the underlying mechanism for pulling a cacheline
 * (or multiple cachelines) from a remote partition.
 *
 * src_pa must be a cacheline aligned physical address on the remote partition.
 * dst must be a cacheline aligned virtual address on this partition.
 * cnt must be cacheline sized
 */
/* ???
 Replace this function by call to xp_remote_memcpy() or bte_copy()? */
static enum xp_retval
xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst,
			       const unsigned long src_pa, size_t cnt)
{
	enum xp_retval ret;

	DBUG_ON(src_pa != L1_CACHE_ALIGN(src_pa));
	DBUG_ON((unsigned long)dst != L1_CACHE_ALIGN((unsigned long)dst));
	DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));

	/* don't pull from a partition that is on its way down */
	if (part->act_state == XPC_P_AS_DEACTIVATING)
		return part->reason;

	ret = xp_remote_memcpy(xp_pa(dst), src_pa, cnt);
	if (ret != xpSuccess) {
		dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed,"
			" ret=%d\n", XPC_PARTID(part), ret);
	}
	return ret;
}

/*
 * Pull the remote per partition specific variables from the specified
 * partition.
 */
static enum xp_retval
xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	/* over-sized so an aligned entry can be carved out of it */
	u8 buffer[L1_CACHE_BYTES * 2];
	struct xpc_vars_part_sn2 *pulled_entry_cacheline =
	    (struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer);
	struct xpc_vars_part_sn2 *pulled_entry;
	unsigned long remote_entry_cacheline_pa;
	unsigned long remote_entry_pa;
	short partid = XPC_PARTID(part);
	enum xp_retval ret;

	/* pull the cacheline that contains the variables we're interested in */

	DBUG_ON(part_sn2->remote_vars_part_pa !=
		L1_CACHE_ALIGN(part_sn2->remote_vars_part_pa));
	DBUG_ON(sizeof(struct xpc_vars_part_sn2) != L1_CACHE_BYTES / 2);

	remote_entry_pa = part_sn2->remote_vars_part_pa +
	    sn_partition_id * sizeof(struct xpc_vars_part_sn2);

	remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));

	pulled_entry = (struct xpc_vars_part_sn2 *)((u64)pulled_entry_cacheline
						    + (remote_entry_pa &
						    (L1_CACHE_BYTES - 1)));

	ret = xpc_pull_remote_cachelines_sn2(part, pulled_entry_cacheline,
					     remote_entry_cacheline_pa,
					     L1_CACHE_BYTES);
	if (ret != xpSuccess) {
		dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
			"partition %d, ret=%d\n", partid, ret);
		return ret;
	}

	/* see if they've been set up yet */

	if (pulled_entry->magic != XPC_VP_MAGIC1_SN2 &&
	    pulled_entry->magic != XPC_VP_MAGIC2_SN2) {

		if (pulled_entry->magic != 0) {
			dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
				"partition %d has bad magic value (=0x%llx)\n",
				partid, sn_partition_id, pulled_entry->magic);
			return xpBadMagic;
		}

		/* they've not been initialized yet */
		return xpRetry;
	}

	if (xpc_vars_part_sn2[partid].magic == XPC_VP_MAGIC1_SN2) {

		/* validate the variables */

		if (pulled_entry->GPs_pa == 0 ||
		    pulled_entry->openclose_args_pa == 0 ||
		    pulled_entry->chctl_amo_pa == 0) {

			dev_err(xpc_chan, "partition %d's XPC vars_part for "
				"partition %d are not valid\n", partid,
				sn_partition_id);
			return xpInvalidAddress;
		}

		/* the variables we imported look to be valid */

		part_sn2->remote_GPs_pa = pulled_entry->GPs_pa;
		part_sn2->remote_openclose_args_pa =
		    pulled_entry->openclose_args_pa;
		part_sn2->remote_chctl_amo_va =
		    (struct amo *)__va(pulled_entry->chctl_amo_pa);
		part_sn2->notify_IRQ_nasid = pulled_entry->notify_IRQ_nasid;
		part_sn2->notify_IRQ_phys_cpuid =
		    pulled_entry->notify_IRQ_phys_cpuid;

		if (part->nchannels > pulled_entry->nchannels)
			part->nchannels = pulled_entry->nchannels;

		/* let the other side know that we've pulled their variables */

		xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC2_SN2;
	}

	/* MAGIC1 means the remote side hasn't pulled OUR variables yet */
	if (pulled_entry->magic == XPC_VP_MAGIC1_SN2)
		return xpRetry;

	return xpSuccess;
}

/*
 * Establish first contact with the remote partititon. This involves pulling
 * the XPC per partition variables from the remote partition and waiting for
 * the remote partition to pull ours.
 */
static enum xp_retval
xpc_make_first_contact_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	enum xp_retval ret;

	/*
	 * Register the remote partition's amos with SAL so it can handle
	 * and cleanup errors within that address range should the remote
	 * partition go down. We don't unregister this range because it is
	 * difficult to tell when outstanding writes to the remote partition
	 * are finished and thus when it is safe to unregister. This should
	 * not result in wasted space in the SAL xp_addr_region table because
	 * we should get the same page for remote_amos_page_pa after module
	 * reloads and system reboots.
	 */
	if (sn_register_xp_addr_region(part_sn2->remote_amos_page_pa,
				       PAGE_SIZE, 1) < 0) {
		dev_warn(xpc_part, "xpc_activating(%d) failed to register "
			 "xp_addr region\n", XPC_PARTID(part));

		ret = xpPhysAddrRegFailed;
		XPC_DEACTIVATE_PARTITION(part, ret);
		return ret;
	}

	/*
	 * Send activate IRQ to get other side to activate if they've not
	 * already begun to do so.
	 */
	xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
				  cnodeid_to_nasid(0),
				  part_sn2->activate_IRQ_nasid,
				  part_sn2->activate_IRQ_phys_cpuid);

	/* poll until the remote side's variables have been pulled */
	while ((ret = xpc_pull_remote_vars_part_sn2(part)) != xpSuccess) {
		if (ret != xpRetry) {
			XPC_DEACTIVATE_PARTITION(part, ret);
			return ret;
		}

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

/*
 * Get the chctl flags and pull the openclose args and/or remote GPs as needed.
 */
static u64
xpc_get_chctl_all_flags_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;
	enum xp_retval ret;

	/*
	 * See if there are any chctl flags to be handled.
	 */

	/* take a snapshot and clear the accumulated flags under the lock */
	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	if (xpc_any_openclose_chctl_flags_set(&chctl)) {
		ret = xpc_pull_remote_cachelines_sn2(part, part->
						     remote_openclose_args,
						     part_sn2->
						     remote_openclose_args_pa,
						     XPC_OPENCLOSE_ARGS_SIZE);
		if (ret != xpSuccess) {
			XPC_DEACTIVATE_PARTITION(part, ret);

			dev_dbg(xpc_chan, "failed to pull openclose args from "
				"partition %d, ret=%d\n", XPC_PARTID(part),
				ret);

			/* don't bother processing chctl flags anymore */
			chctl.all_flags = 0;
		}
	}

	if (xpc_any_msg_chctl_flags_set(&chctl)) {
		ret = xpc_pull_remote_cachelines_sn2(part,
						     part_sn2->remote_GPs,
						     part_sn2->remote_GPs_pa,
						     XPC_GP_SIZE);
		if (ret != xpSuccess) {
			XPC_DEACTIVATE_PARTITION(part, ret);

			dev_dbg(xpc_chan, "failed to pull GPs from partition "
				"%d, ret=%d\n", XPC_PARTID(part), ret);

			/* don't bother processing chctl flags anymore */
			chctl.all_flags = 0;
		}
	}

	return chctl.all_flags;
}

/*
 * Allocate the local message queue and the notify queue.
*/ static enum xp_retval xpc_allocate_local_msgqueue_sn2(struct xpc_channel *ch) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; unsigned long irq_flags; int nentries; size_t nbytes; for (nentries = ch->local_nentries; nentries > 0; nentries--) { nbytes = nentries * ch->entry_size; ch_sn2->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL, &ch_sn2->local_msgqueue_base); if (ch_sn2->local_msgqueue == NULL) continue; nbytes = nentries * sizeof(struct xpc_notify_sn2); ch_sn2->notify_queue = kzalloc(nbytes, GFP_KERNEL); if (ch_sn2->notify_queue == NULL) { kfree(ch_sn2->local_msgqueue_base); ch_sn2->local_msgqueue = NULL; continue; } spin_lock_irqsave(&ch->lock, irq_flags); if (nentries < ch->local_nentries) { dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, " "partid=%d, channel=%d\n", nentries, ch->local_nentries, ch->partid, ch->number); ch->local_nentries = nentries; } spin_unlock_irqrestore(&ch->lock, irq_flags); return xpSuccess; } dev_dbg(xpc_chan, "can't get memory for local message queue and notify " "queue, partid=%d, channel=%d\n", ch->partid, ch->number); return xpNoMemory; } /* * Allocate the cached remote message queue. 
*/ static enum xp_retval xpc_allocate_remote_msgqueue_sn2(struct xpc_channel *ch) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; unsigned long irq_flags; int nentries; size_t nbytes; DBUG_ON(ch->remote_nentries <= 0); for (nentries = ch->remote_nentries; nentries > 0; nentries--) { nbytes = nentries * ch->entry_size; ch_sn2->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL, &ch_sn2-> remote_msgqueue_base); if (ch_sn2->remote_msgqueue == NULL) continue; spin_lock_irqsave(&ch->lock, irq_flags); if (nentries < ch->remote_nentries) { dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, " "partid=%d, channel=%d\n", nentries, ch->remote_nentries, ch->partid, ch->number); ch->remote_nentries = nentries; } spin_unlock_irqrestore(&ch->lock, irq_flags); return xpSuccess; } dev_dbg(xpc_chan, "can't get memory for cached remote message queue, " "partid=%d, channel=%d\n", ch->partid, ch->number); return xpNoMemory; } /* * Allocate message queues and other stuff associated with a channel. * * Note: Assumes all of the channel sizes are filled in. */ static enum xp_retval xpc_setup_msg_structures_sn2(struct xpc_channel *ch) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; enum xp_retval ret; DBUG_ON(ch->flags & XPC_C_SETUP); ret = xpc_allocate_local_msgqueue_sn2(ch); if (ret == xpSuccess) { ret = xpc_allocate_remote_msgqueue_sn2(ch); if (ret != xpSuccess) { kfree(ch_sn2->local_msgqueue_base); ch_sn2->local_msgqueue = NULL; kfree(ch_sn2->notify_queue); ch_sn2->notify_queue = NULL; } } return ret; } /* * Free up message queues and other stuff that were allocated for the specified * channel. 
*/ static void xpc_teardown_msg_structures_sn2(struct xpc_channel *ch) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; DBUG_ON(!spin_is_locked(&ch->lock)); ch_sn2->remote_msgqueue_pa = 0; ch_sn2->local_GP->get = 0; ch_sn2->local_GP->put = 0; ch_sn2->remote_GP.get = 0; ch_sn2->remote_GP.put = 0; ch_sn2->w_local_GP.get = 0; ch_sn2->w_local_GP.put = 0; ch_sn2->w_remote_GP.get = 0; ch_sn2->w_remote_GP.put = 0; ch_sn2->next_msg_to_pull = 0; if (ch->flags & XPC_C_SETUP) { dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n", ch->flags, ch->partid, ch->number); kfree(ch_sn2->local_msgqueue_base); ch_sn2->local_msgqueue = NULL; kfree(ch_sn2->remote_msgqueue_base); ch_sn2->remote_msgqueue = NULL; kfree(ch_sn2->notify_queue); ch_sn2->notify_queue = NULL; } } /* * Notify those who wanted to be notified upon delivery of their message. */ static void xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put) { struct xpc_notify_sn2 *notify; u8 notify_type; s64 get = ch->sn.sn2.w_remote_GP.get - 1; while (++get < put && atomic_read(&ch->n_to_notify) > 0) { notify = &ch->sn.sn2.notify_queue[get % ch->local_nentries]; /* * See if the notify entry indicates it was associated with * a message who's sender wants to be notified. It is possible * that it is, but someone else is doing or has done the * notification. 
*/ notify_type = notify->type; if (notify_type == 0 || cmpxchg(&notify->type, notify_type, 0) != notify_type) { continue; } DBUG_ON(notify_type != XPC_N_CALL); atomic_dec(&ch->n_to_notify); if (notify->func != NULL) { dev_dbg(xpc_chan, "notify->func() called, notify=0x%p " "msg_number=%lld partid=%d channel=%d\n", (void *)notify, get, ch->partid, ch->number); notify->func(reason, ch->partid, ch->number, notify->key); dev_dbg(xpc_chan, "notify->func() returned, notify=0x%p" " msg_number=%lld partid=%d channel=%d\n", (void *)notify, get, ch->partid, ch->number); } } } static void xpc_notify_senders_of_disconnect_sn2(struct xpc_channel *ch) { xpc_notify_senders_sn2(ch, ch->reason, ch->sn.sn2.w_local_GP.put); } /* * Clear some of the msg flags in the local message queue. */ static inline void xpc_clear_local_msgqueue_flags_sn2(struct xpc_channel *ch) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; struct xpc_msg_sn2 *msg; s64 get; get = ch_sn2->w_remote_GP.get; do { msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue + (get % ch->local_nentries) * ch->entry_size); DBUG_ON(!(msg->flags & XPC_M_SN2_READY)); msg->flags = 0; } while (++get < ch_sn2->remote_GP.get); } /* * Clear some of the msg flags in the remote message queue. 
*/ static inline void xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; struct xpc_msg_sn2 *msg; s64 put, remote_nentries = ch->remote_nentries; /* flags are zeroed when the buffer is allocated */ if (ch_sn2->remote_GP.put < remote_nentries) return; put = max(ch_sn2->w_remote_GP.put, remote_nentries); do { msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + (put % remote_nentries) * ch->entry_size); DBUG_ON(!(msg->flags & XPC_M_SN2_READY)); DBUG_ON(!(msg->flags & XPC_M_SN2_DONE)); DBUG_ON(msg->number != put - remote_nentries); msg->flags = 0; } while (++put < ch_sn2->remote_GP.put); } static int xpc_n_of_deliverable_payloads_sn2(struct xpc_channel *ch) { return ch->sn.sn2.w_remote_GP.put - ch->sn.sn2.w_local_GP.get; } static void xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number) { struct xpc_channel *ch = &part->channels[ch_number]; struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; int npayloads_sent; ch_sn2->remote_GP = part->sn.sn2.remote_GPs[ch_number]; /* See what, if anything, has changed for each connected channel */ xpc_msgqueue_ref(ch); if (ch_sn2->w_remote_GP.get == ch_sn2->remote_GP.get && ch_sn2->w_remote_GP.put == ch_sn2->remote_GP.put) { /* nothing changed since GPs were last pulled */ xpc_msgqueue_deref(ch); return; } if (!(ch->flags & XPC_C_CONNECTED)) { xpc_msgqueue_deref(ch); return; } /* * First check to see if messages recently sent by us have been * received by the other side. (The remote GET value will have * changed since we last looked at it.) */ if (ch_sn2->w_remote_GP.get != ch_sn2->remote_GP.get) { /* * We need to notify any senders that want to be notified * that their sent messages have been received by their * intended recipients. We need to do this before updating * w_remote_GP.get so that we don't allocate the same message * queue entries prematurely (see xpc_allocate_msg()). 
*/ if (atomic_read(&ch->n_to_notify) > 0) { /* * Notify senders that messages sent have been * received and delivered by the other side. */ xpc_notify_senders_sn2(ch, xpMsgDelivered, ch_sn2->remote_GP.get); } /* * Clear msg->flags in previously sent messages, so that * they're ready for xpc_allocate_msg(). */ xpc_clear_local_msgqueue_flags_sn2(ch); ch_sn2->w_remote_GP.get = ch_sn2->remote_GP.get; dev_dbg(xpc_chan, "w_remote_GP.get changed to %lld, partid=%d, " "channel=%d\n", ch_sn2->w_remote_GP.get, ch->partid, ch->number); /* * If anyone was waiting for message queue entries to become * available, wake them up. */ if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) wake_up(&ch->msg_allocate_wq); } /* * Now check for newly sent messages by the other side. (The remote * PUT value will have changed since we last looked at it.) */ if (ch_sn2->w_remote_GP.put != ch_sn2->remote_GP.put) { /* * Clear msg->flags in previously received messages, so that * they're ready for xpc_get_deliverable_payload_sn2(). 
*/ xpc_clear_remote_msgqueue_flags_sn2(ch); smp_wmb(); /* ensure flags have been cleared before bte_copy */ ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put; dev_dbg(xpc_chan, "w_remote_GP.put changed to %lld, partid=%d, " "channel=%d\n", ch_sn2->w_remote_GP.put, ch->partid, ch->number); npayloads_sent = xpc_n_of_deliverable_payloads_sn2(ch); if (npayloads_sent > 0) { dev_dbg(xpc_chan, "msgs waiting to be copied and " "delivered=%d, partid=%d, channel=%d\n", npayloads_sent, ch->partid, ch->number); if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) xpc_activate_kthreads(ch, npayloads_sent); } } xpc_msgqueue_deref(ch); } static struct xpc_msg_sn2 * xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get) { struct xpc_partition *part = &xpc_partitions[ch->partid]; struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; unsigned long remote_msg_pa; struct xpc_msg_sn2 *msg; u32 msg_index; u32 nmsgs; u64 msg_offset; enum xp_retval ret; if (mutex_lock_interruptible(&ch_sn2->msg_to_pull_mutex) != 0) { /* we were interrupted by a signal */ return NULL; } while (get >= ch_sn2->next_msg_to_pull) { /* pull as many messages as are ready and able to be pulled */ msg_index = ch_sn2->next_msg_to_pull % ch->remote_nentries; DBUG_ON(ch_sn2->next_msg_to_pull >= ch_sn2->w_remote_GP.put); nmsgs = ch_sn2->w_remote_GP.put - ch_sn2->next_msg_to_pull; if (msg_index + nmsgs > ch->remote_nentries) { /* ignore the ones that wrap the msg queue for now */ nmsgs = ch->remote_nentries - msg_index; } msg_offset = msg_index * ch->entry_size; msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + msg_offset); remote_msg_pa = ch_sn2->remote_msgqueue_pa + msg_offset; ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg_pa, nmsgs * ch->entry_size); if (ret != xpSuccess) { dev_dbg(xpc_chan, "failed to pull %d msgs starting with" " msg %lld from partition %d, channel=%d, " "ret=%d\n", nmsgs, ch_sn2->next_msg_to_pull, ch->partid, ch->number, ret); XPC_DEACTIVATE_PARTITION(part, ret); 
mutex_unlock(&ch_sn2->msg_to_pull_mutex); return NULL; } ch_sn2->next_msg_to_pull += nmsgs; } mutex_unlock(&ch_sn2->msg_to_pull_mutex); /* return the message we were looking for */ msg_offset = (get % ch->remote_nentries) * ch->entry_size; msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + msg_offset); return msg; } /* * Get the next deliverable message's payload. */ static void * xpc_get_deliverable_payload_sn2(struct xpc_channel *ch) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; struct xpc_msg_sn2 *msg; void *payload = NULL; s64 get; do { if (ch->flags & XPC_C_DISCONNECTING) break; get = ch_sn2->w_local_GP.get; smp_rmb(); /* guarantee that .get loads before .put */ if (get == ch_sn2->w_remote_GP.put) break; /* There are messages waiting to be pulled and delivered. * We need to try to secure one for ourselves. We'll do this * by trying to increment w_local_GP.get and hope that no one * else beats us to it. If they do, we'll we'll simply have * to try again for the next one. */ if (cmpxchg(&ch_sn2->w_local_GP.get, get, get + 1) == get) { /* we got the entry referenced by get */ dev_dbg(xpc_chan, "w_local_GP.get changed to %lld, " "partid=%d, channel=%d\n", get + 1, ch->partid, ch->number); /* pull the message from the remote partition */ msg = xpc_pull_remote_msg_sn2(ch, get); if (msg != NULL) { DBUG_ON(msg->number != get); DBUG_ON(msg->flags & XPC_M_SN2_DONE); DBUG_ON(!(msg->flags & XPC_M_SN2_READY)); payload = &msg->payload; } break; } } while (1); return payload; } /* * Now we actually send the messages that are ready to be sent by advancing * the local message queue's Put value and then send a chctl msgrequest to the * recipient partition. 
*/ static void xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; struct xpc_msg_sn2 *msg; s64 put = initial_put + 1; int send_msgrequest = 0; while (1) { while (1) { if (put == ch_sn2->w_local_GP.put) break; msg = (struct xpc_msg_sn2 *)((u64)ch_sn2-> local_msgqueue + (put % ch->local_nentries) * ch->entry_size); if (!(msg->flags & XPC_M_SN2_READY)) break; put++; } if (put == initial_put) { /* nothing's changed */ break; } if (cmpxchg_rel(&ch_sn2->local_GP->put, initial_put, put) != initial_put) { /* someone else beat us to it */ DBUG_ON(ch_sn2->local_GP->put < initial_put); break; } /* we just set the new value of local_GP->put */ dev_dbg(xpc_chan, "local_GP->put changed to %lld, partid=%d, " "channel=%d\n", put, ch->partid, ch->number); send_msgrequest = 1; /* * We need to ensure that the message referenced by * local_GP->put is not XPC_M_SN2_READY or that local_GP->put * equals w_local_GP.put, so we'll go have a look. */ initial_put = put; } if (send_msgrequest) xpc_send_chctl_msgrequest_sn2(ch); } /* * Allocate an entry for a message from the message queue associated with the * specified channel. */ static enum xp_retval xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags, struct xpc_msg_sn2 **address_of_msg) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; struct xpc_msg_sn2 *msg; enum xp_retval ret; s64 put; /* * Get the next available message entry from the local message queue. * If none are available, we'll make sure that we grab the latest * GP values. */ ret = xpTimeout; while (1) { put = ch_sn2->w_local_GP.put; smp_rmb(); /* guarantee that .put loads before .get */ if (put - ch_sn2->w_remote_GP.get < ch->local_nentries) { /* There are available message entries. We need to try * to secure one for ourselves. We'll do this by trying * to increment w_local_GP.put as long as someone else * doesn't beat us to it. If they do, we'll have to * try again. 
*/ if (cmpxchg(&ch_sn2->w_local_GP.put, put, put + 1) == put) { /* we got the entry referenced by put */ break; } continue; /* try again */ } /* * There aren't any available msg entries at this time. * * In waiting for a message entry to become available, * we set a timeout in case the other side is not sending * completion interrupts. This lets us fake a notify IRQ * that will cause the notify IRQ handler to fetch the latest * GP values as if an interrupt was sent by the other side. */ if (ret == xpTimeout) xpc_send_chctl_local_msgrequest_sn2(ch); if (flags & XPC_NOWAIT) return xpNoWait; ret = xpc_allocate_msg_wait(ch); if (ret != xpInterrupted && ret != xpTimeout) return ret; } /* get the message's address and initialize it */ msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue + (put % ch->local_nentries) * ch->entry_size); DBUG_ON(msg->flags != 0); msg->number = put; dev_dbg(xpc_chan, "w_local_GP.put changed to %lld; msg=0x%p, " "msg_number=%lld, partid=%d, channel=%d\n", put + 1, (void *)msg, msg->number, ch->partid, ch->number); *address_of_msg = msg; return xpSuccess; } /* * Common code that does the actual sending of the message by advancing the * local message queue's Put value and sends a chctl msgrequest to the * partition the message is being sent to. 
*/ static enum xp_retval xpc_send_payload_sn2(struct xpc_channel *ch, u32 flags, void *payload, u16 payload_size, u8 notify_type, xpc_notify_func func, void *key) { enum xp_retval ret = xpSuccess; struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; struct xpc_msg_sn2 *msg = msg; struct xpc_notify_sn2 *notify = notify; s64 msg_number; s64 put; DBUG_ON(notify_type == XPC_N_CALL && func == NULL); if (XPC_MSG_SIZE(payload_size) > ch->entry_size) return xpPayloadTooBig; xpc_msgqueue_ref(ch); if (ch->flags & XPC_C_DISCONNECTING) { ret = ch->reason; goto out_1; } if (!(ch->flags & XPC_C_CONNECTED)) { ret = xpNotConnected; goto out_1; } ret = xpc_allocate_msg_sn2(ch, flags, &msg); if (ret != xpSuccess) goto out_1; msg_number = msg->number; if (notify_type != 0) { /* * Tell the remote side to send an ACK interrupt when the * message has been delivered. */ msg->flags |= XPC_M_SN2_INTERRUPT; atomic_inc(&ch->n_to_notify); notify = &ch_sn2->notify_queue[msg_number % ch->local_nentries]; notify->func = func; notify->key = key; notify->type = notify_type; /* ??? Is a mb() needed here? */ if (ch->flags & XPC_C_DISCONNECTING) { /* * An error occurred between our last error check and * this one. We will try to clear the type field from * the notify entry. If we succeed then * xpc_disconnect_channel() didn't already process * the notify entry. */ if (cmpxchg(&notify->type, notify_type, 0) == notify_type) { atomic_dec(&ch->n_to_notify); ret = ch->reason; } goto out_1; } } memcpy(&msg->payload, payload, payload_size); msg->flags |= XPC_M_SN2_READY; /* * The preceding store of msg->flags must occur before the following * load of local_GP->put. 
*/ smp_mb(); /* see if the message is next in line to be sent, if so send it */ put = ch_sn2->local_GP->put; if (put == msg_number) xpc_send_msgs_sn2(ch, put); out_1: xpc_msgqueue_deref(ch); return ret; } /* * Now we actually acknowledge the messages that have been delivered and ack'd * by advancing the cached remote message queue's Get value and if requested * send a chctl msgrequest to the message sender's partition. * * If a message has XPC_M_SN2_INTERRUPT set, send an interrupt to the partition * that sent the message. */ static void xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; struct xpc_msg_sn2 *msg; s64 get = initial_get + 1; int send_msgrequest = 0; while (1) { while (1) { if (get == ch_sn2->w_local_GP.get) break; msg = (struct xpc_msg_sn2 *)((u64)ch_sn2-> remote_msgqueue + (get % ch->remote_nentries) * ch->entry_size); if (!(msg->flags & XPC_M_SN2_DONE)) break; msg_flags |= msg->flags; get++; } if (get == initial_get) { /* nothing's changed */ break; } if (cmpxchg_rel(&ch_sn2->local_GP->get, initial_get, get) != initial_get) { /* someone else beat us to it */ DBUG_ON(ch_sn2->local_GP->get <= initial_get); break; } /* we just set the new value of local_GP->get */ dev_dbg(xpc_chan, "local_GP->get changed to %lld, partid=%d, " "channel=%d\n", get, ch->partid, ch->number); send_msgrequest = (msg_flags & XPC_M_SN2_INTERRUPT); /* * We need to ensure that the message referenced by * local_GP->get is not XPC_M_SN2_DONE or that local_GP->get * equals w_local_GP.get, so we'll go have a look. 
		 */
		initial_get = get;
	}

	if (send_msgrequest)
		xpc_send_chctl_msgrequest_sn2(ch);
}

/*
 * Mark a delivered payload's message entry DONE and, if it is next in line,
 * kick off acknowledgment back to the sending partition.
 */
static void
xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
{
	struct xpc_msg_sn2 *msg;
	s64 msg_number;
	s64 get;

	msg = container_of(payload, struct xpc_msg_sn2, payload);
	msg_number = msg->number;

	dev_dbg(xpc_chan, "msg=0x%p, msg_number=%lld, partid=%d, channel=%d\n",
		(void *)msg, msg_number, ch->partid, ch->number);

	DBUG_ON((((u64)msg - (u64)ch->sn.sn2.remote_msgqueue) / ch->entry_size) !=
		msg_number % ch->remote_nentries);
	DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
	DBUG_ON(msg->flags & XPC_M_SN2_DONE);

	msg->flags |= XPC_M_SN2_DONE;

	/*
	 * The preceding store of msg->flags must occur before the following
	 * load of local_GP->get.
	 */
	smp_mb();

	/*
	 * See if this message is next in line to be acknowledged as having
	 * been delivered.
	 */
	get = ch->sn.sn2.local_GP->get;
	if (get == msg_number)
		xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
}

/* sn2 implementations of the arch-independent XPC operations table */
static struct xpc_arch_operations xpc_arch_ops_sn2 = {
	.setup_partitions = xpc_setup_partitions_sn2,
	.teardown_partitions = xpc_teardown_partitions_sn2,
	.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
	.get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2,
	.setup_rsvd_page = xpc_setup_rsvd_page_sn2,

	.allow_hb = xpc_allow_hb_sn2,
	.disallow_hb = xpc_disallow_hb_sn2,
	.disallow_all_hbs = xpc_disallow_all_hbs_sn2,
	.increment_heartbeat = xpc_increment_heartbeat_sn2,
	.offline_heartbeat = xpc_offline_heartbeat_sn2,
	.online_heartbeat = xpc_online_heartbeat_sn2,
	.heartbeat_init = xpc_heartbeat_init_sn2,
	.heartbeat_exit = xpc_heartbeat_exit_sn2,
	.get_remote_heartbeat = xpc_get_remote_heartbeat_sn2,

	.request_partition_activation =
		xpc_request_partition_activation_sn2,
	.request_partition_reactivation =
		xpc_request_partition_reactivation_sn2,
	.request_partition_deactivation =
		xpc_request_partition_deactivation_sn2,
	.cancel_partition_deactivation_request =
		xpc_cancel_partition_deactivation_request_sn2,

	.setup_ch_structures = xpc_setup_ch_structures_sn2,
	.teardown_ch_structures = xpc_teardown_ch_structures_sn2,

	.make_first_contact = xpc_make_first_contact_sn2,

	.get_chctl_all_flags = xpc_get_chctl_all_flags_sn2,
	.send_chctl_closerequest = xpc_send_chctl_closerequest_sn2,
	.send_chctl_closereply = xpc_send_chctl_closereply_sn2,
	.send_chctl_openrequest = xpc_send_chctl_openrequest_sn2,
	.send_chctl_openreply = xpc_send_chctl_openreply_sn2,
	.send_chctl_opencomplete = xpc_send_chctl_opencomplete_sn2,
	.process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2,

	.save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_sn2,

	.setup_msg_structures = xpc_setup_msg_structures_sn2,
	.teardown_msg_structures = xpc_teardown_msg_structures_sn2,

	.indicate_partition_engaged = xpc_indicate_partition_engaged_sn2,
	.indicate_partition_disengaged = xpc_indicate_partition_disengaged_sn2,
	.partition_engaged = xpc_partition_engaged_sn2,
	.any_partition_engaged = xpc_any_partition_engaged_sn2,
	.assume_partition_disengaged = xpc_assume_partition_disengaged_sn2,

	.n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_sn2,
	.send_payload = xpc_send_payload_sn2,
	.get_deliverable_payload = xpc_get_deliverable_payload_sn2,
	.received_payload = xpc_received_payload_sn2,
	.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2,
};

/*
 * Module-init-time setup of the sn2 backend: install the ops table, allocate
 * the remote copy buffer, open IPI/amo protections and hook the ACTIVATE IRQ.
 */
int
xpc_init_sn2(void)
{
	int ret;
	size_t buf_size;

	xpc_arch_ops = xpc_arch_ops_sn2;

	if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
			"larger than %d\n", XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	buf_size = max(XPC_RP_VARS_SIZE,
		       XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES_SN2);
	xpc_remote_copy_buffer_sn2 = xpc_kmalloc_cacheline_aligned(buf_size,
								   GFP_KERNEL,
					      &xpc_remote_copy_buffer_base_sn2);
	if (xpc_remote_copy_buffer_sn2 == NULL) {
		dev_err(xpc_part, "can't get memory for remote copy buffer\n");
		return -ENOMEM;
	}

	/* open up protections for IPI and [potentially] amo operations */
	xpc_allow_IPI_ops_sn2();
	xpc_allow_amo_ops_shub_wars_1_1_sn2();

	/*
	 * This is safe to do before the xpc_hb_checker thread has started
	 * because the handler releases a wait queue. If an interrupt is
	 * received before the thread is waiting, it will not go to sleep,
	 * but rather immediately process the interrupt.
	 */
	ret = request_irq(SGI_XPC_ACTIVATE, xpc_handle_activate_IRQ_sn2, 0,
			  "xpc hb", NULL);
	if (ret != 0) {
		dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
			"errno=%d\n", -ret);
		/* roll back the setup done above */
		xpc_disallow_IPI_ops_sn2();
		kfree(xpc_remote_copy_buffer_base_sn2);
	}
	return ret;
}

/* Undo everything xpc_init_sn2() set up. */
void
xpc_exit_sn2(void)
{
	free_irq(SGI_XPC_ACTIVATE, NULL);
	xpc_disallow_IPI_ops_sn2();
	kfree(xpc_remote_copy_buffer_base_sn2);
}
gpl-2.0
Asure/Dropad-kernel-2.6.32.9
arch/mn10300/kernel/mn10300-debug.c
13892
1413
/* Debugging stuff for the MN10300-based processors * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/sched.h> #include <asm/serial-regs.h> #undef MN10300_CONSOLE_ON_SERIO /* * write a string directly through one of the serial ports on-board the MN10300 */ #ifdef MN10300_CONSOLE_ON_SERIO void debug_to_serial_mnser(const char *p, int n) { char ch; for (; n > 0; n--) { ch = *p++; #if MN10300_CONSOLE_ON_SERIO == 0 while (SC0STR & (SC01STR_TBF)) continue; SC0TXB = ch; while (SC0STR & (SC01STR_TBF)) continue; if (ch == 0x0a) { SC0TXB = 0x0d; while (SC0STR & (SC01STR_TBF)) continue; } #elif MN10300_CONSOLE_ON_SERIO == 1 while (SC1STR & (SC01STR_TBF)) continue; SC1TXB = ch; while (SC1STR & (SC01STR_TBF)) continue; if (ch == 0x0a) { SC1TXB = 0x0d; while (SC1STR & (SC01STR_TBF)) continue; } #elif MN10300_CONSOLE_ON_SERIO == 2 while (SC2STR & (SC2STR_TBF)) continue; SC2TXB = ch; while (SC2STR & (SC2STR_TBF)) continue; if (ch == 0x0a) { SC2TXB = 0x0d; while (SC2STR & (SC2STR_TBF)) continue; } #endif } } #endif
gpl-2.0
SlimRoms/kernel_samsung_msm8660
arch/mn10300/kernel/mn10300-debug.c
13892
1413
/* Debugging stuff for the MN10300-based processors * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/sched.h> #include <asm/serial-regs.h> #undef MN10300_CONSOLE_ON_SERIO /* * write a string directly through one of the serial ports on-board the MN10300 */ #ifdef MN10300_CONSOLE_ON_SERIO void debug_to_serial_mnser(const char *p, int n) { char ch; for (; n > 0; n--) { ch = *p++; #if MN10300_CONSOLE_ON_SERIO == 0 while (SC0STR & (SC01STR_TBF)) continue; SC0TXB = ch; while (SC0STR & (SC01STR_TBF)) continue; if (ch == 0x0a) { SC0TXB = 0x0d; while (SC0STR & (SC01STR_TBF)) continue; } #elif MN10300_CONSOLE_ON_SERIO == 1 while (SC1STR & (SC01STR_TBF)) continue; SC1TXB = ch; while (SC1STR & (SC01STR_TBF)) continue; if (ch == 0x0a) { SC1TXB = 0x0d; while (SC1STR & (SC01STR_TBF)) continue; } #elif MN10300_CONSOLE_ON_SERIO == 2 while (SC2STR & (SC2STR_TBF)) continue; SC2TXB = ch; while (SC2STR & (SC2STR_TBF)) continue; if (ch == 0x0a) { SC2TXB = 0x0d; while (SC2STR & (SC2STR_TBF)) continue; } #endif } } #endif
gpl-2.0
AccentureMobilityServices/kernel
arch/parisc/math-emu/fcnvfut.c
14148
8031
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC * * File: * @(#) pa/spmath/fcnvfut.c $Revision: 1.1 $ * * Purpose: * Floating-point to Unsigned Fixed-point Converts with Truncation * * External Interfaces: * dbl_to_dbl_fcnvfut(srcptr,nullptr,dstptr,status) * dbl_to_sgl_fcnvfut(srcptr,nullptr,dstptr,status) * sgl_to_dbl_fcnvfut(srcptr,nullptr,dstptr,status) * sgl_to_sgl_fcnvfut(srcptr,nullptr,dstptr,status) * * Internal Interfaces: * * Theory: * <<please update with a overview of the operation of this file>> * * END_DESC */ #include "float.h" #include "sgl_float.h" #include "dbl_float.h" #include "cnv_float.h" /************************************************************************ * Floating-point to Unsigned Fixed-point Converts with Truncation * ************************************************************************/ /* * Convert single floating-point to single fixed-point format * with truncated result */ /*ARGSUSED*/ int sgl_to_sgl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr, unsigned int *dstptr, unsigned int *status) { register unsigned int src, result; register int src_exponent; src = *srcptr; src_exponent = Sgl_exponent(src) - SGL_BIAS; /* * 
Test for overflow */ if (src_exponent > SGL_FX_MAX_EXP + 1) { if (Sgl_isone_sign(src)) { result = 0; } else { result = 0xffffffff; } if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); *dstptr = result; return(NOEXCEPTION); } /* * Generate result */ if (src_exponent >= 0) { /* * Check sign. * If negative, trap unimplemented. */ if (Sgl_isone_sign(src)) { result = 0; if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); *dstptr = result; return(NOEXCEPTION); } Sgl_clear_signexponent_set_hidden(src); Suint_from_sgl_mantissa(src,src_exponent,result); *dstptr = result; /* check for inexact */ if (Sgl_isinexact_to_unsigned(src,src_exponent)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } else { *dstptr = 0; /* check for inexact */ if (Sgl_isnotzero_exponentmantissa(src)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } return(NOEXCEPTION); } /* * Single Floating-point to Double Unsigned Fixed */ /*ARGSUSED*/ int sgl_to_dbl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr, dbl_unsigned * dstptr, unsigned int *status) { register int src_exponent; register unsigned int src, resultp1, resultp2; src = *srcptr; src_exponent = Sgl_exponent(src) - SGL_BIAS; /* * Test for overflow */ if (src_exponent > DBL_FX_MAX_EXP + 1) { if (Sgl_isone_sign(src)) { resultp1 = resultp2 = 0; } else { resultp1 = resultp2 = 0xffffffff; } if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); Duint_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } /* * Generate result */ if (src_exponent >= 0) { /* * Check sign. * If negative, trap unimplemented. 
*/ if (Sgl_isone_sign(src)) { resultp1 = resultp2 = 0; if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); Duint_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } Sgl_clear_signexponent_set_hidden(src); Duint_from_sgl_mantissa(src,src_exponent,resultp1,resultp2); Duint_copytoptr(resultp1,resultp2,dstptr); /* check for inexact */ if (Sgl_isinexact_to_unsigned(src,src_exponent)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } else { Duint_setzero(resultp1,resultp2); Duint_copytoptr(resultp1,resultp2,dstptr); /* check for inexact */ if (Sgl_isnotzero_exponentmantissa(src)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } return(NOEXCEPTION); } /* * Double Floating-point to Single Unsigned Fixed */ /*ARGSUSED*/ int dbl_to_sgl_fcnvfut (dbl_floating_point * srcptr, unsigned int *nullptr, unsigned int *dstptr, unsigned int *status) { register unsigned int srcp1, srcp2, result; register int src_exponent; Dbl_copyfromptr(srcptr,srcp1,srcp2); src_exponent = Dbl_exponent(srcp1) - DBL_BIAS; /* * Test for overflow */ if (src_exponent > SGL_FX_MAX_EXP + 1) { if (Dbl_isone_sign(srcp1)) { result = 0; } else { result = 0xffffffff; } if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); *dstptr = result; return(NOEXCEPTION); } /* * Generate result */ if (src_exponent >= 0) { /* * Check sign. * If negative, trap unimplemented. 
*/ if (Dbl_isone_sign(srcp1)) { result = 0; if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); *dstptr = result; return(NOEXCEPTION); } Dbl_clear_signexponent_set_hidden(srcp1); Suint_from_dbl_mantissa(srcp1,srcp2,src_exponent,result); *dstptr = result; /* check for inexact */ if (Dbl_isinexact_to_unsigned(srcp1,srcp2,src_exponent)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } else { *dstptr = 0; /* check for inexact */ if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } return(NOEXCEPTION); } /* * Double Floating-point to Double Unsigned Fixed */ /*ARGSUSED*/ int dbl_to_dbl_fcnvfut (dbl_floating_point * srcptr, unsigned int *nullptr, dbl_unsigned * dstptr, unsigned int *status) { register int src_exponent; register unsigned int srcp1, srcp2, resultp1, resultp2; Dbl_copyfromptr(srcptr,srcp1,srcp2); src_exponent = Dbl_exponent(srcp1) - DBL_BIAS; /* * Test for overflow */ if (src_exponent > DBL_FX_MAX_EXP + 1) { if (Dbl_isone_sign(srcp1)) { resultp1 = resultp2 = 0; } else { resultp1 = resultp2 = 0xffffffff; } if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); Duint_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } /* * Generate result */ if (src_exponent >= 0) { /* * Check sign. * If negative, trap unimplemented. 
*/ if (Dbl_isone_sign(srcp1)) { resultp1 = resultp2 = 0; if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); Duint_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } Dbl_clear_signexponent_set_hidden(srcp1); Duint_from_dbl_mantissa(srcp1,srcp2,src_exponent, resultp1,resultp2); Duint_copytoptr(resultp1,resultp2,dstptr); /* check for inexact */ if (Dbl_isinexact_to_unsigned(srcp1,srcp2,src_exponent)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } else { Duint_setzero(resultp1,resultp2); Duint_copytoptr(resultp1,resultp2,dstptr); /* check for inexact */ if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } return(NOEXCEPTION); }
gpl-2.0
sycolon/lge_g3_kernel
arch/ia64/sn/kernel/klconflib.c
14148
3013
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved. */ #include <linux/types.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/kernel.h> #include <asm/sn/types.h> #include <asm/sn/module.h> #include <asm/sn/l1.h> char brick_types[MAX_BRICK_TYPES + 1] = "cri.xdpn%#=vo^kjbf890123456789..."; /* * Format a module id for printing. * * There are three possible formats: * * MODULE_FORMAT_BRIEF is the brief 6-character format, including * the actual brick-type as recorded in the * moduleid_t, eg. 002c15 for a C-brick, or * 101#17 for a PX-brick. * * MODULE_FORMAT_LONG is the hwgraph format, eg. rack/002/bay/15 * of rack/101/bay/17 (note that the brick * type does not appear in this format). * * MODULE_FORMAT_LCD is like MODULE_FORMAT_BRIEF, except that it * ensures that the module id provided appears * exactly as it would on the LCD display of * the corresponding brick, eg. still 002c15 * for a C-brick, but 101p17 for a PX-brick. * * maule (9/13/04): Removed top-level check for (fmt == MODULE_FORMAT_LCD) * making MODULE_FORMAT_LCD equivalent to MODULE_FORMAT_BRIEF. It was * decided that all callers should assume the returned string should be what * is displayed on the brick L1 LCD. 
*/ void format_module_id(char *buffer, moduleid_t m, int fmt) { int rack, position; unsigned char brickchar; rack = MODULE_GET_RACK(m); brickchar = MODULE_GET_BTCHAR(m); /* Be sure we use the same brick type character as displayed * on the brick's LCD */ switch (brickchar) { case L1_BRICKTYPE_GA: case L1_BRICKTYPE_OPUS_TIO: brickchar = L1_BRICKTYPE_C; break; case L1_BRICKTYPE_PX: case L1_BRICKTYPE_PE: case L1_BRICKTYPE_PA: case L1_BRICKTYPE_SA: /* we can move this to the "I's" later * if that makes more sense */ brickchar = L1_BRICKTYPE_P; break; case L1_BRICKTYPE_IX: case L1_BRICKTYPE_IA: brickchar = L1_BRICKTYPE_I; break; } position = MODULE_GET_BPOS(m); if ((fmt == MODULE_FORMAT_BRIEF) || (fmt == MODULE_FORMAT_LCD)) { /* Brief module number format, eg. 002c15 */ /* Decompress the rack number */ *buffer++ = '0' + RACK_GET_CLASS(rack); *buffer++ = '0' + RACK_GET_GROUP(rack); *buffer++ = '0' + RACK_GET_NUM(rack); /* Add the brick type */ *buffer++ = brickchar; } else if (fmt == MODULE_FORMAT_LONG) { /* Fuller hwgraph format, eg. rack/002/bay/15 */ strcpy(buffer, "rack" "/"); buffer += strlen(buffer); *buffer++ = '0' + RACK_GET_CLASS(rack); *buffer++ = '0' + RACK_GET_GROUP(rack); *buffer++ = '0' + RACK_GET_NUM(rack); strcpy(buffer, "/" "bay" "/"); buffer += strlen(buffer); } /* Add the bay position, using at least two digits */ if (position < 10) *buffer++ = '0'; sprintf(buffer, "%d", position); }
gpl-2.0
aloisiojr/bluez-kernel
arch/parisc/math-emu/fcnvfut.c
14148
8031
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC * * File: * @(#) pa/spmath/fcnvfut.c $Revision: 1.1 $ * * Purpose: * Floating-point to Unsigned Fixed-point Converts with Truncation * * External Interfaces: * dbl_to_dbl_fcnvfut(srcptr,nullptr,dstptr,status) * dbl_to_sgl_fcnvfut(srcptr,nullptr,dstptr,status) * sgl_to_dbl_fcnvfut(srcptr,nullptr,dstptr,status) * sgl_to_sgl_fcnvfut(srcptr,nullptr,dstptr,status) * * Internal Interfaces: * * Theory: * <<please update with a overview of the operation of this file>> * * END_DESC */ #include "float.h" #include "sgl_float.h" #include "dbl_float.h" #include "cnv_float.h" /************************************************************************ * Floating-point to Unsigned Fixed-point Converts with Truncation * ************************************************************************/ /* * Convert single floating-point to single fixed-point format * with truncated result */ /*ARGSUSED*/ int sgl_to_sgl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr, unsigned int *dstptr, unsigned int *status) { register unsigned int src, result; register int src_exponent; src = *srcptr; src_exponent = Sgl_exponent(src) - SGL_BIAS; /* * 
Test for overflow */ if (src_exponent > SGL_FX_MAX_EXP + 1) { if (Sgl_isone_sign(src)) { result = 0; } else { result = 0xffffffff; } if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); *dstptr = result; return(NOEXCEPTION); } /* * Generate result */ if (src_exponent >= 0) { /* * Check sign. * If negative, trap unimplemented. */ if (Sgl_isone_sign(src)) { result = 0; if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); *dstptr = result; return(NOEXCEPTION); } Sgl_clear_signexponent_set_hidden(src); Suint_from_sgl_mantissa(src,src_exponent,result); *dstptr = result; /* check for inexact */ if (Sgl_isinexact_to_unsigned(src,src_exponent)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } else { *dstptr = 0; /* check for inexact */ if (Sgl_isnotzero_exponentmantissa(src)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } return(NOEXCEPTION); } /* * Single Floating-point to Double Unsigned Fixed */ /*ARGSUSED*/ int sgl_to_dbl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr, dbl_unsigned * dstptr, unsigned int *status) { register int src_exponent; register unsigned int src, resultp1, resultp2; src = *srcptr; src_exponent = Sgl_exponent(src) - SGL_BIAS; /* * Test for overflow */ if (src_exponent > DBL_FX_MAX_EXP + 1) { if (Sgl_isone_sign(src)) { resultp1 = resultp2 = 0; } else { resultp1 = resultp2 = 0xffffffff; } if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); Duint_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } /* * Generate result */ if (src_exponent >= 0) { /* * Check sign. * If negative, trap unimplemented. 
*/ if (Sgl_isone_sign(src)) { resultp1 = resultp2 = 0; if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); Duint_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } Sgl_clear_signexponent_set_hidden(src); Duint_from_sgl_mantissa(src,src_exponent,resultp1,resultp2); Duint_copytoptr(resultp1,resultp2,dstptr); /* check for inexact */ if (Sgl_isinexact_to_unsigned(src,src_exponent)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } else { Duint_setzero(resultp1,resultp2); Duint_copytoptr(resultp1,resultp2,dstptr); /* check for inexact */ if (Sgl_isnotzero_exponentmantissa(src)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } return(NOEXCEPTION); } /* * Double Floating-point to Single Unsigned Fixed */ /*ARGSUSED*/ int dbl_to_sgl_fcnvfut (dbl_floating_point * srcptr, unsigned int *nullptr, unsigned int *dstptr, unsigned int *status) { register unsigned int srcp1, srcp2, result; register int src_exponent; Dbl_copyfromptr(srcptr,srcp1,srcp2); src_exponent = Dbl_exponent(srcp1) - DBL_BIAS; /* * Test for overflow */ if (src_exponent > SGL_FX_MAX_EXP + 1) { if (Dbl_isone_sign(srcp1)) { result = 0; } else { result = 0xffffffff; } if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); *dstptr = result; return(NOEXCEPTION); } /* * Generate result */ if (src_exponent >= 0) { /* * Check sign. * If negative, trap unimplemented. 
*/ if (Dbl_isone_sign(srcp1)) { result = 0; if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); *dstptr = result; return(NOEXCEPTION); } Dbl_clear_signexponent_set_hidden(srcp1); Suint_from_dbl_mantissa(srcp1,srcp2,src_exponent,result); *dstptr = result; /* check for inexact */ if (Dbl_isinexact_to_unsigned(srcp1,srcp2,src_exponent)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } else { *dstptr = 0; /* check for inexact */ if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } return(NOEXCEPTION); } /* * Double Floating-point to Double Unsigned Fixed */ /*ARGSUSED*/ int dbl_to_dbl_fcnvfut (dbl_floating_point * srcptr, unsigned int *nullptr, dbl_unsigned * dstptr, unsigned int *status) { register int src_exponent; register unsigned int srcp1, srcp2, resultp1, resultp2; Dbl_copyfromptr(srcptr,srcp1,srcp2); src_exponent = Dbl_exponent(srcp1) - DBL_BIAS; /* * Test for overflow */ if (src_exponent > DBL_FX_MAX_EXP + 1) { if (Dbl_isone_sign(srcp1)) { resultp1 = resultp2 = 0; } else { resultp1 = resultp2 = 0xffffffff; } if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); Duint_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } /* * Generate result */ if (src_exponent >= 0) { /* * Check sign. * If negative, trap unimplemented. 
*/ if (Dbl_isone_sign(srcp1)) { resultp1 = resultp2 = 0; if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); Duint_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } Dbl_clear_signexponent_set_hidden(srcp1); Duint_from_dbl_mantissa(srcp1,srcp2,src_exponent, resultp1,resultp2); Duint_copytoptr(resultp1,resultp2,dstptr); /* check for inexact */ if (Dbl_isinexact_to_unsigned(srcp1,srcp2,src_exponent)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } else { Duint_setzero(resultp1,resultp2); Duint_copytoptr(resultp1,resultp2,dstptr); /* check for inexact */ if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } return(NOEXCEPTION); }
gpl-2.0
itgb/opCloudRouter
qca/src/u-boot/drivers/mtd/nand/fsmc_nand.c
69
13304
/*
 * (C) Copyright 2010
 * Vipin Kumar, ST Microelectronics, vipin.kumar@st.com.
 *
 * (C) Copyright 2012
 * Amit Virdi, ST Microelectronics, amit.virdi@st.com.
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <common.h>
#include <nand.h>
#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/fsmc_nand.h>
#include <asm/arch/hardware.h>

static u32 fsmc_version;
static struct fsmc_regs *const fsmc_regs_p = (struct fsmc_regs *)
	CONFIG_SYS_FSMC_BASE;

/*
 * ECC4 and ECC1 have 13 bytes and 3 bytes of ecc respectively for 512 bytes
 * of data.  ECC4 can correct up to 8 bits in 512 bytes of data while ECC1 can
 * correct 1 bit in 512 bytes.
 */
static struct nand_ecclayout fsmc_ecc4_lp_layout = {
	.eccbytes = 104,
	.eccpos = {
		2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  12,  13,  14,
		18,  19,  20,  21,  22,  23,  24,  25,  26,  27,  28,  29,  30,
		34,  35,  36,  37,  38,  39,  40,  41,  42,  43,  44,  45,  46,
		50,  51,  52,  53,  54,  55,  56,  57,  58,  59,  60,  61,  62,
		66,  67,  68,  69,  70,  71,  72,  73,  74,  75,  76,  77,  78,
		82,  83,  84,  85,  86,  87,  88,  89,  90,  91,  92,  93,  94,
		98,  99,  100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
		114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126
	},
	.oobfree = {
		{.offset = 15, .length = 3},
		{.offset = 31, .length = 3},
		{.offset = 47, .length = 3},
		{.offset = 63, .length = 3},
		{.offset = 79, .length = 3},
		{.offset = 95, .length = 3},
		{.offset = 111, .length = 3},
		{.offset = 127, .length = 1}
	}
};

/*
 * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 224 bytes.  13*8
 * bytes of OOB size is reserved for ECC, byte no. 0 & 1 reserved for bad
 * block and 118 bytes are free for use.
 */
static struct nand_ecclayout fsmc_ecc4_224_layout = {
	.eccbytes = 104,
	.eccpos = {
		2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  12,  13,  14,
		18,  19,  20,  21,  22,  23,  24,  25,  26,  27,  28,  29,  30,
		34,  35,  36,  37,  38,  39,  40,  41,  42,  43,  44,  45,  46,
		50,  51,  52,  53,  54,  55,  56,  57,  58,  59,  60,  61,  62,
		66,  67,  68,  69,  70,  71,  72,  73,  74,  75,  76,  77,  78,
		82,  83,  84,  85,  86,  87,  88,  89,  90,  91,  92,  93,  94,
		98,  99,  100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
		114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126
	},
	.oobfree = {
		{.offset = 15, .length = 3},
		{.offset = 31, .length = 3},
		{.offset = 47, .length = 3},
		{.offset = 63, .length = 3},
		{.offset = 79, .length = 3},
		{.offset = 95, .length = 3},
		{.offset = 111, .length = 3},
		{.offset = 127, .length = 97}
	}
};

/*
 * ECC placement definitions in oobfree type format.
 * There are 13 bytes of ecc for every 512 byte block and it has to be read
 * consecutively and immediately after the 512 byte data block for hardware to
 * generate the error bit offsets in 512 byte data.
 * Managing the ecc bytes in the following way makes it easier for software to
 * read ecc bytes consecutive to data bytes.  This way is similar to the
 * oobfree structure maintained already in the u-boot nand driver.
 */
static struct fsmc_eccplace fsmc_eccpl_lp = {
	.eccplace = {
		{.offset = 2, .length = 13},
		{.offset = 18, .length = 13},
		{.offset = 34, .length = 13},
		{.offset = 50, .length = 13},
		{.offset = 66, .length = 13},
		{.offset = 82, .length = 13},
		{.offset = 98, .length = 13},
		{.offset = 114, .length = 13}
	}
};

static struct nand_ecclayout fsmc_ecc4_sp_layout = {
	.eccbytes = 13,
	.eccpos = { 0, 1, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14 },
	.oobfree = {
		{.offset = 15, .length = 1},
	}
};

static struct fsmc_eccplace fsmc_eccpl_sp = {
	.eccplace = {
		{.offset = 0, .length = 4},
		{.offset = 6, .length = 9}
	}
};

static struct nand_ecclayout fsmc_ecc1_layout = {
	.eccbytes = 24,
	.eccpos = {
		2,  3,  4,  18, 19, 20, 34, 35, 36, 50, 51, 52,
		66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116
	},
	.oobfree = {
		{.offset = 8, .length = 8},
		{.offset = 24, .length = 8},
		{.offset = 40, .length = 8},
		{.offset = 56, .length = 8},
		{.offset = 72, .length = 8},
		{.offset = 88, .length = 8},
		{.offset = 104, .length = 8},
		{.offset = 120, .length = 8}
	}
};

/* Count the number of 0 bits in buff, stopping once max_bits is exceeded. */
static int count_written_bits(uint8_t *buff, int size, int max_bits)
{
	int written_bits = 0;
	int i;

	for (i = 0; i < size; i++) {
		written_bits += hweight8(~buff[i]);
		if (written_bits > max_bits)
			break;
	}

	return written_bits;
}

/* nand_chip cmd_ctrl hook: route CLE/ALE via address bits, gate chip enable. */
static void fsmc_nand_hwcontrol(struct mtd_info *mtd, int cmd, uint ctrl)
{
	struct nand_chip *this = mtd->priv;
	ulong addr;

	if (ctrl & NAND_CTRL_CHANGE) {
		addr = (ulong)this->IO_ADDR_W;
		addr &= ~(CONFIG_SYS_NAND_CLE | CONFIG_SYS_NAND_ALE);

		if (ctrl & NAND_CLE)
			addr |= CONFIG_SYS_NAND_CLE;
		if (ctrl & NAND_ALE)
			addr |= CONFIG_SYS_NAND_ALE;

		if (ctrl & NAND_NCE) {
			writel(readl(&fsmc_regs_p->pc) | FSMC_ENABLE,
			       &fsmc_regs_p->pc);
		} else {
			writel(readl(&fsmc_regs_p->pc) & ~FSMC_ENABLE,
			       &fsmc_regs_p->pc);
		}

		this->IO_ADDR_W = (void *)addr;
	}

	if (cmd != NAND_CMD_NONE)
		writeb(cmd, this->IO_ADDR_W);
}

/*
 * BCH8 correction: the FSMC hardware reports the number of errors and their
 * bit offsets within the 512-byte block; flip those bits in place.
 * Returns the number of corrected bits, or -EBADMSG when uncorrectable.
 */
static int fsmc_bch8_correct_data(struct mtd_info *mtd, u_char *dat,
				  u_char *read_ecc, u_char *calc_ecc)
{
	/* The calculated ecc is actually the correction index in data */
	u32 err_idx[8];
	u32 num_err, i;
	u32 ecc1, ecc2, ecc3, ecc4;

	num_err = (readl(&fsmc_regs_p->sts) >> 10) & 0xF;

	if (likely(num_err == 0))
		return 0;

	if (unlikely(num_err > 8)) {
		/*
		 * This is a temporary erase check.  A newly erased page read
		 * would result in an ecc error because the oob data is also
		 * erased to FF and the calculated ecc for an FF data is not
		 * FF..FF.
		 * This is a workaround to skip performing correction in case
		 * data is FF..FF
		 *
		 * Logic:
		 * For every page, each bit written as 0 is counted until
		 * these number of bits are greater than 8 (the maximum
		 * correction capability of FSMC for each 512 + 13 bytes)
		 */
		int bits_ecc = count_written_bits(read_ecc, 13, 8);
		int bits_data = count_written_bits(dat, 512, 8);

		if ((bits_ecc + bits_data) <= 8) {
			if (bits_data)
				memset(dat, 0xff, 512);
			return bits_data + bits_ecc;
		}

		return -EBADMSG;
	}

	/* Unpack the 13-bit error offsets from the ecc/status registers. */
	ecc1 = readl(&fsmc_regs_p->ecc1);
	ecc2 = readl(&fsmc_regs_p->ecc2);
	ecc3 = readl(&fsmc_regs_p->ecc3);
	ecc4 = readl(&fsmc_regs_p->sts);

	err_idx[0] = (ecc1 >> 0) & 0x1FFF;
	err_idx[1] = (ecc1 >> 13) & 0x1FFF;
	err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F);
	err_idx[3] = (ecc2 >> 7) & 0x1FFF;
	err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF);
	err_idx[5] = (ecc3 >> 1) & 0x1FFF;
	err_idx[6] = (ecc3 >> 14) & 0x1FFF;
	err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F);

	for (i = 0; i < num_err; i++) {
		err_idx[i] ^= 3;
		if (err_idx[i] < 512 * 8)
			__change_bit(err_idx[i], dat);
	}

	return num_err;
}

/* nand ecc.calculate hook: read the hardware-generated ECC bytes. */
static int fsmc_read_hwecc(struct mtd_info *mtd,
			   const u_char *data, u_char *ecc)
{
	u_int ecc_tmp;
	int timeout = CONFIG_SYS_HZ;
	ulong start;

	switch (fsmc_version) {
	case FSMC_VER8:
		/*
		 * Busy-wait for the ecc computation over 512 bytes to
		 * finish.
		 * NOTE(review): if the wait times out, the registers are
		 * read anyway and no error is reported to the caller --
		 * confirm this is intended.
		 */
		start = get_timer(0);
		while (get_timer(start) < timeout) {
			if (readl(&fsmc_regs_p->sts) & FSMC_CODE_RDY)
				break;
		}

		ecc_tmp = readl(&fsmc_regs_p->ecc1);
		ecc[0] = (u_char)(ecc_tmp >> 0);
		ecc[1] = (u_char)(ecc_tmp >> 8);
		ecc[2] = (u_char)(ecc_tmp >> 16);
		ecc[3] = (u_char)(ecc_tmp >> 24);

		ecc_tmp = readl(&fsmc_regs_p->ecc2);
		ecc[4] = (u_char)(ecc_tmp >> 0);
		ecc[5] = (u_char)(ecc_tmp >> 8);
		ecc[6] = (u_char)(ecc_tmp >> 16);
		ecc[7] = (u_char)(ecc_tmp >> 24);

		ecc_tmp = readl(&fsmc_regs_p->ecc3);
		ecc[8] = (u_char)(ecc_tmp >> 0);
		ecc[9] = (u_char)(ecc_tmp >> 8);
		ecc[10] = (u_char)(ecc_tmp >> 16);
		ecc[11] = (u_char)(ecc_tmp >> 24);

		ecc_tmp = readl(&fsmc_regs_p->sts);
		ecc[12] = (u_char)(ecc_tmp >> 16);
		break;

	default:
		ecc_tmp = readl(&fsmc_regs_p->ecc1);
		ecc[0] = (u_char)(ecc_tmp >> 0);
		ecc[1] = (u_char)(ecc_tmp >> 8);
		ecc[2] = (u_char)(ecc_tmp >> 16);
		break;
	}

	return 0;
}

/* nand ecc.hwctl hook: 512-byte ECC page, pulse ECCEN to restart generation. */
void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
{
	writel(readl(&fsmc_regs_p->pc) & ~FSMC_ECCPLEN_256,
	       &fsmc_regs_p->pc);
	writel(readl(&fsmc_regs_p->pc) & ~FSMC_ECCEN,
	       &fsmc_regs_p->pc);
	writel(readl(&fsmc_regs_p->pc) | FSMC_ECCEN,
	       &fsmc_regs_p->pc);
}

/*
 * fsmc_read_page_hwecc
 * @mtd:	mtd info structure
 * @chip:	nand chip info structure
 * @buf:	buffer to store read data
 * @page:	page number to read
 *
 * This routine is needed for fsmc version 8 as reading from NAND chip has to
 * be performed in a strict sequence as follows:
 * data(512 byte) -> ecc(13 byte)
 * After this read, fsmc hardware generates and reports error data bits (up to
 * a max of 8 bits).
 */
static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int page)
{
	struct fsmc_eccplace *eccpl;
	int i, j, s, stat, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	uint8_t *ecc_code = chip->buffers->ecccode;
	int off, len, group = 0;
	uint8_t oob[13] __attribute__ ((aligned (2)));

	/* Differentiate between small and large page ecc place definitions */
	eccpl = (mtd->writesize == 512) ? &fsmc_eccpl_sp : &fsmc_eccpl_lp;

	for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
		chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
		chip->ecc.hwctl(mtd, NAND_ECC_READ);
		chip->read_buf(mtd, p, eccsize);

		/* Gather the 13 ecc bytes spread across the OOB area. */
		for (j = 0; j < eccbytes;) {
			off = eccpl->eccplace[group].offset;
			len = eccpl->eccplace[group].length;
			group++;

			/*
			 * length is intentionally kept a higher multiple of 2
			 * to read at least 13 bytes even in case of 16 bit
			 * NAND devices
			 */
			if (chip->options & NAND_BUSWIDTH_16)
				len = roundup(len, 2);

			chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
			chip->read_buf(mtd, oob + j, len);
			j += len;
		}

		memcpy(&ecc_code[i], oob, 13);
		chip->ecc.calculate(mtd, p, &ecc_calc[i]);

		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
		if (stat < 0)
			mtd->ecc_stats.failed++;
		else
			mtd->ecc_stats.corrected += stat;
	}

	return 0;
}

/*
 * Probe the FSMC controller, program its timing/width registers, bind the
 * ECC callbacks matching the detected controller revision, then scan and
 * register the attached NAND chip(s).  Returns 0 on success, -ENXIO on
 * detection/registration failure.
 */
int fsmc_nand_init(struct nand_chip *nand)
{
	static int chip_nr;
	struct mtd_info *mtd;
	int i;
	u32 peripid2 = readl(&fsmc_regs_p->peripid2);

	fsmc_version = (peripid2 >> FSMC_REVISION_SHFT) &
		FSMC_REVISION_MSK;

	writel(readl(&fsmc_regs_p->ctrl) | FSMC_WP, &fsmc_regs_p->ctrl);

#if defined(CONFIG_SYS_FSMC_NAND_16BIT)
	writel(FSMC_DEVWID_16 | FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON,
	       &fsmc_regs_p->pc);
#elif defined(CONFIG_SYS_FSMC_NAND_8BIT)
	writel(FSMC_DEVWID_8 | FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON,
	       &fsmc_regs_p->pc);
#else
#error Please define CONFIG_SYS_FSMC_NAND_16BIT or CONFIG_SYS_FSMC_NAND_8BIT
#endif
	writel(readl(&fsmc_regs_p->pc) | FSMC_TCLR_1 | FSMC_TAR_1,
	       &fsmc_regs_p->pc);
	writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
	       &fsmc_regs_p->comm);
	writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
	       &fsmc_regs_p->attrib);

	nand->options = 0;
#if defined(CONFIG_SYS_FSMC_NAND_16BIT)
	nand->options |= NAND_BUSWIDTH_16;
#endif
	nand->ecc.mode = NAND_ECC_HW;
	nand->ecc.size = 512;
	nand->ecc.calculate = fsmc_read_hwecc;
	nand->ecc.hwctl = fsmc_enable_hwecc;
	nand->cmd_ctrl = fsmc_nand_hwcontrol;
	nand->IO_ADDR_R = nand->IO_ADDR_W =
		(void __iomem *)CONFIG_SYS_NAND_BASE;
	nand->badblockbits = 7;

	mtd = &nand_info[chip_nr++];
	mtd->priv = nand;

	switch (fsmc_version) {
	case FSMC_VER8:
		nand->ecc.bytes = 13;
		nand->ecc.correct = fsmc_bch8_correct_data;
		nand->ecc.read_page = fsmc_read_page_hwecc;
		if (mtd->writesize == 512)
			nand->ecc.layout = &fsmc_ecc4_sp_layout;
		else if (mtd->oobsize == 224)
			nand->ecc.layout = &fsmc_ecc4_224_layout;
		else
			nand->ecc.layout = &fsmc_ecc4_lp_layout;
		break;
	default:
		nand->ecc.bytes = 3;
		nand->ecc.layout = &fsmc_ecc1_layout;
		nand->ecc.correct = nand_correct_data;
		break;
	}

	/* Detect NAND chips */
	if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL))
		return -ENXIO;

	if (nand_scan_tail(mtd))
		return -ENXIO;

	for (i = 0; i < CONFIG_SYS_MAX_NAND_DEVICE; i++)
		if (nand_register(i))
			return -ENXIO;

	return 0;
}
gpl-2.0
ntrdma/ntrdma
drivers/staging/comedi/drivers/pcl812.c
581
32434
/* * comedi/drivers/pcl812.c * * Author: Michal Dobes <dobes@tesnet.cz> * * hardware driver for Advantech cards * card: PCL-812, PCL-812PG, PCL-813, PCL-813B * driver: pcl812, pcl812pg, pcl813, pcl813b * and for ADlink cards * card: ACL-8112DG, ACL-8112HG, ACL-8112PG, ACL-8113, ACL-8216 * driver: acl8112dg, acl8112hg, acl8112pg, acl8113, acl8216 * and for ICP DAS cards * card: ISO-813, A-821PGH, A-821PGL, A-821PGL-NDA, A-822PGH, A-822PGL, * driver: iso813, a821pgh, a-821pgl, a-821pglnda, a822pgh, a822pgl, * card: A-823PGH, A-823PGL, A-826PG * driver: a823pgh, a823pgl, a826pg */ /* * Driver: pcl812 * Description: Advantech PCL-812/PG, PCL-813/B, * ADLink ACL-8112DG/HG/PG, ACL-8113, ACL-8216, * ICP DAS A-821PGH/PGL/PGL-NDA, A-822PGH/PGL, A-823PGH/PGL, A-826PG, * ICP DAS ISO-813 * Author: Michal Dobes <dobes@tesnet.cz> * Devices: [Advantech] PCL-812 (pcl812), PCL-812PG (pcl812pg), * PCL-813 (pcl813), PCL-813B (pcl813b), [ADLink] ACL-8112DG (acl8112dg), * ACL-8112HG (acl8112hg), ACL-8113 (acl-8113), ACL-8216 (acl8216), * [ICP] ISO-813 (iso813), A-821PGH (a821pgh), A-821PGL (a821pgl), * A-821PGL-NDA (a821pclnda), A-822PGH (a822pgh), A-822PGL (a822pgl), * A-823PGH (a823pgh), A-823PGL (a823pgl), A-826PG (a826pg) * Updated: Mon, 06 Aug 2007 12:03:15 +0100 * Status: works (I hope. My board fire up under my hands * and I cann't test all features.) * * This driver supports insn and cmd interfaces. Some boards support only insn * because their hardware don't allow more (PCL-813/B, ACL-8113, ISO-813). * Data transfer over DMA is supported only when you measure only one * channel, this is too hardware limitation of these boards. 
* * Options for PCL-812: * [0] - IO Base * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7; 10, 11, 12, 14, 15) * [2] - DMA (0=disable, 1, 3) * [3] - 0=trigger source is internal 8253 with 2MHz clock * 1=trigger source is external * [4] - 0=A/D input range is +/-10V * 1=A/D input range is +/-5V * 2=A/D input range is +/-2.5V * 3=A/D input range is +/-1.25V * 4=A/D input range is +/-0.625V * 5=A/D input range is +/-0.3125V * [5] - 0=D/A outputs 0-5V (internal reference -5V) * 1=D/A outputs 0-10V (internal reference -10V) * 2=D/A outputs unknown (external reference) * * Options for PCL-812PG, ACL-8112PG: * [0] - IO Base * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7; 10, 11, 12, 14, 15) * [2] - DMA (0=disable, 1, 3) * [3] - 0=trigger source is internal 8253 with 2MHz clock * 1=trigger source is external * [4] - 0=A/D have max +/-5V input * 1=A/D have max +/-10V input * [5] - 0=D/A outputs 0-5V (internal reference -5V) * 1=D/A outputs 0-10V (internal reference -10V) * 2=D/A outputs unknown (external reference) * * Options for ACL-8112DG/HG, A-822PGL/PGH, A-823PGL/PGH, ACL-8216, A-826PG: * [0] - IO Base * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7; 10, 11, 12, 14, 15) * [2] - DMA (0=disable, 1, 3) * [3] - 0=trigger source is internal 8253 with 2MHz clock * 1=trigger source is external * [4] - 0=A/D channels are S.E. * 1=A/D channels are DIFF * [5] - 0=D/A outputs 0-5V (internal reference -5V) * 1=D/A outputs 0-10V (internal reference -10V) * 2=D/A outputs unknown (external reference) * * Options for A-821PGL/PGH: * [0] - IO Base * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7) * [2] - 0=A/D channels are S.E. * 1=A/D channels are DIFF * [3] - 0=D/A output 0-5V (internal reference -5V) * 1=D/A output 0-10V (internal reference -10V) * * Options for A-821PGL-NDA: * [0] - IO Base * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7) * [2] - 0=A/D channels are S.E. 
 *		1=A/D channels are DIFF
 *
 * Options for PCL-813:
 *  [0] - IO Base
 *
 * Options for PCL-813B:
 *  [0] - IO Base
 *  [1] - 0= bipolar inputs
 *        1= unipolar inputs
 *
 * Options for ACL-8113, ISO-813:
 *  [0] - IO Base
 *  [1] - 0= 10V bipolar inputs
 *        1= 10V unipolar inputs
 *        2= 20V bipolar inputs
 *        3= 20V unipolar inputs
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/io.h>
#include "../comedidev.h"

#include "comedi_isadma.h"
#include "comedi_8254.h"

/*
 * Register I/O map
 */
#define PCL812_TIMER_BASE			0x00
#define PCL812_AI_LSB_REG			0x04
#define PCL812_AI_MSB_REG			0x05
#define PCL812_AI_MSB_DRDY			BIT(4)
#define PCL812_AO_LSB_REG(x)			(0x04 + ((x) * 2))
#define PCL812_AO_MSB_REG(x)			(0x05 + ((x) * 2))
#define PCL812_DI_LSB_REG			0x06
#define PCL812_DI_MSB_REG			0x07
#define PCL812_STATUS_REG			0x08
#define PCL812_STATUS_DRDY			BIT(5)
#define PCL812_RANGE_REG			0x09
#define PCL812_MUX_REG				0x0a
#define PCL812_MUX_CHAN(x)			((x) << 0)
#define PCL812_MUX_CS0				BIT(4)
#define PCL812_MUX_CS1				BIT(5)
#define PCL812_CTRL_REG				0x0b
#define PCL812_CTRL_TRIG(x)			(((x) & 0x7) << 0)
#define PCL812_CTRL_DISABLE_TRIG		PCL812_CTRL_TRIG(0)
#define PCL812_CTRL_SOFT_TRIG			PCL812_CTRL_TRIG(1)
#define PCL812_CTRL_PACER_DMA_TRIG		PCL812_CTRL_TRIG(2)
#define PCL812_CTRL_PACER_EOC_TRIG		PCL812_CTRL_TRIG(6)
#define PCL812_SOFTTRIG_REG			0x0c
#define PCL812_DO_LSB_REG			0x0d
#define PCL812_DO_MSB_REG			0x0e

#define MAX_CHANLIST_LEN	256	/* length of scan list */

/* Analog input range tables for the various supported boards. */
static const struct comedi_lrange range_pcl812pg_ai = {
	5, {
		BIP_RANGE(5),
		BIP_RANGE(2.5),
		BIP_RANGE(1.25),
		BIP_RANGE(0.625),
		BIP_RANGE(0.3125)
	}
};

static const struct comedi_lrange range_pcl812pg2_ai = {
	5, {
		BIP_RANGE(10),
		BIP_RANGE(5),
		BIP_RANGE(2.5),
		BIP_RANGE(1.25),
		BIP_RANGE(0.625)
	}
};

static const struct comedi_lrange range812_bipolar1_25 = {
	1, {
		BIP_RANGE(1.25)
	}
};

static const struct comedi_lrange range812_bipolar0_625 = {
	1, {
		BIP_RANGE(0.625)
	}
};

static const struct comedi_lrange range812_bipolar0_3125 = {
	1, {
		BIP_RANGE(0.3125)
	}
};

static const struct comedi_lrange range_pcl813b_ai = {
	4, {
		BIP_RANGE(5),
		BIP_RANGE(2.5),
		BIP_RANGE(1.25),
		BIP_RANGE(0.625)
	}
};

static const struct comedi_lrange range_pcl813b2_ai = {
	4, {
		UNI_RANGE(10),
		UNI_RANGE(5),
		UNI_RANGE(2.5),
		UNI_RANGE(1.25)
	}
};

static const struct comedi_lrange range_iso813_1_ai = {
	5, {
		BIP_RANGE(5),
		BIP_RANGE(2.5),
		BIP_RANGE(1.25),
		BIP_RANGE(0.625),
		BIP_RANGE(0.3125)
	}
};

static const struct comedi_lrange range_iso813_1_2_ai = {
	5, {
		UNI_RANGE(10),
		UNI_RANGE(5),
		UNI_RANGE(2.5),
		UNI_RANGE(1.25),
		UNI_RANGE(0.625)
	}
};

static const struct comedi_lrange range_iso813_2_ai = {
	4, {
		BIP_RANGE(5),
		BIP_RANGE(2.5),
		BIP_RANGE(1.25),
		BIP_RANGE(0.625)
	}
};

static const struct comedi_lrange range_iso813_2_2_ai = {
	4, {
		UNI_RANGE(10),
		UNI_RANGE(5),
		UNI_RANGE(2.5),
		UNI_RANGE(1.25)
	}
};

static const struct comedi_lrange range_acl8113_1_ai = {
	4, {
		BIP_RANGE(5),
		BIP_RANGE(2.5),
		BIP_RANGE(1.25),
		BIP_RANGE(0.625)
	}
};

static const struct comedi_lrange range_acl8113_1_2_ai = {
	4, {
		UNI_RANGE(10),
		UNI_RANGE(5),
		UNI_RANGE(2.5),
		UNI_RANGE(1.25)
	}
};

static const struct comedi_lrange range_acl8113_2_ai = {
	3, {
		BIP_RANGE(5),
		BIP_RANGE(2.5),
		BIP_RANGE(1.25)
	}
};

static const struct comedi_lrange range_acl8113_2_2_ai = {
	3, {
		UNI_RANGE(10),
		UNI_RANGE(5),
		UNI_RANGE(2.5)
	}
};

static const struct comedi_lrange range_acl8112dg_ai = {
	9, {
		BIP_RANGE(5),
		BIP_RANGE(2.5),
		BIP_RANGE(1.25),
		BIP_RANGE(0.625),
		UNI_RANGE(10),
		UNI_RANGE(5),
		UNI_RANGE(2.5),
		UNI_RANGE(1.25),
		BIP_RANGE(10)
	}
};

static const struct comedi_lrange range_acl8112hg_ai = {
	12, {
		BIP_RANGE(5),
		BIP_RANGE(0.5),
		BIP_RANGE(0.05),
		BIP_RANGE(0.005),
		UNI_RANGE(10),
		UNI_RANGE(1),
		UNI_RANGE(0.1),
		UNI_RANGE(0.01),
		BIP_RANGE(10),
		BIP_RANGE(1),
		BIP_RANGE(0.1),
		BIP_RANGE(0.01)
	}
};

static const struct comedi_lrange range_a821pgh_ai = {
	4, {
		BIP_RANGE(5),
		BIP_RANGE(0.5),
		BIP_RANGE(0.05),
		BIP_RANGE(0.005)
	}
};

enum pcl812_boardtype {
	BOARD_PCL812PG	= 0,	/* and ACL-8112PG */
	BOARD_PCL813B	= 1,
	BOARD_PCL812	= 2,
	BOARD_PCL813	= 3,
	BOARD_ISO813	= 5,
	BOARD_ACL8113	= 6,
	BOARD_ACL8112	= 7,	/* ACL-8112DG/HG, A-822PGL/PGH, A-823PGL/PGH */
	BOARD_ACL8216	= 8,	/* and ICP DAS A-826PG */
	BOARD_A821	= 9,	/* PGH, PGL, PGL/NDA versions */
};

/* Static per-model capabilities; one entry per supported board name. */
struct pcl812_board {
	const char *name;
	enum pcl812_boardtype board_type;
	int n_aichan;			/* number of A/D channels (SE) */
	int n_aochan;			/* number of D/A channels */
	unsigned int ai_ns_min;		/* minimum conversion period (ns) */
	const struct comedi_lrange *rangelist_ai;
	unsigned int irq_bits;		/* bitmask of usable IRQ lines */
	unsigned int has_dma:1;
	unsigned int has_16bit_ai:1;
	unsigned int has_mpc508_mux:1;	/* MPC508 input multiplexer fitted */
	unsigned int has_dio:1;
};

static const struct pcl812_board boardtypes[] = {
	{
		.name		= "pcl812",
		.board_type	= BOARD_PCL812,
		.n_aichan	= 16,
		.n_aochan	= 2,
		.ai_ns_min	= 33000,
		.rangelist_ai	= &range_bipolar10,
		.irq_bits	= 0xdcfc,
		.has_dma	= 1,
		.has_dio	= 1,
	}, {
		.name		= "pcl812pg",
		.board_type	= BOARD_PCL812PG,
		.n_aichan	= 16,
		.n_aochan	= 2,
		.ai_ns_min	= 33000,
		.rangelist_ai	= &range_pcl812pg_ai,
		.irq_bits	= 0xdcfc,
		.has_dma	= 1,
		.has_dio	= 1,
	}, {
		.name		= "acl8112pg",
		.board_type	= BOARD_PCL812PG,
		.n_aichan	= 16,
		.n_aochan	= 2,
		.ai_ns_min	= 10000,
		.rangelist_ai	= &range_pcl812pg_ai,
		.irq_bits	= 0xdcfc,
		.has_dma	= 1,
		.has_dio	= 1,
	}, {
		.name		= "acl8112dg",
		.board_type	= BOARD_ACL8112,
		.n_aichan	= 16,	/* 8 differential */
		.n_aochan	= 2,
		.ai_ns_min	= 10000,
		.rangelist_ai	= &range_acl8112dg_ai,
		.irq_bits	= 0xdcfc,
		.has_dma	= 1,
		.has_mpc508_mux	= 1,
		.has_dio	= 1,
	}, {
		.name		= "acl8112hg",
		.board_type	= BOARD_ACL8112,
		.n_aichan	= 16,	/* 8 differential */
		.n_aochan	= 2,
		.ai_ns_min	= 10000,
		.rangelist_ai	= &range_acl8112hg_ai,
		.irq_bits	= 0xdcfc,
		.has_dma	= 1,
		.has_mpc508_mux	= 1,
		.has_dio	= 1,
	}, {
		.name		= "a821pgl",
		.board_type	= BOARD_A821,
		.n_aichan	= 16,	/* 8 differential */
		.n_aochan	= 1,
		.ai_ns_min	= 10000,
		.rangelist_ai	= &range_pcl813b_ai,
		.irq_bits	= 0x000c,
		.has_dio	= 1,
	}, {
		.name		= "a821pglnda",
		.board_type	= BOARD_A821,
		.n_aichan	= 16,	/* 8 differential */
		.ai_ns_min	= 10000,
		.rangelist_ai	= &range_pcl813b_ai,
		.irq_bits	= 0x000c,
	}, {
		.name		= "a821pgh",
		.board_type	= BOARD_A821,
		.n_aichan	= 16,	/* 8 differential */
		.n_aochan	= 1,
		.ai_ns_min	= 10000,
		.rangelist_ai	= &range_a821pgh_ai,
		.irq_bits	= 0x000c,
		.has_dio	= 1,
	}, {
		.name		= "a822pgl",
		.board_type	= BOARD_ACL8112,
		.n_aichan	= 16,	/* 8 differential */
		.n_aochan	= 2,
		.ai_ns_min	= 10000,
		.rangelist_ai	= &range_acl8112dg_ai,
		.irq_bits	= 0xdcfc,
		.has_dma	= 1,
		.has_dio	= 1,
	}, {
		.name		= "a822pgh",
		.board_type	= BOARD_ACL8112,
		.n_aichan	= 16,	/* 8 differential */
		.n_aochan	= 2,
		.ai_ns_min	= 10000,
		.rangelist_ai	= &range_acl8112hg_ai,
		.irq_bits	= 0xdcfc,
		.has_dma	= 1,
		.has_dio	= 1,
	}, {
		.name		= "a823pgl",
		.board_type	= BOARD_ACL8112,
		.n_aichan	= 16,	/* 8 differential */
		.n_aochan	= 2,
		.ai_ns_min	= 8000,
		.rangelist_ai	= &range_acl8112dg_ai,
		.irq_bits	= 0xdcfc,
		.has_dma	= 1,
		.has_dio	= 1,
	}, {
		.name		= "a823pgh",
		.board_type	= BOARD_ACL8112,
		.n_aichan	= 16,	/* 8 differential */
		.n_aochan	= 2,
		.ai_ns_min	= 8000,
		.rangelist_ai	= &range_acl8112hg_ai,
		.irq_bits	= 0xdcfc,
		.has_dma	= 1,
		.has_dio	= 1,
	}, {
		.name		= "pcl813",
		.board_type	= BOARD_PCL813,
		.n_aichan	= 32,
		.rangelist_ai	= &range_pcl813b_ai,
	}, {
		.name		= "pcl813b",
		.board_type	= BOARD_PCL813B,
		.n_aichan	= 32,
		.rangelist_ai	= &range_pcl813b_ai,
	}, {
		.name		= "acl8113",
		.board_type	= BOARD_ACL8113,
		.n_aichan	= 32,
		.rangelist_ai	= &range_acl8113_1_ai,
	}, {
		.name		= "iso813",
		.board_type	= BOARD_ISO813,
		.n_aichan	= 32,
		.rangelist_ai	= &range_iso813_1_ai,
	}, {
		.name		= "acl8216",
		.board_type	= BOARD_ACL8216,
		.n_aichan	= 16,	/* 8 differential */
		.n_aochan	= 2,
		.ai_ns_min	= 10000,
		.rangelist_ai	= &range_pcl813b2_ai,
		.irq_bits	= 0xdcfc,
		.has_dma	= 1,
		.has_16bit_ai	= 1,
		.has_mpc508_mux	= 1,
		.has_dio	= 1,
	}, {
		.name		= "a826pg",
		.board_type	= BOARD_ACL8216,
		.n_aichan	= 16,	/* 8 differential */
		.n_aochan	= 2,
		.ai_ns_min	= 10000,
		.rangelist_ai	= &range_pcl813b2_ai,
		.irq_bits	= 0xdcfc,
		.has_dma	= 1,
		.has_16bit_ai	= 1,
		.has_dio	= 1,
	},
};

/* Runtime state, allocated per attached device. */
struct pcl812_private {
	struct comedi_isadma *dma;
	unsigned char range_correction;	/* =1 we must add 1 to range number */
	unsigned int last_ai_chanspec;
	unsigned char mode_reg_int;	/* stored INT number for some cards */
	unsigned int ai_poll_ptr;	/* how many samples transfer poll */
	unsigned int max_812_ai_mode0_rangewait;	/* settling time for gain */
	unsigned int use_diff:1;
	unsigned int use_mpc508:1;
	unsigned int use_ext_trg:1;
	unsigned int ai_dma:1;
	unsigned int ai_eos:1;
};

/*
 * Program the current DMA descriptor for the next transfer, sizing it so
 * it never runs past the end of the command ('unread_samples' are samples
 * already in the buffer but not yet consumed).
 */
static void pcl812_ai_setup_dma(struct comedi_device *dev,
				struct comedi_subdevice *s,
				unsigned int unread_samples)
{
	struct pcl812_private *devpriv = dev->private;
	struct comedi_isadma *dma = devpriv->dma;
	struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
	unsigned int bytes;
	unsigned int max_samples;
	unsigned int nsamples;

	comedi_isadma_disable(dma->chan);

	/* if using EOS, adapt DMA buffer to one scan */
	bytes = devpriv->ai_eos ? comedi_bytes_per_scan(s) : desc->maxsize;
	max_samples = comedi_bytes_to_samples(s, bytes);

	/*
	 * Determine dma size based on the buffer size plus the number of
	 * unread samples and the number of samples remaining in the command.
	 */
	nsamples = comedi_nsamples_left(s, max_samples + unread_samples);
	if (nsamples > unread_samples) {
		nsamples -= unread_samples;
		desc->size = comedi_samples_to_bytes(s, nsamples);
		comedi_isadma_program(desc);
	}
}

/*
 * Select the input channel (and chip-select bits for the optional MPC508
 * multiplexer) and gain range.  When 'wait' is set, busy-wait for the
 * programmable-gain amplifier to settle before returning.
 */
static void pcl812_ai_set_chan_range(struct comedi_device *dev,
				     unsigned int chanspec, char wait)
{
	struct pcl812_private *devpriv = dev->private;
	unsigned int chan = CR_CHAN(chanspec);
	unsigned int range = CR_RANGE(chanspec);
	unsigned int mux = 0;

	/* skip the register writes if channel/range are unchanged */
	if (chanspec == devpriv->last_ai_chanspec)
		return;

	devpriv->last_ai_chanspec = chanspec;

	if (devpriv->use_mpc508) {
		if (devpriv->use_diff) {
			mux |= PCL812_MUX_CS0 | PCL812_MUX_CS1;
		} else {
			if (chan < 8)
				mux |= PCL812_MUX_CS0;
			else
				mux |= PCL812_MUX_CS1;
		}
	}

	outb(mux | PCL812_MUX_CHAN(chan), dev->iobase + PCL812_MUX_REG);
	outb(range + devpriv->range_correction,
	     dev->iobase + PCL812_RANGE_REG);

	if (wait)
		/*
		 * XXX this depends on selected range and can be very long for
		 * some high gain ranges!
		 */
		udelay(devpriv->max_812_ai_mode0_rangewait);
}

static void pcl812_ai_clear_eoc(struct comedi_device *dev)
{
	/* writing any value clears the interrupt request */
	outb(0, dev->iobase + PCL812_STATUS_REG);
}

static void pcl812_ai_soft_trig(struct comedi_device *dev)
{
	/* writing any value triggers a software conversion */
	outb(255, dev->iobase + PCL812_SOFTTRIG_REG);
}

/* Read one conversion result; masked to the subdevice resolution. */
static unsigned int pcl812_ai_get_sample(struct comedi_device *dev,
					 struct comedi_subdevice *s)
{
	unsigned int val;

	val = inb(dev->iobase + PCL812_AI_MSB_REG) << 8;
	val |= inb(dev->iobase + PCL812_AI_LSB_REG);

	return val & s->maxdata;
}

/*
 * comedi_timeout() callback: returns 0 once the conversion has completed.
 * 16-bit boards report via the status register, 12-bit boards via the MSB
 * register; the DRDY bits appear to be active-low here.
 */
static int pcl812_ai_eoc(struct comedi_device *dev,
			 struct comedi_subdevice *s,
			 struct comedi_insn *insn,
			 unsigned long context)
{
	unsigned int status;

	if (s->maxdata > 0x0fff) {
		status = inb(dev->iobase + PCL812_STATUS_REG);
		if ((status & PCL812_STATUS_DRDY) == 0)
			return 0;
	} else {
		status = inb(dev->iobase + PCL812_AI_MSB_REG);
		if ((status & PCL812_AI_MSB_DRDY) == 0)
			return 0;
	}
	return -EBUSY;
}

/* Validate an asynchronous analog-input command (comedi "cmdtest"). */
static int pcl812_ai_cmdtest(struct comedi_device *dev,
			     struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
	const struct pcl812_board *board = dev->board_ptr;
	struct pcl812_private *devpriv = dev->private;
	int err = 0;
	unsigned int flags;

	/* Step 1 : check if triggers are trivially valid */

	err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
	err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);

	/* external trigger boards cannot use the internal pacer and
	 * vice versa */
	if (devpriv->use_ext_trg)
		flags = TRIG_EXT;
	else
		flags = TRIG_TIMER;
	err |= comedi_check_trigger_src(&cmd->convert_src, flags);

	err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
	err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);

	if (err)
		return 1;

	/* Step 2a : make sure trigger sources are unique */

	err |= comedi_check_trigger_is_unique(cmd->stop_src);

	/* Step 2b : and mutually compatible */

	if (err)
		return 2;

	/* Step 3: check if arguments are trivially valid */

	err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
	err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);

	if (cmd->convert_src == TRIG_TIMER) {
		err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
						    board->ai_ns_min);
	} else {	/* TRIG_EXT */
		err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
	}

	err |= comedi_check_trigger_arg_min(&cmd->chanlist_len, 1);
	err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
					   cmd->chanlist_len);

	if (cmd->stop_src == TRIG_COUNT)
		err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
	else	/* TRIG_NONE */
		err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);

	if (err)
		return 3;

	/* step 4: fix up any arguments */

	if (cmd->convert_src == TRIG_TIMER) {
		unsigned int arg = cmd->convert_arg;

		comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
		err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
	}

	if (err)
		return 4;

	return 0;
}

/* Start an asynchronous analog-input command. */
static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
	struct pcl812_private *devpriv = dev->private;
	struct comedi_isadma *dma = devpriv->dma;
	struct comedi_cmd *cmd = &s->async->cmd;
	unsigned int ctrl = 0;
	unsigned int i;

	pcl812_ai_set_chan_range(dev, cmd->chanlist[0], 1);

	if (dma) {	/* check if we can use DMA transfer */
		devpriv->ai_dma = 1;
		/* DMA cannot switch channels, so every chanlist entry
		 * must be identical */
		for (i = 1; i < cmd->chanlist_len; i++)
			if (cmd->chanlist[0] != cmd->chanlist[i]) {
				/* we cannot use DMA :-( */
				devpriv->ai_dma = 0;
				break;
			}
	} else {
		devpriv->ai_dma = 0;
	}

	devpriv->ai_poll_ptr = 0;

	/* don't we want wake up every scan? */
	if (cmd->flags & CMDF_WAKE_EOS) {
		devpriv->ai_eos = 1;

		/* DMA is useless for this situation */
		if (cmd->chanlist_len == 1)
			devpriv->ai_dma = 0;
	}

	if (devpriv->ai_dma) {
		/* setup and enable dma for the first buffer */
		dma->cur_dma = 0;
		pcl812_ai_setup_dma(dev, s, 0);
	}

	switch (cmd->convert_src) {
	case TRIG_TIMER:
		comedi_8254_update_divisors(dev->pacer);
		comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
		break;
	}

	if (devpriv->ai_dma)
		ctrl |= PCL812_CTRL_PACER_DMA_TRIG;
	else
		ctrl |= PCL812_CTRL_PACER_EOC_TRIG;
	outb(devpriv->mode_reg_int | ctrl, dev->iobase + PCL812_CTRL_REG);

	return 0;
}

/* Returns false (and flags EOA) once the requested scan count is done. */
static bool pcl812_ai_next_chan(struct comedi_device *dev,
				struct comedi_subdevice *s)
{
	struct comedi_cmd *cmd = &s->async->cmd;

	if (cmd->stop_src == TRIG_COUNT &&
	    s->async->scans_done >= cmd->stop_arg) {
		s->async->events |= COMEDI_CB_EOA;
		return false;
	}

	return true;
}

/* Interrupt path for non-DMA acquisition: read one sample per IRQ. */
static void pcl812_handle_eoc(struct comedi_device *dev,
			      struct comedi_subdevice *s)
{
	struct comedi_cmd *cmd = &s->async->cmd;
	unsigned int chan = s->async->cur_chan;
	unsigned int next_chan;
	unsigned short val;

	if (pcl812_ai_eoc(dev, s, NULL, 0)) {
		dev_dbg(dev->class_dev, "A/D cmd IRQ without DRDY!\n");
		s->async->events |= COMEDI_CB_ERROR;
		return;
	}

	val = pcl812_ai_get_sample(dev, s);
	comedi_buf_write_samples(s, &val, 1);

	/* Set up next channel. Added by abbotti 2010-01-20, but untested. */
	next_chan = s->async->cur_chan;
	if (cmd->chanlist[chan] != cmd->chanlist[next_chan])
		pcl812_ai_set_chan_range(dev, cmd->chanlist[next_chan], 0);

	pcl812_ai_next_chan(dev, s);
}

/* Copy 'len' samples out of the DMA bounce buffer into the async buffer. */
static void transfer_from_dma_buf(struct comedi_device *dev,
				  struct comedi_subdevice *s,
				  unsigned short *ptr,
				  unsigned int bufptr, unsigned int len)
{
	unsigned int i;
	unsigned short val;

	for (i = len; i; i--) {
		val = ptr[bufptr++];
		comedi_buf_write_samples(s, &val, 1);

		if (!pcl812_ai_next_chan(dev, s))
			break;
	}
}

/* Interrupt path for DMA acquisition: drain the finished buffer and
 * flip to the other one. */
static void pcl812_handle_dma(struct comedi_device *dev,
			      struct comedi_subdevice *s)
{
	struct pcl812_private *devpriv = dev->private;
	struct comedi_isadma *dma = devpriv->dma;
	struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
	unsigned int nsamples;
	int bufptr;

	/* samples not already consumed by a poll() */
	nsamples = comedi_bytes_to_samples(s, desc->size) -
		   devpriv->ai_poll_ptr;
	bufptr = devpriv->ai_poll_ptr;
	devpriv->ai_poll_ptr = 0;

	/* restart dma with the next buffer */
	dma->cur_dma = 1 - dma->cur_dma;
	pcl812_ai_setup_dma(dev, s, nsamples);

	transfer_from_dma_buf(dev, s, desc->virt_addr, bufptr, nsamples);
}

static irqreturn_t pcl812_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *s = dev->read_subdev;
	struct pcl812_private *devpriv = dev->private;

	/* spurious interrupt before/after attach: just acknowledge it */
	if (!dev->attached) {
		pcl812_ai_clear_eoc(dev);
		return IRQ_HANDLED;
	}

	if (devpriv->ai_dma)
		pcl812_handle_dma(dev, s);
	else
		pcl812_handle_eoc(dev, s);

	pcl812_ai_clear_eoc(dev);

	comedi_handle_events(dev, s);
	return IRQ_HANDLED;
}

/* Drain any samples already transferred by DMA without waiting for the
 * end-of-buffer interrupt. */
static int pcl812_ai_poll(struct comedi_device *dev,
			  struct comedi_subdevice *s)
{
	struct pcl812_private *devpriv = dev->private;
	struct comedi_isadma *dma = devpriv->dma;
	struct comedi_isadma_desc *desc;
	unsigned long flags;
	unsigned int poll;
	int ret;

	/* poll is valid only for DMA transfer */
	if (!devpriv->ai_dma)
		return 0;

	spin_lock_irqsave(&dev->spinlock, flags);

	poll = comedi_isadma_poll(dma);
	poll = comedi_bytes_to_samples(s, poll);
	if (poll > devpriv->ai_poll_ptr) {
		desc = &dma->desc[dma->cur_dma];
		transfer_from_dma_buf(dev, s, desc->virt_addr,
				      devpriv->ai_poll_ptr,
				      poll - devpriv->ai_poll_ptr);
		/* new buffer position */
		devpriv->ai_poll_ptr = poll;

		ret = comedi_buf_n_bytes_ready(s);
	} else {
		/* no new samples */
		ret = 0;
	}

	spin_unlock_irqrestore(&dev->spinlock, flags);

	return ret;
}

/* Stop an in-progress command: kill DMA, disable triggers and the pacer. */
static int pcl812_ai_cancel(struct comedi_device *dev,
			    struct comedi_subdevice *s)
{
	struct pcl812_private *devpriv = dev->private;

	if (devpriv->ai_dma)
		comedi_isadma_disable(devpriv->dma->chan);

	outb(devpriv->mode_reg_int | PCL812_CTRL_DISABLE_TRIG,
	     dev->iobase + PCL812_CTRL_REG);
	comedi_8254_pacer_enable(dev->pacer, 1, 2, false);
	pcl812_ai_clear_eoc(dev);
	return 0;
}

/* Single software-triggered A/D conversion(s) (INSN_READ). */
static int pcl812_ai_insn_read(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data)
{
	struct pcl812_private *devpriv = dev->private;
	int ret = 0;
	int i;

	outb(devpriv->mode_reg_int | PCL812_CTRL_SOFT_TRIG,
	     dev->iobase + PCL812_CTRL_REG);

	pcl812_ai_set_chan_range(dev, insn->chanspec, 1);

	for (i = 0; i < insn->n; i++) {
		pcl812_ai_clear_eoc(dev);
		pcl812_ai_soft_trig(dev);

		ret = comedi_timeout(dev, s, insn, pcl812_ai_eoc, 0);
		if (ret)
			break;

		data[i] = pcl812_ai_get_sample(dev, s);
	}
	outb(devpriv->mode_reg_int | PCL812_CTRL_DISABLE_TRIG,
	     dev->iobase + PCL812_CTRL_REG);
	pcl812_ai_clear_eoc(dev);

	return ret ? ret : insn->n;
}

/* Write D/A output value(s); last value is cached for readback. */
static int pcl812_ao_insn_write(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data)
{
	unsigned int chan = CR_CHAN(insn->chanspec);
	unsigned int val = s->readback[chan];
	int i;

	for (i = 0; i < insn->n; i++) {
		val = data[i];
		outb(val & 0xff, dev->iobase + PCL812_AO_LSB_REG(chan));
		outb((val >> 8) & 0x0f, dev->iobase + PCL812_AO_MSB_REG(chan));
	}
	s->readback[chan] = val;

	return insn->n;
}

/* Read the 16 digital input lines. */
static int pcl812_di_insn_bits(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data)
{
	data[1] = inb(dev->iobase + PCL812_DI_LSB_REG) |
		  (inb(dev->iobase + PCL812_DI_MSB_REG) << 8);

	return insn->n;
}

/* Update the 16 digital output lines. */
static int pcl812_do_insn_bits(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn,
			       unsigned int *data)
{
	if (comedi_dio_update_state(s, data)) {
		outb(s->state & 0xff, dev->iobase + PCL812_DO_LSB_REG);
		outb((s->state >> 8), dev->iobase + PCL812_DO_MSB_REG);
	}

	data[1] = s->state;

	return insn->n;
}

/* Put the hardware into a known quiescent state. */
static void pcl812_reset(struct comedi_device *dev)
{
	const struct pcl812_board *board = dev->board_ptr;
	struct pcl812_private *devpriv = dev->private;
	unsigned int chan;

	/* disable analog input trigger */
	outb(devpriv->mode_reg_int | PCL812_CTRL_DISABLE_TRIG,
	     dev->iobase + PCL812_CTRL_REG);
	pcl812_ai_clear_eoc(dev);

	/*
	 * Invalidate last_ai_chanspec then set analog input to
	 * known channel/range.
	 */
	devpriv->last_ai_chanspec = CR_PACK(16, 0, 0);
	pcl812_ai_set_chan_range(dev, CR_PACK(0, 0, 0), 0);

	/* set analog output channels to 0V */
	for (chan = 0; chan < board->n_aochan; chan++) {
		outb(0, dev->iobase + PCL812_AO_LSB_REG(chan));
		outb(0, dev->iobase + PCL812_AO_MSB_REG(chan));
	}

	/* set all digital outputs low */
	if (board->has_dio) {
		outb(0, dev->iobase + PCL812_DO_MSB_REG);
		outb(0, dev->iobase + PCL812_DO_LSB_REG);
	}
}

/*
 * Pick the analog-input range table based on board type and the
 * jumper settings the user described via the devconfig options.
 * Some tables need a +1 correction applied to the range register.
 */
static void pcl812_set_ai_range_table(struct comedi_device *dev,
				      struct comedi_subdevice *s,
				      struct comedi_devconfig *it)
{
	const struct pcl812_board *board = dev->board_ptr;
	struct pcl812_private *devpriv = dev->private;

	switch (board->board_type) {
	case BOARD_PCL812PG:
		if (it->options[4] == 1)
			s->range_table = &range_pcl812pg2_ai;
		else
			s->range_table = board->rangelist_ai;
		break;
	case BOARD_PCL812:
		switch (it->options[4]) {
		case 0:
			s->range_table = &range_bipolar10;
			break;
		case 1:
			s->range_table = &range_bipolar5;
			break;
		case 2:
			s->range_table = &range_bipolar2_5;
			break;
		case 3:
			s->range_table = &range812_bipolar1_25;
			break;
		case 4:
			s->range_table = &range812_bipolar0_625;
			break;
		case 5:
			s->range_table = &range812_bipolar0_3125;
			break;
		default:
			s->range_table = &range_bipolar10;
			break;
		}
		break;
	case BOARD_PCL813B:
		if (it->options[1] == 1)
			s->range_table = &range_pcl813b2_ai;
		else
			s->range_table = board->rangelist_ai;
		break;
	case BOARD_ISO813:
		switch (it->options[1]) {
		case 0:
			s->range_table = &range_iso813_1_ai;
			break;
		case 1:
			s->range_table = &range_iso813_1_2_ai;
			break;
		case 2:
			s->range_table = &range_iso813_2_ai;
			devpriv->range_correction = 1;
			break;
		case 3:
			s->range_table = &range_iso813_2_2_ai;
			devpriv->range_correction = 1;
			break;
		default:
			s->range_table = &range_iso813_1_ai;
			break;
		}
		break;
	case BOARD_ACL8113:
		switch (it->options[1]) {
		case 0:
			s->range_table = &range_acl8113_1_ai;
			break;
		case 1:
			s->range_table = &range_acl8113_1_2_ai;
			break;
		case 2:
			s->range_table = &range_acl8113_2_ai;
			devpriv->range_correction = 1;
			break;
		case 3:
			s->range_table = &range_acl8113_2_2_ai;
			devpriv->range_correction = 1;
			break;
		default:
			s->range_table = &range_acl8113_1_ai;
			break;
		}
		break;
	default:
		s->range_table = board->rangelist_ai;
		break;
	}
}

static void pcl812_alloc_dma(struct comedi_device *dev, unsigned int dma_chan)
{
	struct pcl812_private *devpriv = dev->private;

	/* only DMA channels 3 and 1 are valid */
	if (!(dma_chan == 3 || dma_chan == 1))
		return;

	/* DMA uses two 8K buffers */
	devpriv->dma = comedi_isadma_alloc(dev, 2, dma_chan, dma_chan,
					   PAGE_SIZE * 2, COMEDI_ISADMA_READ);
}

static void pcl812_free_dma(struct comedi_device *dev)
{
	struct pcl812_private *devpriv = dev->private;

	if (devpriv)
		comedi_isadma_free(devpriv->dma);
}

/*
 * Attach routine: claim the I/O region, optionally grab an IRQ and DMA
 * channel, then create the AI/AO/DI/DO subdevices the board supports.
 * Devconfig options: [0]=iobase, [1]=irq, [2]=dma (or DIFF/ext-trg
 * selector on some boards), [3..5]=board-specific jumper settings.
 */
static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	const struct pcl812_board *board = dev->board_ptr;
	struct pcl812_private *devpriv;
	struct comedi_subdevice *s;
	int n_subdevices;
	int subdev;
	int ret;

	devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
	if (!devpriv)
		return -ENOMEM;

	ret = comedi_request_region(dev, it->options[0], 0x10);
	if (ret)
		return ret;

	if (board->irq_bits) {
		dev->pacer = comedi_8254_init(dev->iobase + PCL812_TIMER_BASE,
					      I8254_OSC_BASE_2MHZ,
					      I8254_IO8, 0);
		if (!dev->pacer)
			return -ENOMEM;

		/* only request the IRQ if it is one the board supports */
		if ((1 << it->options[1]) & board->irq_bits) {
			ret = request_irq(it->options[1], pcl812_interrupt, 0,
					  dev->board_name, dev);
			if (ret == 0)
				dev->irq = it->options[1];
		}
	}

	/* we need an IRQ to do DMA on channel 3 or 1 */
	if (dev->irq && board->has_dma)
		pcl812_alloc_dma(dev, it->options[2]);

	/* differential analog inputs? */
	switch (board->board_type) {
	case BOARD_A821:
		if (it->options[2] == 1)
			devpriv->use_diff = 1;
		break;
	case BOARD_ACL8112:
	case BOARD_ACL8216:
		if (it->options[4] == 1)
			devpriv->use_diff = 1;
		break;
	default:
		break;
	}

	n_subdevices = 1;		/* all boardtypes have analog inputs */
	if (board->n_aochan > 0)
		n_subdevices++;
	if (board->has_dio)
		n_subdevices += 2;

	ret = comedi_alloc_subdevices(dev, n_subdevices);
	if (ret)
		return ret;

	subdev = 0;

	/* Analog Input subdevice */
	s = &dev->subdevices[subdev];
	s->type		= COMEDI_SUBD_AI;
	s->subdev_flags	= SDF_READABLE;
	if (devpriv->use_diff) {
		s->subdev_flags	|= SDF_DIFF;
		s->n_chan	= board->n_aichan / 2;
	} else {
		s->subdev_flags	|= SDF_GROUND;
		s->n_chan	= board->n_aichan;
	}
	s->maxdata	= board->has_16bit_ai ? 0xffff : 0x0fff;

	pcl812_set_ai_range_table(dev, s, it);

	s->insn_read	= pcl812_ai_insn_read;

	/* async commands are only possible with an IRQ */
	if (dev->irq) {
		dev->read_subdev = s;
		s->subdev_flags	|= SDF_CMD_READ;
		s->len_chanlist	= MAX_CHANLIST_LEN;
		s->do_cmdtest	= pcl812_ai_cmdtest;
		s->do_cmd	= pcl812_ai_cmd;
		s->poll		= pcl812_ai_poll;
		s->cancel	= pcl812_ai_cancel;
	}

	devpriv->use_mpc508 = board->has_mpc508_mux;

	subdev++;

	/* analog output */
	if (board->n_aochan > 0) {
		s = &dev->subdevices[subdev];
		s->type		= COMEDI_SUBD_AO;
		s->subdev_flags	= SDF_WRITABLE | SDF_GROUND;
		s->n_chan	= board->n_aochan;
		s->maxdata	= 0xfff;
		switch (board->board_type) {
		case BOARD_A821:
			if (it->options[3] == 1)
				s->range_table = &range_unipolar10;
			else
				s->range_table = &range_unipolar5;
			break;
		case BOARD_PCL812:
		case BOARD_ACL8112:
		case BOARD_PCL812PG:
		case BOARD_ACL8216:
			switch (it->options[5]) {
			case 1:
				s->range_table = &range_unipolar10;
				break;
			case 2:
				s->range_table = &range_unknown;
				break;
			default:
				s->range_table = &range_unipolar5;
				break;
			}
			break;
		default:
			s->range_table = &range_unipolar5;
			break;
		}
		s->insn_write	= pcl812_ao_insn_write;

		ret = comedi_alloc_subdev_readback(s);
		if (ret)
			return ret;

		subdev++;
	}

	if (board->has_dio) {
		/* Digital Input subdevice */
		s = &dev->subdevices[subdev];
		s->type		= COMEDI_SUBD_DI;
		s->subdev_flags	= SDF_READABLE;
		s->n_chan	= 16;
		s->maxdata	= 1;
		s->range_table	= &range_digital;
		s->insn_bits	= pcl812_di_insn_bits;
		subdev++;

		/* Digital Output subdevice */
		s = &dev->subdevices[subdev];
		s->type		= COMEDI_SUBD_DO;
		s->subdev_flags	= SDF_WRITABLE;
		s->n_chan	= 16;
		s->maxdata	= 1;
		s->range_table	= &range_digital;
		s->insn_bits	= pcl812_do_insn_bits;
		subdev++;
	}

	/* board-type specific PGA settling time and trigger setup */
	switch (board->board_type) {
	case BOARD_ACL8216:
	case BOARD_PCL812PG:
	case BOARD_PCL812:
	case BOARD_ACL8112:
		devpriv->max_812_ai_mode0_rangewait = 1;
		if (it->options[3] > 0)
			/*  we use external trigger */
			devpriv->use_ext_trg = 1;
		break;
	case BOARD_A821:
		devpriv->max_812_ai_mode0_rangewait = 1;
		devpriv->mode_reg_int = (dev->irq << 4) & 0xf0;
		break;
	case BOARD_PCL813B:
	case BOARD_PCL813:
	case BOARD_ISO813:
	case BOARD_ACL8113:
		/* maybe there must by greatest timeout */
		devpriv->max_812_ai_mode0_rangewait = 5;
		break;
	}

	pcl812_reset(dev);

	return 0;
}

static void pcl812_detach(struct comedi_device *dev)
{
	pcl812_free_dma(dev);
	comedi_legacy_detach(dev);
}

static struct comedi_driver pcl812_driver = {
	.driver_name	= "pcl812",
	.module		= THIS_MODULE,
	.attach		= pcl812_attach,
	.detach		= pcl812_detach,
	.board_name	= &boardtypes[0].name,
	.num_names	= ARRAY_SIZE(boardtypes),
	.offset		= sizeof(struct pcl812_board),
};
module_comedi_driver(pcl812_driver);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
gpl-2.0
jashasweejena/VibeKernel
arch/x86/ia32/ia32_aout.c
1093
13049
/*
 *  a.out loader for x86-64
 *
 *  Copyright (C) 1991, 1992, 1996  Linus Torvalds
 *  Hacked together by Andi Kleen
 */

#include <linux/module.h>

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/binfmts.h>
#include <linux/personality.h>
#include <linux/init.h>
#include <linux/jiffies.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/user32.h>
#include <asm/ia32.h>

#undef WARN_OLD
#undef CORE_DUMP /* definitely broken */

static int load_aout_binary(struct linux_binprm *);
static int load_aout_library(struct file *);

#ifdef CORE_DUMP
static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
			  unsigned long limit);

/*
 * fill in the user structure for a core dump..
 */
static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
{
	u32 fs, gs;

/* changed the size calculations - should hopefully work better. lbt */
	dump->magic = CMAGIC;
	dump->start_code = 0;
	dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
	dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT;
	dump->u_dsize = ((unsigned long)
			 (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
	dump->u_dsize -= dump->u_tsize;
	dump->u_ssize = 0;
	dump->u_debugreg[0] = current->thread.debugreg0;
	dump->u_debugreg[1] = current->thread.debugreg1;
	dump->u_debugreg[2] = current->thread.debugreg2;
	dump->u_debugreg[3] = current->thread.debugreg3;
	dump->u_debugreg[4] = 0;
	dump->u_debugreg[5] = 0;
	dump->u_debugreg[6] = current->thread.debugreg6;
	dump->u_debugreg[7] = current->thread.debugreg7;

	/* stack pages below the 3GB boundary are dumped */
	if (dump->start_stack < 0xc0000000) {
		unsigned long tmp;

		tmp = (unsigned long) (0xc0000000 - dump->start_stack);
		dump->u_ssize = tmp >> PAGE_SHIFT;
	}

	dump->regs.bx = regs->bx;
	dump->regs.cx = regs->cx;
	dump->regs.dx = regs->dx;
	dump->regs.si = regs->si;
	dump->regs.di = regs->di;
	dump->regs.bp = regs->bp;
	dump->regs.ax = regs->ax;
	dump->regs.ds = current->thread.ds;
	dump->regs.es = current->thread.es;
	savesegment(fs, fs);
	dump->regs.fs = fs;
	savesegment(gs, gs);
	dump->regs.gs = gs;
	dump->regs.orig_ax = regs->orig_ax;
	dump->regs.ip = regs->ip;
	dump->regs.cs = regs->cs;
	dump->regs.flags = regs->flags;
	dump->regs.sp = regs->sp;
	dump->regs.ss = regs->ss;

#if 1 /* FIXME */
	dump->u_fpvalid = 0;
#else
	dump->u_fpvalid = dump_fpu(regs, &dump->i387);
#endif
}
#endif

static struct linux_binfmt aout_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_aout_binary,
	.load_shlib	= load_aout_library,
#ifdef CORE_DUMP
	.core_dump	= aout_core_dump,
#endif
	.min_coredump	= PAGE_SIZE
};

/* Extend the brk area to cover [start, end) (page aligned). */
static void set_brk(unsigned long start, unsigned long end)
{
	start = PAGE_ALIGN(start);
	end = PAGE_ALIGN(end);
	if (end <= start)
		return;
	vm_brk(start, end - start);
}

#ifdef CORE_DUMP
/*
 * These are the only things you should do on a core-file: use only these
 * macros to write out all the necessary info.
 */

#include <linux/coredump.h>

#define DUMP_WRITE(addr, nr)			     \
	if (!dump_write(file, (void *)(addr), (nr))) \
		goto end_coredump;

#define DUMP_SEEK(offset)		\
	if (!dump_seek(file, offset))	\
		goto end_coredump;

#define START_DATA()	(u.u_tsize << PAGE_SHIFT)
#define START_STACK(u)	(u.start_stack)

/*
 * Routine writes a core dump image in the current directory.
 * Currently only a stub-function.
 *
 * Note that setuid/setgid files won't make a core-dump if the uid/gid
 * changed due to the set[u|g]id. It's enforced by the "current->mm->dumpable"
 * field, which also makes sure the core-dumps won't be recursive if the
 * dumping of the process results in another error..
 */
static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
			  unsigned long limit)
{
	mm_segment_t fs;
	int has_dumped = 0;
	unsigned long dump_start, dump_size;
	struct user32 dump;

	fs = get_fs();
	set_fs(KERNEL_DS);
	has_dumped = 1;
	strncpy(dump.u_comm, current->comm, sizeof(current->comm));
	dump.u_ar0 = offsetof(struct user32, regs);
	dump.signal = signr;
	dump_thread32(regs, &dump);

	/*
	 * If the size of the dump file exceeds the rlimit, then see
	 * what would happen if we wrote the stack, but not the data
	 * area.
	 */
	if ((dump.u_dsize + dump.u_ssize + 1) * PAGE_SIZE > limit)
		dump.u_dsize = 0;

	/* Make sure we have enough room to write the stack and data areas. */
	if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
		dump.u_ssize = 0;

	/* make sure we actually have a data and stack area to dump */
	set_fs(USER_DS);
	if (!access_ok(VERIFY_READ, (void *) (unsigned long)START_DATA(dump),
		       dump.u_dsize << PAGE_SHIFT))
		dump.u_dsize = 0;
	if (!access_ok(VERIFY_READ, (void *) (unsigned long)START_STACK(dump),
		       dump.u_ssize << PAGE_SHIFT))
		dump.u_ssize = 0;

	set_fs(KERNEL_DS);
	/* struct user */
	DUMP_WRITE(&dump, sizeof(dump));
	/* Now dump all of the user data.  Include malloced stuff as well */
	DUMP_SEEK(PAGE_SIZE - sizeof(dump));
	/* now we start writing out the user space info */
	set_fs(USER_DS);
	/* Dump the data area */
	if (dump.u_dsize != 0) {
		dump_start = START_DATA(dump);
		dump_size = dump.u_dsize << PAGE_SHIFT;
		DUMP_WRITE(dump_start, dump_size);
	}
	/* Now prepare to dump the stack area */
	if (dump.u_ssize != 0) {
		dump_start = START_STACK(dump);
		dump_size = dump.u_ssize << PAGE_SHIFT;
		DUMP_WRITE(dump_start, dump_size);
	}
end_coredump:
	set_fs(fs);
	return has_dumped;
}
#endif

/*
 * create_aout_tables() parses the env- and arg-strings in new user
 * memory and creates the pointer tables from them, and puts their
 * addresses on the "stack", returning the new stack pointer value.
 * Pointers are 32-bit (u32) since this builds a compat (ia32) stack.
 */
static u32 __user *create_aout_tables(char __user *p, struct linux_binprm *bprm)
{
	u32 __user *argv, *envp, *sp;
	int argc = bprm->argc, envc = bprm->envc;

	/* round the string area base down to a u32 boundary */
	sp = (u32 __user *) ((-(unsigned long)sizeof(u32)) & (unsigned long) p);
	sp -= envc+1;
	envp = sp;
	sp -= argc+1;
	argv = sp;
	put_user((unsigned long) envp, --sp);
	put_user((unsigned long) argv, --sp);
	put_user(argc, --sp);
	current->mm->arg_start = (unsigned long) p;
	while (argc-- > 0) {
		char c;

		put_user((u32)(unsigned long)p, argv++);
		/* skip over the NUL-terminated argument string */
		do {
			get_user(c, p++);
		} while (c);
	}
	put_user(0, argv);
	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
	while (envc-- > 0) {
		char c;

		put_user((u32)(unsigned long)p, envp++);
		do {
			get_user(c, p++);
		} while (c);
	}
	put_user(0, envp);
	current->mm->env_end = (unsigned long) p;
	return sp;
}

/*
 * These are the functions used to load a.out style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */
static int load_aout_binary(struct linux_binprm *bprm)
{
	unsigned long error, fd_offset, rlim;
	struct pt_regs *regs = current_pt_regs();
	struct exec ex;
	int retval;

	ex = *((struct exec *) bprm->buf);		/* exec-header */
	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
	     N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
	    N_TRSIZE(ex) || N_DRSIZE(ex) ||
	    i_size_read(file_inode(bprm->file)) <
	    ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
		return -ENOEXEC;
	}

	fd_offset = N_TXTOFF(ex);

	/* Check initial limits. This avoids letting people circumvent
	 * size limits imposed on them by creating programs with large
	 * arrays in the data or bss.
	 */
	rlim = rlimit(RLIMIT_DATA);
	if (rlim >= RLIM_INFINITY)
		rlim = ~0;
	if (ex.a_data + ex.a_bss > rlim)
		return -ENOMEM;

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		return retval;

	/* OK, This is the point of no return */
	set_personality(PER_LINUX);
	set_personality_ia32(false);

	setup_new_exec(bprm);

	regs->cs = __USER32_CS;
	regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 =
		regs->r13 = regs->r14 = regs->r15 = 0;

	current->mm->end_code = ex.a_text +
		(current->mm->start_code = N_TXTADDR(ex));
	current->mm->end_data = ex.a_data +
		(current->mm->start_data = N_DATADDR(ex));
	current->mm->brk = ex.a_bss +
		(current->mm->start_brk = N_BSSADDR(ex));
	current->mm->free_area_cache = TASK_UNMAPPED_BASE;
	current->mm->cached_hole_size = 0;

	retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
	if (retval < 0) {
		/* Someone check-me: is this error path enough? */
		send_sig(SIGKILL, current, 0);
		return retval;
	}

	install_exec_creds(bprm);

	if (N_MAGIC(ex) == OMAGIC) {
		/* old impure format: text and data are contiguous in the
		 * file and loaded with a single read */
		unsigned long text_addr, map_size;

		text_addr = N_TXTADDR(ex);
		map_size = ex.a_text+ex.a_data;

		error = vm_brk(text_addr & PAGE_MASK, map_size);

		if (error != (text_addr & PAGE_MASK)) {
			send_sig(SIGKILL, current, 0);
			return error;
		}

		error = read_code(bprm->file, text_addr, 32,
				  ex.a_text + ex.a_data);
		if ((signed long)error < 0) {
			send_sig(SIGKILL, current, 0);
			return error;
		}
	} else {
#ifdef WARN_OLD
		static unsigned long error_time, error_time2;
		if ((ex.a_text & 0xfff || ex.a_data & 0xfff) &&
		    (N_MAGIC(ex) != NMAGIC) &&
		    time_after(jiffies, error_time2 + 5*HZ)) {
			printk(KERN_NOTICE "executable not page aligned\n");
			error_time2 = jiffies;
		}

		if ((fd_offset & ~PAGE_MASK) != 0 &&
		    time_after(jiffies, error_time + 5*HZ)) {
			printk(KERN_WARNING
			       "fd_offset is not page aligned. Please convert "
			       "program: %s\n",
			       bprm->file->f_path.dentry->d_name.name);
			error_time = jiffies;
		}
#endif

		/* fall back to copying when the file cannot be mmapped
		 * at a page-aligned offset */
		if (!bprm->file->f_op->mmap || (fd_offset & ~PAGE_MASK) != 0) {
			vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
			read_code(bprm->file, N_TXTADDR(ex), fd_offset,
				  ex.a_text+ex.a_data);
			goto beyond_if;
		}

		error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
				PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
				MAP_EXECUTABLE | MAP_32BIT,
				fd_offset);

		if (error != N_TXTADDR(ex)) {
			send_sig(SIGKILL, current, 0);
			return error;
		}

		error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
				PROT_READ | PROT_WRITE | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
				MAP_EXECUTABLE | MAP_32BIT,
				fd_offset + ex.a_text);
		if (error != N_DATADDR(ex)) {
			send_sig(SIGKILL, current, 0);
			return error;
		}
	}
beyond_if:
	set_binfmt(&aout_format);

	set_brk(current->mm->start_brk, current->mm->brk);

	current->mm->start_stack =
		(unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
	/* start thread */
	loadsegment(fs, 0);
	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);
	load_gs_index(0);
	(regs)->ip = ex.a_entry;
	(regs)->sp = current->mm->start_stack;
	(regs)->flags = 0x200;
	(regs)->cs = __USER32_CS;
	(regs)->ss = __USER32_DS;
	regs->r8 = regs->r9 = regs->r10 = regs->r11 =
	regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
	set_fs(USER_DS);
	return 0;
}

static int load_aout_library(struct file *file)
{
	unsigned long bss, start_addr, len, error;
	int retval;
	struct exec ex;

	retval = -ENOEXEC;
	error = kernel_read(file, 0, (char *) &ex, sizeof(ex));
	if (error != sizeof(ex))
		goto out;

	/* We come in here for the regular a.out style of shared libraries */
	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) ||
	    N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) ||
	    i_size_read(file_inode(file)) <
	    ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
		goto out;
	}

	if (N_FLAGS(ex))
		goto out;

	/* For  QMAGIC, the starting address is 0x20 into the page.  We mask
	   this off to get the starting address for the page */

	start_addr =  ex.a_entry & 0xfffff000;

	if ((N_TXTOFF(ex) & ~PAGE_MASK) != 0) {
#ifdef WARN_OLD
		static unsigned long error_time;
		if (time_after(jiffies, error_time + 5*HZ)) {
			printk(KERN_WARNING
			       "N_TXTOFF is not page aligned. Please convert "
			       "library: %s\n",
			       file->f_path.dentry->d_name.name);
			error_time = jiffies;
		}
#endif
		vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);

		read_code(file, start_addr, N_TXTOFF(ex),
			  ex.a_text + ex.a_data);
		retval = 0;
		goto out;
	}
	/* Now use mmap to map the library into memory. */
	error = vm_mmap(file, start_addr, ex.a_text + ex.a_data,
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_32BIT,
			N_TXTOFF(ex));
	retval = error;
	if (error != start_addr)
		goto out;

	/* allocate any bss beyond the mapped text+data */
	len = PAGE_ALIGN(ex.a_text + ex.a_data);
	bss = ex.a_text + ex.a_data + ex.a_bss;
	if (bss > len) {
		error = vm_brk(start_addr + len, bss - len);
		retval = error;
		if (error != start_addr + len)
			goto out;
	}
	retval = 0;
out:
	return retval;
}

static int __init init_aout_binfmt(void)
{
	register_binfmt(&aout_format);
	return 0;
}

static void __exit exit_aout_binfmt(void)
{
	unregister_binfmt(&aout_format);
}

module_init(init_aout_binfmt);
module_exit(exit_aout_binfmt);
MODULE_LICENSE("GPL");
gpl-2.0
Dee-UK/D33_KK_RK3066
drivers/block/brd.c
2117
15590
/* * Ram backed block device driver. * * Copyright (C) 2007 Nick Piggin * Copyright (C) 2007 Novell Inc. * * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright * of their respective owners. */ #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/major.h> #include <linux/blkdev.h> #include <linux/bio.h> #include <linux/highmem.h> #include <linux/mutex.h> #include <linux/radix-tree.h> #include <linux/buffer_head.h> /* invalidate_bh_lrus() */ #include <linux/slab.h> #include <asm/uaccess.h> #define SECTOR_SHIFT 9 #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) #define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT) /* * Each block ramdisk device has a radix_tree brd_pages of pages that stores * the pages containing the block device's contents. A brd page's ->index is * its offset in PAGE_SIZE units. This is similar to, but in no way connected * with, the kernel's pagecache or buffer cache (which sit above our block * device). */ struct brd_device { int brd_number; struct request_queue *brd_queue; struct gendisk *brd_disk; struct list_head brd_list; /* * Backing store of pages and lock to protect it. This is the contents * of the block device. */ spinlock_t brd_lock; struct radix_tree_root brd_pages; }; /* * Look up and return a brd's page for a given sector. */ static DEFINE_MUTEX(brd_mutex); static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) { pgoff_t idx; struct page *page; /* * The page lifetime is protected by the fact that we have opened the * device node -- brd pages will never be deleted under us, so we * don't need any further locking or refcounting. * * This is strictly true for the radix-tree nodes as well (ie. we * don't actually need the rcu_read_lock()), however that is not a * documented feature of the radix-tree API so it is better to be * safe here (we don't have total exclusion from radix tree updates * here, only deletes). 
*/ rcu_read_lock(); idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */ page = radix_tree_lookup(&brd->brd_pages, idx); rcu_read_unlock(); BUG_ON(page && page->index != idx); return page; } /* * Look up and return a brd's page for a given sector. * If one does not exist, allocate an empty page, and insert that. Then * return it. */ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) { pgoff_t idx; struct page *page; gfp_t gfp_flags; page = brd_lookup_page(brd, sector); if (page) return page; /* * Must use NOIO because we don't want to recurse back into the * block or filesystem layers from page reclaim. * * Cannot support XIP and highmem, because our ->direct_access * routine for XIP must return memory that is always addressable. * If XIP was reworked to use pfns and kmap throughout, this * restriction might be able to be lifted. */ gfp_flags = GFP_NOIO | __GFP_ZERO; #ifndef CONFIG_BLK_DEV_XIP gfp_flags |= __GFP_HIGHMEM; #endif page = alloc_page(gfp_flags); if (!page) return NULL; if (radix_tree_preload(GFP_NOIO)) { __free_page(page); return NULL; } spin_lock(&brd->brd_lock); idx = sector >> PAGE_SECTORS_SHIFT; if (radix_tree_insert(&brd->brd_pages, idx, page)) { __free_page(page); page = radix_tree_lookup(&brd->brd_pages, idx); BUG_ON(!page); BUG_ON(page->index != idx); } else page->index = idx; spin_unlock(&brd->brd_lock); radix_tree_preload_end(); return page; } static void brd_free_page(struct brd_device *brd, sector_t sector) { struct page *page; pgoff_t idx; spin_lock(&brd->brd_lock); idx = sector >> PAGE_SECTORS_SHIFT; page = radix_tree_delete(&brd->brd_pages, idx); spin_unlock(&brd->brd_lock); if (page) __free_page(page); } static void brd_zero_page(struct brd_device *brd, sector_t sector) { struct page *page; page = brd_lookup_page(brd, sector); if (page) clear_highpage(page); } /* * Free all backing store pages and radix tree. This must only be called when * there are no other users of the device. 
*/ #define FREE_BATCH 16 static void brd_free_pages(struct brd_device *brd) { unsigned long pos = 0; struct page *pages[FREE_BATCH]; int nr_pages; do { int i; nr_pages = radix_tree_gang_lookup(&brd->brd_pages, (void **)pages, pos, FREE_BATCH); for (i = 0; i < nr_pages; i++) { void *ret; BUG_ON(pages[i]->index < pos); pos = pages[i]->index; ret = radix_tree_delete(&brd->brd_pages, pos); BUG_ON(!ret || ret != pages[i]); __free_page(pages[i]); } pos++; /* * This assumes radix_tree_gang_lookup always returns as * many pages as possible. If the radix-tree code changes, * so will this have to. */ } while (nr_pages == FREE_BATCH); } /* * copy_to_brd_setup must be called before copy_to_brd. It may sleep. */ static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n) { unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; size_t copy; copy = min_t(size_t, n, PAGE_SIZE - offset); if (!brd_insert_page(brd, sector)) return -ENOMEM; if (copy < n) { sector += copy >> SECTOR_SHIFT; if (!brd_insert_page(brd, sector)) return -ENOMEM; } return 0; } static void discard_from_brd(struct brd_device *brd, sector_t sector, size_t n) { while (n >= PAGE_SIZE) { /* * Don't want to actually discard pages here because * re-allocating the pages can result in writeback * deadlocks under heavy load. */ if (0) brd_free_page(brd, sector); else brd_zero_page(brd, sector); sector += PAGE_SIZE >> SECTOR_SHIFT; n -= PAGE_SIZE; } } /* * Copy n bytes from src to the brd starting at sector. Does not sleep. 
*/ static void copy_to_brd(struct brd_device *brd, const void *src, sector_t sector, size_t n) { struct page *page; void *dst; unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; size_t copy; copy = min_t(size_t, n, PAGE_SIZE - offset); page = brd_lookup_page(brd, sector); BUG_ON(!page); dst = kmap_atomic(page, KM_USER1); memcpy(dst + offset, src, copy); kunmap_atomic(dst, KM_USER1); if (copy < n) { src += copy; sector += copy >> SECTOR_SHIFT; copy = n - copy; page = brd_lookup_page(brd, sector); BUG_ON(!page); dst = kmap_atomic(page, KM_USER1); memcpy(dst, src, copy); kunmap_atomic(dst, KM_USER1); } } /* * Copy n bytes to dst from the brd starting at sector. Does not sleep. */ static void copy_from_brd(void *dst, struct brd_device *brd, sector_t sector, size_t n) { struct page *page; void *src; unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; size_t copy; copy = min_t(size_t, n, PAGE_SIZE - offset); page = brd_lookup_page(brd, sector); if (page) { src = kmap_atomic(page, KM_USER1); memcpy(dst, src + offset, copy); kunmap_atomic(src, KM_USER1); } else memset(dst, 0, copy); if (copy < n) { dst += copy; sector += copy >> SECTOR_SHIFT; copy = n - copy; page = brd_lookup_page(brd, sector); if (page) { src = kmap_atomic(page, KM_USER1); memcpy(dst, src, copy); kunmap_atomic(src, KM_USER1); } else memset(dst, 0, copy); } } /* * Process a single bvec of a bio. 
*/ static int brd_do_bvec(struct brd_device *brd, struct page *page, unsigned int len, unsigned int off, int rw, sector_t sector) { void *mem; int err = 0; if (rw != READ) { err = copy_to_brd_setup(brd, sector, len); if (err) goto out; } mem = kmap_atomic(page, KM_USER0); if (rw == READ) { copy_from_brd(mem + off, brd, sector, len); flush_dcache_page(page); } else { flush_dcache_page(page); copy_to_brd(brd, mem + off, sector, len); } kunmap_atomic(mem, KM_USER0); out: return err; } static int brd_make_request(struct request_queue *q, struct bio *bio) { struct block_device *bdev = bio->bi_bdev; struct brd_device *brd = bdev->bd_disk->private_data; int rw; struct bio_vec *bvec; sector_t sector; int i; int err = -EIO; sector = bio->bi_sector; if (sector + (bio->bi_size >> SECTOR_SHIFT) > get_capacity(bdev->bd_disk)) goto out; if (unlikely(bio->bi_rw & REQ_DISCARD)) { err = 0; discard_from_brd(brd, sector, bio->bi_size); goto out; } rw = bio_rw(bio); if (rw == READA) rw = READ; bio_for_each_segment(bvec, bio, i) { unsigned int len = bvec->bv_len; err = brd_do_bvec(brd, bvec->bv_page, len, bvec->bv_offset, rw, sector); if (err) break; sector += len >> SECTOR_SHIFT; } out: bio_endio(bio, err); return 0; } #ifdef CONFIG_BLK_DEV_XIP static int brd_direct_access(struct block_device *bdev, sector_t sector, void **kaddr, unsigned long *pfn) { struct brd_device *brd = bdev->bd_disk->private_data; struct page *page; if (!brd) return -ENODEV; if (sector & (PAGE_SECTORS-1)) return -EINVAL; if (sector + PAGE_SECTORS > get_capacity(bdev->bd_disk)) return -ERANGE; page = brd_insert_page(brd, sector); if (!page) return -ENOMEM; *kaddr = page_address(page); *pfn = page_to_pfn(page); return 0; } #endif static int brd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { int error; struct brd_device *brd = bdev->bd_disk->private_data; if (cmd != BLKFLSBUF) return -ENOTTY; /* * ram device BLKFLSBUF has special semantics, we want to actually * release and 
destroy the ramdisk data. */ mutex_lock(&brd_mutex); mutex_lock(&bdev->bd_mutex); error = -EBUSY; if (bdev->bd_openers <= 1) { /* * Invalidate the cache first, so it isn't written * back to the device. * * Another thread might instantiate more buffercache here, * but there is not much we can do to close that race. */ invalidate_bh_lrus(); truncate_inode_pages(bdev->bd_inode->i_mapping, 0); brd_free_pages(brd); error = 0; } mutex_unlock(&bdev->bd_mutex); mutex_unlock(&brd_mutex); return error; } static const struct block_device_operations brd_fops = { .owner = THIS_MODULE, .ioctl = brd_ioctl, #ifdef CONFIG_BLK_DEV_XIP .direct_access = brd_direct_access, #endif }; /* * And now the modules code and kernel interface. */ static int rd_nr; int rd_size = CONFIG_BLK_DEV_RAM_SIZE; static int max_part; static int part_shift; module_param(rd_nr, int, S_IRUGO); MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices"); module_param(rd_size, int, S_IRUGO); MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes."); module_param(max_part, int, S_IRUGO); MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk"); MODULE_LICENSE("GPL"); MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR); MODULE_ALIAS("rd"); #ifndef MODULE /* Legacy boot options - nonmodular */ static int __init ramdisk_size(char *str) { rd_size = simple_strtol(str, NULL, 0); return 1; } __setup("ramdisk_size=", ramdisk_size); #endif /* * The device scheme is derived from loop.c. Keep them in synch where possible * (should share code eventually). 
*/ static LIST_HEAD(brd_devices); static DEFINE_MUTEX(brd_devices_mutex); static struct brd_device *brd_alloc(int i) { struct brd_device *brd; struct gendisk *disk; brd = kzalloc(sizeof(*brd), GFP_KERNEL); if (!brd) goto out; brd->brd_number = i; spin_lock_init(&brd->brd_lock); INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC); brd->brd_queue = blk_alloc_queue(GFP_KERNEL); if (!brd->brd_queue) goto out_free_dev; blk_queue_make_request(brd->brd_queue, brd_make_request); blk_queue_max_hw_sectors(brd->brd_queue, 1024); blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY); brd->brd_queue->limits.discard_granularity = PAGE_SIZE; brd->brd_queue->limits.max_discard_sectors = UINT_MAX; brd->brd_queue->limits.discard_zeroes_data = 1; queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue); disk = brd->brd_disk = alloc_disk(1 << part_shift); if (!disk) goto out_free_queue; disk->major = RAMDISK_MAJOR; disk->first_minor = i << part_shift; disk->fops = &brd_fops; disk->private_data = brd; disk->queue = brd->brd_queue; disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO; sprintf(disk->disk_name, "ram%d", i); set_capacity(disk, rd_size * 2); return brd; out_free_queue: blk_cleanup_queue(brd->brd_queue); out_free_dev: kfree(brd); out: return NULL; } static void brd_free(struct brd_device *brd) { put_disk(brd->brd_disk); blk_cleanup_queue(brd->brd_queue); brd_free_pages(brd); kfree(brd); } static struct brd_device *brd_init_one(int i) { struct brd_device *brd; list_for_each_entry(brd, &brd_devices, brd_list) { if (brd->brd_number == i) goto out; } brd = brd_alloc(i); if (brd) { add_disk(brd->brd_disk); list_add_tail(&brd->brd_list, &brd_devices); } out: return brd; } static void brd_del_one(struct brd_device *brd) { list_del(&brd->brd_list); del_gendisk(brd->brd_disk); brd_free(brd); } static struct kobject *brd_probe(dev_t dev, int *part, void *data) { struct brd_device *brd; struct kobject *kobj; mutex_lock(&brd_devices_mutex); brd = brd_init_one(MINOR(dev) >> part_shift); kobj = 
brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM); mutex_unlock(&brd_devices_mutex); *part = 0; return kobj; } static int __init brd_init(void) { int i, nr; unsigned long range; struct brd_device *brd, *next; /* * brd module now has a feature to instantiate underlying device * structure on-demand, provided that there is an access dev node. * However, this will not work well with user space tool that doesn't * know about such "feature". In order to not break any existing * tool, we do the following: * * (1) if rd_nr is specified, create that many upfront, and this * also becomes a hard limit. * (2) if rd_nr is not specified, create CONFIG_BLK_DEV_RAM_COUNT * (default 16) rd device on module load, user can further * extend brd device by create dev node themselves and have * kernel automatically instantiate actual device on-demand. */ part_shift = 0; if (max_part > 0) { part_shift = fls(max_part); /* * Adjust max_part according to part_shift as it is exported * to user space so that user can decide correct minor number * if [s]he want to create more devices. * * Note that -1 is required because partition 0 is reserved * for the whole disk. 
*/ max_part = (1UL << part_shift) - 1; } if ((1UL << part_shift) > DISK_MAX_PARTS) return -EINVAL; if (rd_nr > 1UL << (MINORBITS - part_shift)) return -EINVAL; if (rd_nr) { nr = rd_nr; range = rd_nr << part_shift; } else { nr = CONFIG_BLK_DEV_RAM_COUNT; range = 1UL << MINORBITS; } if (register_blkdev(RAMDISK_MAJOR, "ramdisk")) return -EIO; for (i = 0; i < nr; i++) { brd = brd_alloc(i); if (!brd) goto out_free; list_add_tail(&brd->brd_list, &brd_devices); } /* point of no return */ list_for_each_entry(brd, &brd_devices, brd_list) add_disk(brd->brd_disk); blk_register_region(MKDEV(RAMDISK_MAJOR, 0), range, THIS_MODULE, brd_probe, NULL, NULL); printk(KERN_INFO "brd: module loaded\n"); return 0; out_free: list_for_each_entry_safe(brd, next, &brd_devices, brd_list) { list_del(&brd->brd_list); brd_free(brd); } unregister_blkdev(RAMDISK_MAJOR, "ramdisk"); return -ENOMEM; } static void __exit brd_exit(void) { unsigned long range; struct brd_device *brd, *next; range = rd_nr ? rd_nr << part_shift : 1UL << MINORBITS; list_for_each_entry_safe(brd, next, &brd_devices, brd_list) brd_del_one(brd); blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), range); unregister_blkdev(RAMDISK_MAJOR, "ramdisk"); } module_init(brd_init); module_exit(brd_exit);
gpl-2.0
CPA-Poke/android_kernel_moto_shamu
fs/ubifs/io.c
2117
34136
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * Copyright (C) 2006, 2007 University of Szeged, Hungary * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Artem Bityutskiy (Битюцкий Артём) * Adrian Hunter * Zoltan Sogor */ /* * This file implements UBIFS I/O subsystem which provides various I/O-related * helper functions (reading/writing/checking/validating nodes) and implements * write-buffering support. Write buffers help to save space which otherwise * would have been wasted for padding to the nearest minimal I/O unit boundary. * Instead, data first goes to the write-buffer and is flushed when the * buffer is full or when it is not used for some time (by timer). This is * similar to the mechanism is used by JFFS2. * * UBIFS distinguishes between minimum write size (@c->min_io_size) and maximum * write size (@c->max_write_size). The latter is the maximum amount of bytes * the underlying flash is able to program at a time, and writing in * @c->max_write_size units should presumably be faster. Obviously, * @c->min_io_size <= @c->max_write_size. Write-buffers are of * @c->max_write_size bytes in size for maximum performance. However, when a * write-buffer is flushed, only the portion of it (aligned to @c->min_io_size * boundary) which contains data is written, not the whole write-buffer, * because this is more space-efficient. 
* * This optimization adds few complications to the code. Indeed, on the one * hand, we want to write in optimal @c->max_write_size bytes chunks, which * also means aligning writes at the @c->max_write_size bytes offsets. On the * other hand, we do not want to waste space when synchronizing the write * buffer, so during synchronization we writes in smaller chunks. And this makes * the next write offset to be not aligned to @c->max_write_size bytes. So the * have to make sure that the write-buffer offset (@wbuf->offs) becomes aligned * to @c->max_write_size bytes again. We do this by temporarily shrinking * write-buffer size (@wbuf->size). * * Write-buffers are defined by 'struct ubifs_wbuf' objects and protected by * mutexes defined inside these objects. Since sometimes upper-level code * has to lock the write-buffer (e.g. journal space reservation code), many * functions related to write-buffers have "nolock" suffix which means that the * caller has to lock the write-buffer before calling this function. * * UBIFS stores nodes at 64 bit-aligned addresses. If the node length is not * aligned, UBIFS starts the next node from the aligned address, and the padded * bytes may contain any rubbish. In other words, UBIFS does not put padding * bytes in those small gaps. Common headers of nodes store real node lengths, * not aligned lengths. Indexing nodes also store real lengths in branches. * * UBIFS uses padding when it pads to the next min. I/O unit. In this case it * uses padding nodes or padding bytes, if the padding node does not fit. * * All UBIFS nodes are protected by CRC checksums and UBIFS checks CRC when * they are read from the flash media. */ #include <linux/crc32.h> #include <linux/slab.h> #include "ubifs.h" /** * ubifs_ro_mode - switch UBIFS to read read-only mode. 
* @c: UBIFS file-system description object * @err: error code which is the reason of switching to R/O mode */ void ubifs_ro_mode(struct ubifs_info *c, int err) { if (!c->ro_error) { c->ro_error = 1; c->no_chk_data_crc = 0; c->vfs_sb->s_flags |= MS_RDONLY; ubifs_warn("switched to read-only mode, error %d", err); dump_stack(); } } /* * Below are simple wrappers over UBI I/O functions which include some * additional checks and UBIFS debugging stuff. See corresponding UBI function * for more information. */ int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs, int len, int even_ebadmsg) { int err; err = ubi_read(c->ubi, lnum, buf, offs, len); /* * In case of %-EBADMSG print the error message only if the * @even_ebadmsg is true. */ if (err && (err != -EBADMSG || even_ebadmsg)) { ubifs_err("reading %d bytes from LEB %d:%d failed, error %d", len, lnum, offs, err); dump_stack(); } return err; } int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs, int len) { int err; ubifs_assert(!c->ro_media && !c->ro_mount); if (c->ro_error) return -EROFS; if (!dbg_is_tst_rcvry(c)) err = ubi_leb_write(c->ubi, lnum, buf, offs, len); else err = dbg_leb_write(c, lnum, buf, offs, len); if (err) { ubifs_err("writing %d bytes to LEB %d:%d failed, error %d", len, lnum, offs, err); ubifs_ro_mode(c, err); dump_stack(); } return err; } int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len) { int err; ubifs_assert(!c->ro_media && !c->ro_mount); if (c->ro_error) return -EROFS; if (!dbg_is_tst_rcvry(c)) err = ubi_leb_change(c->ubi, lnum, buf, len); else err = dbg_leb_change(c, lnum, buf, len); if (err) { ubifs_err("changing %d bytes in LEB %d failed, error %d", len, lnum, err); ubifs_ro_mode(c, err); dump_stack(); } return err; } int ubifs_leb_unmap(struct ubifs_info *c, int lnum) { int err; ubifs_assert(!c->ro_media && !c->ro_mount); if (c->ro_error) return -EROFS; if (!dbg_is_tst_rcvry(c)) err = ubi_leb_unmap(c->ubi, lnum); 
else err = dbg_leb_unmap(c, lnum); if (err) { ubifs_err("unmap LEB %d failed, error %d", lnum, err); ubifs_ro_mode(c, err); dump_stack(); } return err; } int ubifs_leb_map(struct ubifs_info *c, int lnum) { int err; ubifs_assert(!c->ro_media && !c->ro_mount); if (c->ro_error) return -EROFS; if (!dbg_is_tst_rcvry(c)) err = ubi_leb_map(c->ubi, lnum); else err = dbg_leb_map(c, lnum); if (err) { ubifs_err("mapping LEB %d failed, error %d", lnum, err); ubifs_ro_mode(c, err); dump_stack(); } return err; } int ubifs_is_mapped(const struct ubifs_info *c, int lnum) { int err; err = ubi_is_mapped(c->ubi, lnum); if (err < 0) { ubifs_err("ubi_is_mapped failed for LEB %d, error %d", lnum, err); dump_stack(); } return err; } /** * ubifs_check_node - check node. * @c: UBIFS file-system description object * @buf: node to check * @lnum: logical eraseblock number * @offs: offset within the logical eraseblock * @quiet: print no messages * @must_chk_crc: indicates whether to always check the CRC * * This function checks node magic number and CRC checksum. This function also * validates node length to prevent UBIFS from becoming crazy when an attacker * feeds it a file-system image with incorrect nodes. For example, too large * node length in the common header could cause UBIFS to read memory outside of * allocated buffer when checking the CRC checksum. * * This function may skip data nodes CRC checking if @c->no_chk_data_crc is * true, which is controlled by corresponding UBIFS mount option. However, if * @must_chk_crc is true, then @c->no_chk_data_crc is ignored and CRC is * checked. Similarly, if @c->mounting or @c->remounting_rw is true (we are * mounting or re-mounting to R/W mode), @c->no_chk_data_crc is ignored and CRC * is checked. This is because during mounting or re-mounting from R/O mode to * R/W mode we may read journal nodes (when replying the journal or doing the * recovery) and the journal nodes may potentially be corrupted, so checking is * required. 
* * This function returns zero in case of success and %-EUCLEAN in case of bad * CRC or magic. */ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, int offs, int quiet, int must_chk_crc) { int err = -EINVAL, type, node_len; uint32_t crc, node_crc, magic; const struct ubifs_ch *ch = buf; ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); ubifs_assert(!(offs & 7) && offs < c->leb_size); magic = le32_to_cpu(ch->magic); if (magic != UBIFS_NODE_MAGIC) { if (!quiet) ubifs_err("bad magic %#08x, expected %#08x", magic, UBIFS_NODE_MAGIC); err = -EUCLEAN; goto out; } type = ch->node_type; if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) { if (!quiet) ubifs_err("bad node type %d", type); goto out; } node_len = le32_to_cpu(ch->len); if (node_len + offs > c->leb_size) goto out_len; if (c->ranges[type].max_len == 0) { if (node_len != c->ranges[type].len) goto out_len; } else if (node_len < c->ranges[type].min_len || node_len > c->ranges[type].max_len) goto out_len; if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->mounting && !c->remounting_rw && c->no_chk_data_crc) return 0; crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8); node_crc = le32_to_cpu(ch->crc); if (crc != node_crc) { if (!quiet) ubifs_err("bad CRC: calculated %#08x, read %#08x", crc, node_crc); err = -EUCLEAN; goto out; } return 0; out_len: if (!quiet) ubifs_err("bad node length %d", node_len); out: if (!quiet) { ubifs_err("bad node at LEB %d:%d", lnum, offs); ubifs_dump_node(c, buf); dump_stack(); } return err; } /** * ubifs_pad - pad flash space. * @c: UBIFS file-system description object * @buf: buffer to put padding to * @pad: how many bytes to pad * * The flash media obliges us to write only in chunks of %c->min_io_size and * when we have to write less data we add padding node to the write-buffer and * pad it to the next minimal I/O unit's boundary. Padding nodes help when the * media is being scanned. 
If the amount of wasted space is not enough to fit a * padding node which takes %UBIFS_PAD_NODE_SZ bytes, we write padding bytes * pattern (%UBIFS_PADDING_BYTE). * * Padding nodes are also used to fill gaps when the "commit-in-gaps" method is * used. */ void ubifs_pad(const struct ubifs_info *c, void *buf, int pad) { uint32_t crc; ubifs_assert(pad >= 0 && !(pad & 7)); if (pad >= UBIFS_PAD_NODE_SZ) { struct ubifs_ch *ch = buf; struct ubifs_pad_node *pad_node = buf; ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC); ch->node_type = UBIFS_PAD_NODE; ch->group_type = UBIFS_NO_NODE_GROUP; ch->padding[0] = ch->padding[1] = 0; ch->sqnum = 0; ch->len = cpu_to_le32(UBIFS_PAD_NODE_SZ); pad -= UBIFS_PAD_NODE_SZ; pad_node->pad_len = cpu_to_le32(pad); crc = crc32(UBIFS_CRC32_INIT, buf + 8, UBIFS_PAD_NODE_SZ - 8); ch->crc = cpu_to_le32(crc); memset(buf + UBIFS_PAD_NODE_SZ, 0, pad); } else if (pad > 0) /* Too little space, padding node won't fit */ memset(buf, UBIFS_PADDING_BYTE, pad); } /** * next_sqnum - get next sequence number. * @c: UBIFS file-system description object */ static unsigned long long next_sqnum(struct ubifs_info *c) { unsigned long long sqnum; spin_lock(&c->cnt_lock); sqnum = ++c->max_sqnum; spin_unlock(&c->cnt_lock); if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) { if (sqnum >= SQNUM_WATERMARK) { ubifs_err("sequence number overflow %llu, end of life", sqnum); ubifs_ro_mode(c, -EINVAL); } ubifs_warn("running out of sequence numbers, end of life soon"); } return sqnum; } /** * ubifs_prepare_node - prepare node to be written to flash. * @c: UBIFS file-system description object * @node: the node to pad * @len: node length * @pad: if the buffer has to be padded * * This function prepares node at @node to be written to the media - it * calculates node CRC, fills the common header, and adds proper padding up to * the next minimum I/O unit if @pad is not zero. 
*/ void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad) { uint32_t crc; struct ubifs_ch *ch = node; unsigned long long sqnum = next_sqnum(c); ubifs_assert(len >= UBIFS_CH_SZ); ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC); ch->len = cpu_to_le32(len); ch->group_type = UBIFS_NO_NODE_GROUP; ch->sqnum = cpu_to_le64(sqnum); ch->padding[0] = ch->padding[1] = 0; crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8); ch->crc = cpu_to_le32(crc); if (pad) { len = ALIGN(len, 8); pad = ALIGN(len, c->min_io_size) - len; ubifs_pad(c, node + len, pad); } } /** * ubifs_prep_grp_node - prepare node of a group to be written to flash. * @c: UBIFS file-system description object * @node: the node to pad * @len: node length * @last: indicates the last node of the group * * This function prepares node at @node to be written to the media - it * calculates node CRC and fills the common header. */ void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last) { uint32_t crc; struct ubifs_ch *ch = node; unsigned long long sqnum = next_sqnum(c); ubifs_assert(len >= UBIFS_CH_SZ); ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC); ch->len = cpu_to_le32(len); if (last) ch->group_type = UBIFS_LAST_OF_NODE_GROUP; else ch->group_type = UBIFS_IN_NODE_GROUP; ch->sqnum = cpu_to_le64(sqnum); ch->padding[0] = ch->padding[1] = 0; crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8); ch->crc = cpu_to_le32(crc); } /** * wbuf_timer_callback - write-buffer timer callback function. * @data: timer data (write-buffer descriptor) * * This function is called when the write-buffer timer expires. */ static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer) { struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer); dbg_io("jhead %s", dbg_jhead(wbuf->jhead)); wbuf->need_sync = 1; wbuf->c->need_wbuf_sync = 1; ubifs_wake_up_bgt(wbuf->c); return HRTIMER_NORESTART; } /** * new_wbuf_timer - start new write-buffer timer. 
* @wbuf: write-buffer descriptor */ static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf) { ubifs_assert(!hrtimer_active(&wbuf->timer)); if (wbuf->no_timer) return; dbg_io("set timer for jhead %s, %llu-%llu millisecs", dbg_jhead(wbuf->jhead), div_u64(ktime_to_ns(wbuf->softlimit), USEC_PER_SEC), div_u64(ktime_to_ns(wbuf->softlimit) + wbuf->delta, USEC_PER_SEC)); hrtimer_start_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta, HRTIMER_MODE_REL); } /** * cancel_wbuf_timer - cancel write-buffer timer. * @wbuf: write-buffer descriptor */ static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf) { if (wbuf->no_timer) return; wbuf->need_sync = 0; hrtimer_cancel(&wbuf->timer); } /** * ubifs_wbuf_sync_nolock - synchronize write-buffer. * @wbuf: write-buffer to synchronize * * This function synchronizes write-buffer @buf and returns zero in case of * success or a negative error code in case of failure. * * Note, although write-buffers are of @c->max_write_size, this function does * not necessarily writes all @c->max_write_size bytes to the flash. Instead, * if the write-buffer is only partially filled with data, only the used part * of the write-buffer (aligned on @c->min_io_size boundary) is synchronized. * This way we waste less space. 
*/ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf) { struct ubifs_info *c = wbuf->c; int err, dirt, sync_len; cancel_wbuf_timer_nolock(wbuf); if (!wbuf->used || wbuf->lnum == -1) /* Write-buffer is empty or not seeked */ return 0; dbg_io("LEB %d:%d, %d bytes, jhead %s", wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead)); ubifs_assert(!(wbuf->avail & 7)); ubifs_assert(wbuf->offs + wbuf->size <= c->leb_size); ubifs_assert(wbuf->size >= c->min_io_size); ubifs_assert(wbuf->size <= c->max_write_size); ubifs_assert(wbuf->size % c->min_io_size == 0); ubifs_assert(!c->ro_media && !c->ro_mount); if (c->leb_size - wbuf->offs >= c->max_write_size) ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size)); if (c->ro_error) return -EROFS; /* * Do not write whole write buffer but write only the minimum necessary * amount of min. I/O units. */ sync_len = ALIGN(wbuf->used, c->min_io_size); dirt = sync_len - wbuf->used; if (dirt) ubifs_pad(c, wbuf->buf + wbuf->used, dirt); err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len); if (err) return err; spin_lock(&wbuf->lock); wbuf->offs += sync_len; /* * Now @wbuf->offs is not necessarily aligned to @c->max_write_size. * But our goal is to optimize writes and make sure we write in * @c->max_write_size chunks and to @c->max_write_size-aligned offset. * Thus, if @wbuf->offs is not aligned to @c->max_write_size now, make * sure that @wbuf->offs + @wbuf->size is aligned to * @c->max_write_size. This way we make sure that after next * write-buffer flush we are again at the optimal offset (aligned to * @c->max_write_size). 
*/ if (c->leb_size - wbuf->offs < c->max_write_size) wbuf->size = c->leb_size - wbuf->offs; else if (wbuf->offs & (c->max_write_size - 1)) wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs; else wbuf->size = c->max_write_size; wbuf->avail = wbuf->size; wbuf->used = 0; wbuf->next_ino = 0; spin_unlock(&wbuf->lock); if (wbuf->sync_callback) err = wbuf->sync_callback(c, wbuf->lnum, c->leb_size - wbuf->offs, dirt); return err; } /** * ubifs_wbuf_seek_nolock - seek write-buffer. * @wbuf: write-buffer * @lnum: logical eraseblock number to seek to * @offs: logical eraseblock offset to seek to * * This function targets the write-buffer to logical eraseblock @lnum:@offs. * The write-buffer has to be empty. Returns zero in case of success and a * negative error code in case of failure. */ int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs) { const struct ubifs_info *c = wbuf->c; dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead)); ubifs_assert(lnum >= 0 && lnum < c->leb_cnt); ubifs_assert(offs >= 0 && offs <= c->leb_size); ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7)); ubifs_assert(lnum != wbuf->lnum); ubifs_assert(wbuf->used == 0); spin_lock(&wbuf->lock); wbuf->lnum = lnum; wbuf->offs = offs; if (c->leb_size - wbuf->offs < c->max_write_size) wbuf->size = c->leb_size - wbuf->offs; else if (wbuf->offs & (c->max_write_size - 1)) wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs; else wbuf->size = c->max_write_size; wbuf->avail = wbuf->size; wbuf->used = 0; spin_unlock(&wbuf->lock); return 0; } /** * ubifs_bg_wbufs_sync - synchronize write-buffers. * @c: UBIFS file-system description object * * This function is called by background thread to synchronize write-buffers. * Returns zero in case of success and a negative error code in case of * failure. 
*/ int ubifs_bg_wbufs_sync(struct ubifs_info *c) { int err, i; ubifs_assert(!c->ro_media && !c->ro_mount); if (!c->need_wbuf_sync) return 0; c->need_wbuf_sync = 0; if (c->ro_error) { err = -EROFS; goto out_timers; } dbg_io("synchronize"); for (i = 0; i < c->jhead_cnt; i++) { struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf; cond_resched(); /* * If the mutex is locked then wbuf is being changed, so * synchronization is not necessary. */ if (mutex_is_locked(&wbuf->io_mutex)) continue; mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead); if (!wbuf->need_sync) { mutex_unlock(&wbuf->io_mutex); continue; } err = ubifs_wbuf_sync_nolock(wbuf); mutex_unlock(&wbuf->io_mutex); if (err) { ubifs_err("cannot sync write-buffer, error %d", err); ubifs_ro_mode(c, err); goto out_timers; } } return 0; out_timers: /* Cancel all timers to prevent repeated errors */ for (i = 0; i < c->jhead_cnt; i++) { struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf; mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead); cancel_wbuf_timer_nolock(wbuf); mutex_unlock(&wbuf->io_mutex); } return err; } /** * ubifs_wbuf_write_nolock - write data to flash via write-buffer. * @wbuf: write-buffer * @buf: node to write * @len: node length * * This function writes data to flash via write-buffer @wbuf. This means that * the last piece of the node won't reach the flash media immediately if it * does not take whole max. write unit (@c->max_write_size). Instead, the node * will sit in RAM until the write-buffer is synchronized (e.g., by timer, or * because more data are appended to the write-buffer). * * This function returns zero in case of success and a negative error code in * case of failure. If the node cannot be written because there is no more * space in this logical eraseblock, %-ENOSPC is returned. 
*/ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) { struct ubifs_info *c = wbuf->c; int err, written, n, aligned_len = ALIGN(len, 8); dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len, dbg_ntype(((struct ubifs_ch *)buf)->node_type), dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used); ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt); ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0); ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size); ubifs_assert(wbuf->avail > 0 && wbuf->avail <= wbuf->size); ubifs_assert(wbuf->size >= c->min_io_size); ubifs_assert(wbuf->size <= c->max_write_size); ubifs_assert(wbuf->size % c->min_io_size == 0); ubifs_assert(mutex_is_locked(&wbuf->io_mutex)); ubifs_assert(!c->ro_media && !c->ro_mount); ubifs_assert(!c->space_fixup); if (c->leb_size - wbuf->offs >= c->max_write_size) ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size)); if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) { err = -ENOSPC; goto out; } cancel_wbuf_timer_nolock(wbuf); if (c->ro_error) return -EROFS; if (aligned_len <= wbuf->avail) { /* * The node is not very large and fits entirely within * write-buffer. */ memcpy(wbuf->buf + wbuf->used, buf, len); if (aligned_len == wbuf->avail) { dbg_io("flush jhead %s wbuf to LEB %d:%d", dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs); err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, wbuf->size); if (err) goto out; spin_lock(&wbuf->lock); wbuf->offs += wbuf->size; if (c->leb_size - wbuf->offs >= c->max_write_size) wbuf->size = c->max_write_size; else wbuf->size = c->leb_size - wbuf->offs; wbuf->avail = wbuf->size; wbuf->used = 0; wbuf->next_ino = 0; spin_unlock(&wbuf->lock); } else { spin_lock(&wbuf->lock); wbuf->avail -= aligned_len; wbuf->used += aligned_len; spin_unlock(&wbuf->lock); } goto exit; } written = 0; if (wbuf->used) { /* * The node is large enough and does not fit entirely within * current available space. 
We have to fill and flush * write-buffer and switch to the next max. write unit. */ dbg_io("flush jhead %s wbuf to LEB %d:%d", dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs); memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail); err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, wbuf->size); if (err) goto out; wbuf->offs += wbuf->size; len -= wbuf->avail; aligned_len -= wbuf->avail; written += wbuf->avail; } else if (wbuf->offs & (c->max_write_size - 1)) { /* * The write-buffer offset is not aligned to * @c->max_write_size and @wbuf->size is less than * @c->max_write_size. Write @wbuf->size bytes to make sure the * following writes are done in optimal @c->max_write_size * chunks. */ dbg_io("write %d bytes to LEB %d:%d", wbuf->size, wbuf->lnum, wbuf->offs); err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs, wbuf->size); if (err) goto out; wbuf->offs += wbuf->size; len -= wbuf->size; aligned_len -= wbuf->size; written += wbuf->size; } /* * The remaining data may take more whole max. write units, so write the * remains multiple to max. write unit size directly to the flash media. * We align node length to 8-byte boundary because we anyway flash wbuf * if the remaining space is less than 8 bytes. */ n = aligned_len >> c->max_write_shift; if (n) { n <<= c->max_write_shift; dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum, wbuf->offs); err = ubifs_leb_write(c, wbuf->lnum, buf + written, wbuf->offs, n); if (err) goto out; wbuf->offs += n; aligned_len -= n; len -= n; written += n; } spin_lock(&wbuf->lock); if (aligned_len) /* * And now we have what's left and what does not take whole * max. write unit, so write it to the write-buffer and we are * done. 
*/ memcpy(wbuf->buf, buf + written, len); if (c->leb_size - wbuf->offs >= c->max_write_size) wbuf->size = c->max_write_size; else wbuf->size = c->leb_size - wbuf->offs; wbuf->avail = wbuf->size - aligned_len; wbuf->used = aligned_len; wbuf->next_ino = 0; spin_unlock(&wbuf->lock); exit: if (wbuf->sync_callback) { int free = c->leb_size - wbuf->offs - wbuf->used; err = wbuf->sync_callback(c, wbuf->lnum, free, 0); if (err) goto out; } if (wbuf->used) new_wbuf_timer_nolock(wbuf); return 0; out: ubifs_err("cannot write %d bytes to LEB %d:%d, error %d", len, wbuf->lnum, wbuf->offs, err); ubifs_dump_node(c, buf); dump_stack(); ubifs_dump_leb(c, wbuf->lnum); return err; } /** * ubifs_write_node - write node to the media. * @c: UBIFS file-system description object * @buf: the node to write * @len: node length * @lnum: logical eraseblock number * @offs: offset within the logical eraseblock * * This function automatically fills node magic number, assigns sequence * number, and calculates node CRC checksum. The length of the @buf buffer has * to be aligned to the minimal I/O unit size. This function automatically * appends padding node and padding bytes if needed. Returns zero in case of * success and a negative error code in case of failure. */ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum, int offs) { int err, buf_len = ALIGN(len, c->min_io_size); dbg_io("LEB %d:%d, %s, length %d (aligned %d)", lnum, offs, dbg_ntype(((struct ubifs_ch *)buf)->node_type), len, buf_len); ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size); ubifs_assert(!c->ro_media && !c->ro_mount); ubifs_assert(!c->space_fixup); if (c->ro_error) return -EROFS; ubifs_prepare_node(c, buf, len, 1); err = ubifs_leb_write(c, lnum, buf, offs, buf_len); if (err) ubifs_dump_node(c, buf); return err; } /** * ubifs_read_node_wbuf - read node from the media or write-buffer. 
* @wbuf: wbuf to check for un-written data * @buf: buffer to read to * @type: node type * @len: node length * @lnum: logical eraseblock number * @offs: offset within the logical eraseblock * * This function reads a node of known type and length, checks it and stores * in @buf. If the node partially or fully sits in the write-buffer, this * function takes data from the buffer, otherwise it reads the flash media. * Returns zero in case of success, %-EUCLEAN if CRC mismatched and a negative * error code in case of failure. */ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len, int lnum, int offs) { const struct ubifs_info *c = wbuf->c; int err, rlen, overlap; struct ubifs_ch *ch = buf; dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs, dbg_ntype(type), len, dbg_jhead(wbuf->jhead)); ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0); ubifs_assert(!(offs & 7) && offs < c->leb_size); ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT); spin_lock(&wbuf->lock); overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs); if (!overlap) { /* We may safely unlock the write-buffer and read the data */ spin_unlock(&wbuf->lock); return ubifs_read_node(c, buf, type, len, lnum, offs); } /* Don't read under wbuf */ rlen = wbuf->offs - offs; if (rlen < 0) rlen = 0; /* Copy the rest from the write-buffer */ memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen); spin_unlock(&wbuf->lock); if (rlen > 0) { /* Read everything that goes before write-buffer */ err = ubifs_leb_read(c, lnum, buf, offs, rlen, 0); if (err && err != -EBADMSG) return err; } if (type != ch->node_type) { ubifs_err("bad node type (%d but expected %d)", ch->node_type, type); goto out; } err = ubifs_check_node(c, buf, lnum, offs, 0, 0); if (err) { ubifs_err("expected node type %d", type); return err; } rlen = le32_to_cpu(ch->len); if (rlen != len) { ubifs_err("bad node length %d, expected %d", rlen, len); goto out; } return 0; out: ubifs_err("bad node at 
LEB %d:%d", lnum, offs); ubifs_dump_node(c, buf); dump_stack(); return -EINVAL; } /** * ubifs_read_node - read node. * @c: UBIFS file-system description object * @buf: buffer to read to * @type: node type * @len: node length (not aligned) * @lnum: logical eraseblock number * @offs: offset within the logical eraseblock * * This function reads a node of known type and and length, checks it and * stores in @buf. Returns zero in case of success, %-EUCLEAN if CRC mismatched * and a negative error code in case of failure. */ int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len, int lnum, int offs) { int err, l; struct ubifs_ch *ch = buf; dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len); ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); ubifs_assert(len >= UBIFS_CH_SZ && offs + len <= c->leb_size); ubifs_assert(!(offs & 7) && offs < c->leb_size); ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT); err = ubifs_leb_read(c, lnum, buf, offs, len, 0); if (err && err != -EBADMSG) return err; if (type != ch->node_type) { ubifs_err("bad node type (%d but expected %d)", ch->node_type, type); goto out; } err = ubifs_check_node(c, buf, lnum, offs, 0, 0); if (err) { ubifs_err("expected node type %d", type); return err; } l = le32_to_cpu(ch->len); if (l != len) { ubifs_err("bad node length %d, expected %d", l, len); goto out; } return 0; out: ubifs_err("bad node at LEB %d:%d, LEB mapping status %d", lnum, offs, ubi_is_mapped(c->ubi, lnum)); ubifs_dump_node(c, buf); dump_stack(); return -EINVAL; } /** * ubifs_wbuf_init - initialize write-buffer. * @c: UBIFS file-system description object * @wbuf: write-buffer to initialize * * This function initializes write-buffer. Returns zero in case of success * %-ENOMEM in case of failure. 
*/ int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf) { size_t size; wbuf->buf = kmalloc(c->max_write_size, GFP_KERNEL); if (!wbuf->buf) return -ENOMEM; size = (c->max_write_size / UBIFS_CH_SZ + 1) * sizeof(ino_t); wbuf->inodes = kmalloc(size, GFP_KERNEL); if (!wbuf->inodes) { kfree(wbuf->buf); wbuf->buf = NULL; return -ENOMEM; } wbuf->used = 0; wbuf->lnum = wbuf->offs = -1; /* * If the LEB starts at the max. write size aligned address, then * write-buffer size has to be set to @c->max_write_size. Otherwise, * set it to something smaller so that it ends at the closest max. * write size boundary. */ size = c->max_write_size - (c->leb_start % c->max_write_size); wbuf->avail = wbuf->size = size; wbuf->sync_callback = NULL; mutex_init(&wbuf->io_mutex); spin_lock_init(&wbuf->lock); wbuf->c = c; wbuf->next_ino = 0; hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); wbuf->timer.function = wbuf_timer_callback_nolock; wbuf->softlimit = ktime_set(WBUF_TIMEOUT_SOFTLIMIT, 0); wbuf->delta = WBUF_TIMEOUT_HARDLIMIT - WBUF_TIMEOUT_SOFTLIMIT; wbuf->delta *= 1000000000ULL; ubifs_assert(wbuf->delta <= ULONG_MAX); return 0; } /** * ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array. * @wbuf: the write-buffer where to add * @inum: the inode number * * This function adds an inode number to the inode array of the write-buffer. */ void ubifs_wbuf_add_ino_nolock(struct ubifs_wbuf *wbuf, ino_t inum) { if (!wbuf->buf) /* NOR flash or something similar */ return; spin_lock(&wbuf->lock); if (wbuf->used) wbuf->inodes[wbuf->next_ino++] = inum; spin_unlock(&wbuf->lock); } /** * wbuf_has_ino - returns if the wbuf contains data from the inode. * @wbuf: the write-buffer * @inum: the inode number * * This function returns with %1 if the write-buffer contains some data from the * given inode otherwise it returns with %0. 
*/ static int wbuf_has_ino(struct ubifs_wbuf *wbuf, ino_t inum) { int i, ret = 0; spin_lock(&wbuf->lock); for (i = 0; i < wbuf->next_ino; i++) if (inum == wbuf->inodes[i]) { ret = 1; break; } spin_unlock(&wbuf->lock); return ret; } /** * ubifs_sync_wbufs_by_inode - synchronize write-buffers for an inode. * @c: UBIFS file-system description object * @inode: inode to synchronize * * This function synchronizes write-buffers which contain nodes belonging to * @inode. Returns zero in case of success and a negative error code in case of * failure. */ int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode) { int i, err = 0; for (i = 0; i < c->jhead_cnt; i++) { struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf; if (i == GCHD) /* * GC head is special, do not look at it. Even if the * head contains something related to this inode, it is * a _copy_ of corresponding on-flash node which sits * somewhere else. */ continue; if (!wbuf_has_ino(wbuf, inode->i_ino)) continue; mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead); if (wbuf_has_ino(wbuf, inode->i_ino)) err = ubifs_wbuf_sync_nolock(wbuf); mutex_unlock(&wbuf->io_mutex); if (err) { ubifs_ro_mode(c, err); return err; } } return 0; }
gpl-2.0
wetek-enigma/linux-wetek-3.10.y
fs/nfsd/nfs3xdr.c
2373
27389
/* * XDR support for nfsd/protocol version 3. * * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de> * * 2003-08-09 Jamie Lokier: Use htonl() for nanoseconds, not htons()! */ #include <linux/namei.h> #include <linux/sunrpc/svc_xprt.h> #include "xdr3.h" #include "auth.h" #include "netns.h" #include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_XDR /* * Mapping of S_IF* types to NFS file types */ static u32 nfs3_ftypes[] = { NF3NON, NF3FIFO, NF3CHR, NF3BAD, NF3DIR, NF3BAD, NF3BLK, NF3BAD, NF3REG, NF3BAD, NF3LNK, NF3BAD, NF3SOCK, NF3BAD, NF3LNK, NF3BAD, }; /* * XDR functions for basic NFS types */ static __be32 * encode_time3(__be32 *p, struct timespec *time) { *p++ = htonl((u32) time->tv_sec); *p++ = htonl(time->tv_nsec); return p; } static __be32 * decode_time3(__be32 *p, struct timespec *time) { time->tv_sec = ntohl(*p++); time->tv_nsec = ntohl(*p++); return p; } static __be32 * decode_fh(__be32 *p, struct svc_fh *fhp) { unsigned int size; fh_init(fhp, NFS3_FHSIZE); size = ntohl(*p++); if (size > NFS3_FHSIZE) return NULL; memcpy(&fhp->fh_handle.fh_base, p, size); fhp->fh_handle.fh_size = size; return p + XDR_QUADLEN(size); } /* Helper function for NFSv3 ACL code */ __be32 *nfs3svc_decode_fh(__be32 *p, struct svc_fh *fhp) { return decode_fh(p, fhp); } static __be32 * encode_fh(__be32 *p, struct svc_fh *fhp) { unsigned int size = fhp->fh_handle.fh_size; *p++ = htonl(size); if (size) p[XDR_QUADLEN(size)-1]=0; memcpy(p, &fhp->fh_handle.fh_base, size); return p + XDR_QUADLEN(size); } /* * Decode a file name and make sure that the path contains * no slashes or null bytes. 
*/ static __be32 * decode_filename(__be32 *p, char **namp, unsigned int *lenp) { char *name; unsigned int i; if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS3_MAXNAMLEN)) != NULL) { for (i = 0, name = *namp; i < *lenp; i++, name++) { if (*name == '\0' || *name == '/') return NULL; } } return p; } static __be32 * decode_sattr3(__be32 *p, struct iattr *iap) { u32 tmp; iap->ia_valid = 0; if (*p++) { iap->ia_valid |= ATTR_MODE; iap->ia_mode = ntohl(*p++); } if (*p++) { iap->ia_uid = make_kuid(&init_user_ns, ntohl(*p++)); if (uid_valid(iap->ia_uid)) iap->ia_valid |= ATTR_UID; } if (*p++) { iap->ia_gid = make_kgid(&init_user_ns, ntohl(*p++)); if (gid_valid(iap->ia_gid)) iap->ia_valid |= ATTR_GID; } if (*p++) { u64 newsize; iap->ia_valid |= ATTR_SIZE; p = xdr_decode_hyper(p, &newsize); if (newsize <= NFS_OFFSET_MAX) iap->ia_size = newsize; else iap->ia_size = NFS_OFFSET_MAX; } if ((tmp = ntohl(*p++)) == 1) { /* set to server time */ iap->ia_valid |= ATTR_ATIME; } else if (tmp == 2) { /* set to client time */ iap->ia_valid |= ATTR_ATIME | ATTR_ATIME_SET; iap->ia_atime.tv_sec = ntohl(*p++); iap->ia_atime.tv_nsec = ntohl(*p++); } if ((tmp = ntohl(*p++)) == 1) { /* set to server time */ iap->ia_valid |= ATTR_MTIME; } else if (tmp == 2) { /* set to client time */ iap->ia_valid |= ATTR_MTIME | ATTR_MTIME_SET; iap->ia_mtime.tv_sec = ntohl(*p++); iap->ia_mtime.tv_nsec = ntohl(*p++); } return p; } static __be32 *encode_fsid(__be32 *p, struct svc_fh *fhp) { u64 f; switch(fsid_source(fhp)) { default: case FSIDSOURCE_DEV: p = xdr_encode_hyper(p, (u64)huge_encode_dev (fhp->fh_dentry->d_inode->i_sb->s_dev)); break; case FSIDSOURCE_FSID: p = xdr_encode_hyper(p, (u64) fhp->fh_export->ex_fsid); break; case FSIDSOURCE_UUID: f = ((u64*)fhp->fh_export->ex_uuid)[0]; f ^= ((u64*)fhp->fh_export->ex_uuid)[1]; p = xdr_encode_hyper(p, f); break; } return p; } static __be32 * encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat) { *p++ = 
htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]); *p++ = htonl((u32) stat->mode); *p++ = htonl((u32) stat->nlink); *p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid)); *p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid)); if (S_ISLNK(stat->mode) && stat->size > NFS3_MAXPATHLEN) { p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN); } else { p = xdr_encode_hyper(p, (u64) stat->size); } p = xdr_encode_hyper(p, ((u64)stat->blocks) << 9); *p++ = htonl((u32) MAJOR(stat->rdev)); *p++ = htonl((u32) MINOR(stat->rdev)); p = encode_fsid(p, fhp); p = xdr_encode_hyper(p, stat->ino); p = encode_time3(p, &stat->atime); p = encode_time3(p, &stat->mtime); p = encode_time3(p, &stat->ctime); return p; } static __be32 * encode_saved_post_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) { /* Attributes to follow */ *p++ = xdr_one; return encode_fattr3(rqstp, p, fhp, &fhp->fh_post_attr); } /* * Encode post-operation attributes. * The inode may be NULL if the call failed because of a stale file * handle. In this case, no attributes are returned. 
*/ static __be32 * encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) { struct dentry *dentry = fhp->fh_dentry; if (dentry && dentry->d_inode) { __be32 err; struct kstat stat; err = fh_getattr(fhp, &stat); if (!err) { *p++ = xdr_one; /* attributes follow */ lease_get_mtime(dentry->d_inode, &stat.mtime); return encode_fattr3(rqstp, p, fhp, &stat); } } *p++ = xdr_zero; return p; } /* Helper for NFSv3 ACLs */ __be32 * nfs3svc_encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) { return encode_post_op_attr(rqstp, p, fhp); } /* * Enocde weak cache consistency data */ static __be32 * encode_wcc_data(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) { struct dentry *dentry = fhp->fh_dentry; if (dentry && dentry->d_inode && fhp->fh_post_saved) { if (fhp->fh_pre_saved) { *p++ = xdr_one; p = xdr_encode_hyper(p, (u64) fhp->fh_pre_size); p = encode_time3(p, &fhp->fh_pre_mtime); p = encode_time3(p, &fhp->fh_pre_ctime); } else { *p++ = xdr_zero; } return encode_saved_post_attr(rqstp, p, fhp); } /* no pre- or post-attrs */ *p++ = xdr_zero; return encode_post_op_attr(rqstp, p, fhp); } /* * Fill in the post_op attr for the wcc data */ void fill_post_wcc(struct svc_fh *fhp) { __be32 err; if (fhp->fh_post_saved) printk("nfsd: inode locked twice during operation.\n"); err = fh_getattr(fhp, &fhp->fh_post_attr); fhp->fh_post_change = fhp->fh_dentry->d_inode->i_version; if (err) { fhp->fh_post_saved = 0; /* Grab the ctime anyway - set_change_info might use it */ fhp->fh_post_attr.ctime = fhp->fh_dentry->d_inode->i_ctime; } else fhp->fh_post_saved = 1; } /* * XDR decode functions */ int nfs3svc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd_fhandle *args) { if (!(p = decode_fh(p, &args->fh))) return 0; return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_sattrargs *args) { if (!(p = decode_fh(p, &args->fh))) return 0; p = decode_sattr3(p, &args->attrs); if 
((args->check_guard = ntohl(*p++)) != 0) { struct timespec time; p = decode_time3(p, &time); args->guardtime = time.tv_sec; } return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_diropargs *args) { if (!(p = decode_fh(p, &args->fh)) || !(p = decode_filename(p, &args->name, &args->len))) return 0; return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_accessargs *args) { if (!(p = decode_fh(p, &args->fh))) return 0; args->access = ntohl(*p++); return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readargs *args) { unsigned int len; int v; u32 max_blocksize = svc_max_payload(rqstp); if (!(p = decode_fh(p, &args->fh))) return 0; p = xdr_decode_hyper(p, &args->offset); len = args->count = ntohl(*p++); if (len > max_blocksize) len = max_blocksize; /* set up the kvec */ v=0; while (len > 0) { struct page *p = *(rqstp->rq_next_page++); rqstp->rq_vec[v].iov_base = page_address(p); rqstp->rq_vec[v].iov_len = len < PAGE_SIZE? len : PAGE_SIZE; len -= rqstp->rq_vec[v].iov_len; v++; } args->vlen = v; return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_writeargs *args) { unsigned int len, v, hdr, dlen; u32 max_blocksize = svc_max_payload(rqstp); if (!(p = decode_fh(p, &args->fh))) return 0; p = xdr_decode_hyper(p, &args->offset); args->count = ntohl(*p++); args->stable = ntohl(*p++); len = args->len = ntohl(*p++); /* * The count must equal the amount of data passed. */ if (args->count != args->len) return 0; /* * Check to make sure that we got the right number of * bytes. 
*/ hdr = (void*)p - rqstp->rq_arg.head[0].iov_base; dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len - hdr; /* * Round the length of the data which was specified up to * the next multiple of XDR units and then compare that * against the length which was actually received. * Note that when RPCSEC/GSS (for example) is used, the * data buffer can be padded so dlen might be larger * than required. It must never be smaller. */ if (dlen < XDR_QUADLEN(len)*4) return 0; if (args->count > max_blocksize) { args->count = max_blocksize; len = args->len = max_blocksize; } rqstp->rq_vec[0].iov_base = (void*)p; rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr; v = 0; while (len > rqstp->rq_vec[v].iov_len) { len -= rqstp->rq_vec[v].iov_len; v++; rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_pages[v]); rqstp->rq_vec[v].iov_len = PAGE_SIZE; } rqstp->rq_vec[v].iov_len = len; args->vlen = v + 1; return 1; } int nfs3svc_decode_createargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_createargs *args) { if (!(p = decode_fh(p, &args->fh)) || !(p = decode_filename(p, &args->name, &args->len))) return 0; switch (args->createmode = ntohl(*p++)) { case NFS3_CREATE_UNCHECKED: case NFS3_CREATE_GUARDED: p = decode_sattr3(p, &args->attrs); break; case NFS3_CREATE_EXCLUSIVE: args->verf = p; p += 2; break; default: return 0; } return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_mkdirargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_createargs *args) { if (!(p = decode_fh(p, &args->fh)) || !(p = decode_filename(p, &args->name, &args->len))) return 0; p = decode_sattr3(p, &args->attrs); return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_symlinkargs *args) { unsigned int len, avail; char *old, *new; struct kvec *vec; if (!(p = decode_fh(p, &args->ffh)) || !(p = decode_filename(p, &args->fname, &args->flen)) ) return 0; p = decode_sattr3(p, &args->attrs); /* now decode the pathname, which might 
be larger than the first page. * As we have to check for nul's anyway, we copy it into a new page * This page appears in the rq_res.pages list, but as pages_len is always * 0, it won't get in the way */ len = ntohl(*p++); if (len == 0 || len > NFS3_MAXPATHLEN || len >= PAGE_SIZE) return 0; args->tname = new = page_address(*(rqstp->rq_next_page++)); args->tlen = len; /* first copy and check from the first page */ old = (char*)p; vec = &rqstp->rq_arg.head[0]; avail = vec->iov_len - (old - (char*)vec->iov_base); while (len && avail && *old) { *new++ = *old++; len--; avail--; } /* now copy next page if there is one */ if (len && !avail && rqstp->rq_arg.page_len) { avail = rqstp->rq_arg.page_len; if (avail > PAGE_SIZE) avail = PAGE_SIZE; old = page_address(rqstp->rq_arg.pages[0]); } while (len && avail && *old) { *new++ = *old++; len--; avail--; } *new = '\0'; if (len) return 0; return 1; } int nfs3svc_decode_mknodargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_mknodargs *args) { if (!(p = decode_fh(p, &args->fh)) || !(p = decode_filename(p, &args->name, &args->len))) return 0; args->ftype = ntohl(*p++); if (args->ftype == NF3BLK || args->ftype == NF3CHR || args->ftype == NF3SOCK || args->ftype == NF3FIFO) p = decode_sattr3(p, &args->attrs); if (args->ftype == NF3BLK || args->ftype == NF3CHR) { args->major = ntohl(*p++); args->minor = ntohl(*p++); } return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_renameargs *args) { if (!(p = decode_fh(p, &args->ffh)) || !(p = decode_filename(p, &args->fname, &args->flen)) || !(p = decode_fh(p, &args->tfh)) || !(p = decode_filename(p, &args->tname, &args->tlen))) return 0; return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readlinkargs *args) { if (!(p = decode_fh(p, &args->fh))) return 0; args->buffer = page_address(*(rqstp->rq_next_page++)); return xdr_argsize_check(rqstp, p); } int 
nfs3svc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_linkargs *args) { if (!(p = decode_fh(p, &args->ffh)) || !(p = decode_fh(p, &args->tfh)) || !(p = decode_filename(p, &args->tname, &args->tlen))) return 0; return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readdirargs *args) { if (!(p = decode_fh(p, &args->fh))) return 0; p = xdr_decode_hyper(p, &args->cookie); args->verf = p; p += 2; args->dircount = ~0; args->count = ntohl(*p++); if (args->count > PAGE_SIZE) args->count = PAGE_SIZE; args->buffer = page_address(*(rqstp->rq_next_page++)); return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readdirargs *args) { int len; u32 max_blocksize = svc_max_payload(rqstp); if (!(p = decode_fh(p, &args->fh))) return 0; p = xdr_decode_hyper(p, &args->cookie); args->verf = p; p += 2; args->dircount = ntohl(*p++); args->count = ntohl(*p++); len = (args->count > max_blocksize) ? max_blocksize : args->count; args->count = len; while (len > 0) { struct page *p = *(rqstp->rq_next_page++); if (!args->buffer) args->buffer = page_address(p); len -= PAGE_SIZE; } return xdr_argsize_check(rqstp, p); } int nfs3svc_decode_commitargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_commitargs *args) { if (!(p = decode_fh(p, &args->fh))) return 0; p = xdr_decode_hyper(p, &args->offset); args->count = ntohl(*p++); return xdr_argsize_check(rqstp, p); } /* * XDR encode functions */ /* * There must be an encoding function for void results so svc_process * will work properly. 
*/ int nfs3svc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy) { return xdr_ressize_check(rqstp, p); } /* GETATTR */ int nfs3svc_encode_attrstat(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_attrstat *resp) { if (resp->status == 0) { lease_get_mtime(resp->fh.fh_dentry->d_inode, &resp->stat.mtime); p = encode_fattr3(rqstp, p, &resp->fh, &resp->stat); } return xdr_ressize_check(rqstp, p); } /* SETATTR, REMOVE, RMDIR */ int nfs3svc_encode_wccstat(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_attrstat *resp) { p = encode_wcc_data(rqstp, p, &resp->fh); return xdr_ressize_check(rqstp, p); } /* LOOKUP */ int nfs3svc_encode_diropres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_diropres *resp) { if (resp->status == 0) { p = encode_fh(p, &resp->fh); p = encode_post_op_attr(rqstp, p, &resp->fh); } p = encode_post_op_attr(rqstp, p, &resp->dirfh); return xdr_ressize_check(rqstp, p); } /* ACCESS */ int nfs3svc_encode_accessres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_accessres *resp) { p = encode_post_op_attr(rqstp, p, &resp->fh); if (resp->status == 0) *p++ = htonl(resp->access); return xdr_ressize_check(rqstp, p); } /* READLINK */ int nfs3svc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readlinkres *resp) { p = encode_post_op_attr(rqstp, p, &resp->fh); if (resp->status == 0) { *p++ = htonl(resp->len); xdr_ressize_check(rqstp, p); rqstp->rq_res.page_len = resp->len; if (resp->len & 3) { /* need to pad the tail */ rqstp->rq_res.tail[0].iov_base = p; *p = 0; rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3); } return 1; } else return xdr_ressize_check(rqstp, p); } /* READ */ int nfs3svc_encode_readres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readres *resp) { p = encode_post_op_attr(rqstp, p, &resp->fh); if (resp->status == 0) { *p++ = htonl(resp->count); *p++ = htonl(resp->eof); *p++ = htonl(resp->count); /* xdr opaque count */ xdr_ressize_check(rqstp, p); /* now update rqstp->rq_res to reflect data as well */ 
rqstp->rq_res.page_len = resp->count; if (resp->count & 3) { /* need to pad the tail */ rqstp->rq_res.tail[0].iov_base = p; *p = 0; rqstp->rq_res.tail[0].iov_len = 4 - (resp->count & 3); } return 1; } else return xdr_ressize_check(rqstp, p); } /* WRITE */ int nfs3svc_encode_writeres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_writeres *resp) { struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); p = encode_wcc_data(rqstp, p, &resp->fh); if (resp->status == 0) { *p++ = htonl(resp->count); *p++ = htonl(resp->committed); *p++ = htonl(nn->nfssvc_boot.tv_sec); *p++ = htonl(nn->nfssvc_boot.tv_usec); } return xdr_ressize_check(rqstp, p); } /* CREATE, MKDIR, SYMLINK, MKNOD */ int nfs3svc_encode_createres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_diropres *resp) { if (resp->status == 0) { *p++ = xdr_one; p = encode_fh(p, &resp->fh); p = encode_post_op_attr(rqstp, p, &resp->fh); } p = encode_wcc_data(rqstp, p, &resp->dirfh); return xdr_ressize_check(rqstp, p); } /* RENAME */ int nfs3svc_encode_renameres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_renameres *resp) { p = encode_wcc_data(rqstp, p, &resp->ffh); p = encode_wcc_data(rqstp, p, &resp->tfh); return xdr_ressize_check(rqstp, p); } /* LINK */ int nfs3svc_encode_linkres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_linkres *resp) { p = encode_post_op_attr(rqstp, p, &resp->fh); p = encode_wcc_data(rqstp, p, &resp->tfh); return xdr_ressize_check(rqstp, p); } /* READDIR */ int nfs3svc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readdirres *resp) { p = encode_post_op_attr(rqstp, p, &resp->fh); if (resp->status == 0) { /* stupid readdir cookie */ memcpy(p, resp->verf, 8); p += 2; xdr_ressize_check(rqstp, p); if (rqstp->rq_res.head[0].iov_len + (2<<2) > PAGE_SIZE) return 1; /*No room for trailer */ rqstp->rq_res.page_len = (resp->count) << 2; /* add the 'tail' to the end of the 'head' page - page 0. 
*/ rqstp->rq_res.tail[0].iov_base = p; *p++ = 0; /* no more entries */ *p++ = htonl(resp->common.err == nfserr_eof); rqstp->rq_res.tail[0].iov_len = 2<<2; return 1; } else return xdr_ressize_check(rqstp, p); } static __be32 * encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen, u64 ino) { *p++ = xdr_one; /* mark entry present */ p = xdr_encode_hyper(p, ino); /* file id */ p = xdr_encode_array(p, name, namlen);/* name length & name */ cd->offset = p; /* remember pointer */ p = xdr_encode_hyper(p, NFS_OFFSET_MAX);/* offset of next entry */ return p; } static __be32 compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp, const char *name, int namlen) { struct svc_export *exp; struct dentry *dparent, *dchild; __be32 rv = nfserr_noent; dparent = cd->fh.fh_dentry; exp = cd->fh.fh_export; if (isdotent(name, namlen)) { if (namlen == 2) { dchild = dget_parent(dparent); /* filesystem root - cannot return filehandle for ".." */ if (dchild == dparent) goto out; } else dchild = dget(dparent); } else dchild = lookup_one_len(name, dparent, namlen); if (IS_ERR(dchild)) return rv; if (d_mountpoint(dchild)) goto out; if (!dchild->d_inode) goto out; rv = fh_compose(fhp, exp, dchild, &cd->fh); out: dput(dchild); return rv; } static __be32 *encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen) { struct svc_fh fh; __be32 err; fh_init(&fh, NFS3_FHSIZE); err = compose_entry_fh(cd, &fh, name, namlen); if (err) { *p++ = 0; *p++ = 0; goto out; } p = encode_post_op_attr(cd->rqstp, p, &fh); *p++ = xdr_one; /* yes, a file handle follows */ p = encode_fh(p, &fh); out: fh_put(&fh); return p; } /* * Encode a directory entry. This one works for both normal readdir * and readdirplus. * The normal readdir reply requires 2 (fileid) + 1 (stringlen) * + string + 2 (cookie) + 1 (next) words, i.e. 6 + strlen. * * The readdirplus baggage is 1+21 words for post_op_attr, plus the * file handle. 
*/ #define NFS3_ENTRY_BAGGAGE (2 + 1 + 2 + 1) #define NFS3_ENTRYPLUS_BAGGAGE (1 + 21 + 1 + (NFS3_FHSIZE >> 2)) static int encode_entry(struct readdir_cd *ccd, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type, int plus) { struct nfsd3_readdirres *cd = container_of(ccd, struct nfsd3_readdirres, common); __be32 *p = cd->buffer; caddr_t curr_page_addr = NULL; struct page ** page; int slen; /* string (name) length */ int elen; /* estimated entry length in words */ int num_entry_words = 0; /* actual number of words */ if (cd->offset) { u64 offset64 = offset; if (unlikely(cd->offset1)) { /* we ended up with offset on a page boundary */ *cd->offset = htonl(offset64 >> 32); *cd->offset1 = htonl(offset64 & 0xffffffff); cd->offset1 = NULL; } else { xdr_encode_hyper(cd->offset, offset64); } } /* dprintk("encode_entry(%.*s @%ld%s)\n", namlen, name, (long) offset, plus? " plus" : ""); */ /* truncate filename if too long */ if (namlen > NFS3_MAXNAMLEN) namlen = NFS3_MAXNAMLEN; slen = XDR_QUADLEN(namlen); elen = slen + NFS3_ENTRY_BAGGAGE + (plus? 
NFS3_ENTRYPLUS_BAGGAGE : 0); if (cd->buflen < elen) { cd->common.err = nfserr_toosmall; return -EINVAL; } /* determine which page in rq_respages[] we are currently filling */ for (page = cd->rqstp->rq_respages + 1; page < cd->rqstp->rq_next_page; page++) { curr_page_addr = page_address(*page); if (((caddr_t)cd->buffer >= curr_page_addr) && ((caddr_t)cd->buffer < curr_page_addr + PAGE_SIZE)) break; } if ((caddr_t)(cd->buffer + elen) < (curr_page_addr + PAGE_SIZE)) { /* encode entry in current page */ p = encode_entry_baggage(cd, p, name, namlen, ino); if (plus) p = encode_entryplus_baggage(cd, p, name, namlen); num_entry_words = p - cd->buffer; } else if (*(page+1) != NULL) { /* temporarily encode entry into next page, then move back to * current and next page in rq_respages[] */ __be32 *p1, *tmp; int len1, len2; /* grab next page for temporary storage of entry */ p1 = tmp = page_address(*(page+1)); p1 = encode_entry_baggage(cd, p1, name, namlen, ino); if (plus) p1 = encode_entryplus_baggage(cd, p1, name, namlen); /* determine entry word length and lengths to go in pages */ num_entry_words = p1 - tmp; len1 = curr_page_addr + PAGE_SIZE - (caddr_t)cd->buffer; if ((num_entry_words << 2) < len1) { /* the actual number of words in the entry is less * than elen and can still fit in the current page */ memmove(p, tmp, num_entry_words << 2); p += num_entry_words; /* update offset */ cd->offset = cd->buffer + (cd->offset - tmp); } else { unsigned int offset_r = (cd->offset - tmp) << 2; /* update pointer to offset location. 
* This is a 64bit quantity, so we need to * deal with 3 cases: * - entirely in first page * - entirely in second page * - 4 bytes in each page */ if (offset_r + 8 <= len1) { cd->offset = p + (cd->offset - tmp); } else if (offset_r >= len1) { cd->offset -= len1 >> 2; } else { /* sitting on the fence */ BUG_ON(offset_r != len1 - 4); cd->offset = p + (cd->offset - tmp); cd->offset1 = tmp; } len2 = (num_entry_words << 2) - len1; /* move from temp page to current and next pages */ memmove(p, tmp, len1); memmove(tmp, (caddr_t)tmp+len1, len2); p = tmp + (len2 >> 2); } } else { cd->common.err = nfserr_toosmall; return -EINVAL; } cd->buflen -= num_entry_words; cd->buffer = p; cd->common.err = nfs_ok; return 0; } int nfs3svc_encode_entry(void *cd, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { return encode_entry(cd, name, namlen, offset, ino, d_type, 0); } int nfs3svc_encode_entry_plus(void *cd, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { return encode_entry(cd, name, namlen, offset, ino, d_type, 1); } /* FSSTAT */ int nfs3svc_encode_fsstatres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_fsstatres *resp) { struct kstatfs *s = &resp->stats; u64 bs = s->f_bsize; *p++ = xdr_zero; /* no post_op_attr */ if (resp->status == 0) { p = xdr_encode_hyper(p, bs * s->f_blocks); /* total bytes */ p = xdr_encode_hyper(p, bs * s->f_bfree); /* free bytes */ p = xdr_encode_hyper(p, bs * s->f_bavail); /* user available bytes */ p = xdr_encode_hyper(p, s->f_files); /* total inodes */ p = xdr_encode_hyper(p, s->f_ffree); /* free inodes */ p = xdr_encode_hyper(p, s->f_ffree); /* user available inodes */ *p++ = htonl(resp->invarsec); /* mean unchanged time */ } return xdr_ressize_check(rqstp, p); } /* FSINFO */ int nfs3svc_encode_fsinfores(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_fsinfores *resp) { *p++ = xdr_zero; /* no post_op_attr */ if (resp->status == 0) { *p++ = htonl(resp->f_rtmax); *p++ = htonl(resp->f_rtpref); *p++ 
= htonl(resp->f_rtmult); *p++ = htonl(resp->f_wtmax); *p++ = htonl(resp->f_wtpref); *p++ = htonl(resp->f_wtmult); *p++ = htonl(resp->f_dtpref); p = xdr_encode_hyper(p, resp->f_maxfilesize); *p++ = xdr_one; *p++ = xdr_zero; *p++ = htonl(resp->f_properties); } return xdr_ressize_check(rqstp, p); } /* PATHCONF */ int nfs3svc_encode_pathconfres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_pathconfres *resp) { *p++ = xdr_zero; /* no post_op_attr */ if (resp->status == 0) { *p++ = htonl(resp->p_link_max); *p++ = htonl(resp->p_name_max); *p++ = htonl(resp->p_no_trunc); *p++ = htonl(resp->p_chown_restricted); *p++ = htonl(resp->p_case_insensitive); *p++ = htonl(resp->p_case_preserving); } return xdr_ressize_check(rqstp, p); } /* COMMIT */ int nfs3svc_encode_commitres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_commitres *resp) { struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); p = encode_wcc_data(rqstp, p, &resp->fh); /* Write verifier */ if (resp->status == 0) { *p++ = htonl(nn->nfssvc_boot.tv_sec); *p++ = htonl(nn->nfssvc_boot.tv_usec); } return xdr_ressize_check(rqstp, p); } /* * XDR release functions */ int nfs3svc_release_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_attrstat *resp) { fh_put(&resp->fh); return 1; } int nfs3svc_release_fhandle2(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_fhandle_pair *resp) { fh_put(&resp->fh1); fh_put(&resp->fh2); return 1; }
gpl-2.0
nicholaschw/jared-rA
drivers/dma/mv_xor.c
2373
36307
/* * offload engine driver for the Marvell XOR engine * Copyright (C) 2007, 2008, Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/memory.h> #include <plat/mv_xor.h> #include "mv_xor.h" static void mv_xor_issue_pending(struct dma_chan *chan); #define to_mv_xor_chan(chan) \ container_of(chan, struct mv_xor_chan, common) #define to_mv_xor_device(dev) \ container_of(dev, struct mv_xor_device, common) #define to_mv_xor_slot(tx) \ container_of(tx, struct mv_xor_desc_slot, async_tx) static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags) { struct mv_xor_desc *hw_desc = desc->hw_desc; hw_desc->status = (1 << 31); hw_desc->phy_next_desc = 0; hw_desc->desc_command = (1 << 31); } static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) { struct mv_xor_desc *hw_desc = desc->hw_desc; return hw_desc->phy_dest_addr; } static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc, int src_idx) { struct mv_xor_desc *hw_desc = desc->hw_desc; return hw_desc->phy_src_addr[src_idx]; } static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, u32 byte_count) { struct mv_xor_desc *hw_desc = 
desc->hw_desc; hw_desc->byte_count = byte_count; } static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc, u32 next_desc_addr) { struct mv_xor_desc *hw_desc = desc->hw_desc; BUG_ON(hw_desc->phy_next_desc); hw_desc->phy_next_desc = next_desc_addr; } static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc) { struct mv_xor_desc *hw_desc = desc->hw_desc; hw_desc->phy_next_desc = 0; } static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val) { desc->value = val; } static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc, dma_addr_t addr) { struct mv_xor_desc *hw_desc = desc->hw_desc; hw_desc->phy_dest_addr = addr; } static int mv_chan_memset_slot_count(size_t len) { return 1; } #define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c) static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc, int index, dma_addr_t addr) { struct mv_xor_desc *hw_desc = desc->hw_desc; hw_desc->phy_src_addr[index] = addr; if (desc->type == DMA_XOR) hw_desc->desc_command |= (1 << index); } static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan) { return __raw_readl(XOR_CURR_DESC(chan)); } static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan, u32 next_desc_addr) { __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan)); } static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr) { __raw_writel(desc_addr, XOR_DEST_POINTER(chan)); } static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size) { __raw_writel(block_size, XOR_BLOCK_SIZE(chan)); } static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value) { __raw_writel(value, XOR_INIT_VALUE_LOW(chan)); __raw_writel(value, XOR_INIT_VALUE_HIGH(chan)); } static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan) { u32 val = __raw_readl(XOR_INTR_MASK(chan)); val |= XOR_INTR_MASK_VALUE << (chan->idx * 16); __raw_writel(val, XOR_INTR_MASK(chan)); } static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan) { u32 intr_cause = 
__raw_readl(XOR_INTR_CAUSE(chan)); intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF; return intr_cause; } static int mv_is_err_intr(u32 intr_cause) { if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9))) return 1; return 0; } static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) { u32 val = ~(1 << (chan->idx * 16)); dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); __raw_writel(val, XOR_INTR_CAUSE(chan)); } static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan) { u32 val = 0xFFFF0000 >> (chan->idx * 16); __raw_writel(val, XOR_INTR_CAUSE(chan)); } static int mv_can_chain(struct mv_xor_desc_slot *desc) { struct mv_xor_desc_slot *chain_old_tail = list_entry( desc->chain_node.prev, struct mv_xor_desc_slot, chain_node); if (chain_old_tail->type != desc->type) return 0; if (desc->type == DMA_MEMSET) return 0; return 1; } static void mv_set_mode(struct mv_xor_chan *chan, enum dma_transaction_type type) { u32 op_mode; u32 config = __raw_readl(XOR_CONFIG(chan)); switch (type) { case DMA_XOR: op_mode = XOR_OPERATION_MODE_XOR; break; case DMA_MEMCPY: op_mode = XOR_OPERATION_MODE_MEMCPY; break; case DMA_MEMSET: op_mode = XOR_OPERATION_MODE_MEMSET; break; default: dev_printk(KERN_ERR, chan->device->common.dev, "error: unsupported operation %d.\n", type); BUG(); return; } config &= ~0x7; config |= op_mode; __raw_writel(config, XOR_CONFIG(chan)); chan->current_type = type; } static void mv_chan_activate(struct mv_xor_chan *chan) { u32 activation; dev_dbg(chan->device->common.dev, " activate chan.\n"); activation = __raw_readl(XOR_ACTIVATION(chan)); activation |= 0x1; __raw_writel(activation, XOR_ACTIVATION(chan)); } static char mv_chan_is_busy(struct mv_xor_chan *chan) { u32 state = __raw_readl(XOR_ACTIVATION(chan)); state = (state >> 4) & 0x3; return (state == 1) ? 
1 : 0; } static int mv_chan_xor_slot_count(size_t len, int src_cnt) { return 1; } /** * mv_xor_free_slots - flags descriptor slots for reuse * @slot: Slot to free * Caller must hold &mv_chan->lock while calling this function */ static void mv_xor_free_slots(struct mv_xor_chan *mv_chan, struct mv_xor_desc_slot *slot) { dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n", __func__, __LINE__, slot); slot->slots_per_op = 0; } /* * mv_xor_start_new_chain - program the engine to operate on new chain headed by * sw_desc * Caller must hold &mv_chan->lock while calling this function */ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan, struct mv_xor_desc_slot *sw_desc) { dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n", __func__, __LINE__, sw_desc); if (sw_desc->type != mv_chan->current_type) mv_set_mode(mv_chan, sw_desc->type); if (sw_desc->type == DMA_MEMSET) { /* for memset requests we need to program the engine, no * descriptors used. */ struct mv_xor_desc *hw_desc = sw_desc->hw_desc; mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr); mv_chan_set_block_size(mv_chan, sw_desc->unmap_len); mv_chan_set_value(mv_chan, sw_desc->value); } else { /* set the hardware chain */ mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); } mv_chan->pending += sw_desc->slot_cnt; mv_xor_issue_pending(&mv_chan->common); } static dma_cookie_t mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, struct mv_xor_chan *mv_chan, dma_cookie_t cookie) { BUG_ON(desc->async_tx.cookie < 0); if (desc->async_tx.cookie > 0) { cookie = desc->async_tx.cookie; /* call the callback (must not sleep or submit new * operations to this channel) */ if (desc->async_tx.callback) desc->async_tx.callback( desc->async_tx.callback_param); /* unmap dma addresses * (unmap_single vs unmap_page?) 
*/ if (desc->group_head && desc->unmap_len) { struct mv_xor_desc_slot *unmap = desc->group_head; struct device *dev = &mv_chan->device->pdev->dev; u32 len = unmap->unmap_len; enum dma_ctrl_flags flags = desc->async_tx.flags; u32 src_cnt; dma_addr_t addr; dma_addr_t dest; src_cnt = unmap->unmap_src_cnt; dest = mv_desc_get_dest_addr(unmap); if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { enum dma_data_direction dir; if (src_cnt > 1) /* is xor ? */ dir = DMA_BIDIRECTIONAL; else dir = DMA_FROM_DEVICE; dma_unmap_page(dev, dest, len, dir); } if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { while (src_cnt--) { addr = mv_desc_get_src_addr(unmap, src_cnt); if (addr == dest) continue; dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); } } desc->group_head = NULL; } } /* run dependent operations */ dma_run_dependencies(&desc->async_tx); return cookie; } static int mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan) { struct mv_xor_desc_slot *iter, *_iter; dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__); list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, completed_node) { if (async_tx_test_ack(&iter->async_tx)) { list_del(&iter->completed_node); mv_xor_free_slots(mv_chan, iter); } } return 0; } static int mv_xor_clean_slot(struct mv_xor_desc_slot *desc, struct mv_xor_chan *mv_chan) { dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n", __func__, __LINE__, desc, desc->async_tx.flags); list_del(&desc->chain_node); /* the client is allowed to attach dependent operations * until 'ack' is set */ if (!async_tx_test_ack(&desc->async_tx)) { /* move this slot to the completed_slots */ list_add_tail(&desc->completed_node, &mv_chan->completed_slots); return 0; } mv_xor_free_slots(mv_chan, desc); return 0; } static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) { struct mv_xor_desc_slot *iter, *_iter; dma_cookie_t cookie = 0; int busy = mv_chan_is_busy(mv_chan); u32 current_desc = mv_chan_get_current_desc(mv_chan); int seen_current = 0; 
dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__); dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc); mv_xor_clean_completed_slots(mv_chan); /* free completed slots from the chain starting with * the oldest descriptor */ list_for_each_entry_safe(iter, _iter, &mv_chan->chain, chain_node) { prefetch(_iter); prefetch(&_iter->async_tx); /* do not advance past the current descriptor loaded into the * hardware channel, subsequent descriptors are either in * process or have not been submitted */ if (seen_current) break; /* stop the search if we reach the current descriptor and the * channel is busy */ if (iter->async_tx.phys == current_desc) { seen_current = 1; if (busy) break; } cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie); if (mv_xor_clean_slot(iter, mv_chan)) break; } if ((busy == 0) && !list_empty(&mv_chan->chain)) { struct mv_xor_desc_slot *chain_head; chain_head = list_entry(mv_chan->chain.next, struct mv_xor_desc_slot, chain_node); mv_xor_start_new_chain(mv_chan, chain_head); } if (cookie > 0) mv_chan->completed_cookie = cookie; } static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) { spin_lock_bh(&mv_chan->lock); __mv_xor_slot_cleanup(mv_chan); spin_unlock_bh(&mv_chan->lock); } static void mv_xor_tasklet(unsigned long data) { struct mv_xor_chan *chan = (struct mv_xor_chan *) data; mv_xor_slot_cleanup(chan); } static struct mv_xor_desc_slot * mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots, int slots_per_op) { struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL; LIST_HEAD(chain); int slots_found, retry = 0; /* start search from the last allocated descrtiptor * if a contiguous allocation can not be found start searching * from the beginning of the list */ retry: slots_found = 0; if (retry == 0) iter = mv_chan->last_used; else iter = list_entry(&mv_chan->all_slots, struct mv_xor_desc_slot, slot_node); list_for_each_entry_safe_continue( iter, _iter, &mv_chan->all_slots, slot_node) { 
prefetch(_iter); prefetch(&_iter->async_tx); if (iter->slots_per_op) { /* give up after finding the first busy slot * on the second pass through the list */ if (retry) break; slots_found = 0; continue; } /* start the allocation if the slot is correctly aligned */ if (!slots_found++) alloc_start = iter; if (slots_found == num_slots) { struct mv_xor_desc_slot *alloc_tail = NULL; struct mv_xor_desc_slot *last_used = NULL; iter = alloc_start; while (num_slots) { int i; /* pre-ack all but the last descriptor */ async_tx_ack(&iter->async_tx); list_add_tail(&iter->chain_node, &chain); alloc_tail = iter; iter->async_tx.cookie = 0; iter->slot_cnt = num_slots; iter->xor_check_result = NULL; for (i = 0; i < slots_per_op; i++) { iter->slots_per_op = slots_per_op - i; last_used = iter; iter = list_entry(iter->slot_node.next, struct mv_xor_desc_slot, slot_node); } num_slots -= slots_per_op; } alloc_tail->group_head = alloc_start; alloc_tail->async_tx.cookie = -EBUSY; list_splice(&chain, &alloc_tail->tx_list); mv_chan->last_used = last_used; mv_desc_clear_next_desc(alloc_start); mv_desc_clear_next_desc(alloc_tail); return alloc_tail; } } if (!retry++) goto retry; /* try to free some slots if the allocation fails */ tasklet_schedule(&mv_chan->irq_tasklet); return NULL; } static dma_cookie_t mv_desc_assign_cookie(struct mv_xor_chan *mv_chan, struct mv_xor_desc_slot *desc) { dma_cookie_t cookie = mv_chan->common.cookie; if (++cookie < 0) cookie = 1; mv_chan->common.cookie = desc->async_tx.cookie = cookie; return cookie; } /************************ DMA engine API functions ****************************/ static dma_cookie_t mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) { struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx); struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan); struct mv_xor_desc_slot *grp_start, *old_chain_tail; dma_cookie_t cookie; int new_hw_chain = 1; dev_dbg(mv_chan->device->common.dev, "%s sw_desc %p: async_tx %p\n", __func__, sw_desc, 
&sw_desc->async_tx); grp_start = sw_desc->group_head; spin_lock_bh(&mv_chan->lock); cookie = mv_desc_assign_cookie(mv_chan, sw_desc); if (list_empty(&mv_chan->chain)) list_splice_init(&sw_desc->tx_list, &mv_chan->chain); else { new_hw_chain = 0; old_chain_tail = list_entry(mv_chan->chain.prev, struct mv_xor_desc_slot, chain_node); list_splice_init(&grp_start->tx_list, &old_chain_tail->chain_node); if (!mv_can_chain(grp_start)) goto submit_done; dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n", old_chain_tail->async_tx.phys); /* fix up the hardware chain */ mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys); /* if the channel is not busy */ if (!mv_chan_is_busy(mv_chan)) { u32 current_desc = mv_chan_get_current_desc(mv_chan); /* * and the curren desc is the end of the chain before * the append, then we need to start the channel */ if (current_desc == old_chain_tail->async_tx.phys) new_hw_chain = 1; } } if (new_hw_chain) mv_xor_start_new_chain(mv_chan, grp_start); submit_done: spin_unlock_bh(&mv_chan->lock); return cookie; } /* returns the number of allocated descriptors */ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) { char *hw_desc; int idx; struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *slot = NULL; struct mv_xor_platform_data *plat_data = mv_chan->device->pdev->dev.platform_data; int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE; /* Allocate descriptor slots */ idx = mv_chan->slots_allocated; while (idx < num_descs_in_pool) { slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) { printk(KERN_INFO "MV XOR Channel only initialized" " %d descriptor slots", idx); break; } hw_desc = (char *) mv_chan->device->dma_desc_pool_virt; slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE]; dma_async_tx_descriptor_init(&slot->async_tx, chan); slot->async_tx.tx_submit = mv_xor_tx_submit; INIT_LIST_HEAD(&slot->chain_node); INIT_LIST_HEAD(&slot->slot_node); INIT_LIST_HEAD(&slot->tx_list); 
hw_desc = (char *) mv_chan->device->dma_desc_pool; slot->async_tx.phys = (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; slot->idx = idx++; spin_lock_bh(&mv_chan->lock); mv_chan->slots_allocated = idx; list_add_tail(&slot->slot_node, &mv_chan->all_slots); spin_unlock_bh(&mv_chan->lock); } if (mv_chan->slots_allocated && !mv_chan->last_used) mv_chan->last_used = list_entry(mv_chan->all_slots.next, struct mv_xor_desc_slot, slot_node); dev_dbg(mv_chan->device->common.dev, "allocated %d descriptor slots last_used: %p\n", mv_chan->slots_allocated, mv_chan->last_used); return mv_chan->slots_allocated ? : -ENOMEM; } static struct dma_async_tx_descriptor * mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *sw_desc, *grp_start; int slot_cnt; dev_dbg(mv_chan->device->common.dev, "%s dest: %x src %x len: %u flags: %ld\n", __func__, dest, src, len, flags); if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) return NULL; BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); spin_lock_bh(&mv_chan->lock); slot_cnt = mv_chan_memcpy_slot_count(len); sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); if (sw_desc) { sw_desc->type = DMA_MEMCPY; sw_desc->async_tx.flags = flags; grp_start = sw_desc->group_head; mv_desc_init(grp_start, flags); mv_desc_set_byte_count(grp_start, len); mv_desc_set_dest_addr(sw_desc->group_head, dest); mv_desc_set_src_addr(grp_start, 0, src); sw_desc->unmap_src_cnt = 1; sw_desc->unmap_len = len; } spin_unlock_bh(&mv_chan->lock); dev_dbg(mv_chan->device->common.dev, "%s sw_desc %p async_tx %p\n", __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0); return sw_desc ? 
&sw_desc->async_tx : NULL; } static struct dma_async_tx_descriptor * mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *sw_desc, *grp_start; int slot_cnt; dev_dbg(mv_chan->device->common.dev, "%s dest: %x len: %u flags: %ld\n", __func__, dest, len, flags); if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) return NULL; BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); spin_lock_bh(&mv_chan->lock); slot_cnt = mv_chan_memset_slot_count(len); sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); if (sw_desc) { sw_desc->type = DMA_MEMSET; sw_desc->async_tx.flags = flags; grp_start = sw_desc->group_head; mv_desc_init(grp_start, flags); mv_desc_set_byte_count(grp_start, len); mv_desc_set_dest_addr(sw_desc->group_head, dest); mv_desc_set_block_fill_val(grp_start, value); sw_desc->unmap_src_cnt = 1; sw_desc->unmap_len = len; } spin_unlock_bh(&mv_chan->lock); dev_dbg(mv_chan->device->common.dev, "%s sw_desc %p async_tx %p \n", __func__, sw_desc, &sw_desc->async_tx); return sw_desc ? 
&sw_desc->async_tx : NULL; } static struct dma_async_tx_descriptor * mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *sw_desc, *grp_start; int slot_cnt; if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) return NULL; BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); dev_dbg(mv_chan->device->common.dev, "%s src_cnt: %d len: dest %x %u flags: %ld\n", __func__, src_cnt, len, dest, flags); spin_lock_bh(&mv_chan->lock); slot_cnt = mv_chan_xor_slot_count(len, src_cnt); sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); if (sw_desc) { sw_desc->type = DMA_XOR; sw_desc->async_tx.flags = flags; grp_start = sw_desc->group_head; mv_desc_init(grp_start, flags); /* the byte count field is the same as in memcpy desc*/ mv_desc_set_byte_count(grp_start, len); mv_desc_set_dest_addr(sw_desc->group_head, dest); sw_desc->unmap_src_cnt = src_cnt; sw_desc->unmap_len = len; while (src_cnt--) mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]); } spin_unlock_bh(&mv_chan->lock); dev_dbg(mv_chan->device->common.dev, "%s sw_desc %p async_tx %p \n", __func__, sw_desc, &sw_desc->async_tx); return sw_desc ? 
&sw_desc->async_tx : NULL; } static void mv_xor_free_chan_resources(struct dma_chan *chan) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *iter, *_iter; int in_use_descs = 0; mv_xor_slot_cleanup(mv_chan); spin_lock_bh(&mv_chan->lock); list_for_each_entry_safe(iter, _iter, &mv_chan->chain, chain_node) { in_use_descs++; list_del(&iter->chain_node); } list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, completed_node) { in_use_descs++; list_del(&iter->completed_node); } list_for_each_entry_safe_reverse( iter, _iter, &mv_chan->all_slots, slot_node) { list_del(&iter->slot_node); kfree(iter); mv_chan->slots_allocated--; } mv_chan->last_used = NULL; dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n", __func__, mv_chan->slots_allocated); spin_unlock_bh(&mv_chan->lock); if (in_use_descs) dev_err(mv_chan->device->common.dev, "freeing %d in use descriptors!\n", in_use_descs); } /** * mv_xor_status - poll the status of an XOR transaction * @chan: XOR channel handle * @cookie: XOR transaction identifier * @txstate: XOR transactions state holder (or NULL) */ static enum dma_status mv_xor_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); dma_cookie_t last_used; dma_cookie_t last_complete; enum dma_status ret; last_used = chan->cookie; last_complete = mv_chan->completed_cookie; mv_chan->is_complete_cookie = cookie; dma_set_tx_state(txstate, last_complete, last_used, 0); ret = dma_async_is_complete(cookie, last_complete, last_used); if (ret == DMA_SUCCESS) { mv_xor_clean_completed_slots(mv_chan); return ret; } mv_xor_slot_cleanup(mv_chan); last_used = chan->cookie; last_complete = mv_chan->completed_cookie; dma_set_tx_state(txstate, last_complete, last_used, 0); return dma_async_is_complete(cookie, last_complete, last_used); } static void mv_dump_xor_regs(struct mv_xor_chan *chan) { u32 val; val = __raw_readl(XOR_CONFIG(chan)); 
dev_printk(KERN_ERR, chan->device->common.dev, "config 0x%08x.\n", val); val = __raw_readl(XOR_ACTIVATION(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "activation 0x%08x.\n", val); val = __raw_readl(XOR_INTR_CAUSE(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "intr cause 0x%08x.\n", val); val = __raw_readl(XOR_INTR_MASK(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "intr mask 0x%08x.\n", val); val = __raw_readl(XOR_ERROR_CAUSE(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "error cause 0x%08x.\n", val); val = __raw_readl(XOR_ERROR_ADDR(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "error addr 0x%08x.\n", val); } static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, u32 intr_cause) { if (intr_cause & (1 << 4)) { dev_dbg(chan->device->common.dev, "ignore this error\n"); return; } dev_printk(KERN_ERR, chan->device->common.dev, "error on chan %d. intr cause 0x%08x.\n", chan->idx, intr_cause); mv_dump_xor_regs(chan); BUG(); } static irqreturn_t mv_xor_interrupt_handler(int irq, void *data) { struct mv_xor_chan *chan = data; u32 intr_cause = mv_chan_get_intr_cause(chan); dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause); if (mv_is_err_intr(intr_cause)) mv_xor_err_interrupt_handler(chan, intr_cause); tasklet_schedule(&chan->irq_tasklet); mv_xor_device_clear_eoc_cause(chan); return IRQ_HANDLED; } static void mv_xor_issue_pending(struct dma_chan *chan) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); if (mv_chan->pending >= MV_XOR_THRESHOLD) { mv_chan->pending = 0; mv_chan_activate(mv_chan); } } /* * Perform a transaction to verify the HW works. 
*/ #define MV_XOR_TEST_SIZE 2000 static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device) { int i; void *src, *dest; dma_addr_t src_dma, dest_dma; struct dma_chan *dma_chan; dma_cookie_t cookie; struct dma_async_tx_descriptor *tx; int err = 0; struct mv_xor_chan *mv_chan; src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); if (!src) return -ENOMEM; dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); if (!dest) { kfree(src); return -ENOMEM; } /* Fill in src buffer */ for (i = 0; i < MV_XOR_TEST_SIZE; i++) ((u8 *) src)[i] = (u8)i; /* Start copy, using first DMA channel */ dma_chan = container_of(device->common.channels.next, struct dma_chan, device_node); if (mv_xor_alloc_chan_resources(dma_chan) < 1) { err = -ENODEV; goto out; } dest_dma = dma_map_single(dma_chan->device->dev, dest, MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); src_dma = dma_map_single(dma_chan->device->dev, src, MV_XOR_TEST_SIZE, DMA_TO_DEVICE); tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, MV_XOR_TEST_SIZE, 0); cookie = mv_xor_tx_submit(tx); mv_xor_issue_pending(dma_chan); async_tx_ack(tx); msleep(1); if (mv_xor_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; goto free_resources; } mv_chan = to_mv_xor_chan(dma_chan); dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma, MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test copy failed compare, disabling\n"); err = -ENODEV; goto free_resources; } free_resources: mv_xor_free_chan_resources(dma_chan); out: kfree(src); kfree(dest); return err; } #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */ static int __devinit mv_xor_xor_self_test(struct mv_xor_device *device) { int i, src_idx; struct page *dest; struct page *xor_srcs[MV_XOR_NUM_SRC_TEST]; dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; dma_addr_t dest_dma; struct 
dma_async_tx_descriptor *tx; struct dma_chan *dma_chan; dma_cookie_t cookie; u8 cmp_byte = 0; u32 cmp_word; int err = 0; struct mv_xor_chan *mv_chan; for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { xor_srcs[src_idx] = alloc_page(GFP_KERNEL); if (!xor_srcs[src_idx]) { while (src_idx--) __free_page(xor_srcs[src_idx]); return -ENOMEM; } } dest = alloc_page(GFP_KERNEL); if (!dest) { while (src_idx--) __free_page(xor_srcs[src_idx]); return -ENOMEM; } /* Fill in src buffers */ for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { u8 *ptr = page_address(xor_srcs[src_idx]); for (i = 0; i < PAGE_SIZE; i++) ptr[i] = (1 << src_idx); } for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) cmp_byte ^= (u8) (1 << src_idx); cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | (cmp_byte << 8) | cmp_byte; memset(page_address(dest), 0, PAGE_SIZE); dma_chan = container_of(device->common.channels.next, struct dma_chan, device_node); if (mv_xor_alloc_chan_resources(dma_chan) < 1) { err = -ENODEV; goto out; } /* test xor */ dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++) dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], 0, PAGE_SIZE, DMA_TO_DEVICE); tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0); cookie = mv_xor_tx_submit(tx); mv_xor_issue_pending(dma_chan); async_tx_ack(tx); msleep(8); if (mv_xor_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test xor timed out, disabling\n"); err = -ENODEV; goto free_resources; } mv_chan = to_mv_xor_chan(dma_chan); dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { u32 *ptr = page_address(dest); if (ptr[i] != cmp_word) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test xor failed compare, disabling." 
" index %d, data %x, expected %x\n", i, ptr[i], cmp_word); err = -ENODEV; goto free_resources; } } free_resources: mv_xor_free_chan_resources(dma_chan); out: src_idx = MV_XOR_NUM_SRC_TEST; while (src_idx--) __free_page(xor_srcs[src_idx]); __free_page(dest); return err; } static int __devexit mv_xor_remove(struct platform_device *dev) { struct mv_xor_device *device = platform_get_drvdata(dev); struct dma_chan *chan, *_chan; struct mv_xor_chan *mv_chan; struct mv_xor_platform_data *plat_data = dev->dev.platform_data; dma_async_device_unregister(&device->common); dma_free_coherent(&dev->dev, plat_data->pool_size, device->dma_desc_pool_virt, device->dma_desc_pool); list_for_each_entry_safe(chan, _chan, &device->common.channels, device_node) { mv_chan = to_mv_xor_chan(chan); list_del(&chan->device_node); } return 0; } static int __devinit mv_xor_probe(struct platform_device *pdev) { int ret = 0; int irq; struct mv_xor_device *adev; struct mv_xor_chan *mv_chan; struct dma_device *dma_dev; struct mv_xor_platform_data *plat_data = pdev->dev.platform_data; adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL); if (!adev) return -ENOMEM; dma_dev = &adev->common; /* allocate coherent memory for hardware descriptors * note: writecombine gives slightly better performance, but * requires that we explicitly flush the writes */ adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev, plat_data->pool_size, &adev->dma_desc_pool, GFP_KERNEL); if (!adev->dma_desc_pool_virt) return -ENOMEM; adev->id = plat_data->hw_id; /* discover transaction capabilites from the platform data */ dma_dev->cap_mask = plat_data->cap_mask; adev->pdev = pdev; platform_set_drvdata(pdev, adev); adev->shared = platform_get_drvdata(plat_data->shared); INIT_LIST_HEAD(&dma_dev->channels); /* set base routines */ dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources; dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; dma_dev->device_tx_status = mv_xor_status; 
dma_dev->device_issue_pending = mv_xor_issue_pending; dma_dev->dev = &pdev->dev; /* set prep routines based on capability */ if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset; if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { dma_dev->max_xor = 8; dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; } mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL); if (!mv_chan) { ret = -ENOMEM; goto err_free_dma; } mv_chan->device = adev; mv_chan->idx = plat_data->hw_id; mv_chan->mmr_base = adev->shared->xor_base; if (!mv_chan->mmr_base) { ret = -ENOMEM; goto err_free_dma; } tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long) mv_chan); /* clear errors before enabling interrupts */ mv_xor_device_clear_err_status(mv_chan); irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = irq; goto err_free_dma; } ret = devm_request_irq(&pdev->dev, irq, mv_xor_interrupt_handler, 0, dev_name(&pdev->dev), mv_chan); if (ret) goto err_free_dma; mv_chan_unmask_interrupts(mv_chan); mv_set_mode(mv_chan, DMA_MEMCPY); spin_lock_init(&mv_chan->lock); INIT_LIST_HEAD(&mv_chan->chain); INIT_LIST_HEAD(&mv_chan->completed_slots); INIT_LIST_HEAD(&mv_chan->all_slots); mv_chan->common.device = dma_dev; list_add_tail(&mv_chan->common.device_node, &dma_dev->channels); if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { ret = mv_xor_memcpy_self_test(adev); dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); if (ret) goto err_free_dma; } if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { ret = mv_xor_xor_self_test(adev); dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); if (ret) goto err_free_dma; } dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: " "( %s%s%s%s)\n", dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? 
	 "cpy " : "",
	  dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	goto out;

 err_free_dma:
	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
			adev->dma_desc_pool_virt, adev->dma_desc_pool);
 out:
	return ret;
}

/*
 * Program the XOR engine's MBUS address-decoding windows from the
 * platform's DRAM chip-select layout: clear all eight windows (and the
 * four high-remap registers), then open one window per populated CS
 * and enable read/write access for it on both engines.
 */
static void
mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
			 struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->xor_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		/* 2 bits per window: enable read + write access. */
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= __devexit_p(mv_xor_remove),
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_NAME,
	},
};

/*
 * Probe the shared (per-engine-pair) resources: map the two register
 * banks and optionally program the MBUS windows.  The per-channel
 * mv_xor_probe() instances attach to this device's drvdata.
 */
static int mv_xor_shared_probe(struct platform_device *pdev)
{
	struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data;
	struct mv_xor_shared_private *msp;
	struct resource *res;

	dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");

	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
	if (!msp)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* NOTE(review): resource_size(res) is the idiomatic spelling of
	 * res->end - res->start + 1; left as-is to keep tokens identical. */
	msp->xor_base = devm_ioremap(&pdev->dev, res->start,
				     res->end - res->start + 1);
	if (!msp->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					  res->end - res->start + 1);
	if (!msp->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, msp);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (msd != NULL && msd->dram != NULL)
		mv_xor_conf_mbus_windows(msp, msd->dram);

	return 0;
}

static int mv_xor_shared_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver mv_xor_shared_driver = {
	.probe		= mv_xor_shared_probe,
	.remove		= mv_xor_shared_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_SHARED_NAME,
	},
};

/*
 * Register the shared driver first, then the per-channel driver;
 * unwind the shared registration if the second one fails.
 */
static int __init mv_xor_init(void)
{
	int rc;

	rc = platform_driver_register(&mv_xor_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv_xor_driver);
		if (rc)
			platform_driver_unregister(&mv_xor_shared_driver);
	}
	return rc;
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	platform_driver_unregister(&mv_xor_shared_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");
gpl-2.0
ngochai/s2kernel
drivers/usb/misc/ftdi-elan.c
2373
120706
/*
 * USB FTDI client driver for Elan Digital Systems's Uxxx adapters
 *
 * Copyright(C) 2006 Elan Digital Systems Limited
 * http://www.elandigitalsystems.com
 *
 * Author and Maintainer - Tony Olech - Elan Digital Systems
 * tony.olech@elandigitalsystems.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 *
 * This driver was written by Tony Olech(tony.olech@elandigitalsystems.com)
 * based on various USB client drivers in the 2.6.15 linux kernel
 * with constant reference to the 3rd Edition of Linux Device Drivers
 * published by O'Reilly
 *
 * The U132 adapter is a USB to CardBus adapter specifically designed
 * for PC cards that contain an OHCI host controller. Typical PC cards
 * are the Orange Mobile 3G Option GlobeTrotter Fusion card.
 *
 * The U132 adapter will *NOT* work with PC cards that do not contain
 * an OHCI controller. A simple way to test whether a PC card has an
 * OHCI controller as an interface is to insert the PC card directly
 * into a laptop (or desktop) with a CardBus slot and if "lspci" shows
 * a new USB controller and "lsusb -v" shows a new OHCI Host Controller
 * then there is a good chance that the U132 adapter will support the
 * PC card. (you also need the specific client driver for the PC card)
 *
 * Please inform the Author and Maintainer about any PC cards that
 * contain OHCI Host Controller and work when directly connected to
 * an embedded CardBus slot but do not work when they are connected
 * via an ELAN U132 adapter.
* */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/list.h> #include <linux/ioctl.h> #include <linux/pci_ids.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/kref.h> #include <linux/mutex.h> #include <asm/uaccess.h> #include <linux/usb.h> #include <linux/workqueue.h> #include <linux/platform_device.h> MODULE_AUTHOR("Tony Olech"); MODULE_DESCRIPTION("FTDI ELAN driver"); MODULE_LICENSE("GPL"); #define INT_MODULE_PARM(n, v) static int n = v;module_param(n, int, 0444) static int distrust_firmware = 1; module_param(distrust_firmware, bool, 0); MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/overcurren" "t setup"); extern struct platform_driver u132_platform_driver; static struct workqueue_struct *status_queue; static struct workqueue_struct *command_queue; static struct workqueue_struct *respond_queue; /* * ftdi_module_lock exists to protect access to global variables * */ static struct mutex ftdi_module_lock; static int ftdi_instances = 0; static struct list_head ftdi_static_list; /* * end of the global variables protected by ftdi_module_lock */ #include "usb_u132.h" #include <asm/io.h> #include <linux/usb/hcd.h> /* FIXME ohci.h is ONLY for internal use by the OHCI driver. * If you're going to try stuff like this, you need to split * out shareable stuff (register declarations?) 
into its own * file, maybe name <linux/usb/ohci.h> */ #include "../host/ohci.h" /* Define these values to match your devices*/ #define USB_FTDI_ELAN_VENDOR_ID 0x0403 #define USB_FTDI_ELAN_PRODUCT_ID 0xd6ea /* table of devices that work with this driver*/ static const struct usb_device_id ftdi_elan_table[] = { {USB_DEVICE(USB_FTDI_ELAN_VENDOR_ID, USB_FTDI_ELAN_PRODUCT_ID)}, { /* Terminating entry */ } }; MODULE_DEVICE_TABLE(usb, ftdi_elan_table); /* only the jtag(firmware upgrade device) interface requires * a device file and corresponding minor number, but the * interface is created unconditionally - I suppose it could * be configured or not according to a module parameter. * But since we(now) require one interface per device, * and since it unlikely that a normal installation would * require more than a couple of elan-ftdi devices, 8 seems * like a reasonable limit to have here, and if someone * really requires more than 8 devices, then they can frig the * code and recompile */ #define USB_FTDI_ELAN_MINOR_BASE 192 #define COMMAND_BITS 5 #define COMMAND_SIZE (1<<COMMAND_BITS) #define COMMAND_MASK (COMMAND_SIZE-1) struct u132_command { u8 header; u16 length; u8 address; u8 width; u32 value; int follows; void *buffer; }; #define RESPOND_BITS 5 #define RESPOND_SIZE (1<<RESPOND_BITS) #define RESPOND_MASK (RESPOND_SIZE-1) struct u132_respond { u8 header; u8 address; u32 *value; int *result; struct completion wait_completion; }; struct u132_target { void *endp; struct urb *urb; int toggle_bits; int error_count; int condition_code; int repeat_number; int halted; int skipped; int actual; int non_null; int active; int abandoning; void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null); }; /* Structure to hold all of our device specific stuff*/ struct usb_ftdi { struct list_head ftdi_list; struct mutex u132_lock; int command_next; int 
command_head; struct u132_command command[COMMAND_SIZE]; int respond_next; int respond_head; struct u132_respond respond[RESPOND_SIZE]; struct u132_target target[4]; char device_name[16]; unsigned synchronized:1; unsigned enumerated:1; unsigned registered:1; unsigned initialized:1; unsigned card_ejected:1; int function; int sequence_num; int disconnected; int gone_away; int stuck_status; int status_queue_delay; struct semaphore sw_lock; struct usb_device *udev; struct usb_interface *interface; struct usb_class_driver *class; struct delayed_work status_work; struct delayed_work command_work; struct delayed_work respond_work; struct u132_platform_data platform_data; struct resource resources[0]; struct platform_device platform_dev; unsigned char *bulk_in_buffer; size_t bulk_in_size; size_t bulk_in_last; size_t bulk_in_left; __u8 bulk_in_endpointAddr; __u8 bulk_out_endpointAddr; struct kref kref; u32 controlreg; u8 response[4 + 1024]; int expected; int recieved; int ed_found; }; #define kref_to_usb_ftdi(d) container_of(d, struct usb_ftdi, kref) #define platform_device_to_usb_ftdi(d) container_of(d, struct usb_ftdi, \ platform_dev) static struct usb_driver ftdi_elan_driver; static void ftdi_elan_delete(struct kref *kref) { struct usb_ftdi *ftdi = kref_to_usb_ftdi(kref); dev_warn(&ftdi->udev->dev, "FREEING ftdi=%p\n", ftdi); usb_put_dev(ftdi->udev); ftdi->disconnected += 1; mutex_lock(&ftdi_module_lock); list_del_init(&ftdi->ftdi_list); ftdi_instances -= 1; mutex_unlock(&ftdi_module_lock); kfree(ftdi->bulk_in_buffer); ftdi->bulk_in_buffer = NULL; } static void ftdi_elan_put_kref(struct usb_ftdi *ftdi) { kref_put(&ftdi->kref, ftdi_elan_delete); } static void ftdi_elan_get_kref(struct usb_ftdi *ftdi) { kref_get(&ftdi->kref); } static void ftdi_elan_init_kref(struct usb_ftdi *ftdi) { kref_init(&ftdi->kref); } static void ftdi_status_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) { if (!queue_delayed_work(status_queue, &ftdi->status_work, delta)) 
kref_put(&ftdi->kref, ftdi_elan_delete); } static void ftdi_status_queue_work(struct usb_ftdi *ftdi, unsigned int delta) { if (queue_delayed_work(status_queue, &ftdi->status_work, delta)) kref_get(&ftdi->kref); } static void ftdi_status_cancel_work(struct usb_ftdi *ftdi) { if (cancel_delayed_work(&ftdi->status_work)) kref_put(&ftdi->kref, ftdi_elan_delete); } static void ftdi_command_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) { if (!queue_delayed_work(command_queue, &ftdi->command_work, delta)) kref_put(&ftdi->kref, ftdi_elan_delete); } static void ftdi_command_queue_work(struct usb_ftdi *ftdi, unsigned int delta) { if (queue_delayed_work(command_queue, &ftdi->command_work, delta)) kref_get(&ftdi->kref); } static void ftdi_command_cancel_work(struct usb_ftdi *ftdi) { if (cancel_delayed_work(&ftdi->command_work)) kref_put(&ftdi->kref, ftdi_elan_delete); } static void ftdi_response_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) { if (!queue_delayed_work(respond_queue, &ftdi->respond_work, delta)) kref_put(&ftdi->kref, ftdi_elan_delete); } static void ftdi_respond_queue_work(struct usb_ftdi *ftdi, unsigned int delta) { if (queue_delayed_work(respond_queue, &ftdi->respond_work, delta)) kref_get(&ftdi->kref); } static void ftdi_response_cancel_work(struct usb_ftdi *ftdi) { if (cancel_delayed_work(&ftdi->respond_work)) kref_put(&ftdi->kref, ftdi_elan_delete); } void ftdi_elan_gone_away(struct platform_device *pdev) { struct usb_ftdi *ftdi = platform_device_to_usb_ftdi(pdev); ftdi->gone_away += 1; ftdi_elan_put_kref(ftdi); } EXPORT_SYMBOL_GPL(ftdi_elan_gone_away); static void ftdi_release_platform_dev(struct device *dev) { dev->parent = NULL; } static void ftdi_elan_do_callback(struct usb_ftdi *ftdi, struct u132_target *target, u8 *buffer, int length); static void ftdi_elan_kick_command_queue(struct usb_ftdi *ftdi); static void ftdi_elan_kick_respond_queue(struct usb_ftdi *ftdi); static int ftdi_elan_setupOHCI(struct usb_ftdi *ftdi); static int 
ftdi_elan_checkingPCI(struct usb_ftdi *ftdi); static int ftdi_elan_enumeratePCI(struct usb_ftdi *ftdi); static int ftdi_elan_synchronize(struct usb_ftdi *ftdi); static int ftdi_elan_stuck_waiting(struct usb_ftdi *ftdi); static int ftdi_elan_command_engine(struct usb_ftdi *ftdi); static int ftdi_elan_respond_engine(struct usb_ftdi *ftdi); static int ftdi_elan_hcd_init(struct usb_ftdi *ftdi) { int result; if (ftdi->platform_dev.dev.parent) return -EBUSY; ftdi_elan_get_kref(ftdi); ftdi->platform_data.potpg = 100; ftdi->platform_data.reset = NULL; ftdi->platform_dev.id = ftdi->sequence_num; ftdi->platform_dev.resource = ftdi->resources; ftdi->platform_dev.num_resources = ARRAY_SIZE(ftdi->resources); ftdi->platform_dev.dev.platform_data = &ftdi->platform_data; ftdi->platform_dev.dev.parent = NULL; ftdi->platform_dev.dev.release = ftdi_release_platform_dev; ftdi->platform_dev.dev.dma_mask = NULL; snprintf(ftdi->device_name, sizeof(ftdi->device_name), "u132_hcd"); ftdi->platform_dev.name = ftdi->device_name; dev_info(&ftdi->udev->dev, "requesting module '%s'\n", "u132_hcd"); request_module("u132_hcd"); dev_info(&ftdi->udev->dev, "registering '%s'\n", ftdi->platform_dev.name); result = platform_device_register(&ftdi->platform_dev); return result; } static void ftdi_elan_abandon_completions(struct usb_ftdi *ftdi) { mutex_lock(&ftdi->u132_lock); while (ftdi->respond_next > ftdi->respond_head) { struct u132_respond *respond = &ftdi->respond[RESPOND_MASK & ftdi->respond_head++]; *respond->result = -ESHUTDOWN; *respond->value = 0; complete(&respond->wait_completion); } mutex_unlock(&ftdi->u132_lock); } static void ftdi_elan_abandon_targets(struct usb_ftdi *ftdi) { int ed_number = 4; mutex_lock(&ftdi->u132_lock); while (ed_number-- > 0) { struct u132_target *target = &ftdi->target[ed_number]; if (target->active == 1) { target->condition_code = TD_DEVNOTRESP; mutex_unlock(&ftdi->u132_lock); ftdi_elan_do_callback(ftdi, target, NULL, 0); mutex_lock(&ftdi->u132_lock); } } 
ftdi->recieved = 0; ftdi->expected = 4; ftdi->ed_found = 0; mutex_unlock(&ftdi->u132_lock); } static void ftdi_elan_flush_targets(struct usb_ftdi *ftdi) { int ed_number = 4; mutex_lock(&ftdi->u132_lock); while (ed_number-- > 0) { struct u132_target *target = &ftdi->target[ed_number]; target->abandoning = 1; wait_1:if (target->active == 1) { int command_size = ftdi->command_next - ftdi->command_head; if (command_size < COMMAND_SIZE) { struct u132_command *command = &ftdi->command[ COMMAND_MASK & ftdi->command_next]; command->header = 0x80 | (ed_number << 5) | 0x4; command->length = 0x00; command->address = 0x00; command->width = 0x00; command->follows = 0; command->value = 0; command->buffer = &command->value; ftdi->command_next += 1; ftdi_elan_kick_command_queue(ftdi); } else { mutex_unlock(&ftdi->u132_lock); msleep(100); mutex_lock(&ftdi->u132_lock); goto wait_1; } } wait_2:if (target->active == 1) { int command_size = ftdi->command_next - ftdi->command_head; if (command_size < COMMAND_SIZE) { struct u132_command *command = &ftdi->command[ COMMAND_MASK & ftdi->command_next]; command->header = 0x90 | (ed_number << 5); command->length = 0x00; command->address = 0x00; command->width = 0x00; command->follows = 0; command->value = 0; command->buffer = &command->value; ftdi->command_next += 1; ftdi_elan_kick_command_queue(ftdi); } else { mutex_unlock(&ftdi->u132_lock); msleep(100); mutex_lock(&ftdi->u132_lock); goto wait_2; } } } ftdi->recieved = 0; ftdi->expected = 4; ftdi->ed_found = 0; mutex_unlock(&ftdi->u132_lock); } static void ftdi_elan_cancel_targets(struct usb_ftdi *ftdi) { int ed_number = 4; mutex_lock(&ftdi->u132_lock); while (ed_number-- > 0) { struct u132_target *target = &ftdi->target[ed_number]; target->abandoning = 1; wait:if (target->active == 1) { int command_size = ftdi->command_next - ftdi->command_head; if (command_size < COMMAND_SIZE) { struct u132_command *command = &ftdi->command[ COMMAND_MASK & ftdi->command_next]; command->header = 0x80 | 
(ed_number << 5) | 0x4; command->length = 0x00; command->address = 0x00; command->width = 0x00; command->follows = 0; command->value = 0; command->buffer = &command->value; ftdi->command_next += 1; ftdi_elan_kick_command_queue(ftdi); } else { mutex_unlock(&ftdi->u132_lock); msleep(100); mutex_lock(&ftdi->u132_lock); goto wait; } } } ftdi->recieved = 0; ftdi->expected = 4; ftdi->ed_found = 0; mutex_unlock(&ftdi->u132_lock); } static void ftdi_elan_kick_command_queue(struct usb_ftdi *ftdi) { ftdi_command_queue_work(ftdi, 0); } static void ftdi_elan_command_work(struct work_struct *work) { struct usb_ftdi *ftdi = container_of(work, struct usb_ftdi, command_work.work); if (ftdi->disconnected > 0) { ftdi_elan_put_kref(ftdi); return; } else { int retval = ftdi_elan_command_engine(ftdi); if (retval == -ESHUTDOWN) { ftdi->disconnected += 1; } else if (retval == -ENODEV) { ftdi->disconnected += 1; } else if (retval) dev_err(&ftdi->udev->dev, "command error %d\n", retval); ftdi_command_requeue_work(ftdi, msecs_to_jiffies(10)); return; } } static void ftdi_elan_kick_respond_queue(struct usb_ftdi *ftdi) { ftdi_respond_queue_work(ftdi, 0); } static void ftdi_elan_respond_work(struct work_struct *work) { struct usb_ftdi *ftdi = container_of(work, struct usb_ftdi, respond_work.work); if (ftdi->disconnected > 0) { ftdi_elan_put_kref(ftdi); return; } else { int retval = ftdi_elan_respond_engine(ftdi); if (retval == 0) { } else if (retval == -ESHUTDOWN) { ftdi->disconnected += 1; } else if (retval == -ENODEV) { ftdi->disconnected += 1; } else if (retval == -EILSEQ) { ftdi->disconnected += 1; } else { ftdi->disconnected += 1; dev_err(&ftdi->udev->dev, "respond error %d\n", retval); } if (ftdi->disconnected > 0) { ftdi_elan_abandon_completions(ftdi); ftdi_elan_abandon_targets(ftdi); } ftdi_response_requeue_work(ftdi, msecs_to_jiffies(10)); return; } } /* * the sw_lock is initially held and will be freed * after the FTDI has been synchronized * */ static void 
ftdi_elan_status_work(struct work_struct *work) { struct usb_ftdi *ftdi = container_of(work, struct usb_ftdi, status_work.work); int work_delay_in_msec = 0; if (ftdi->disconnected > 0) { ftdi_elan_put_kref(ftdi); return; } else if (ftdi->synchronized == 0) { down(&ftdi->sw_lock); if (ftdi_elan_synchronize(ftdi) == 0) { ftdi->synchronized = 1; ftdi_command_queue_work(ftdi, 1); ftdi_respond_queue_work(ftdi, 1); up(&ftdi->sw_lock); work_delay_in_msec = 100; } else { dev_err(&ftdi->udev->dev, "synchronize failed\n"); up(&ftdi->sw_lock); work_delay_in_msec = 10 *1000; } } else if (ftdi->stuck_status > 0) { if (ftdi_elan_stuck_waiting(ftdi) == 0) { ftdi->stuck_status = 0; ftdi->synchronized = 0; } else if ((ftdi->stuck_status++ % 60) == 1) { dev_err(&ftdi->udev->dev, "WRONG type of card inserted " "- please remove\n"); } else dev_err(&ftdi->udev->dev, "WRONG type of card inserted " "- checked %d times\n", ftdi->stuck_status); work_delay_in_msec = 100; } else if (ftdi->enumerated == 0) { if (ftdi_elan_enumeratePCI(ftdi) == 0) { ftdi->enumerated = 1; work_delay_in_msec = 250; } else work_delay_in_msec = 1000; } else if (ftdi->initialized == 0) { if (ftdi_elan_setupOHCI(ftdi) == 0) { ftdi->initialized = 1; work_delay_in_msec = 500; } else { dev_err(&ftdi->udev->dev, "initialized failed - trying " "again in 10 seconds\n"); work_delay_in_msec = 1 *1000; } } else if (ftdi->registered == 0) { work_delay_in_msec = 10; if (ftdi_elan_hcd_init(ftdi) == 0) { ftdi->registered = 1; } else dev_err(&ftdi->udev->dev, "register failed\n"); work_delay_in_msec = 250; } else { if (ftdi_elan_checkingPCI(ftdi) == 0) { work_delay_in_msec = 250; } else if (ftdi->controlreg & 0x00400000) { if (ftdi->gone_away > 0) { dev_err(&ftdi->udev->dev, "PCI device eject con" "firmed platform_dev.dev.parent=%p plat" "form_dev.dev=%p\n", ftdi->platform_dev.dev.parent, &ftdi->platform_dev.dev); platform_device_unregister(&ftdi->platform_dev); ftdi->platform_dev.dev.parent = NULL; ftdi->registered = 0; 
ftdi->enumerated = 0; ftdi->card_ejected = 0; ftdi->initialized = 0; ftdi->gone_away = 0; } else ftdi_elan_flush_targets(ftdi); work_delay_in_msec = 250; } else { dev_err(&ftdi->udev->dev, "PCI device has disappeared\n" ); ftdi_elan_cancel_targets(ftdi); work_delay_in_msec = 500; ftdi->enumerated = 0; ftdi->initialized = 0; } } if (ftdi->disconnected > 0) { ftdi_elan_put_kref(ftdi); return; } else { ftdi_status_requeue_work(ftdi, msecs_to_jiffies(work_delay_in_msec)); return; } } /* * file_operations for the jtag interface * * the usage count for the device is incremented on open() * and decremented on release() */ static int ftdi_elan_open(struct inode *inode, struct file *file) { int subminor; struct usb_interface *interface; subminor = iminor(inode); interface = usb_find_interface(&ftdi_elan_driver, subminor); if (!interface) { printk(KERN_ERR "can't find device for minor %d\n", subminor); return -ENODEV; } else { struct usb_ftdi *ftdi = usb_get_intfdata(interface); if (!ftdi) { return -ENODEV; } else { if (down_interruptible(&ftdi->sw_lock)) { return -EINTR; } else { ftdi_elan_get_kref(ftdi); file->private_data = ftdi; return 0; } } } } static int ftdi_elan_release(struct inode *inode, struct file *file) { struct usb_ftdi *ftdi = file->private_data; if (ftdi == NULL) return -ENODEV; up(&ftdi->sw_lock); /* decrement the count on our device */ ftdi_elan_put_kref(ftdi); return 0; } /* * * blocking bulk reads are used to get data from the device * */ static ssize_t ftdi_elan_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { char data[30 *3 + 4]; char *d = data; int m = (sizeof(data) - 1) / 3; int bytes_read = 0; int retry_on_empty = 10; int retry_on_timeout = 5; struct usb_ftdi *ftdi = file->private_data; if (ftdi->disconnected > 0) { return -ENODEV; } data[0] = 0; have:if (ftdi->bulk_in_left > 0) { if (count-- > 0) { char *p = ++ftdi->bulk_in_last + ftdi->bulk_in_buffer; ftdi->bulk_in_left -= 1; if (bytes_read < m) { d += sprintf(d, " 
%02X", 0x000000FF & *p); } else if (bytes_read > m) { } else d += sprintf(d, " .."); if (copy_to_user(buffer++, p, 1)) { return -EFAULT; } else { bytes_read += 1; goto have; } } else return bytes_read; } more:if (count > 0) { int packet_bytes = 0; int retval = usb_bulk_msg(ftdi->udev, usb_rcvbulkpipe(ftdi->udev, ftdi->bulk_in_endpointAddr), ftdi->bulk_in_buffer, ftdi->bulk_in_size, &packet_bytes, 50); if (packet_bytes > 2) { ftdi->bulk_in_left = packet_bytes - 2; ftdi->bulk_in_last = 1; goto have; } else if (retval == -ETIMEDOUT) { if (retry_on_timeout-- > 0) { goto more; } else if (bytes_read > 0) { return bytes_read; } else return retval; } else if (retval == 0) { if (retry_on_empty-- > 0) { goto more; } else return bytes_read; } else return retval; } else return bytes_read; } static void ftdi_elan_write_bulk_callback(struct urb *urb) { struct usb_ftdi *ftdi = urb->context; int status = urb->status; if (status && !(status == -ENOENT || status == -ECONNRESET || status == -ESHUTDOWN)) { dev_err(&ftdi->udev->dev, "urb=%p write bulk status received: %" "d\n", urb, status); } usb_free_coherent(urb->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); } static int fill_buffer_with_all_queued_commands(struct usb_ftdi *ftdi, char *buf, int command_size, int total_size) { int ed_commands = 0; int b = 0; int I = command_size; int i = ftdi->command_head; while (I-- > 0) { struct u132_command *command = &ftdi->command[COMMAND_MASK & i++]; int F = command->follows; u8 *f = command->buffer; if (command->header & 0x80) { ed_commands |= 1 << (0x3 & (command->header >> 5)); } buf[b++] = command->header; buf[b++] = (command->length >> 0) & 0x00FF; buf[b++] = (command->length >> 8) & 0x00FF; buf[b++] = command->address; buf[b++] = command->width; while (F-- > 0) { buf[b++] = *f++; } } return ed_commands; } static int ftdi_elan_total_command_size(struct usb_ftdi *ftdi, int command_size) { int total_size = 0; int I = command_size; int i = ftdi->command_head; 
while (I-- > 0) { struct u132_command *command = &ftdi->command[COMMAND_MASK & i++]; total_size += 5 + command->follows; } return total_size; } static int ftdi_elan_command_engine(struct usb_ftdi *ftdi) { int retval; char *buf; int ed_commands; int total_size; struct urb *urb; int command_size = ftdi->command_next - ftdi->command_head; if (command_size == 0) return 0; total_size = ftdi_elan_total_command_size(ftdi, command_size); urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { dev_err(&ftdi->udev->dev, "could not get a urb to write %d comm" "ands totaling %d bytes to the Uxxx\n", command_size, total_size); return -ENOMEM; } buf = usb_alloc_coherent(ftdi->udev, total_size, GFP_KERNEL, &urb->transfer_dma); if (!buf) { dev_err(&ftdi->udev->dev, "could not get a buffer to write %d c" "ommands totaling %d bytes to the Uxxx\n", command_size, total_size); usb_free_urb(urb); return -ENOMEM; } ed_commands = fill_buffer_with_all_queued_commands(ftdi, buf, command_size, total_size); usb_fill_bulk_urb(urb, ftdi->udev, usb_sndbulkpipe(ftdi->udev, ftdi->bulk_out_endpointAddr), buf, total_size, ftdi_elan_write_bulk_callback, ftdi); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; if (ed_commands) { char diag[40 *3 + 4]; char *d = diag; int m = total_size; u8 *c = buf; int s = (sizeof(diag) - 1) / 3; diag[0] = 0; while (s-- > 0 && m-- > 0) { if (s > 0 || m == 0) { d += sprintf(d, " %02X", *c++); } else d += sprintf(d, " .."); } } retval = usb_submit_urb(urb, GFP_KERNEL); if (retval) { dev_err(&ftdi->udev->dev, "failed %d to submit urb %p to write " "%d commands totaling %d bytes to the Uxxx\n", retval, urb, command_size, total_size); usb_free_coherent(ftdi->udev, total_size, buf, urb->transfer_dma); usb_free_urb(urb); return retval; } usb_free_urb(urb); /* release our reference to this urb, the USB core will eventually free it entirely */ ftdi->command_head += command_size; ftdi_elan_kick_respond_queue(ftdi); return 0; } static void ftdi_elan_do_callback(struct usb_ftdi *ftdi, 
struct u132_target *target, u8 *buffer, int length) { struct urb *urb = target->urb; int halted = target->halted; int skipped = target->skipped; int actual = target->actual; int non_null = target->non_null; int toggle_bits = target->toggle_bits; int error_count = target->error_count; int condition_code = target->condition_code; int repeat_number = target->repeat_number; void (*callback) (void *, struct urb *, u8 *, int, int, int, int, int, int, int, int, int) = target->callback; target->active -= 1; target->callback = NULL; (*callback) (target->endp, urb, buffer, length, toggle_bits, error_count, condition_code, repeat_number, halted, skipped, actual, non_null); }
/*
 * A complete 4-byte ED "set" response header has been assembled.  The low
 * 11 bits of ed_length give the payload size still to come: if non-zero
 * (and the target is not being abandoned) tell the respond engine to keep
 * collecting 4 + payload bytes; otherwise complete the target immediately
 * and reset the parser to expect the next 4-byte header.  All ed_type
 * branches deliberately share the same shape.
 * NOTE(review): "recieved" (sic) is the spelling of the struct field.
 */
static char *have_ed_set_response(struct usb_ftdi *ftdi, struct u132_target *target, u16 ed_length, int ed_number, int ed_type, char *b) { int payload = (ed_length >> 0) & 0x07FF; mutex_lock(&ftdi->u132_lock); target->actual = 0; target->non_null = (ed_length >> 15) & 0x0001; target->repeat_number = (ed_length >> 11) & 0x000F; if (ed_type == 0x02) { if (payload == 0 || target->abandoning > 0) { target->abandoning = 0; mutex_unlock(&ftdi->u132_lock); ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response, payload); ftdi->recieved = 0; ftdi->expected = 4; ftdi->ed_found = 0; return ftdi->response; } else { ftdi->expected = 4 + payload; ftdi->ed_found = 1; mutex_unlock(&ftdi->u132_lock); return b; } } else if (ed_type == 0x03) { if (payload == 0 || target->abandoning > 0) { target->abandoning = 0; mutex_unlock(&ftdi->u132_lock); ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response, payload); ftdi->recieved = 0; ftdi->expected = 4; ftdi->ed_found = 0; return ftdi->response; } else { ftdi->expected = 4 + payload; ftdi->ed_found = 1; mutex_unlock(&ftdi->u132_lock); return b; } } else if (ed_type == 0x01) { target->abandoning = 0; mutex_unlock(&ftdi->u132_lock); ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response, payload); ftdi->recieved = 0; ftdi->expected = 4; ftdi->ed_found = 0; return ftdi->response; }
else { target->abandoning = 0; mutex_unlock(&ftdi->u132_lock); ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response, payload); ftdi->recieved = 0; ftdi->expected = 4; ftdi->ed_found = 0; return ftdi->response; } }
/*
 * ED "get" response: the endpoint did not deliver data.  Record the
 * length-word fields, complete the target with condition code
 * TD_DEVNOTRESP (if a transfer was active) and reset the parser to the
 * next 4-byte header.
 */
static char *have_ed_get_response(struct usb_ftdi *ftdi, struct u132_target *target, u16 ed_length, int ed_number, int ed_type, char *b) { mutex_lock(&ftdi->u132_lock); target->condition_code = TD_DEVNOTRESP; target->actual = (ed_length >> 0) & 0x01FF; target->non_null = (ed_length >> 15) & 0x0001; target->repeat_number = (ed_length >> 11) & 0x000F; mutex_unlock(&ftdi->u132_lock); if (target->active) ftdi_elan_do_callback(ftdi, target, NULL, 0); target->abandoning = 0; ftdi->recieved = 0; ftdi->expected = 4; ftdi->ed_found = 0; return ftdi->response; }
/*
 * The engine tries to empty the FTDI fifo
 *
 * all responses found in the fifo data are dispatched thus
 * the response buffer can only ever hold a maximum sized
 * response from the Uxxx.
 *
 */
/*
 * Parser state: ftdi->recieved counts bytes accumulated into
 * ftdi->response, ftdi->expected is the size of the record currently being
 * assembled (a 4-byte header by default), ftdi->ed_found flags that an ED
 * payload is in progress; b walks the response buffer.
 */
static int ftdi_elan_respond_engine(struct usb_ftdi *ftdi) { u8 *b = ftdi->response + ftdi->recieved; int bytes_read = 0; int retry_on_empty = 1; int retry_on_timeout = 3; int empty_packets = 0; read:{ int packet_bytes = 0; int retval = usb_bulk_msg(ftdi->udev, usb_rcvbulkpipe(ftdi->udev, ftdi->bulk_in_endpointAddr), ftdi->bulk_in_buffer, ftdi->bulk_in_size, &packet_bytes, 500); char diag[30 *3 + 4]; char *d = diag; int m = packet_bytes; u8 *c = ftdi->bulk_in_buffer; int s = (sizeof(diag) - 1) / 3; diag[0] = 0; while (s-- > 0 && m-- > 0) { if (s > 0 || m == 0) { d += sprintf(d, " %02X", *c++); } else d += sprintf(d, " .."); } if (packet_bytes > 2) { ftdi->bulk_in_left = packet_bytes - 2; ftdi->bulk_in_last = 1; goto have; } else if (retval == -ETIMEDOUT) { if (retry_on_timeout-- > 0) { dev_err(&ftdi->udev->dev, "TIMED OUT with packe" "t_bytes = %d with total %d bytes%s\n", packet_bytes, bytes_read, diag); goto more; } else if (bytes_read > 0) { dev_err(&ftdi->udev->dev, "ONLY %d bytes%s\n", bytes_read, diag);
/* (continues ftdi_elan_respond_engine)  NOTE(review): timeout and error
 * exhaustion paths below return -ENOMEM rather than -ETIMEDOUT; that looks
 * like the wrong errno but is preserved as-is. */
return -ENOMEM; } else { dev_err(&ftdi->udev->dev, "TIMED OUT with packe" "t_bytes = %d with total %d bytes%s\n", packet_bytes, bytes_read, diag); return -ENOMEM; } } else if (retval == -EILSEQ) { dev_err(&ftdi->udev->dev, "error = %d with packet_bytes" " = %d with total %d bytes%s\n", retval, packet_bytes, bytes_read, diag); return retval; } else if (retval) { dev_err(&ftdi->udev->dev, "error = %d with packet_bytes" " = %d with total %d bytes%s\n", retval, packet_bytes, bytes_read, diag); return retval; } else if (packet_bytes == 2) { unsigned char s0 = ftdi->bulk_in_buffer[0]; unsigned char s1 = ftdi->bulk_in_buffer[1]; empty_packets += 1; if (s0 == 0x31 && s1 == 0x60) { if (retry_on_empty-- > 0) { goto more; } else return 0; } else if (s0 == 0x31 && s1 == 0x00) { if (retry_on_empty-- > 0) { goto more; } else return 0; } else { if (retry_on_empty-- > 0) { goto more; } else return 0; } } else if (packet_bytes == 1) { if (retry_on_empty-- > 0) { goto more; } else return 0; } else { if (retry_on_empty-- > 0) { goto more; } else return 0; } } more:{ goto read; } have:if (ftdi->bulk_in_left > 0) { u8 c = ftdi->bulk_in_buffer[++ftdi->bulk_in_last]; bytes_read += 1; ftdi->bulk_in_left -= 1; if (ftdi->recieved == 0 && c == 0xFF) { goto have; } else *b++ = c; if (++ftdi->recieved < ftdi->expected) { goto have; } else if (ftdi->ed_found) { int ed_number = (ftdi->response[0] >> 5) & 0x03; u16 ed_length = (ftdi->response[2] << 8) | ftdi->response[1]; struct u132_target *target = &ftdi->target[ed_number]; int payload = (ed_length >> 0) & 0x07FF; char diag[30 *3 + 4]; char *d = diag; int m = payload; u8 *c = 4 + ftdi->response; int s = (sizeof(diag) - 1) / 3; diag[0] = 0; while (s-- > 0 && m-- > 0) { if (s > 0 || m == 0) { d += sprintf(d, " %02X", *c++); } else d += sprintf(d, " .."); } ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response, payload); ftdi->recieved = 0; ftdi->expected = 4; ftdi->ed_found = 0; b = ftdi->response; goto have; } else if (ftdi->expected == 8) { u8
/* An 8-byte register/config/memory read response: reassemble the
 * little-endian 32-bit value from bytes 4..7 and complete the matching
 * respond slot. */
buscmd; int respond_head = ftdi->respond_head++; struct u132_respond *respond = &ftdi->respond[ RESPOND_MASK & respond_head]; u32 data = ftdi->response[7]; data <<= 8; data |= ftdi->response[6]; data <<= 8; data |= ftdi->response[5]; data <<= 8; data |= ftdi->response[4]; *respond->value = data; *respond->result = 0; complete(&respond->wait_completion); ftdi->recieved = 0; ftdi->expected = 4; ftdi->ed_found = 0; b = ftdi->response; buscmd = (ftdi->response[0] >> 0) & 0x0F; if (buscmd == 0x00) { } else if (buscmd == 0x02) { } else if (buscmd == 0x06) { } else if (buscmd == 0x0A) { } else dev_err(&ftdi->udev->dev, "Uxxx unknown(%0X) va" "lue = %08X\n", buscmd, data); goto have; } else { if ((ftdi->response[0] & 0x80) == 0x00) { ftdi->expected = 8; goto have; } else { int ed_number = (ftdi->response[0] >> 5) & 0x03; int ed_type = (ftdi->response[0] >> 0) & 0x03; u16 ed_length = (ftdi->response[2] << 8) | ftdi->response[1]; struct u132_target *target = &ftdi->target[ ed_number]; target->halted = (ftdi->response[0] >> 3) & 0x01; target->skipped = (ftdi->response[0] >> 2) & 0x01; target->toggle_bits = (ftdi->response[3] >> 6) & 0x03; target->error_count = (ftdi->response[3] >> 4) & 0x03; target->condition_code = (ftdi->response[ 3] >> 0) & 0x0F; if ((ftdi->response[0] & 0x10) == 0x00) { b = have_ed_set_response(ftdi, target, ed_length, ed_number, ed_type, b); goto have; } else { b = have_ed_get_response(ftdi, target, ed_length, ed_number, ed_type, b); goto have; } } } } else goto more; }
/*
 * create a urb, and a buffer for it, and copy the data to the urb
 *
 */
static ssize_t ftdi_elan_write(struct file *file, const char __user *user_buffer, size_t count, loff_t *ppos) { int retval = 0; struct urb *urb; char *buf; struct usb_ftdi *ftdi = file->private_data; if (ftdi->disconnected > 0) { return -ENODEV; } if (count == 0) { goto exit; } urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { retval = -ENOMEM; goto error_1; } buf = usb_alloc_coherent(ftdi->udev, count, GFP_KERNEL,
&urb->transfer_dma); if (!buf) { retval = -ENOMEM; goto error_2; } if (copy_from_user(buf, user_buffer, count)) { retval = -EFAULT; goto error_3; } usb_fill_bulk_urb(urb, ftdi->udev, usb_sndbulkpipe(ftdi->udev, ftdi->bulk_out_endpointAddr), buf, count, ftdi_elan_write_bulk_callback, ftdi); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; retval = usb_submit_urb(urb, GFP_KERNEL); if (retval) { dev_err(&ftdi->udev->dev, "failed submitting write urb, error %" "d\n", retval); goto error_3; } usb_free_urb(urb); exit: return count; error_3: usb_free_coherent(ftdi->udev, count, buf, urb->transfer_dma); error_2: usb_free_urb(urb); error_1: return retval; }
/* Char-device entry points for the JTAG interface. */
static const struct file_operations ftdi_elan_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = ftdi_elan_read, .write = ftdi_elan_write, .open = ftdi_elan_open, .release = ftdi_elan_release, };
/*
 * usb class driver info in order to get a minor number from the usb core,
 * and to have the device registered with the driver core
 */
static struct usb_class_driver ftdi_elan_jtag_class = { .name = "ftdi-%d-jtag", .fops = &ftdi_elan_fops, .minor_base = USB_FTDI_ELAN_MINOR_BASE, };
/*
 * the following definitions are for the
 * ELAN FPGA state machgine processor that
 * lies on the other side of the FTDI chip
 */
#define cPCIu132rd 0x0
#define cPCIu132wr 0x1
#define cPCIiord 0x2
#define cPCIiowr 0x3
#define cPCImemrd 0x6
#define cPCImemwr 0x7
#define cPCIcfgrd 0xA
#define cPCIcfgwr 0xB
#define cPCInull 0xF
#define cU132cmd_status 0x0
#define cU132flash 0x1
#define cPIDsetup 0x0
#define cPIDout 0x1
#define cPIDin 0x2
#define cPIDinonce 0x3
#define cCCnoerror 0x0
#define cCCcrc 0x1
#define cCCbitstuff 0x2
#define cCCtoggle 0x3
#define cCCstall 0x4
#define cCCnoresp 0x5
#define cCCbadpid1 0x6
#define cCCbadpid2 0x7
#define cCCdataoverrun 0x8
#define cCCdataunderrun 0x9
#define cCCbuffoverrun 0xC
#define cCCbuffunderrun 0xD
#define cCCnotaccessed 0xF
/*
 * Queue a 4-byte write (opcode cPCIu132wr) of the Uxxx control register.
 * Busy-waits with msleep(100) for a free slot whenever the command ring
 * is full.
 */
static int ftdi_elan_write_reg(struct usb_ftdi *ftdi, u32 data) { wait:if
(ftdi->disconnected > 0) { return -ENODEV; } else { int command_size; mutex_lock(&ftdi->u132_lock); command_size = ftdi->command_next - ftdi->command_head; if (command_size < COMMAND_SIZE) { struct u132_command *command = &ftdi->command[ COMMAND_MASK & ftdi->command_next]; command->header = 0x00 | cPCIu132wr; command->length = 0x04; command->address = 0x00; command->width = 0x00; command->follows = 4; command->value = data; command->buffer = &command->value; ftdi->command_next += 1; ftdi_elan_kick_command_queue(ftdi); mutex_unlock(&ftdi->u132_lock); return 0; } else { mutex_unlock(&ftdi->u132_lock); msleep(100); goto wait; } } }
/*
 * Queue a PCI configuration-space write (opcode cPCIcfgwr) of 'data' at
 * config_offset; 'width' selects the access size.  Same full-ring
 * busy-wait as ftdi_elan_write_reg().
 */
static int ftdi_elan_write_config(struct usb_ftdi *ftdi, int config_offset, u8 width, u32 data) { u8 addressofs = config_offset / 4; wait:if (ftdi->disconnected > 0) { return -ENODEV; } else { int command_size; mutex_lock(&ftdi->u132_lock); command_size = ftdi->command_next - ftdi->command_head; if (command_size < COMMAND_SIZE) { struct u132_command *command = &ftdi->command[ COMMAND_MASK & ftdi->command_next]; command->header = 0x00 | (cPCIcfgwr & 0x0F); command->length = 0x04; command->address = addressofs; command->width = 0x00 | (width & 0x0F); command->follows = 4; command->value = data; command->buffer = &command->value; ftdi->command_next += 1; ftdi_elan_kick_command_queue(ftdi); mutex_unlock(&ftdi->u132_lock); return 0; } else { mutex_unlock(&ftdi->u132_lock); msleep(100); goto wait; } } }
/*
 * Queue a PCI memory-space write (opcode cPCImemwr) of 'data' at
 * mem_offset; 'width' selects the access size.
 */
static int ftdi_elan_write_pcimem(struct usb_ftdi *ftdi, int mem_offset, u8 width, u32 data) { u8 addressofs = mem_offset / 4; wait:if (ftdi->disconnected > 0) { return -ENODEV; } else { int command_size; mutex_lock(&ftdi->u132_lock); command_size = ftdi->command_next - ftdi->command_head; if (command_size < COMMAND_SIZE) { struct u132_command *command = &ftdi->command[ COMMAND_MASK & ftdi->command_next]; command->header = 0x00 | (cPCImemwr & 0x0F); command->length = 0x04; command->address = addressofs; command->width = 0x00 | (width & 0x0F);
command->follows = 4; command->value = data; command->buffer = &command->value; ftdi->command_next += 1; ftdi_elan_kick_command_queue(ftdi); mutex_unlock(&ftdi->u132_lock); return 0; } else { mutex_unlock(&ftdi->u132_lock); msleep(100); goto wait; } } }
/* Exported to the u132-hcd platform driver. */
int usb_ftdi_elan_write_pcimem(struct platform_device *pdev, int mem_offset, u8 width, u32 data) { struct usb_ftdi *ftdi = platform_device_to_usb_ftdi(pdev); return ftdi_elan_write_pcimem(ftdi, mem_offset, width, data); } EXPORT_SYMBOL_GPL(usb_ftdi_elan_write_pcimem);
/*
 * Queue a read (opcode cPCIu132rd) of the Uxxx status register together
 * with a respond slot, then block until the respond engine completes it
 * and fills *data.  Waits for space in both rings.
 */
static int ftdi_elan_read_reg(struct usb_ftdi *ftdi, u32 *data) { wait:if (ftdi->disconnected > 0) { return -ENODEV; } else { int command_size; int respond_size; mutex_lock(&ftdi->u132_lock); command_size = ftdi->command_next - ftdi->command_head; respond_size = ftdi->respond_next - ftdi->respond_head; if (command_size < COMMAND_SIZE && respond_size < RESPOND_SIZE) { struct u132_command *command = &ftdi->command[ COMMAND_MASK & ftdi->command_next]; struct u132_respond *respond = &ftdi->respond[ RESPOND_MASK & ftdi->respond_next]; int result = -ENODEV; respond->result = &result; respond->header = command->header = 0x00 | cPCIu132rd; command->length = 0x04; respond->address = command->address = cU132cmd_status; command->width = 0x00; command->follows = 0; command->value = 0; command->buffer = NULL; respond->value = data; init_completion(&respond->wait_completion); ftdi->command_next += 1; ftdi->respond_next += 1; ftdi_elan_kick_command_queue(ftdi); mutex_unlock(&ftdi->u132_lock); wait_for_completion(&respond->wait_completion); return result; } else { mutex_unlock(&ftdi->u132_lock); msleep(100); goto wait; } } }
/*
 * Queue a PCI configuration-space read (opcode cPCIcfgrd) at config_offset
 * and block until the respond engine delivers the value into *data.
 */
static int ftdi_elan_read_config(struct usb_ftdi *ftdi, int config_offset, u8 width, u32 *data) { u8 addressofs = config_offset / 4; wait:if (ftdi->disconnected > 0) { return -ENODEV; } else { int command_size; int respond_size; mutex_lock(&ftdi->u132_lock); command_size = ftdi->command_next - ftdi->command_head; respond_size = ftdi->respond_next
- ftdi->respond_head; if (command_size < COMMAND_SIZE && respond_size < RESPOND_SIZE) { struct u132_command *command = &ftdi->command[ COMMAND_MASK & ftdi->command_next]; struct u132_respond *respond = &ftdi->respond[ RESPOND_MASK & ftdi->respond_next]; int result = -ENODEV; respond->result = &result; respond->header = command->header = 0x00 | (cPCIcfgrd & 0x0F); command->length = 0x04; respond->address = command->address = addressofs; command->width = 0x00 | (width & 0x0F); command->follows = 0; command->value = 0; command->buffer = NULL; respond->value = data; init_completion(&respond->wait_completion); ftdi->command_next += 1; ftdi->respond_next += 1; ftdi_elan_kick_command_queue(ftdi); mutex_unlock(&ftdi->u132_lock); wait_for_completion(&respond->wait_completion); return result; } else { mutex_unlock(&ftdi->u132_lock); msleep(100); goto wait; } } }
/*
 * Queue a PCI memory-space read (opcode cPCImemrd) at mem_offset plus a
 * respond slot, then block until the respond engine completes it and
 * fills *data.
 */
static int ftdi_elan_read_pcimem(struct usb_ftdi *ftdi, int mem_offset, u8 width, u32 *data) { u8 addressofs = mem_offset / 4; wait:if (ftdi->disconnected > 0) { return -ENODEV; } else { int command_size; int respond_size; mutex_lock(&ftdi->u132_lock); command_size = ftdi->command_next - ftdi->command_head; respond_size = ftdi->respond_next - ftdi->respond_head; if (command_size < COMMAND_SIZE && respond_size < RESPOND_SIZE) { struct u132_command *command = &ftdi->command[ COMMAND_MASK & ftdi->command_next]; struct u132_respond *respond = &ftdi->respond[ RESPOND_MASK & ftdi->respond_next]; int result = -ENODEV; respond->result = &result; respond->header = command->header = 0x00 | (cPCImemrd & 0x0F); command->length = 0x04; respond->address = command->address = addressofs; command->width = 0x00 | (width & 0x0F); command->follows = 0; command->value = 0; command->buffer = NULL; respond->value = data; init_completion(&respond->wait_completion); ftdi->command_next += 1; ftdi->respond_next += 1; ftdi_elan_kick_command_queue(ftdi); mutex_unlock(&ftdi->u132_lock); wait_for_completion(&respond->wait_completion); return
result; } else { mutex_unlock(&ftdi->u132_lock); msleep(100); goto wait; } } }
/* Exported to the u132-hcd platform driver; fails until the Uxxx has been
 * initialized. */
int usb_ftdi_elan_read_pcimem(struct platform_device *pdev, int mem_offset, u8 width, u32 *data) { struct usb_ftdi *ftdi = platform_device_to_usb_ftdi(pdev); if (ftdi->initialized == 0) { return -ENODEV; } else return ftdi_elan_read_pcimem(ftdi, mem_offset, width, data); } EXPORT_SYMBOL_GPL(usb_ftdi_elan_read_pcimem);
/*
 * Queue a SETUP transaction on ED (ed_number is 1-based): the 8-byte setup
 * packet is sent from urb->setup_packet; 'callback' is stored on the
 * target and fired by the respond engine when the result arrives.  Waits
 * (msleep 100) whenever the command ring is full.
 */
static int ftdi_elan_edset_setup(struct usb_ftdi *ftdi, u8 ed_number, void *endp, struct urb *urb, u8 address, u8 ep_number, u8 toggle_bits, void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null)) { u8 ed = ed_number - 1; wait:if (ftdi->disconnected > 0) { return -ENODEV; } else if (ftdi->initialized == 0) { return -ENODEV; } else { int command_size; mutex_lock(&ftdi->u132_lock); command_size = ftdi->command_next - ftdi->command_head; if (command_size < COMMAND_SIZE) { struct u132_target *target = &ftdi->target[ed]; struct u132_command *command = &ftdi->command[ COMMAND_MASK & ftdi->command_next]; command->header = 0x80 | (ed << 5); command->length = 0x8007; command->address = (toggle_bits << 6) | (ep_number << 2) | (address << 0); command->width = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)); command->follows = 8; command->value = 0; command->buffer = urb->setup_packet; target->callback = callback; target->endp = endp; target->urb = urb; target->active = 1; ftdi->command_next += 1; ftdi_elan_kick_command_queue(ftdi); mutex_unlock(&ftdi->u132_lock); return 0; } else { mutex_unlock(&ftdi->u132_lock); msleep(100); goto wait; } } }
/* Exported wrapper for the u132-hcd platform driver. */
int usb_ftdi_elan_edset_setup(struct platform_device *pdev, u8 ed_number, void *endp, struct urb *urb, u8 address, u8 ep_number, u8 toggle_bits, void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int
skipped, int actual, int non_null)) { struct usb_ftdi *ftdi = platform_device_to_usb_ftdi(pdev); return ftdi_elan_edset_setup(ftdi, ed_number, endp, urb, address, ep_number, toggle_bits, callback); } EXPORT_SYMBOL_GPL(usb_ftdi_elan_edset_setup);
/*
 * Queue an IN transfer on the ED (header 0x82).  The length field encodes
 * 0x8000 | (len - 1), capped at 1023 bytes per command; a zero remaining
 * length is encoded as 0x0000.
 */
static int ftdi_elan_edset_input(struct usb_ftdi *ftdi, u8 ed_number, void *endp, struct urb *urb, u8 address, u8 ep_number, u8 toggle_bits, void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null)) { u8 ed = ed_number - 1; wait:if (ftdi->disconnected > 0) { return -ENODEV; } else if (ftdi->initialized == 0) { return -ENODEV; } else { int command_size; mutex_lock(&ftdi->u132_lock); command_size = ftdi->command_next - ftdi->command_head; if (command_size < COMMAND_SIZE) { struct u132_target *target = &ftdi->target[ed]; struct u132_command *command = &ftdi->command[ COMMAND_MASK & ftdi->command_next]; u32 remaining_length = urb->transfer_buffer_length - urb->actual_length; command->header = 0x82 | (ed << 5); if (remaining_length == 0) { command->length = 0x0000; } else if (remaining_length > 1024) { command->length = 0x8000 | 1023; } else command->length = 0x8000 | (remaining_length - 1); command->address = (toggle_bits << 6) | (ep_number << 2) | (address << 0); command->width = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)); command->follows = 0; command->value = 0; command->buffer = NULL; target->callback = callback; target->endp = endp; target->urb = urb; target->active = 1; ftdi->command_next += 1; ftdi_elan_kick_command_queue(ftdi); mutex_unlock(&ftdi->u132_lock); return 0; } else { mutex_unlock(&ftdi->u132_lock); msleep(100); goto wait; } } }
/* Exported wrapper for the u132-hcd platform driver. */
int usb_ftdi_elan_edset_input(struct platform_device *pdev, u8 ed_number, void *endp, struct urb *urb, u8 address, u8 ep_number, u8 toggle_bits, void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, int toggle_bits, int
error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null)) { struct usb_ftdi *ftdi = platform_device_to_usb_ftdi(pdev); return ftdi_elan_edset_input(ftdi, ed_number, endp, urb, address, ep_number, toggle_bits, callback); } EXPORT_SYMBOL_GPL(usb_ftdi_elan_edset_input);
/*
 * Queue a zero-length OUT on the ED (header 0x81, length 0x0000).
 */
static int ftdi_elan_edset_empty(struct usb_ftdi *ftdi, u8 ed_number, void *endp, struct urb *urb, u8 address, u8 ep_number, u8 toggle_bits, void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null)) { u8 ed = ed_number - 1; wait:if (ftdi->disconnected > 0) { return -ENODEV; } else if (ftdi->initialized == 0) { return -ENODEV; } else { int command_size; mutex_lock(&ftdi->u132_lock); command_size = ftdi->command_next - ftdi->command_head; if (command_size < COMMAND_SIZE) { struct u132_target *target = &ftdi->target[ed]; struct u132_command *command = &ftdi->command[ COMMAND_MASK & ftdi->command_next]; command->header = 0x81 | (ed << 5); command->length = 0x0000; command->address = (toggle_bits << 6) | (ep_number << 2) | (address << 0); command->width = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)); command->follows = 0; command->value = 0; command->buffer = NULL; target->callback = callback; target->endp = endp; target->urb = urb; target->active = 1; ftdi->command_next += 1; ftdi_elan_kick_command_queue(ftdi); mutex_unlock(&ftdi->u132_lock); return 0; } else { mutex_unlock(&ftdi->u132_lock); msleep(100); goto wait; } } }
/* Exported wrapper for the u132-hcd platform driver. */
int usb_ftdi_elan_edset_empty(struct platform_device *pdev, u8 ed_number, void *endp, struct urb *urb, u8 address, u8 ep_number, u8 toggle_bits, void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null)) { struct usb_ftdi *ftdi = platform_device_to_usb_ftdi(pdev); return
ftdi_elan_edset_empty(ftdi, ed_number, endp, urb, address, ep_number, toggle_bits, callback); } EXPORT_SYMBOL_GPL(usb_ftdi_elan_edset_empty);
/*
 * Queue an OUT transfer on the ED (header 0x81) carrying up to 1024 bytes
 * of payload taken from urb->transfer_buffer at actual_length.
 * NOTE(review): the data[] hexdump assembled in the loop is never emitted
 * anywhere - dead code, preserved as-is.
 */
static int ftdi_elan_edset_output(struct usb_ftdi *ftdi, u8 ed_number, void *endp, struct urb *urb, u8 address, u8 ep_number, u8 toggle_bits, void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null)) { u8 ed = ed_number - 1; wait:if (ftdi->disconnected > 0) { return -ENODEV; } else if (ftdi->initialized == 0) { return -ENODEV; } else { int command_size; mutex_lock(&ftdi->u132_lock); command_size = ftdi->command_next - ftdi->command_head; if (command_size < COMMAND_SIZE) { u8 *b; u16 urb_size; int i = 0; char data[30 *3 + 4]; char *d = data; int m = (sizeof(data) - 1) / 3; int l = 0; struct u132_target *target = &ftdi->target[ed]; struct u132_command *command = &ftdi->command[ COMMAND_MASK & ftdi->command_next]; command->header = 0x81 | (ed << 5); command->address = (toggle_bits << 6) | (ep_number << 2) | (address << 0); command->width = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)); command->follows = min_t(u32, 1024, urb->transfer_buffer_length - urb->actual_length); command->value = 0; command->buffer = urb->transfer_buffer + urb->actual_length; command->length = 0x8000 | (command->follows - 1); b = command->buffer; urb_size = command->follows; data[0] = 0; while (urb_size-- > 0) { if (i > m) { } else if (i++ < m) { int w = sprintf(d, " %02X", *b++); d += w; l += w; } else d += sprintf(d, " .."); } target->callback = callback; target->endp = endp; target->urb = urb; target->active = 1; ftdi->command_next += 1; ftdi_elan_kick_command_queue(ftdi); mutex_unlock(&ftdi->u132_lock); return 0; } else { mutex_unlock(&ftdi->u132_lock); msleep(100); goto wait; } } }
/* Exported wrapper for the u132-hcd platform driver. */
int usb_ftdi_elan_edset_output(struct platform_device *pdev, u8 ed_number, void *endp, struct urb *urb, u8 address, u8
ep_number, u8 toggle_bits, void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null)) { struct usb_ftdi *ftdi = platform_device_to_usb_ftdi(pdev); return ftdi_elan_edset_output(ftdi, ed_number, endp, urb, address, ep_number, toggle_bits, callback); } EXPORT_SYMBOL_GPL(usb_ftdi_elan_edset_output);
/*
 * Queue a single-shot IN on the ED (header 0x83); same length encoding as
 * ftdi_elan_edset_input().
 */
static int ftdi_elan_edset_single(struct usb_ftdi *ftdi, u8 ed_number, void *endp, struct urb *urb, u8 address, u8 ep_number, u8 toggle_bits, void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null)) { u8 ed = ed_number - 1; wait:if (ftdi->disconnected > 0) { return -ENODEV; } else if (ftdi->initialized == 0) { return -ENODEV; } else { int command_size; mutex_lock(&ftdi->u132_lock); command_size = ftdi->command_next - ftdi->command_head; if (command_size < COMMAND_SIZE) { u32 remaining_length = urb->transfer_buffer_length - urb->actual_length; struct u132_target *target = &ftdi->target[ed]; struct u132_command *command = &ftdi->command[ COMMAND_MASK & ftdi->command_next]; command->header = 0x83 | (ed << 5); if (remaining_length == 0) { command->length = 0x0000; } else if (remaining_length > 1024) { command->length = 0x8000 | 1023; } else command->length = 0x8000 | (remaining_length - 1); command->address = (toggle_bits << 6) | (ep_number << 2) | (address << 0); command->width = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)); command->follows = 0; command->value = 0; command->buffer = NULL; target->callback = callback; target->endp = endp; target->urb = urb; target->active = 1; ftdi->command_next += 1; ftdi_elan_kick_command_queue(ftdi); mutex_unlock(&ftdi->u132_lock); return 0; } else { mutex_unlock(&ftdi->u132_lock); msleep(100); goto wait; } } }
/* Exported wrapper for the u132-hcd platform driver. */
int usb_ftdi_elan_edset_single(struct platform_device
*pdev, u8 ed_number, void *endp, struct urb *urb, u8 address, u8 ep_number, u8 toggle_bits, void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null)) { struct usb_ftdi *ftdi = platform_device_to_usb_ftdi(pdev); return ftdi_elan_edset_single(ftdi, ed_number, endp, urb, address, ep_number, toggle_bits, callback); } EXPORT_SYMBOL_GPL(usb_ftdi_elan_edset_single);
/*
 * Abandon the target for this ED and, while a transfer is still active,
 * queue a "skip" command (header flag 0x4) so the Uxxx drops it; retries
 * (dropping and re-taking the lock around msleep) while the command ring
 * is full.
 */
static int ftdi_elan_edset_flush(struct usb_ftdi *ftdi, u8 ed_number, void *endp) { u8 ed = ed_number - 1; if (ftdi->disconnected > 0) { return -ENODEV; } else if (ftdi->initialized == 0) { return -ENODEV; } else { struct u132_target *target = &ftdi->target[ed]; mutex_lock(&ftdi->u132_lock); if (target->abandoning > 0) { mutex_unlock(&ftdi->u132_lock); return 0; } else { target->abandoning = 1; wait_1:if (target->active == 1) { int command_size = ftdi->command_next - ftdi->command_head; if (command_size < COMMAND_SIZE) { struct u132_command *command = &ftdi->command[COMMAND_MASK & ftdi->command_next]; command->header = 0x80 | (ed << 5) | 0x4; command->length = 0x00; command->address = 0x00; command->width = 0x00; command->follows = 0; command->value = 0; command->buffer = &command->value; ftdi->command_next += 1; ftdi_elan_kick_command_queue(ftdi); } else { mutex_unlock(&ftdi->u132_lock); msleep(100); mutex_lock(&ftdi->u132_lock); goto wait_1; } } mutex_unlock(&ftdi->u132_lock); return 0; } } }
/* Exported wrapper for the u132-hcd platform driver. */
int usb_ftdi_elan_edset_flush(struct platform_device *pdev, u8 ed_number, void *endp) { struct usb_ftdi *ftdi = platform_device_to_usb_ftdi(pdev); return ftdi_elan_edset_flush(ftdi, ed_number, endp); } EXPORT_SYMBOL_GPL(usb_ftdi_elan_edset_flush);
/*
 * Drain the FTDI bulk-IN fifo until only the 2-byte modem status
 * (0x31 0x60) remains.  NOTE(review): the >2-byte path loops back to
 * 'more' without decrementing any retry counter, so a device that keeps
 * producing data keeps this loop running; preserved as-is.
 */
static int ftdi_elan_flush_input_fifo(struct usb_ftdi *ftdi) { int retry_on_empty = 10; int retry_on_timeout = 5; int retry_on_status = 20; more:{ int packet_bytes = 0; int retval = usb_bulk_msg(ftdi->udev, usb_rcvbulkpipe(ftdi->udev,
ftdi->bulk_in_endpointAddr), ftdi->bulk_in_buffer, ftdi->bulk_in_size, &packet_bytes, 100); if (packet_bytes > 2) { char diag[30 *3 + 4]; char *d = diag; int m = (sizeof(diag) - 1) / 3; char *b = ftdi->bulk_in_buffer; int bytes_read = 0; diag[0] = 0; while (packet_bytes-- > 0) { char c = *b++; if (bytes_read < m) { d += sprintf(d, " %02X", 0x000000FF & c); } else if (bytes_read > m) { } else d += sprintf(d, " .."); bytes_read += 1; continue; } goto more; } else if (packet_bytes > 1) { char s1 = ftdi->bulk_in_buffer[0]; char s2 = ftdi->bulk_in_buffer[1]; if (s1 == 0x31 && s2 == 0x60) { return 0; } else if (retry_on_status-- > 0) { goto more; } else { dev_err(&ftdi->udev->dev, "STATUS ERROR retry l" "imit reached\n"); return -EFAULT; } } else if (packet_bytes > 0) { char b1 = ftdi->bulk_in_buffer[0]; dev_err(&ftdi->udev->dev, "only one byte flushed from F" "TDI = %02X\n", b1); if (retry_on_status-- > 0) { goto more; } else { dev_err(&ftdi->udev->dev, "STATUS ERROR retry l" "imit reached\n"); return -EFAULT; } } else if (retval == -ETIMEDOUT) { if (retry_on_timeout-- > 0) { goto more; } else { dev_err(&ftdi->udev->dev, "TIMED OUT retry limi" "t reached\n"); return -ENOMEM; } } else if (retval == 0) { if (retry_on_empty-- > 0) { goto more; } else { dev_err(&ftdi->udev->dev, "empty packet retry l" "imit reached\n"); return -ENOMEM; } } else { dev_err(&ftdi->udev->dev, "error = %d\n", retval); return retval; } } return -1; }
/*
 * send the long flush sequence
 *
 */
static int ftdi_elan_synchronize_flush(struct usb_ftdi *ftdi) { int retval; struct urb *urb; char *buf; int I = 257; int i = 0; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { dev_err(&ftdi->udev->dev, "could not alloc a urb for flush sequ" "ence\n"); return -ENOMEM; } buf = usb_alloc_coherent(ftdi->udev, I, GFP_KERNEL, &urb->transfer_dma); if (!buf) { dev_err(&ftdi->udev->dev, "could not get a buffer for flush seq" "uence\n"); usb_free_urb(urb); return -ENOMEM; } while (I-- > 0) buf[i++] = 0x55;
usb_fill_bulk_urb(urb, ftdi->udev, usb_sndbulkpipe(ftdi->udev, ftdi->bulk_out_endpointAddr), buf, i, ftdi_elan_write_bulk_callback, ftdi); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; retval = usb_submit_urb(urb, GFP_KERNEL); if (retval) { dev_err(&ftdi->udev->dev, "failed to submit urb containing the " "flush sequence\n"); usb_free_coherent(ftdi->udev, i, buf, urb->transfer_dma); usb_free_urb(urb); return -ENOMEM; } usb_free_urb(urb); return 0; }
/*
 * send the reset sequence
 *
 */
static int ftdi_elan_synchronize_reset(struct usb_ftdi *ftdi) { int retval; struct urb *urb; char *buf; int I = 4; int i = 0; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { dev_err(&ftdi->udev->dev, "could not get a urb for the reset se" "quence\n"); return -ENOMEM; } buf = usb_alloc_coherent(ftdi->udev, I, GFP_KERNEL, &urb->transfer_dma); if (!buf) { dev_err(&ftdi->udev->dev, "could not get a buffer for the reset" " sequence\n"); usb_free_urb(urb); return -ENOMEM; } buf[i++] = 0x55; buf[i++] = 0xAA; buf[i++] = 0x5A; buf[i++] = 0xA5; usb_fill_bulk_urb(urb, ftdi->udev, usb_sndbulkpipe(ftdi->udev, ftdi->bulk_out_endpointAddr), buf, i, ftdi_elan_write_bulk_callback, ftdi); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; retval = usb_submit_urb(urb, GFP_KERNEL); if (retval) { dev_err(&ftdi->udev->dev, "failed to submit urb containing the " "reset sequence\n"); usb_free_coherent(ftdi->udev, i, buf, urb->transfer_dma); usb_free_urb(urb); return -ENOMEM; } usb_free_urb(urb); return 0; }
/*
 * Handshake with the ELAN FPGA: flush the input fifo, send the 0x55 flush
 * and 55/AA/5A/A5 reset sequences, then read until the 0x7E sync marker
 * appears.  All waiting is bounded by the retry counters; returns -EFAULT
 * once long_stop is exhausted.
 */
static int ftdi_elan_synchronize(struct usb_ftdi *ftdi) { int retval; int long_stop = 10; int retry_on_timeout = 5; int retry_on_empty = 10; int err_count = 0; retval = ftdi_elan_flush_input_fifo(ftdi); if (retval) return retval; ftdi->bulk_in_left = 0; ftdi->bulk_in_last = -1; while (long_stop-- > 0) { int read_stop; int read_stuck; retval = ftdi_elan_synchronize_flush(ftdi); if (retval) return retval; retval = ftdi_elan_flush_input_fifo(ftdi); if (retval) return retval; reset:retval =
ftdi_elan_synchronize_reset(ftdi); if (retval) return retval; read_stop = 100; read_stuck = 10; read:{ int packet_bytes = 0; retval = usb_bulk_msg(ftdi->udev, usb_rcvbulkpipe(ftdi->udev, ftdi->bulk_in_endpointAddr), ftdi->bulk_in_buffer, ftdi->bulk_in_size, &packet_bytes, 500); if (packet_bytes > 2) { char diag[30 *3 + 4]; char *d = diag; int m = (sizeof(diag) - 1) / 3; char *b = ftdi->bulk_in_buffer; int bytes_read = 0; unsigned char c = 0; diag[0] = 0; while (packet_bytes-- > 0) { c = *b++; if (bytes_read < m) { d += sprintf(d, " %02X", c); } else if (bytes_read > m) { } else d += sprintf(d, " .."); bytes_read += 1; continue; } if (c == 0x7E) { return 0; } else { if (c == 0x55) { goto read; } else if (read_stop-- > 0) { goto read; } else { dev_err(&ftdi->udev->dev, "retr" "y limit reached\n"); continue; } } } else if (packet_bytes > 1) { unsigned char s1 = ftdi->bulk_in_buffer[0]; unsigned char s2 = ftdi->bulk_in_buffer[1]; if (s1 == 0x31 && s2 == 0x00) { if (read_stuck-- > 0) { goto read; } else goto reset; } else if (s1 == 0x31 && s2 == 0x60) { if (read_stop-- > 0) { goto read; } else { dev_err(&ftdi->udev->dev, "retr" "y limit reached\n"); continue; } } else { if (read_stop-- > 0) { goto read; } else { dev_err(&ftdi->udev->dev, "retr" "y limit reached\n"); continue; } } } else if (packet_bytes > 0) { if (read_stop-- > 0) { goto read; } else { dev_err(&ftdi->udev->dev, "retry limit " "reached\n"); continue; } } else if (retval == -ETIMEDOUT) { if (retry_on_timeout-- > 0) { goto read; } else { dev_err(&ftdi->udev->dev, "TIMED OUT re" "try limit reached\n"); continue; } } else if (retval == 0) { if (retry_on_empty-- > 0) { goto read; } else { dev_err(&ftdi->udev->dev, "empty packet" " retry limit reached\n"); continue; } } else { err_count += 1; dev_err(&ftdi->udev->dev, "error = %d\n", retval); if (read_stop-- > 0) { goto read; } else { dev_err(&ftdi->udev->dev, "retry limit " "reached\n"); continue; } } } } dev_err(&ftdi->udev->dev, "failed to synchronize\n");
return -EFAULT; }
/*
 * Wait (msleep 5 between polls) for the bulk-IN fifo to drain back to the
 * idle 2-byte status 0x31 0x60; same structure as
 * ftdi_elan_flush_input_fifo() but with a longer timeout and a sleep in
 * the status-retry paths.
 */
static int ftdi_elan_stuck_waiting(struct usb_ftdi *ftdi) { int retry_on_empty = 10; int retry_on_timeout = 5; int retry_on_status = 50; more:{ int packet_bytes = 0; int retval = usb_bulk_msg(ftdi->udev, usb_rcvbulkpipe(ftdi->udev, ftdi->bulk_in_endpointAddr), ftdi->bulk_in_buffer, ftdi->bulk_in_size, &packet_bytes, 1000); if (packet_bytes > 2) { char diag[30 *3 + 4]; char *d = diag; int m = (sizeof(diag) - 1) / 3; char *b = ftdi->bulk_in_buffer; int bytes_read = 0; diag[0] = 0; while (packet_bytes-- > 0) { char c = *b++; if (bytes_read < m) { d += sprintf(d, " %02X", 0x000000FF & c); } else if (bytes_read > m) { } else d += sprintf(d, " .."); bytes_read += 1; continue; } goto more; } else if (packet_bytes > 1) { char s1 = ftdi->bulk_in_buffer[0]; char s2 = ftdi->bulk_in_buffer[1]; if (s1 == 0x31 && s2 == 0x60) { return 0; } else if (retry_on_status-- > 0) { msleep(5); goto more; } else return -EFAULT; } else if (packet_bytes > 0) { char b1 = ftdi->bulk_in_buffer[0]; dev_err(&ftdi->udev->dev, "only one byte flushed from F" "TDI = %02X\n", b1); if (retry_on_status-- > 0) { msleep(5); goto more; } else { dev_err(&ftdi->udev->dev, "STATUS ERROR retry l" "imit reached\n"); return -EFAULT; } } else if (retval == -ETIMEDOUT) { if (retry_on_timeout-- > 0) { goto more; } else { dev_err(&ftdi->udev->dev, "TIMED OUT retry limi" "t reached\n"); return -ENOMEM; } } else if (retval == 0) { if (retry_on_empty-- > 0) { goto more; } else { dev_err(&ftdi->udev->dev, "empty packet retry l" "imit reached\n"); return -ENOMEM; } } else { dev_err(&ftdi->udev->dev, "error = %d\n", retval); return -ENOMEM; } } return -1; }
/*
 * Poll the Uxxx control register and detect card ejection (bit 22 set);
 * logs the ejection exactly once.  (Function continues past this chunk.)
 */
static int ftdi_elan_checkingPCI(struct usb_ftdi *ftdi) { int UxxxStatus = ftdi_elan_read_reg(ftdi, &ftdi->controlreg); if (UxxxStatus) return UxxxStatus; if (ftdi->controlreg & 0x00400000) { if (ftdi->card_ejected) { } else { ftdi->card_ejected = 1; dev_err(&ftdi->udev->dev, "CARD EJECTED - controlreg = " "%08X\n", ftdi->controlreg); } return -ENODEV;
} else { u8 fn = ftdi->function - 1; int activePCIfn = fn << 8; u32 pcidata; u32 pciVID; u32 pciPID; int reg = 0; UxxxStatus = ftdi_elan_read_config(ftdi, activePCIfn | reg, 0, &pcidata); if (UxxxStatus) return UxxxStatus; pciVID = pcidata & 0xFFFF; pciPID = (pcidata >> 16) & 0xFFFF; if (pciVID == ftdi->platform_data.vendor && pciPID == ftdi->platform_data.device) { return 0; } else { dev_err(&ftdi->udev->dev, "vendor=%04X pciVID=%04X devi" "ce=%04X pciPID=%04X\n", ftdi->platform_data.vendor, pciVID, ftdi->platform_data.device, pciPID); return -ENODEV; } } } #define ftdi_read_pcimem(ftdi, member, data) ftdi_elan_read_pcimem(ftdi, \ offsetof(struct ohci_regs, member), 0, data); #define ftdi_write_pcimem(ftdi, member, data) ftdi_elan_write_pcimem(ftdi, \ offsetof(struct ohci_regs, member), 0, data); #define OHCI_CONTROL_INIT OHCI_CTRL_CBSR #define OHCI_INTR_INIT (OHCI_INTR_MIE | OHCI_INTR_UE | OHCI_INTR_RD | \ OHCI_INTR_WDH) static int ftdi_elan_check_controller(struct usb_ftdi *ftdi, int quirk) { int devices = 0; int retval; u32 hc_control; int num_ports; u32 control; u32 rh_a = -1; u32 status; u32 fminterval; u32 hc_fminterval; u32 periodicstart; u32 cmdstatus; u32 roothub_a; int mask = OHCI_INTR_INIT; int sleep_time = 0; int reset_timeout = 30; /* ... 
allow extra time */ int temp; retval = ftdi_write_pcimem(ftdi, intrdisable, OHCI_INTR_MIE); if (retval) return retval; retval = ftdi_read_pcimem(ftdi, control, &control); if (retval) return retval; retval = ftdi_read_pcimem(ftdi, roothub.a, &rh_a); if (retval) return retval; num_ports = rh_a & RH_A_NDP; retval = ftdi_read_pcimem(ftdi, fminterval, &hc_fminterval); if (retval) return retval; hc_fminterval &= 0x3fff; if (hc_fminterval != FI) { } hc_fminterval |= FSMP(hc_fminterval) << 16; retval = ftdi_read_pcimem(ftdi, control, &hc_control); if (retval) return retval; switch (hc_control & OHCI_CTRL_HCFS) { case OHCI_USB_OPER: sleep_time = 0; break; case OHCI_USB_SUSPEND: case OHCI_USB_RESUME: hc_control &= OHCI_CTRL_RWC; hc_control |= OHCI_USB_RESUME; sleep_time = 10; break; default: hc_control &= OHCI_CTRL_RWC; hc_control |= OHCI_USB_RESET; sleep_time = 50; break; } retval = ftdi_write_pcimem(ftdi, control, hc_control); if (retval) return retval; retval = ftdi_read_pcimem(ftdi, control, &control); if (retval) return retval; msleep(sleep_time); retval = ftdi_read_pcimem(ftdi, roothub.a, &roothub_a); if (retval) return retval; if (!(roothub_a & RH_A_NPS)) { /* power down each port */ for (temp = 0; temp < num_ports; temp++) { retval = ftdi_write_pcimem(ftdi, roothub.portstatus[temp], RH_PS_LSDA); if (retval) return retval; } } retval = ftdi_read_pcimem(ftdi, control, &control); if (retval) return retval; retry:retval = ftdi_read_pcimem(ftdi, cmdstatus, &status); if (retval) return retval; retval = ftdi_write_pcimem(ftdi, cmdstatus, OHCI_HCR); if (retval) return retval; extra:{ retval = ftdi_read_pcimem(ftdi, cmdstatus, &status); if (retval) return retval; if (0 != (status & OHCI_HCR)) { if (--reset_timeout == 0) { dev_err(&ftdi->udev->dev, "USB HC reset timed o" "ut!\n"); return -ENODEV; } else { msleep(5); goto extra; } } } if (quirk & OHCI_QUIRK_INITRESET) { retval = ftdi_write_pcimem(ftdi, control, hc_control); if (retval) return retval; retval = 
ftdi_read_pcimem(ftdi, control, &control); if (retval) return retval; } retval = ftdi_write_pcimem(ftdi, ed_controlhead, 0x00000000); if (retval) return retval; retval = ftdi_write_pcimem(ftdi, ed_bulkhead, 0x11000000); if (retval) return retval; retval = ftdi_write_pcimem(ftdi, hcca, 0x00000000); if (retval) return retval; retval = ftdi_read_pcimem(ftdi, fminterval, &fminterval); if (retval) return retval; retval = ftdi_write_pcimem(ftdi, fminterval, ((fminterval & FIT) ^ FIT) | hc_fminterval); if (retval) return retval; retval = ftdi_write_pcimem(ftdi, periodicstart, ((9 *hc_fminterval) / 10) & 0x3fff); if (retval) return retval; retval = ftdi_read_pcimem(ftdi, fminterval, &fminterval); if (retval) return retval; retval = ftdi_read_pcimem(ftdi, periodicstart, &periodicstart); if (retval) return retval; if (0 == (fminterval & 0x3fff0000) || 0 == periodicstart) { if (!(quirk & OHCI_QUIRK_INITRESET)) { quirk |= OHCI_QUIRK_INITRESET; goto retry; } else dev_err(&ftdi->udev->dev, "init err(%08x %04x)\n", fminterval, periodicstart); } /* start controller operations */ hc_control &= OHCI_CTRL_RWC; hc_control |= OHCI_CONTROL_INIT | OHCI_CTRL_BLE | OHCI_USB_OPER; retval = ftdi_write_pcimem(ftdi, control, hc_control); if (retval) return retval; retval = ftdi_write_pcimem(ftdi, cmdstatus, OHCI_BLF); if (retval) return retval; retval = ftdi_read_pcimem(ftdi, cmdstatus, &cmdstatus); if (retval) return retval; retval = ftdi_read_pcimem(ftdi, control, &control); if (retval) return retval; retval = ftdi_write_pcimem(ftdi, roothub.status, RH_HS_DRWE); if (retval) return retval; retval = ftdi_write_pcimem(ftdi, intrstatus, mask); if (retval) return retval; retval = ftdi_write_pcimem(ftdi, intrdisable, OHCI_INTR_MIE | OHCI_INTR_OC | OHCI_INTR_RHSC | OHCI_INTR_FNO | OHCI_INTR_UE | OHCI_INTR_RD | OHCI_INTR_SF | OHCI_INTR_WDH | OHCI_INTR_SO); if (retval) return retval; /* handle root hub init quirks ... 
*/ retval = ftdi_read_pcimem(ftdi, roothub.a, &roothub_a); if (retval) return retval; roothub_a &= ~(RH_A_PSM | RH_A_OCPM); if (quirk & OHCI_QUIRK_SUPERIO) { roothub_a |= RH_A_NOCP; roothub_a &= ~(RH_A_POTPGT | RH_A_NPS); retval = ftdi_write_pcimem(ftdi, roothub.a, roothub_a); if (retval) return retval; } else if ((quirk & OHCI_QUIRK_AMD756) || distrust_firmware) { roothub_a |= RH_A_NPS; retval = ftdi_write_pcimem(ftdi, roothub.a, roothub_a); if (retval) return retval; } retval = ftdi_write_pcimem(ftdi, roothub.status, RH_HS_LPSC); if (retval) return retval; retval = ftdi_write_pcimem(ftdi, roothub.b, (roothub_a & RH_A_NPS) ? 0 : RH_B_PPCM); if (retval) return retval; retval = ftdi_read_pcimem(ftdi, control, &control); if (retval) return retval; mdelay((roothub_a >> 23) & 0x1fe); for (temp = 0; temp < num_ports; temp++) { u32 portstatus; retval = ftdi_read_pcimem(ftdi, roothub.portstatus[temp], &portstatus); if (retval) return retval; if (1 & portstatus) devices += 1; } return devices; } static int ftdi_elan_setup_controller(struct usb_ftdi *ftdi, int fn) { u32 latence_timer; int UxxxStatus; u32 pcidata; int reg = 0; int activePCIfn = fn << 8; UxxxStatus = ftdi_elan_write_reg(ftdi, 0x0000025FL | 0x2800); if (UxxxStatus) return UxxxStatus; reg = 16; UxxxStatus = ftdi_elan_write_config(ftdi, activePCIfn | reg, 0, 0xFFFFFFFF); if (UxxxStatus) return UxxxStatus; UxxxStatus = ftdi_elan_read_config(ftdi, activePCIfn | reg, 0, &pcidata); if (UxxxStatus) return UxxxStatus; UxxxStatus = ftdi_elan_write_config(ftdi, activePCIfn | reg, 0, 0xF0000000); if (UxxxStatus) return UxxxStatus; UxxxStatus = ftdi_elan_read_config(ftdi, activePCIfn | reg, 0, &pcidata); if (UxxxStatus) return UxxxStatus; reg = 12; UxxxStatus = ftdi_elan_read_config(ftdi, activePCIfn | reg, 0, &latence_timer); if (UxxxStatus) return UxxxStatus; latence_timer &= 0xFFFF00FF; latence_timer |= 0x00001600; UxxxStatus = ftdi_elan_write_config(ftdi, activePCIfn | reg, 0x00, latence_timer); if (UxxxStatus) return 
UxxxStatus; UxxxStatus = ftdi_elan_read_config(ftdi, activePCIfn | reg, 0, &pcidata); if (UxxxStatus) return UxxxStatus; reg = 4; UxxxStatus = ftdi_elan_write_config(ftdi, activePCIfn | reg, 0x00, 0x06); if (UxxxStatus) return UxxxStatus; UxxxStatus = ftdi_elan_read_config(ftdi, activePCIfn | reg, 0, &pcidata); if (UxxxStatus) return UxxxStatus; for (reg = 0; reg <= 0x54; reg += 4) { UxxxStatus = ftdi_elan_read_pcimem(ftdi, reg, 0, &pcidata); if (UxxxStatus) return UxxxStatus; } return 0; } static int ftdi_elan_close_controller(struct usb_ftdi *ftdi, int fn) { u32 latence_timer; int UxxxStatus; u32 pcidata; int reg = 0; int activePCIfn = fn << 8; UxxxStatus = ftdi_elan_write_reg(ftdi, 0x0000025FL | 0x2800); if (UxxxStatus) return UxxxStatus; reg = 16; UxxxStatus = ftdi_elan_write_config(ftdi, activePCIfn | reg, 0, 0xFFFFFFFF); if (UxxxStatus) return UxxxStatus; UxxxStatus = ftdi_elan_read_config(ftdi, activePCIfn | reg, 0, &pcidata); if (UxxxStatus) return UxxxStatus; UxxxStatus = ftdi_elan_write_config(ftdi, activePCIfn | reg, 0, 0x00000000); if (UxxxStatus) return UxxxStatus; UxxxStatus = ftdi_elan_read_config(ftdi, activePCIfn | reg, 0, &pcidata); if (UxxxStatus) return UxxxStatus; reg = 12; UxxxStatus = ftdi_elan_read_config(ftdi, activePCIfn | reg, 0, &latence_timer); if (UxxxStatus) return UxxxStatus; latence_timer &= 0xFFFF00FF; latence_timer |= 0x00001600; UxxxStatus = ftdi_elan_write_config(ftdi, activePCIfn | reg, 0x00, latence_timer); if (UxxxStatus) return UxxxStatus; UxxxStatus = ftdi_elan_read_config(ftdi, activePCIfn | reg, 0, &pcidata); if (UxxxStatus) return UxxxStatus; reg = 4; UxxxStatus = ftdi_elan_write_config(ftdi, activePCIfn | reg, 0x00, 0x00); if (UxxxStatus) return UxxxStatus; UxxxStatus = ftdi_elan_read_config(ftdi, activePCIfn | reg, 0, &pcidata); if (UxxxStatus) return UxxxStatus; return 0; } static int ftdi_elan_found_controller(struct usb_ftdi *ftdi, int fn, int quirk) { int result; int UxxxStatus; UxxxStatus = 
ftdi_elan_setup_controller(ftdi, fn); if (UxxxStatus) return UxxxStatus; result = ftdi_elan_check_controller(ftdi, quirk); UxxxStatus = ftdi_elan_close_controller(ftdi, fn); if (UxxxStatus) return UxxxStatus; return result; } static int ftdi_elan_enumeratePCI(struct usb_ftdi *ftdi) { u32 controlreg; u8 sensebits; int UxxxStatus; UxxxStatus = ftdi_elan_read_reg(ftdi, &controlreg); if (UxxxStatus) return UxxxStatus; UxxxStatus = ftdi_elan_write_reg(ftdi, 0x00000000L); if (UxxxStatus) return UxxxStatus; msleep(750); UxxxStatus = ftdi_elan_write_reg(ftdi, 0x00000200L | 0x100); if (UxxxStatus) return UxxxStatus; UxxxStatus = ftdi_elan_write_reg(ftdi, 0x00000200L | 0x500); if (UxxxStatus) return UxxxStatus; UxxxStatus = ftdi_elan_read_reg(ftdi, &controlreg); if (UxxxStatus) return UxxxStatus; UxxxStatus = ftdi_elan_write_reg(ftdi, 0x0000020CL | 0x000); if (UxxxStatus) return UxxxStatus; UxxxStatus = ftdi_elan_write_reg(ftdi, 0x0000020DL | 0x000); if (UxxxStatus) return UxxxStatus; msleep(250); UxxxStatus = ftdi_elan_write_reg(ftdi, 0x0000020FL | 0x000); if (UxxxStatus) return UxxxStatus; UxxxStatus = ftdi_elan_read_reg(ftdi, &controlreg); if (UxxxStatus) return UxxxStatus; UxxxStatus = ftdi_elan_write_reg(ftdi, 0x0000025FL | 0x800); if (UxxxStatus) return UxxxStatus; UxxxStatus = ftdi_elan_read_reg(ftdi, &controlreg); if (UxxxStatus) return UxxxStatus; UxxxStatus = ftdi_elan_read_reg(ftdi, &controlreg); if (UxxxStatus) return UxxxStatus; msleep(1000); sensebits = (controlreg >> 16) & 0x000F; if (0x0D == sensebits) return 0; else return - ENXIO; } static int ftdi_elan_setupOHCI(struct usb_ftdi *ftdi) { int UxxxStatus; u32 pcidata; int reg = 0; u8 fn; int activePCIfn = 0; int max_devices = 0; int controllers = 0; int unrecognized = 0; ftdi->function = 0; for (fn = 0; (fn < 4); fn++) { u32 pciVID = 0; u32 pciPID = 0; int devices = 0; activePCIfn = fn << 8; UxxxStatus = ftdi_elan_read_config(ftdi, activePCIfn | reg, 0, &pcidata); if (UxxxStatus) return UxxxStatus; pciVID = 
pcidata & 0xFFFF; pciPID = (pcidata >> 16) & 0xFFFF; if ((pciVID == PCI_VENDOR_ID_OPTI) && (pciPID == 0xc861)) { devices = ftdi_elan_found_controller(ftdi, fn, 0); controllers += 1; } else if ((pciVID == PCI_VENDOR_ID_NEC) && (pciPID == 0x0035)) { devices = ftdi_elan_found_controller(ftdi, fn, 0); controllers += 1; } else if ((pciVID == PCI_VENDOR_ID_AL) && (pciPID == 0x5237)) { devices = ftdi_elan_found_controller(ftdi, fn, 0); controllers += 1; } else if ((pciVID == PCI_VENDOR_ID_ATT) && (pciPID == 0x5802)) { devices = ftdi_elan_found_controller(ftdi, fn, 0); controllers += 1; } else if (pciVID == PCI_VENDOR_ID_AMD && pciPID == 0x740c) { devices = ftdi_elan_found_controller(ftdi, fn, OHCI_QUIRK_AMD756); controllers += 1; } else if (pciVID == PCI_VENDOR_ID_COMPAQ && pciPID == 0xa0f8) { devices = ftdi_elan_found_controller(ftdi, fn, OHCI_QUIRK_ZFMICRO); controllers += 1; } else if (0 == pcidata) { } else unrecognized += 1; if (devices > max_devices) { max_devices = devices; ftdi->function = fn + 1; ftdi->platform_data.vendor = pciVID; ftdi->platform_data.device = pciPID; } } if (ftdi->function > 0) { UxxxStatus = ftdi_elan_setup_controller(ftdi, ftdi->function - 1); if (UxxxStatus) return UxxxStatus; return 0; } else if (controllers > 0) { return -ENXIO; } else if (unrecognized > 0) { return -ENXIO; } else { ftdi->enumerated = 0; return -ENXIO; } } /* * we use only the first bulk-in and bulk-out endpoints */ static int ftdi_elan_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; size_t buffer_size; int i; int retval = -ENOMEM; struct usb_ftdi *ftdi; ftdi = kzalloc(sizeof(struct usb_ftdi), GFP_KERNEL); if (!ftdi) { printk(KERN_ERR "Out of memory\n"); return -ENOMEM; } mutex_lock(&ftdi_module_lock); list_add_tail(&ftdi->ftdi_list, &ftdi_static_list); ftdi->sequence_num = ++ftdi_instances; mutex_unlock(&ftdi_module_lock); ftdi_elan_init_kref(ftdi); 
sema_init(&ftdi->sw_lock, 1); ftdi->udev = usb_get_dev(interface_to_usbdev(interface)); ftdi->interface = interface; mutex_init(&ftdi->u132_lock); ftdi->expected = 4; iface_desc = interface->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (!ftdi->bulk_in_endpointAddr && usb_endpoint_is_bulk_in(endpoint)) { buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); ftdi->bulk_in_size = buffer_size; ftdi->bulk_in_endpointAddr = endpoint->bEndpointAddress; ftdi->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL); if (!ftdi->bulk_in_buffer) { dev_err(&ftdi->udev->dev, "Could not allocate b" "ulk_in_buffer\n"); retval = -ENOMEM; goto error; } } if (!ftdi->bulk_out_endpointAddr && usb_endpoint_is_bulk_out(endpoint)) { ftdi->bulk_out_endpointAddr = endpoint->bEndpointAddress; } } if (!(ftdi->bulk_in_endpointAddr && ftdi->bulk_out_endpointAddr)) { dev_err(&ftdi->udev->dev, "Could not find both bulk-in and bulk" "-out endpoints\n"); retval = -ENODEV; goto error; } dev_info(&ftdi->udev->dev, "interface %d has I=%02X O=%02X\n", iface_desc->desc.bInterfaceNumber, ftdi->bulk_in_endpointAddr, ftdi->bulk_out_endpointAddr); usb_set_intfdata(interface, ftdi); if (iface_desc->desc.bInterfaceNumber == 0 && ftdi->bulk_in_endpointAddr == 0x81 && ftdi->bulk_out_endpointAddr == 0x02) { retval = usb_register_dev(interface, &ftdi_elan_jtag_class); if (retval) { dev_err(&ftdi->udev->dev, "Not able to get a minor for " "this device.\n"); usb_set_intfdata(interface, NULL); retval = -ENOMEM; goto error; } else { ftdi->class = &ftdi_elan_jtag_class; dev_info(&ftdi->udev->dev, "USB FDTI=%p JTAG interface " "%d now attached to ftdi%d\n", ftdi, iface_desc->desc.bInterfaceNumber, interface->minor); return 0; } } else if (iface_desc->desc.bInterfaceNumber == 1 && ftdi->bulk_in_endpointAddr == 0x83 && ftdi->bulk_out_endpointAddr == 0x04) { ftdi->class = NULL; dev_info(&ftdi->udev->dev, "USB FDTI=%p ELAN interface %d now a" "ctivated\n", 
ftdi, iface_desc->desc.bInterfaceNumber); INIT_DELAYED_WORK(&ftdi->status_work, ftdi_elan_status_work); INIT_DELAYED_WORK(&ftdi->command_work, ftdi_elan_command_work); INIT_DELAYED_WORK(&ftdi->respond_work, ftdi_elan_respond_work); ftdi_status_queue_work(ftdi, msecs_to_jiffies(3 *1000)); return 0; } else { dev_err(&ftdi->udev->dev, "Could not find ELAN's U132 device\n"); retval = -ENODEV; goto error; } error:if (ftdi) { ftdi_elan_put_kref(ftdi); } return retval; } static void ftdi_elan_disconnect(struct usb_interface *interface) { struct usb_ftdi *ftdi = usb_get_intfdata(interface); ftdi->disconnected += 1; if (ftdi->class) { int minor = interface->minor; struct usb_class_driver *class = ftdi->class; usb_set_intfdata(interface, NULL); usb_deregister_dev(interface, class); dev_info(&ftdi->udev->dev, "USB FTDI U132 jtag interface on min" "or %d now disconnected\n", minor); } else { ftdi_status_cancel_work(ftdi); ftdi_command_cancel_work(ftdi); ftdi_response_cancel_work(ftdi); ftdi_elan_abandon_completions(ftdi); ftdi_elan_abandon_targets(ftdi); if (ftdi->registered) { platform_device_unregister(&ftdi->platform_dev); ftdi->synchronized = 0; ftdi->enumerated = 0; ftdi->initialized = 0; ftdi->registered = 0; } flush_workqueue(status_queue); flush_workqueue(command_queue); flush_workqueue(respond_queue); ftdi->disconnected += 1; usb_set_intfdata(interface, NULL); dev_info(&ftdi->udev->dev, "USB FTDI U132 host controller inter" "face now disconnected\n"); } ftdi_elan_put_kref(ftdi); } static struct usb_driver ftdi_elan_driver = { .name = "ftdi-elan", .probe = ftdi_elan_probe, .disconnect = ftdi_elan_disconnect, .id_table = ftdi_elan_table, }; static int __init ftdi_elan_init(void) { int result; printk(KERN_INFO "driver %s\n", ftdi_elan_driver.name); mutex_init(&ftdi_module_lock); INIT_LIST_HEAD(&ftdi_static_list); status_queue = create_singlethread_workqueue("ftdi-status-control"); if (!status_queue) goto err_status_queue; command_queue = 
create_singlethread_workqueue("ftdi-command-engine"); if (!command_queue) goto err_command_queue; respond_queue = create_singlethread_workqueue("ftdi-respond-engine"); if (!respond_queue) goto err_respond_queue; result = usb_register(&ftdi_elan_driver); if (result) { destroy_workqueue(status_queue); destroy_workqueue(command_queue); destroy_workqueue(respond_queue); printk(KERN_ERR "usb_register failed. Error number %d\n", result); } return result; err_respond_queue: destroy_workqueue(command_queue); err_command_queue: destroy_workqueue(status_queue); err_status_queue: printk(KERN_ERR "%s couldn't create workqueue\n", ftdi_elan_driver.name); return -ENOMEM; } static void __exit ftdi_elan_exit(void) { struct usb_ftdi *ftdi; struct usb_ftdi *temp; usb_deregister(&ftdi_elan_driver); printk(KERN_INFO "ftdi_u132 driver deregistered\n"); list_for_each_entry_safe(ftdi, temp, &ftdi_static_list, ftdi_list) { ftdi_status_cancel_work(ftdi); ftdi_command_cancel_work(ftdi); ftdi_response_cancel_work(ftdi); } flush_workqueue(status_queue); destroy_workqueue(status_queue); status_queue = NULL; flush_workqueue(command_queue); destroy_workqueue(command_queue); command_queue = NULL; flush_workqueue(respond_queue); destroy_workqueue(respond_queue); respond_queue = NULL; } module_init(ftdi_elan_init); module_exit(ftdi_elan_exit);
gpl-2.0
duki994/G900H-Kernel-XXU1BOA7
drivers/ata/pata_cmd640.c
2373
6664
/* * pata_cmd640.c - CMD640 PCI PATA for new ATA layer * (C) 2007 Red Hat Inc * * Based upon * linux/drivers/ide/pci/cmd640.c Version 1.02 Sep 01, 1996 * * Copyright (C) 1995-1996 Linus Torvalds & authors (see driver) * * This drives only the PCI version of the controller. If you have a * VLB one then we have enough docs to support it but you can write * your own code. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/gfp.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_cmd640" #define DRV_VERSION "0.0.5" struct cmd640_reg { int last; u8 reg58[ATA_MAX_DEVICES]; }; enum { CFR = 0x50, CNTRL = 0x51, CMDTIM = 0x52, ARTIM0 = 0x53, DRWTIM0 = 0x54, ARTIM23 = 0x57, DRWTIM23 = 0x58, BRST = 0x59 }; /** * cmd640_set_piomode - set initial PIO mode data * @ap: ATA port * @adev: ATA device * * Called to do the PIO mode setup. */ static void cmd640_set_piomode(struct ata_port *ap, struct ata_device *adev) { struct cmd640_reg *timing = ap->private_data; struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct ata_timing t; const unsigned long T = 1000000 / 33; const u8 setup_data[] = { 0x40, 0x40, 0x40, 0x80, 0x00 }; u8 reg; int arttim = ARTIM0 + 2 * adev->devno; struct ata_device *pair = ata_dev_pair(adev); if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) { printk(KERN_ERR DRV_NAME ": mode computation failed.\n"); return; } /* The second channel has shared timings and the setup timing is messy to switch to merge it for worst case */ if (ap->port_no && pair) { struct ata_timing p; ata_timing_compute(pair, pair->pio_mode, &p, T, 1); ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP); } /* Make the timings fit */ if (t.recover > 16) { t.active += t.recover - 16; t.recover = 16; } if (t.active > 16) t.active = 16; /* Now convert the clocks into values we can actually stuff into the chip */ if (t.recover > 1) t.recover--; /* 640B 
only */ else t.recover = 15; if (t.setup > 4) t.setup = 0xC0; else t.setup = setup_data[t.setup]; if (ap->port_no == 0) { t.active &= 0x0F; /* 0 = 16 */ /* Load setup timing */ pci_read_config_byte(pdev, arttim, &reg); reg &= 0x3F; reg |= t.setup; pci_write_config_byte(pdev, arttim, reg); /* Load active/recovery */ pci_write_config_byte(pdev, arttim + 1, (t.active << 4) | t.recover); } else { /* Save the shared timings for channel, they will be loaded by qc_issue. Reloading the setup time is expensive so we keep a merged one loaded */ pci_read_config_byte(pdev, ARTIM23, &reg); reg &= 0x3F; reg |= t.setup; pci_write_config_byte(pdev, ARTIM23, reg); timing->reg58[adev->devno] = (t.active << 4) | t.recover; } } /** * cmd640_qc_issue - command preparation hook * @qc: Command to be issued * * Channel 1 has shared timings. We must reprogram the * clock each drive 2/3 switch we do. */ static unsigned int cmd640_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ata_device *adev = qc->dev; struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct cmd640_reg *timing = ap->private_data; if (ap->port_no != 0 && adev->devno != timing->last) { pci_write_config_byte(pdev, DRWTIM23, timing->reg58[adev->devno]); timing->last = adev->devno; } return ata_sff_qc_issue(qc); } /** * cmd640_port_start - port setup * @ap: ATA port being set up * * The CMD640 needs to maintain private data structures so we * allocate space here. */ static int cmd640_port_start(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct cmd640_reg *timing; timing = devm_kzalloc(&pdev->dev, sizeof(struct cmd640_reg), GFP_KERNEL); if (timing == NULL) return -ENOMEM; timing->last = -1; /* Force a load */ ap->private_data = timing; return 0; } static bool cmd640_sff_irq_check(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); int irq_reg = ap->port_no ? ARTIM23 : CFR; u8 irq_stat, irq_mask = ap->port_no ? 
0x10 : 0x04; pci_read_config_byte(pdev, irq_reg, &irq_stat); return irq_stat & irq_mask; } static struct scsi_host_template cmd640_sht = { ATA_PIO_SHT(DRV_NAME), }; static struct ata_port_operations cmd640_port_ops = { .inherits = &ata_sff_port_ops, /* In theory xfer_noirq is not needed once we kill the prefetcher */ .sff_data_xfer = ata_sff_data_xfer_noirq, .sff_irq_check = cmd640_sff_irq_check, .qc_issue = cmd640_qc_issue, .cable_detect = ata_cable_40wire, .set_piomode = cmd640_set_piomode, .port_start = cmd640_port_start, }; static void cmd640_hardware_init(struct pci_dev *pdev) { u8 ctrl; /* CMD640 detected, commiserations */ pci_write_config_byte(pdev, 0x5B, 0x00); /* PIO0 command cycles */ pci_write_config_byte(pdev, CMDTIM, 0); /* 512 byte bursts (sector) */ pci_write_config_byte(pdev, BRST, 0x40); /* * A reporter a long time ago * Had problems with the data fifo * So don't run the risk * Of putting crap on the disk * For its better just to go slow */ /* Do channel 0 */ pci_read_config_byte(pdev, CNTRL, &ctrl); pci_write_config_byte(pdev, CNTRL, ctrl | 0xC0); /* Ditto for channel 1 */ pci_read_config_byte(pdev, ARTIM23, &ctrl); ctrl |= 0x0C; pci_write_config_byte(pdev, ARTIM23, ctrl); } static int cmd640_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { static const struct ata_port_info info = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .port_ops = &cmd640_port_ops }; const struct ata_port_info *ppi[] = { &info, NULL }; int rc; rc = pcim_enable_device(pdev); if (rc) return rc; cmd640_hardware_init(pdev); return ata_pci_sff_init_one(pdev, ppi, &cmd640_sht, NULL, 0); } #ifdef CONFIG_PM static int cmd640_reinit_one(struct pci_dev *pdev) { struct ata_host *host = dev_get_drvdata(&pdev->dev); int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; cmd640_hardware_init(pdev); ata_host_resume(host); return 0; } #endif static const struct pci_device_id cmd640[] = { { PCI_VDEVICE(CMD, 0x640), 0 }, { }, }; static struct pci_driver 
cmd640_pci_driver = { .name = DRV_NAME, .id_table = cmd640, .probe = cmd640_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = cmd640_reinit_one, #endif }; module_pci_driver(cmd640_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for CMD640 PATA controllers"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, cmd640); MODULE_VERSION(DRV_VERSION);
gpl-2.0
bju2000/android_kernel_lge_msm8994
drivers/infiniband/hw/mthca/mthca_provider.c
2629
35953
/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <rdma/ib_smi.h> #include <rdma/ib_umem.h> #include <rdma/ib_user_verbs.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/mm.h> #include <linux/export.h> #include "mthca_dev.h" #include "mthca_cmd.h" #include "mthca_user.h" #include "mthca_memfree.h" static void init_query_mad(struct ib_smp *mad) { mad->base_version = 1; mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; mad->class_version = 1; mad->method = IB_MGMT_METHOD_GET; } static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) { struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; int err = -ENOMEM; struct mthca_dev *mdev = to_mdev(ibdev); in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) goto out; memset(props, 0, sizeof *props); props->fw_ver = mdev->fw_ver; init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; err = mthca_MAD_IFC(mdev, 1, 1, 1, NULL, NULL, in_mad, out_mad); if (err) goto out; props->device_cap_flags = mdev->device_cap_flags; props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 0xffffff; props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30)); props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32)); memcpy(&props->sys_image_guid, out_mad->data + 4, 8); props->max_mr_size = ~0ull; props->page_size_cap = mdev->limits.page_size_cap; props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps; props->max_qp_wr = mdev->limits.max_wqes; props->max_sge = mdev->limits.max_sg; props->max_cq = mdev->limits.num_cqs - mdev->limits.reserved_cqs; props->max_cqe = mdev->limits.max_cqes; props->max_mr = mdev->limits.num_mpts - mdev->limits.reserved_mrws; props->max_pd = mdev->limits.num_pds - mdev->limits.reserved_pds; props->max_qp_rd_atom = 1 << mdev->qp_table.rdb_shift; props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma; props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; 
props->max_srq = mdev->limits.num_srqs - mdev->limits.reserved_srqs; props->max_srq_wr = mdev->limits.max_srq_wqes; props->max_srq_sge = mdev->limits.max_srq_sge; props->local_ca_ack_delay = mdev->limits.local_ca_ack_delay; props->atomic_cap = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ? IB_ATOMIC_HCA : IB_ATOMIC_NONE; props->max_pkeys = mdev->limits.pkey_table_len; props->max_mcast_grp = mdev->limits.num_mgms + mdev->limits.num_amgms; props->max_mcast_qp_attach = MTHCA_QP_PER_MGM; props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * props->max_mcast_grp; /* * If Sinai memory key optimization is being used, then only * the 8-bit key portion will change. For other HCAs, the * unused index bits will also be used for FMR remapping. */ if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT) props->max_map_per_fmr = 255; else props->max_map_per_fmr = (1 << (32 - ilog2(mdev->limits.num_mpts))) - 1; err = 0; out: kfree(in_mad); kfree(out_mad); return err; } static int mthca_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) { struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; int err = -ENOMEM; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) goto out; memset(props, 0, sizeof *props); init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); if (err) goto out; props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); props->lmc = out_mad->data[34] & 0x7; props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18)); props->sm_sl = out_mad->data[36] & 0xf; props->state = out_mad->data[32] & 0xf; props->phys_state = out_mad->data[33] >> 4; props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20)); props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len; props->max_msg_sz = 0x80000000; props->pkey_tbl_len = 
to_mdev(ibdev)->limits.pkey_table_len; props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46)); props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); props->active_width = out_mad->data[31] & 0xf; props->active_speed = out_mad->data[35] >> 4; props->max_mtu = out_mad->data[41] & 0xf; props->active_mtu = out_mad->data[36] >> 4; props->subnet_timeout = out_mad->data[51] & 0x1f; props->max_vl_num = out_mad->data[37] >> 4; props->init_type_reply = out_mad->data[41] >> 4; out: kfree(in_mad); kfree(out_mad); return err; } static int mthca_modify_device(struct ib_device *ibdev, int mask, struct ib_device_modify *props) { if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) return -EOPNOTSUPP; if (mask & IB_DEVICE_MODIFY_NODE_DESC) { if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex)) return -ERESTARTSYS; memcpy(ibdev->node_desc, props->node_desc, 64); mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); } return 0; } static int mthca_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask, struct ib_port_modify *props) { struct mthca_set_ib_param set_ib; struct ib_port_attr attr; int err; if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex)) return -ERESTARTSYS; err = mthca_query_port(ibdev, port, &attr); if (err) goto out; set_ib.set_si_guid = 0; set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR); set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) & ~props->clr_port_cap_mask; err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port); if (err) goto out; out: mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); return err; } static int mthca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) { struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; int err = -ENOMEM; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) goto out; init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; in_mad->attr_mod = 
cpu_to_be32(index / 32); err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); if (err) goto out; *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]); out: kfree(in_mad); kfree(out_mad); return err; } static int mthca_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid) { struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; int err = -ENOMEM; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) goto out; init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); if (err) goto out; memcpy(gid->raw, out_mad->data + 8, 8); init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; in_mad->attr_mod = cpu_to_be32(index / 8); err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); if (err) goto out; memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); out: kfree(in_mad); kfree(out_mad); return err; } static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata) { struct mthca_alloc_ucontext_resp uresp; struct mthca_ucontext *context; int err; if (!(to_mdev(ibdev)->active)) return ERR_PTR(-EAGAIN); memset(&uresp, 0, sizeof uresp); uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps; if (mthca_is_memfree(to_mdev(ibdev))) uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size; else uresp.uarc_size = 0; context = kmalloc(sizeof *context, GFP_KERNEL); if (!context) return ERR_PTR(-ENOMEM); err = mthca_uar_alloc(to_mdev(ibdev), &context->uar); if (err) { kfree(context); return ERR_PTR(err); } context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev)); if (IS_ERR(context->db_tab)) { err = PTR_ERR(context->db_tab); mthca_uar_free(to_mdev(ibdev), &context->uar); kfree(context); return ERR_PTR(err); } if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) { 
mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab); mthca_uar_free(to_mdev(ibdev), &context->uar); kfree(context); return ERR_PTR(-EFAULT); } context->reg_mr_warned = 0; return &context->ibucontext; } static int mthca_dealloc_ucontext(struct ib_ucontext *context) { mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar, to_mucontext(context)->db_tab); mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar); kfree(to_mucontext(context)); return 0; } static int mthca_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma) { if (vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); if (io_remap_pfn_range(vma, vma->vm_start, to_mucontext(context)->uar.pfn, PAGE_SIZE, vma->vm_page_prot)) return -EAGAIN; return 0; } static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata) { struct mthca_pd *pd; int err; pd = kmalloc(sizeof *pd, GFP_KERNEL); if (!pd) return ERR_PTR(-ENOMEM); err = mthca_pd_alloc(to_mdev(ibdev), !context, pd); if (err) { kfree(pd); return ERR_PTR(err); } if (context) { if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) { mthca_pd_free(to_mdev(ibdev), pd); kfree(pd); return ERR_PTR(-EFAULT); } } return &pd->ibpd; } static int mthca_dealloc_pd(struct ib_pd *pd) { mthca_pd_free(to_mdev(pd->device), to_mpd(pd)); kfree(pd); return 0; } static struct ib_ah *mthca_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr) { int err; struct mthca_ah *ah; ah = kmalloc(sizeof *ah, GFP_ATOMIC); if (!ah) return ERR_PTR(-ENOMEM); err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah); if (err) { kfree(ah); return ERR_PTR(err); } return &ah->ibah; } static int mthca_ah_destroy(struct ib_ah *ah) { mthca_destroy_ah(to_mdev(ah->device), to_mah(ah)); kfree(ah); return 0; } static struct ib_srq *mthca_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *init_attr, struct 
ib_udata *udata) { struct mthca_create_srq ucmd; struct mthca_ucontext *context = NULL; struct mthca_srq *srq; int err; if (init_attr->srq_type != IB_SRQT_BASIC) return ERR_PTR(-ENOSYS); srq = kmalloc(sizeof *srq, GFP_KERNEL); if (!srq) return ERR_PTR(-ENOMEM); if (pd->uobject) { context = to_mucontext(pd->uobject->context); if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { err = -EFAULT; goto err_free; } err = mthca_map_user_db(to_mdev(pd->device), &context->uar, context->db_tab, ucmd.db_index, ucmd.db_page); if (err) goto err_free; srq->mr.ibmr.lkey = ucmd.lkey; srq->db_index = ucmd.db_index; } err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd), &init_attr->attr, srq); if (err && pd->uobject) mthca_unmap_user_db(to_mdev(pd->device), &context->uar, context->db_tab, ucmd.db_index); if (err) goto err_free; if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) { mthca_free_srq(to_mdev(pd->device), srq); err = -EFAULT; goto err_free; } return &srq->ibsrq; err_free: kfree(srq); return ERR_PTR(err); } static int mthca_destroy_srq(struct ib_srq *srq) { struct mthca_ucontext *context; if (srq->uobject) { context = to_mucontext(srq->uobject->context); mthca_unmap_user_db(to_mdev(srq->device), &context->uar, context->db_tab, to_msrq(srq)->db_index); } mthca_free_srq(to_mdev(srq->device), to_msrq(srq)); kfree(srq); return 0; } static struct ib_qp *mthca_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { struct mthca_create_qp ucmd; struct mthca_qp *qp; int err; if (init_attr->create_flags) return ERR_PTR(-EINVAL); switch (init_attr->qp_type) { case IB_QPT_RC: case IB_QPT_UC: case IB_QPT_UD: { struct mthca_ucontext *context; qp = kmalloc(sizeof *qp, GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); if (pd->uobject) { context = to_mucontext(pd->uobject->context); if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { kfree(qp); return ERR_PTR(-EFAULT); } err = mthca_map_user_db(to_mdev(pd->device), &context->uar, 
context->db_tab, ucmd.sq_db_index, ucmd.sq_db_page); if (err) { kfree(qp); return ERR_PTR(err); } err = mthca_map_user_db(to_mdev(pd->device), &context->uar, context->db_tab, ucmd.rq_db_index, ucmd.rq_db_page); if (err) { mthca_unmap_user_db(to_mdev(pd->device), &context->uar, context->db_tab, ucmd.sq_db_index); kfree(qp); return ERR_PTR(err); } qp->mr.ibmr.lkey = ucmd.lkey; qp->sq.db_index = ucmd.sq_db_index; qp->rq.db_index = ucmd.rq_db_index; } err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd), to_mcq(init_attr->send_cq), to_mcq(init_attr->recv_cq), init_attr->qp_type, init_attr->sq_sig_type, &init_attr->cap, qp); if (err && pd->uobject) { context = to_mucontext(pd->uobject->context); mthca_unmap_user_db(to_mdev(pd->device), &context->uar, context->db_tab, ucmd.sq_db_index); mthca_unmap_user_db(to_mdev(pd->device), &context->uar, context->db_tab, ucmd.rq_db_index); } qp->ibqp.qp_num = qp->qpn; break; } case IB_QPT_SMI: case IB_QPT_GSI: { /* Don't allow userspace to create special QPs */ if (pd->uobject) return ERR_PTR(-EINVAL); qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 
0 : 1; err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd), to_mcq(init_attr->send_cq), to_mcq(init_attr->recv_cq), init_attr->sq_sig_type, &init_attr->cap, qp->ibqp.qp_num, init_attr->port_num, to_msqp(qp)); break; } default: /* Don't support raw QPs */ return ERR_PTR(-ENOSYS); } if (err) { kfree(qp); return ERR_PTR(err); } init_attr->cap.max_send_wr = qp->sq.max; init_attr->cap.max_recv_wr = qp->rq.max; init_attr->cap.max_send_sge = qp->sq.max_gs; init_attr->cap.max_recv_sge = qp->rq.max_gs; init_attr->cap.max_inline_data = qp->max_inline_data; return &qp->ibqp; } static int mthca_destroy_qp(struct ib_qp *qp) { if (qp->uobject) { mthca_unmap_user_db(to_mdev(qp->device), &to_mucontext(qp->uobject->context)->uar, to_mucontext(qp->uobject->context)->db_tab, to_mqp(qp)->sq.db_index); mthca_unmap_user_db(to_mdev(qp->device), &to_mucontext(qp->uobject->context)->uar, to_mucontext(qp->uobject->context)->db_tab, to_mqp(qp)->rq.db_index); } mthca_free_qp(to_mdev(qp->device), to_mqp(qp)); kfree(qp); return 0; } static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries, int comp_vector, struct ib_ucontext *context, struct ib_udata *udata) { struct mthca_create_cq ucmd; struct mthca_cq *cq; int nent; int err; if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes) return ERR_PTR(-EINVAL); if (context) { if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) return ERR_PTR(-EFAULT); err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, to_mucontext(context)->db_tab, ucmd.set_db_index, ucmd.set_db_page); if (err) return ERR_PTR(err); err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, to_mucontext(context)->db_tab, ucmd.arm_db_index, ucmd.arm_db_page); if (err) goto err_unmap_set; } cq = kmalloc(sizeof *cq, GFP_KERNEL); if (!cq) { err = -ENOMEM; goto err_unmap_arm; } if (context) { cq->buf.mr.ibmr.lkey = ucmd.lkey; cq->set_ci_db_index = ucmd.set_db_index; cq->arm_db_index = ucmd.arm_db_index; } for (nent = 1; nent <= entries; 
nent <<= 1) ; /* nothing */ err = mthca_init_cq(to_mdev(ibdev), nent, context ? to_mucontext(context) : NULL, context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num, cq); if (err) goto err_free; if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) { mthca_free_cq(to_mdev(ibdev), cq); err = -EFAULT; goto err_free; } cq->resize_buf = NULL; return &cq->ibcq; err_free: kfree(cq); err_unmap_arm: if (context) mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, to_mucontext(context)->db_tab, ucmd.arm_db_index); err_unmap_set: if (context) mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, to_mucontext(context)->db_tab, ucmd.set_db_index); return ERR_PTR(err); } static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq, int entries) { int ret; spin_lock_irq(&cq->lock); if (cq->resize_buf) { ret = -EBUSY; goto unlock; } cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC); if (!cq->resize_buf) { ret = -ENOMEM; goto unlock; } cq->resize_buf->state = CQ_RESIZE_ALLOC; ret = 0; unlock: spin_unlock_irq(&cq->lock); if (ret) return ret; ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries); if (ret) { spin_lock_irq(&cq->lock); kfree(cq->resize_buf); cq->resize_buf = NULL; spin_unlock_irq(&cq->lock); return ret; } cq->resize_buf->cqe = entries - 1; spin_lock_irq(&cq->lock); cq->resize_buf->state = CQ_RESIZE_READY; spin_unlock_irq(&cq->lock); return 0; } static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) { struct mthca_dev *dev = to_mdev(ibcq->device); struct mthca_cq *cq = to_mcq(ibcq); struct mthca_resize_cq ucmd; u32 lkey; int ret; if (entries < 1 || entries > dev->limits.max_cqes) return -EINVAL; mutex_lock(&cq->mutex); entries = roundup_pow_of_two(entries + 1); if (entries == ibcq->cqe + 1) { ret = 0; goto out; } if (cq->is_kernel) { ret = mthca_alloc_resize_buf(dev, cq, entries); if (ret) goto out; lkey = cq->resize_buf->buf.mr.ibmr.lkey; } else { if 
(ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { ret = -EFAULT; goto out; } lkey = ucmd.lkey; } ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries)); if (ret) { if (cq->resize_buf) { mthca_free_cq_buf(dev, &cq->resize_buf->buf, cq->resize_buf->cqe); kfree(cq->resize_buf); spin_lock_irq(&cq->lock); cq->resize_buf = NULL; spin_unlock_irq(&cq->lock); } goto out; } if (cq->is_kernel) { struct mthca_cq_buf tbuf; int tcqe; spin_lock_irq(&cq->lock); if (cq->resize_buf->state == CQ_RESIZE_READY) { mthca_cq_resize_copy_cqes(cq); tbuf = cq->buf; tcqe = cq->ibcq.cqe; cq->buf = cq->resize_buf->buf; cq->ibcq.cqe = cq->resize_buf->cqe; } else { tbuf = cq->resize_buf->buf; tcqe = cq->resize_buf->cqe; } kfree(cq->resize_buf); cq->resize_buf = NULL; spin_unlock_irq(&cq->lock); mthca_free_cq_buf(dev, &tbuf, tcqe); } else ibcq->cqe = entries - 1; out: mutex_unlock(&cq->mutex); return ret; } static int mthca_destroy_cq(struct ib_cq *cq) { if (cq->uobject) { mthca_unmap_user_db(to_mdev(cq->device), &to_mucontext(cq->uobject->context)->uar, to_mucontext(cq->uobject->context)->db_tab, to_mcq(cq)->arm_db_index); mthca_unmap_user_db(to_mdev(cq->device), &to_mucontext(cq->uobject->context)->uar, to_mucontext(cq->uobject->context)->db_tab, to_mcq(cq)->set_ci_db_index); } mthca_free_cq(to_mdev(cq->device), to_mcq(cq)); kfree(cq); return 0; } static inline u32 convert_access(int acc) { return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC : 0) | (acc & IB_ACCESS_REMOTE_WRITE ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) | (acc & IB_ACCESS_REMOTE_READ ? MTHCA_MPT_FLAG_REMOTE_READ : 0) | (acc & IB_ACCESS_LOCAL_WRITE ? 
MTHCA_MPT_FLAG_LOCAL_WRITE : 0) | MTHCA_MPT_FLAG_LOCAL_READ; } static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc) { struct mthca_mr *mr; int err; mr = kmalloc(sizeof *mr, GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); err = mthca_mr_alloc_notrans(to_mdev(pd->device), to_mpd(pd)->pd_num, convert_access(acc), mr); if (err) { kfree(mr); return ERR_PTR(err); } mr->umem = NULL; return &mr->ibmr; } static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd, struct ib_phys_buf *buffer_list, int num_phys_buf, int acc, u64 *iova_start) { struct mthca_mr *mr; u64 *page_list; u64 total_size; unsigned long mask; int shift; int npages; int err; int i, j, n; mask = buffer_list[0].addr ^ *iova_start; total_size = 0; for (i = 0; i < num_phys_buf; ++i) { if (i != 0) mask |= buffer_list[i].addr; if (i != num_phys_buf - 1) mask |= buffer_list[i].addr + buffer_list[i].size; total_size += buffer_list[i].size; } if (mask & ~PAGE_MASK) return ERR_PTR(-EINVAL); shift = __ffs(mask | 1 << 31); buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1); buffer_list[0].addr &= ~0ull << shift; mr = kmalloc(sizeof *mr, GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); npages = 0; for (i = 0; i < num_phys_buf; ++i) npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift; if (!npages) return &mr->ibmr; page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL); if (!page_list) { kfree(mr); return ERR_PTR(-ENOMEM); } n = 0; for (i = 0; i < num_phys_buf; ++i) for (j = 0; j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift; ++j) page_list[n++] = buffer_list[i].addr + ((u64) j << shift); mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) " "in PD %x; shift %d, npages %d.\n", (unsigned long long) buffer_list[0].addr, (unsigned long long) *iova_start, to_mpd(pd)->pd_num, shift, npages); err = mthca_mr_alloc_phys(to_mdev(pd->device), to_mpd(pd)->pd_num, page_list, shift, npages, *iova_start, total_size, convert_access(acc), mr); if (err) { 
kfree(page_list); kfree(mr); return ERR_PTR(err); } kfree(page_list); mr->umem = NULL; return &mr->ibmr; } static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt, int acc, struct ib_udata *udata) { struct mthca_dev *dev = to_mdev(pd->device); struct ib_umem_chunk *chunk; struct mthca_mr *mr; struct mthca_reg_mr ucmd; u64 *pages; int shift, n, len; int i, j, k; int err = 0; int write_mtt_size; if (udata->inlen - sizeof (struct ib_uverbs_cmd_hdr) < sizeof ucmd) { if (!to_mucontext(pd->uobject->context)->reg_mr_warned) { mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n", current->comm); mthca_warn(dev, " Update libmthca to fix this.\n"); } ++to_mucontext(pd->uobject->context)->reg_mr_warned; ucmd.mr_attrs = 0; } else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) return ERR_PTR(-EFAULT); mr = kmalloc(sizeof *mr, GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, ucmd.mr_attrs & MTHCA_MR_DMASYNC); if (IS_ERR(mr->umem)) { err = PTR_ERR(mr->umem); goto err; } shift = ffs(mr->umem->page_size) - 1; n = 0; list_for_each_entry(chunk, &mr->umem->chunk_list, list) n += chunk->nents; mr->mtt = mthca_alloc_mtt(dev, n); if (IS_ERR(mr->mtt)) { err = PTR_ERR(mr->mtt); goto err_umem; } pages = (u64 *) __get_free_page(GFP_KERNEL); if (!pages) { err = -ENOMEM; goto err_mtt; } i = n = 0; write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages)); list_for_each_entry(chunk, &mr->umem->chunk_list, list) for (j = 0; j < chunk->nmap; ++j) { len = sg_dma_len(&chunk->page_list[j]) >> shift; for (k = 0; k < len; ++k) { pages[i++] = sg_dma_address(&chunk->page_list[j]) + mr->umem->page_size * k; /* * Be friendly to write_mtt and pass it chunks * of appropriate size. 
*/ if (i == write_mtt_size) { err = mthca_write_mtt(dev, mr->mtt, n, pages, i); if (err) goto mtt_done; n += i; i = 0; } } } if (i) err = mthca_write_mtt(dev, mr->mtt, n, pages, i); mtt_done: free_page((unsigned long) pages); if (err) goto err_mtt; err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length, convert_access(acc), mr); if (err) goto err_mtt; return &mr->ibmr; err_mtt: mthca_free_mtt(dev, mr->mtt); err_umem: ib_umem_release(mr->umem); err: kfree(mr); return ERR_PTR(err); } static int mthca_dereg_mr(struct ib_mr *mr) { struct mthca_mr *mmr = to_mmr(mr); mthca_free_mr(to_mdev(mr->device), mmr); if (mmr->umem) ib_umem_release(mmr->umem); kfree(mmr); return 0; } static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags, struct ib_fmr_attr *fmr_attr) { struct mthca_fmr *fmr; int err; fmr = kmalloc(sizeof *fmr, GFP_KERNEL); if (!fmr) return ERR_PTR(-ENOMEM); memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr); err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num, convert_access(mr_access_flags), fmr); if (err) { kfree(fmr); return ERR_PTR(err); } return &fmr->ibmr; } static int mthca_dealloc_fmr(struct ib_fmr *fmr) { struct mthca_fmr *mfmr = to_mfmr(fmr); int err; err = mthca_free_fmr(to_mdev(fmr->device), mfmr); if (err) return err; kfree(mfmr); return 0; } static int mthca_unmap_fmr(struct list_head *fmr_list) { struct ib_fmr *fmr; int err; struct mthca_dev *mdev = NULL; list_for_each_entry(fmr, fmr_list, list) { if (mdev && to_mdev(fmr->device) != mdev) return -EINVAL; mdev = to_mdev(fmr->device); } if (!mdev) return 0; if (mthca_is_memfree(mdev)) { list_for_each_entry(fmr, fmr_list, list) mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr)); wmb(); } else list_for_each_entry(fmr, fmr_list, list) mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr)); err = mthca_SYNC_TPT(mdev); return err; } static ssize_t show_rev(struct device *device, struct device_attribute *attr, char *buf) { struct mthca_dev *dev = container_of(device, struct mthca_dev, 
ib_dev.dev); return sprintf(buf, "%x\n", dev->rev_id); } static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, char *buf) { struct mthca_dev *dev = container_of(device, struct mthca_dev, ib_dev.dev); return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32), (int) (dev->fw_ver >> 16) & 0xffff, (int) dev->fw_ver & 0xffff); } static ssize_t show_hca(struct device *device, struct device_attribute *attr, char *buf) { struct mthca_dev *dev = container_of(device, struct mthca_dev, ib_dev.dev); switch (dev->pdev->device) { case PCI_DEVICE_ID_MELLANOX_TAVOR: return sprintf(buf, "MT23108\n"); case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT: return sprintf(buf, "MT25208 (MT23108 compat mode)\n"); case PCI_DEVICE_ID_MELLANOX_ARBEL: return sprintf(buf, "MT25208\n"); case PCI_DEVICE_ID_MELLANOX_SINAI: case PCI_DEVICE_ID_MELLANOX_SINAI_OLD: return sprintf(buf, "MT25204\n"); default: return sprintf(buf, "unknown\n"); } } static ssize_t show_board(struct device *device, struct device_attribute *attr, char *buf) { struct mthca_dev *dev = container_of(device, struct mthca_dev, ib_dev.dev); return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id); } static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); static struct device_attribute *mthca_dev_attributes[] = { &dev_attr_hw_rev, &dev_attr_fw_ver, &dev_attr_hca_type, &dev_attr_board_id }; static int mthca_init_node_data(struct mthca_dev *dev) { struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; int err = -ENOMEM; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) goto out; init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; err = mthca_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); if (err) goto out; memcpy(dev->ib_dev.node_desc, out_mad->data, 
64); in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; err = mthca_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); if (err) goto out; if (mthca_is_memfree(dev)) dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32)); memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); out: kfree(in_mad); kfree(out_mad); return err; } int mthca_register_device(struct mthca_dev *dev) { int ret; int i; ret = mthca_init_node_data(dev); if (ret) return ret; strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX); dev->ib_dev.owner = THIS_MODULE; dev->ib_dev.uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION; dev->ib_dev.uverbs_cmd_mask = (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | (1ull << IB_USER_VERBS_CMD_REG_MR) | (1ull << IB_USER_VERBS_CMD_DEREG_MR) | (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | (1ull << IB_USER_VERBS_CMD_CREATE_QP) | (1ull << IB_USER_VERBS_CMD_QUERY_QP) | (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | (1ull << IB_USER_VERBS_CMD_DETACH_MCAST); dev->ib_dev.node_type = RDMA_NODE_IB_CA; dev->ib_dev.phys_port_cnt = dev->limits.num_ports; dev->ib_dev.num_comp_vectors = 1; dev->ib_dev.dma_device = &dev->pdev->dev; dev->ib_dev.query_device = mthca_query_device; dev->ib_dev.query_port = mthca_query_port; dev->ib_dev.modify_device = mthca_modify_device; dev->ib_dev.modify_port = mthca_modify_port; dev->ib_dev.query_pkey = mthca_query_pkey; dev->ib_dev.query_gid = mthca_query_gid; dev->ib_dev.alloc_ucontext = mthca_alloc_ucontext; dev->ib_dev.dealloc_ucontext = mthca_dealloc_ucontext; dev->ib_dev.mmap = mthca_mmap_uar; dev->ib_dev.alloc_pd = mthca_alloc_pd; dev->ib_dev.dealloc_pd = mthca_dealloc_pd; dev->ib_dev.create_ah = 
mthca_ah_create; dev->ib_dev.query_ah = mthca_ah_query; dev->ib_dev.destroy_ah = mthca_ah_destroy; if (dev->mthca_flags & MTHCA_FLAG_SRQ) { dev->ib_dev.create_srq = mthca_create_srq; dev->ib_dev.modify_srq = mthca_modify_srq; dev->ib_dev.query_srq = mthca_query_srq; dev->ib_dev.destroy_srq = mthca_destroy_srq; dev->ib_dev.uverbs_cmd_mask |= (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ); if (mthca_is_memfree(dev)) dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv; else dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv; } dev->ib_dev.create_qp = mthca_create_qp; dev->ib_dev.modify_qp = mthca_modify_qp; dev->ib_dev.query_qp = mthca_query_qp; dev->ib_dev.destroy_qp = mthca_destroy_qp; dev->ib_dev.create_cq = mthca_create_cq; dev->ib_dev.resize_cq = mthca_resize_cq; dev->ib_dev.destroy_cq = mthca_destroy_cq; dev->ib_dev.poll_cq = mthca_poll_cq; dev->ib_dev.get_dma_mr = mthca_get_dma_mr; dev->ib_dev.reg_phys_mr = mthca_reg_phys_mr; dev->ib_dev.reg_user_mr = mthca_reg_user_mr; dev->ib_dev.dereg_mr = mthca_dereg_mr; if (dev->mthca_flags & MTHCA_FLAG_FMR) { dev->ib_dev.alloc_fmr = mthca_alloc_fmr; dev->ib_dev.unmap_fmr = mthca_unmap_fmr; dev->ib_dev.dealloc_fmr = mthca_dealloc_fmr; if (mthca_is_memfree(dev)) dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr; else dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr; } dev->ib_dev.attach_mcast = mthca_multicast_attach; dev->ib_dev.detach_mcast = mthca_multicast_detach; dev->ib_dev.process_mad = mthca_process_mad; if (mthca_is_memfree(dev)) { dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq; dev->ib_dev.post_send = mthca_arbel_post_send; dev->ib_dev.post_recv = mthca_arbel_post_receive; } else { dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq; dev->ib_dev.post_send = mthca_tavor_post_send; dev->ib_dev.post_recv = mthca_tavor_post_receive; } mutex_init(&dev->cap_mask_mutex); ret = 
ib_register_device(&dev->ib_dev, NULL); if (ret) return ret; for (i = 0; i < ARRAY_SIZE(mthca_dev_attributes); ++i) { ret = device_create_file(&dev->ib_dev.dev, mthca_dev_attributes[i]); if (ret) { ib_unregister_device(&dev->ib_dev); return ret; } } mthca_start_catas_poll(dev); return 0; } void mthca_unregister_device(struct mthca_dev *dev) { mthca_stop_catas_poll(dev); ib_unregister_device(&dev->ib_dev); }
gpl-2.0
stedman420/android_kernel_zte_hera
arch/arm/mach-msm/idle_stats_device.c
3397
11038
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/miscdevice.h> #include <linux/poll.h> #include <linux/uaccess.h> #include <linux/idle_stats_device.h> #include <linux/module.h> DEFINE_MUTEX(device_list_lock); LIST_HEAD(device_list); static ktime_t us_to_ktime(__u32 us) { return ns_to_ktime((u64)us * NSEC_PER_USEC); } static struct msm_idle_stats_device *_device_from_minor(unsigned int minor) { struct msm_idle_stats_device *device, *ret = NULL; mutex_lock(&device_list_lock); list_for_each_entry(device, &device_list, list) { if (minor == device->miscdev.minor) { ret = device; break; } } mutex_unlock(&device_list_lock); return ret; } void msm_idle_stats_update_event(struct msm_idle_stats_device *device, __u32 event) { __u32 wake_up = !device->stats->event; device->stats->event |= event; if (wake_up) wake_up_interruptible(&device->wait); } EXPORT_SYMBOL(msm_idle_stats_update_event); static enum hrtimer_restart msm_idle_stats_busy_timer(struct hrtimer *timer) { struct msm_idle_stats_device *device = container_of(timer, struct msm_idle_stats_device, busy_timer); /* This is the only case that the event is modified without a device * lock. However, since the timer is cancelled in the other cases we are * assured that we have exclusive access to the event at this time. 
*/ hrtimer_set_expires(&device->busy_timer, us_to_ktime(0)); msm_idle_stats_update_event(device, MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED); return HRTIMER_NORESTART; } static void start_busy_timer(struct msm_idle_stats_device *device, ktime_t relative_time) { hrtimer_cancel(&device->busy_timer); hrtimer_set_expires(&device->busy_timer, us_to_ktime(0)); if (!((device->stats->event & MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED) || (device->stats->event & MSM_IDLE_STATS_EVENT_COLLECTION_FULL))) { if (ktime_to_us(relative_time) > 0) { hrtimer_start(&device->busy_timer, relative_time, HRTIMER_MODE_REL); } } } static unsigned int msm_idle_stats_device_poll(struct file *file, poll_table *wait) { struct msm_idle_stats_device *device = file->private_data; unsigned int mask = 0; poll_wait(file, &device->wait, wait); if (device->stats->event) mask = POLLIN | POLLRDNORM; return mask; } static void msm_idle_stats_add_sample(struct msm_idle_stats_device *device, struct msm_idle_pulse *pulse) { hrtimer_cancel(&device->busy_timer); hrtimer_set_expires(&device->busy_timer, us_to_ktime(0)); if (device->stats->nr_collected >= MSM_IDLE_STATS_NR_MAX_INTERVALS) { pr_warning("idle_stats_device: Overwriting samples\n"); device->stats->nr_collected = 0; } device->stats->pulse_chain[device->stats->nr_collected] = *pulse; device->stats->nr_collected++; if (device->stats->nr_collected == device->max_samples) { msm_idle_stats_update_event(device, MSM_IDLE_STATS_EVENT_COLLECTION_FULL); } else if (device->stats->nr_collected == ((device->max_samples * 3) / 4)) { msm_idle_stats_update_event(device, MSM_IDLE_STATS_EVENT_COLLECTION_NEARLY_FULL); } } static long ioctl_read_stats(struct msm_idle_stats_device *device, unsigned long arg) { int remaining; int requested; struct msm_idle_pulse pulse; struct msm_idle_read_stats *stats; __s64 remaining_time = ktime_to_us(hrtimer_get_remaining(&device->busy_timer)); device->get_sample(device, &pulse); spin_lock(&device->lock); hrtimer_cancel(&device->busy_timer); 
stats = device->stats; if (stats == &device->stats_vector[0]) device->stats = &device->stats_vector[1]; else device->stats = &device->stats_vector[0]; device->stats->event = 0; device->stats->nr_collected = 0; spin_unlock(&device->lock); if (stats->nr_collected >= device->max_samples) { stats->nr_collected = device->max_samples; } else { stats->pulse_chain[stats->nr_collected] = pulse; stats->nr_collected++; if (stats->nr_collected == device->max_samples) stats->event |= MSM_IDLE_STATS_EVENT_COLLECTION_FULL; else if (stats->nr_collected == ((device->max_samples * 3) / 4)) stats->event |= MSM_IDLE_STATS_EVENT_COLLECTION_NEARLY_FULL; } if (remaining_time < 0) { stats->busy_timer_remaining = 0; } else { stats->busy_timer_remaining = remaining_time; if ((__s64)stats->busy_timer_remaining != remaining_time) stats->busy_timer_remaining = -1; } stats->return_timestamp = ktime_to_us(ktime_get()); requested = ((sizeof(*stats) - sizeof(stats->pulse_chain)) + (sizeof(stats->pulse_chain[0]) * stats->nr_collected)); remaining = copy_to_user((void __user *)arg, stats, requested); if (remaining > 0) return -EFAULT; return 0; } static long ioctl_write_stats(struct msm_idle_stats_device *device, unsigned long arg) { struct msm_idle_write_stats stats; int remaining; int ret = 0; remaining = copy_from_user(&stats, (void __user *) arg, sizeof(stats)); if (remaining > 0) { ret = -EFAULT; } else { spin_lock(&device->lock); device->busy_timer_interval = us_to_ktime(stats.next_busy_timer); if (ktime_to_us(device->idle_start) == 0) start_busy_timer(device, us_to_ktime(stats.busy_timer)); if ((stats.max_samples > 0) && (stats.max_samples <= MSM_IDLE_STATS_NR_MAX_INTERVALS)) device->max_samples = stats.max_samples; spin_unlock(&device->lock); } return ret; } void msm_idle_stats_prepare_idle_start(struct msm_idle_stats_device *device) { spin_lock(&device->lock); hrtimer_cancel(&device->busy_timer); spin_unlock(&device->lock); } EXPORT_SYMBOL(msm_idle_stats_prepare_idle_start); void 
msm_idle_stats_abort_idle_start(struct msm_idle_stats_device *device) { spin_lock(&device->lock); if (ktime_to_us(hrtimer_get_expires(&device->busy_timer)) > 0) hrtimer_restart(&device->busy_timer); spin_unlock(&device->lock); } EXPORT_SYMBOL(msm_idle_stats_abort_idle_start); void msm_idle_stats_idle_start(struct msm_idle_stats_device *device) { spin_lock(&device->lock); hrtimer_cancel(&device->busy_timer); device->idle_start = ktime_get(); if (ktime_to_us(hrtimer_get_expires(&device->busy_timer)) > 0) { device->remaining_time = hrtimer_get_remaining(&device->busy_timer); if (ktime_to_us(device->remaining_time) <= 0) device->remaining_time = us_to_ktime(0); } else { device->remaining_time = us_to_ktime(0); } spin_unlock(&device->lock); } EXPORT_SYMBOL(msm_idle_stats_idle_start); void msm_idle_stats_idle_end(struct msm_idle_stats_device *device, struct msm_idle_pulse *pulse) { int tmp; u32 idle_time = 0; spin_lock(&device->lock); if (ktime_to_us(device->idle_start) != 0) { idle_time = ktime_to_us(ktime_get()) - ktime_to_us(device->idle_start); device->idle_start = us_to_ktime(0); msm_idle_stats_add_sample(device, pulse); if (device->stats->event & MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED) { device->stats->event &= ~MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED; msm_idle_stats_update_event(device, MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED_RESET); } else if (ktime_to_us(device->busy_timer_interval) > 0) { ktime_t busy_timer = device->busy_timer_interval; /* if it is serialized, it would be full busy, * checking 80% */ if ((pulse->wait_interval*5 >= idle_time*4) && (ktime_to_us(device->remaining_time) > 0) && (ktime_to_us(device->remaining_time) < ktime_to_us(busy_timer))) busy_timer = device->remaining_time; start_busy_timer(device, busy_timer); /* If previous busy interval exceeds the current submit, * raise a busy timer expired event intentionally. 
*/ tmp = device->stats->nr_collected - 1; if (tmp > 0) { if ((device->stats->pulse_chain[tmp - 1].busy_start_time + device->stats->pulse_chain[tmp - 1].busy_interval) > device->stats->pulse_chain[tmp].busy_start_time) msm_idle_stats_update_event(device, MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED); } } } spin_unlock(&device->lock); } EXPORT_SYMBOL(msm_idle_stats_idle_end); static long msm_idle_stats_device_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct msm_idle_stats_device *device = file->private_data; int ret; switch (cmd) { case MSM_IDLE_STATS_IOC_READ_STATS: ret = ioctl_read_stats(device, arg); break; case MSM_IDLE_STATS_IOC_WRITE_STATS: ret = ioctl_write_stats(device, arg); break; default: ret = -EINVAL; } return ret; } static int msm_idle_stats_device_release (struct inode *inode, struct file *filep) { return 0; } static int msm_idle_stats_device_open(struct inode *inode, struct file *filep) { struct msm_idle_stats_device *device; device = _device_from_minor(iminor(inode)); if (device == NULL) return -EPERM; filep->private_data = device; return 0; } static const struct file_operations msm_idle_stats_fops = { .open = msm_idle_stats_device_open, .release = msm_idle_stats_device_release, .unlocked_ioctl = msm_idle_stats_device_ioctl, .poll = msm_idle_stats_device_poll, }; int msm_idle_stats_register_device(struct msm_idle_stats_device *device) { int ret = -ENOMEM; spin_lock_init(&device->lock); init_waitqueue_head(&device->wait); hrtimer_init(&device->busy_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); device->busy_timer.function = msm_idle_stats_busy_timer; device->stats_vector[0].event = 0; device->stats_vector[0].nr_collected = 0; device->stats_vector[1].event = 0; device->stats_vector[1].nr_collected = 0; device->stats = &device->stats_vector[0]; device->busy_timer_interval = us_to_ktime(0); device->max_samples = MSM_IDLE_STATS_NR_MAX_INTERVALS; mutex_lock(&device_list_lock); list_add(&device->list, &device_list); 
	mutex_unlock(&device_list_lock);

	/* Expose the device as a dynamically-numbered misc char device,
	 * named after the caller-supplied device->name.
	 */
	device->miscdev.minor = MISC_DYNAMIC_MINOR;
	device->miscdev.name = device->name;
	device->miscdev.fops = &msm_idle_stats_fops;
	ret = misc_register(&device->miscdev);
	if (ret)
		goto err_list;

	return ret;

err_list:
	/* Undo the earlier list_add() so a failed registration leaves no
	 * stale entry on the global device list.
	 */
	mutex_lock(&device_list_lock);
	list_del(&device->list);
	mutex_unlock(&device_list_lock);
	return ret;
}
EXPORT_SYMBOL(msm_idle_stats_register_device);

/*
 * Tear down a previously registered idle-stats device: cancel its busy
 * timer, unlink it from the global device list and remove the misc char
 * device.  Safe to call with NULL (returns 0).
 *
 * NOTE(review): hrtimer_cancel() can busy-wait for a running callback
 * while device->lock is held here — presumably safe because the timer
 * callback does not take device->lock, but confirm against the callback
 * implementation above.
 */
int msm_idle_stats_deregister_device(struct msm_idle_stats_device *device)
{
	if (device == NULL)
		return 0;

	mutex_lock(&device_list_lock);
	spin_lock(&device->lock);
	hrtimer_cancel(&device->busy_timer);
	list_del(&device->list);
	spin_unlock(&device->lock);
	mutex_unlock(&device_list_lock);

	return misc_deregister(&device->miscdev);
}
EXPORT_SYMBOL(msm_idle_stats_deregister_device);
gpl-2.0
MattCrystal/glowing-happiness
drivers/spi/spi-omap-uwire.c
4933
13572
/* * MicroWire interface driver for OMAP * * Copyright 2003 MontaVista Software Inc. <source@mvista.com> * * Ported to 2.6 OMAP uwire interface. * Copyright (C) 2004 Texas Instruments. * * Generalization patches by Juha Yrjola <juha.yrjola@nokia.com> * * Copyright (C) 2005 David Brownell (ported to 2.6 SPI interface) * Copyright (C) 2006 Nokia * * Many updates by Imre Deak <imre.deak@nokia.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <linux/module.h> #include <asm/irq.h> #include <mach/hardware.h> #include <asm/io.h> #include <asm/mach-types.h> #include <plat/mux.h> #include <plat/omap7xx.h> /* OMAP7XX_IO_CONF registers */ /* FIXME address is now a platform device resource, * and irqs should show there too... */ #define UWIRE_BASE_PHYS 0xFFFB3000 /* uWire Registers: */ #define UWIRE_IO_SIZE 0x20 #define UWIRE_TDR 0x00 #define UWIRE_RDR 0x00 #define UWIRE_CSR 0x01 #define UWIRE_SR1 0x02 #define UWIRE_SR2 0x03 #define UWIRE_SR3 0x04 #define UWIRE_SR4 0x05 #define UWIRE_SR5 0x06 /* CSR bits */ #define RDRB (1 << 15) #define CSRB (1 << 14) #define START (1 << 13) #define CS_CMD (1 << 12) /* SR1 or SR2 bits */ #define UWIRE_READ_FALLING_EDGE 0x0001 #define UWIRE_READ_RISING_EDGE 0x0000 #define UWIRE_WRITE_FALLING_EDGE 0x0000 #define UWIRE_WRITE_RISING_EDGE 0x0002 #define UWIRE_CS_ACTIVE_LOW 0x0000 #define UWIRE_CS_ACTIVE_HIGH 0x0004 #define UWIRE_FREQ_DIV_2 0x0000 #define UWIRE_FREQ_DIV_4 0x0008 #define UWIRE_FREQ_DIV_8 0x0010 #define UWIRE_CHK_READY 0x0020 #define UWIRE_CLK_INVERTED 0x0040 struct uwire_spi { struct spi_bitbang bitbang; struct clk *ck; }; struct uwire_state { unsigned bits_per_word; unsigned div1_idx; }; /* REVISIT compile time constant for idx_shift? */ /* * Or, put it in a structure which is used throughout the driver; * that avoids having to issue two loads for each bit of static data. 
*/ static unsigned int uwire_idx_shift; static void __iomem *uwire_base; static inline void uwire_write_reg(int idx, u16 val) { __raw_writew(val, uwire_base + (idx << uwire_idx_shift)); } static inline u16 uwire_read_reg(int idx) { return __raw_readw(uwire_base + (idx << uwire_idx_shift)); } static inline void omap_uwire_configure_mode(u8 cs, unsigned long flags) { u16 w, val = 0; int shift, reg; if (flags & UWIRE_CLK_INVERTED) val ^= 0x03; val = flags & 0x3f; if (cs & 1) shift = 6; else shift = 0; if (cs <= 1) reg = UWIRE_SR1; else reg = UWIRE_SR2; w = uwire_read_reg(reg); w &= ~(0x3f << shift); w |= val << shift; uwire_write_reg(reg, w); } static int wait_uwire_csr_flag(u16 mask, u16 val, int might_not_catch) { u16 w; int c = 0; unsigned long max_jiffies = jiffies + HZ; for (;;) { w = uwire_read_reg(UWIRE_CSR); if ((w & mask) == val) break; if (time_after(jiffies, max_jiffies)) { printk(KERN_ERR "%s: timeout. reg=%#06x " "mask=%#06x val=%#06x\n", __func__, w, mask, val); return -1; } c++; if (might_not_catch && c > 64) break; } return 0; } static void uwire_set_clk1_div(int div1_idx) { u16 w; w = uwire_read_reg(UWIRE_SR3); w &= ~(0x03 << 1); w |= div1_idx << 1; uwire_write_reg(UWIRE_SR3, w); } static void uwire_chipselect(struct spi_device *spi, int value) { struct uwire_state *ust = spi->controller_state; u16 w; int old_cs; BUG_ON(wait_uwire_csr_flag(CSRB, 0, 0)); w = uwire_read_reg(UWIRE_CSR); old_cs = (w >> 10) & 0x03; if (value == BITBANG_CS_INACTIVE || old_cs != spi->chip_select) { /* Deselect this CS, or the previous CS */ w &= ~CS_CMD; uwire_write_reg(UWIRE_CSR, w); } /* activate specfied chipselect */ if (value == BITBANG_CS_ACTIVE) { uwire_set_clk1_div(ust->div1_idx); /* invert clock? 
*/ if (spi->mode & SPI_CPOL) uwire_write_reg(UWIRE_SR4, 1); else uwire_write_reg(UWIRE_SR4, 0); w = spi->chip_select << 10; w |= CS_CMD; uwire_write_reg(UWIRE_CSR, w); } } static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t) { struct uwire_state *ust = spi->controller_state; unsigned len = t->len; unsigned bits = ust->bits_per_word; unsigned bytes; u16 val, w; int status = 0; if (!t->tx_buf && !t->rx_buf) return 0; /* Microwire doesn't read and write concurrently */ if (t->tx_buf && t->rx_buf) return -EPERM; w = spi->chip_select << 10; w |= CS_CMD; if (t->tx_buf) { const u8 *buf = t->tx_buf; /* NOTE: DMA could be used for TX transfers */ /* write one or two bytes at a time */ while (len >= 1) { /* tx bit 15 is first sent; we byteswap multibyte words * (msb-first) on the way out from memory. */ val = *buf++; if (bits > 8) { bytes = 2; val |= *buf++ << 8; } else bytes = 1; val <<= 16 - bits; #ifdef VERBOSE pr_debug("%s: write-%d =%04x\n", dev_name(&spi->dev), bits, val); #endif if (wait_uwire_csr_flag(CSRB, 0, 0)) goto eio; uwire_write_reg(UWIRE_TDR, val); /* start write */ val = START | w | (bits << 5); uwire_write_reg(UWIRE_CSR, val); len -= bytes; /* Wait till write actually starts. * This is needed with MPU clock 60+ MHz. * REVISIT: we may not have time to catch it... */ if (wait_uwire_csr_flag(CSRB, CSRB, 1)) goto eio; status += bytes; } /* REVISIT: save this for later to get more i/o overlap */ if (wait_uwire_csr_flag(CSRB, 0, 0)) goto eio; } else if (t->rx_buf) { u8 *buf = t->rx_buf; /* read one or two bytes at a time */ while (len) { if (bits > 8) { bytes = 2; } else bytes = 1; /* start read */ val = START | w | (bits << 0); uwire_write_reg(UWIRE_CSR, val); len -= bytes; /* Wait till read actually starts */ (void) wait_uwire_csr_flag(CSRB, CSRB, 1); if (wait_uwire_csr_flag(RDRB | CSRB, RDRB, 0)) goto eio; /* rx bit 0 is last received; multibyte words will * be properly byteswapped on the way to memory. 
*/ val = uwire_read_reg(UWIRE_RDR); val &= (1 << bits) - 1; *buf++ = (u8) val; if (bytes == 2) *buf++ = val >> 8; status += bytes; #ifdef VERBOSE pr_debug("%s: read-%d =%04x\n", dev_name(&spi->dev), bits, val); #endif } } return status; eio: return -EIO; } static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t) { struct uwire_state *ust = spi->controller_state; struct uwire_spi *uwire; unsigned flags = 0; unsigned bits; unsigned hz; unsigned long rate; int div1_idx; int div1; int div2; int status; uwire = spi_master_get_devdata(spi->master); if (spi->chip_select > 3) { pr_debug("%s: cs%d?\n", dev_name(&spi->dev), spi->chip_select); status = -ENODEV; goto done; } bits = spi->bits_per_word; if (t != NULL && t->bits_per_word) bits = t->bits_per_word; if (bits > 16) { pr_debug("%s: wordsize %d?\n", dev_name(&spi->dev), bits); status = -ENODEV; goto done; } ust->bits_per_word = bits; /* mode 0..3, clock inverted separately; * standard nCS signaling; * don't treat DI=high as "not ready" */ if (spi->mode & SPI_CS_HIGH) flags |= UWIRE_CS_ACTIVE_HIGH; if (spi->mode & SPI_CPOL) flags |= UWIRE_CLK_INVERTED; switch (spi->mode & (SPI_CPOL | SPI_CPHA)) { case SPI_MODE_0: case SPI_MODE_3: flags |= UWIRE_WRITE_FALLING_EDGE | UWIRE_READ_RISING_EDGE; break; case SPI_MODE_1: case SPI_MODE_2: flags |= UWIRE_WRITE_RISING_EDGE | UWIRE_READ_FALLING_EDGE; break; } /* assume it's already enabled */ rate = clk_get_rate(uwire->ck); hz = spi->max_speed_hz; if (t != NULL && t->speed_hz) hz = t->speed_hz; if (!hz) { pr_debug("%s: zero speed?\n", dev_name(&spi->dev)); status = -EINVAL; goto done; } /* F_INT = mpu_xor_clk / DIV1 */ for (div1_idx = 0; div1_idx < 4; div1_idx++) { switch (div1_idx) { case 0: div1 = 2; break; case 1: div1 = 4; break; case 2: div1 = 7; break; default: case 3: div1 = 10; break; } div2 = (rate / div1 + hz - 1) / hz; if (div2 <= 8) break; } if (div1_idx == 4) { pr_debug("%s: lowest clock %ld, need %d\n", dev_name(&spi->dev), rate / 10 / 8, hz); 
status = -EDOM; goto done; } /* we have to cache this and reset in uwire_chipselect as this is a * global parameter and another uwire device can change it under * us */ ust->div1_idx = div1_idx; uwire_set_clk1_div(div1_idx); rate /= div1; switch (div2) { case 0: case 1: case 2: flags |= UWIRE_FREQ_DIV_2; rate /= 2; break; case 3: case 4: flags |= UWIRE_FREQ_DIV_4; rate /= 4; break; case 5: case 6: case 7: case 8: flags |= UWIRE_FREQ_DIV_8; rate /= 8; break; } omap_uwire_configure_mode(spi->chip_select, flags); pr_debug("%s: uwire flags %02x, armxor %lu KHz, SCK %lu KHz\n", __func__, flags, clk_get_rate(uwire->ck) / 1000, rate / 1000); status = 0; done: return status; } static int uwire_setup(struct spi_device *spi) { struct uwire_state *ust = spi->controller_state; if (ust == NULL) { ust = kzalloc(sizeof(*ust), GFP_KERNEL); if (ust == NULL) return -ENOMEM; spi->controller_state = ust; } return uwire_setup_transfer(spi, NULL); } static void uwire_cleanup(struct spi_device *spi) { kfree(spi->controller_state); } static void uwire_off(struct uwire_spi *uwire) { uwire_write_reg(UWIRE_SR3, 0); clk_disable(uwire->ck); clk_put(uwire->ck); spi_master_put(uwire->bitbang.master); } static int __init uwire_probe(struct platform_device *pdev) { struct spi_master *master; struct uwire_spi *uwire; int status; master = spi_alloc_master(&pdev->dev, sizeof *uwire); if (!master) return -ENODEV; uwire = spi_master_get_devdata(master); uwire_base = ioremap(UWIRE_BASE_PHYS, UWIRE_IO_SIZE); if (!uwire_base) { dev_dbg(&pdev->dev, "can't ioremap UWIRE\n"); spi_master_put(master); return -ENOMEM; } dev_set_drvdata(&pdev->dev, uwire); uwire->ck = clk_get(&pdev->dev, "fck"); if (IS_ERR(uwire->ck)) { status = PTR_ERR(uwire->ck); dev_dbg(&pdev->dev, "no functional clock?\n"); spi_master_put(master); return status; } clk_enable(uwire->ck); if (cpu_is_omap7xx()) uwire_idx_shift = 1; else uwire_idx_shift = 2; uwire_write_reg(UWIRE_SR3, 1); /* the spi->mode bits understood by this driver: */ 
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	/* uWire is half-duplex: a transfer is either all-TX or all-RX */
	master->flags = SPI_MASTER_HALF_DUPLEX;

	master->bus_num = 2;	/* "official" */
	master->num_chipselect = 4;
	master->setup = uwire_setup;
	master->cleanup = uwire_cleanup;

	uwire->bitbang.master = master;
	uwire->bitbang.chipselect = uwire_chipselect;
	uwire->bitbang.setup_transfer = uwire_setup_transfer;
	uwire->bitbang.txrx_bufs = uwire_txrx;

	/* register the master; on failure undo the clock/ioremap setup */
	status = spi_bitbang_start(&uwire->bitbang);
	if (status < 0) {
		uwire_off(uwire);
		iounmap(uwire_base);
	}
	return status;
}

/* Platform driver removal: stop the bitbang worker, release the clock
 * and master reference (via uwire_off), and unmap the register window.
 */
static int __exit uwire_remove(struct platform_device *pdev)
{
	struct uwire_spi *uwire = dev_get_drvdata(&pdev->dev);
	int status;

	// FIXME remove all child devices, somewhere ...

	status = spi_bitbang_stop(&uwire->bitbang);
	uwire_off(uwire);
	iounmap(uwire_base);
	return status;
}

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:omap_uwire");

/* No .probe here: probe() lives in __init text, so it is passed to
 * platform_driver_probe() below instead.
 */
static struct platform_driver uwire_driver = {
	.driver = {
		.name = "omap_uwire",
		.owner = THIS_MODULE,
	},
	.remove = __exit_p(uwire_remove),
	// suspend ... unuse ck
	// resume ... use ck
};

static int __init omap_uwire_init(void)
{
	/* FIXME move these into the relevant board init code. also, include
	 * H3 support; it uses tsc2101 like H2 (on a different chipselect).
	 */
	if (machine_is_omap_h2()) {
		/* defaults: W21 SDO, U18 SDI, V19 SCL */
		omap_cfg_reg(N14_1610_UWIRE_CS0);
		omap_cfg_reg(N15_1610_UWIRE_CS1);
	}
	if (machine_is_omap_perseus2()) {
		/* configure pins: MPU_UW_nSCS1, MPU_UW_SDO, MPU_UW_SCLK */
		int val = omap_readl(OMAP7XX_IO_CONF_9) & ~0x00EEE000;
		omap_writel(val | 0x00AAA000, OMAP7XX_IO_CONF_9);
	}

	return platform_driver_probe(&uwire_driver, uwire_probe);
}

static void __exit omap_uwire_exit(void)
{
	platform_driver_unregister(&uwire_driver);
}

subsys_initcall(omap_uwire_init);
module_exit(omap_uwire_exit);

MODULE_LICENSE("GPL");
gpl-2.0
p12tic/tf700-kernel
sound/pci/mixart/mixart_hwdep.c
5189
19600
/* * Driver for Digigram miXart soundcards * * DSP firmware management * * Copyright (c) 2003 by Digigram <alsa@digigram.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/firmware.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/module.h> #include <asm/io.h> #include <sound/core.h> #include "mixart.h" #include "mixart_mixer.h" #include "mixart_core.h" #include "mixart_hwdep.h" /** * wait for a value on a peudo register, exit with a timeout * * @param mgr pointer to miXart manager structure * @param offset unsigned pseudo_register base + offset of value * @param value value * @param timeout timeout in centisenconds */ static int mixart_wait_nice_for_register_value(struct mixart_mgr *mgr, u32 offset, int is_egal, u32 value, unsigned long timeout) { unsigned long end_time = jiffies + (timeout * HZ / 100); u32 read; do { /* we may take too long time in this loop. * so give controls back to kernel if needed. 
*/ cond_resched(); read = readl_be( MIXART_MEM( mgr, offset )); if(is_egal) { if(read == value) return 0; } else { /* wait for different value */ if(read != value) return 0; } } while ( time_after_eq(end_time, jiffies) ); return -EBUSY; } /* structures needed to upload elf code packets */ struct snd_mixart_elf32_ehdr { u8 e_ident[16]; u16 e_type; u16 e_machine; u32 e_version; u32 e_entry; u32 e_phoff; u32 e_shoff; u32 e_flags; u16 e_ehsize; u16 e_phentsize; u16 e_phnum; u16 e_shentsize; u16 e_shnum; u16 e_shstrndx; }; struct snd_mixart_elf32_phdr { u32 p_type; u32 p_offset; u32 p_vaddr; u32 p_paddr; u32 p_filesz; u32 p_memsz; u32 p_flags; u32 p_align; }; static int mixart_load_elf(struct mixart_mgr *mgr, const struct firmware *dsp ) { char elf32_magic_number[4] = {0x7f,'E','L','F'}; struct snd_mixart_elf32_ehdr *elf_header; int i; elf_header = (struct snd_mixart_elf32_ehdr *)dsp->data; for( i=0; i<4; i++ ) if ( elf32_magic_number[i] != elf_header->e_ident[i] ) return -EINVAL; if( elf_header->e_phoff != 0 ) { struct snd_mixart_elf32_phdr elf_programheader; for( i=0; i < be16_to_cpu(elf_header->e_phnum); i++ ) { u32 pos = be32_to_cpu(elf_header->e_phoff) + (u32)(i * be16_to_cpu(elf_header->e_phentsize)); memcpy( &elf_programheader, dsp->data + pos, sizeof(elf_programheader) ); if(elf_programheader.p_type != 0) { if( elf_programheader.p_filesz != 0 ) { memcpy_toio( MIXART_MEM( mgr, be32_to_cpu(elf_programheader.p_vaddr)), dsp->data + be32_to_cpu( elf_programheader.p_offset ), be32_to_cpu( elf_programheader.p_filesz )); } } } } return 0; } /* * get basic information and init miXart */ /* audio IDs for request to the board */ #define MIXART_FIRST_ANA_AUDIO_ID 0 #define MIXART_FIRST_DIG_AUDIO_ID 8 static int mixart_enum_connectors(struct mixart_mgr *mgr) { u32 k; int err; struct mixart_msg request; struct mixart_enum_connector_resp *connector; struct mixart_audio_info_req *audio_info_req; struct mixart_audio_info_resp *audio_info; connector = kmalloc(sizeof(*connector), 
GFP_KERNEL); audio_info_req = kmalloc(sizeof(*audio_info_req), GFP_KERNEL); audio_info = kmalloc(sizeof(*audio_info), GFP_KERNEL); if (! connector || ! audio_info_req || ! audio_info) { err = -ENOMEM; goto __error; } audio_info_req->line_max_level = MIXART_FLOAT_P_22_0_TO_HEX; audio_info_req->micro_max_level = MIXART_FLOAT_M_20_0_TO_HEX; audio_info_req->cd_max_level = MIXART_FLOAT____0_0_TO_HEX; request.message_id = MSG_SYSTEM_ENUM_PLAY_CONNECTOR; request.uid = (struct mixart_uid){0,0}; /* board num = 0 */ request.data = NULL; request.size = 0; err = snd_mixart_send_msg(mgr, &request, sizeof(*connector), connector); if((err < 0) || (connector->error_code) || (connector->uid_count > MIXART_MAX_PHYS_CONNECTORS)) { snd_printk(KERN_ERR "error MSG_SYSTEM_ENUM_PLAY_CONNECTOR\n"); err = -EINVAL; goto __error; } for(k=0; k < connector->uid_count; k++) { struct mixart_pipe *pipe; if(k < MIXART_FIRST_DIG_AUDIO_ID) { pipe = &mgr->chip[k/2]->pipe_out_ana; } else { pipe = &mgr->chip[(k-MIXART_FIRST_DIG_AUDIO_ID)/2]->pipe_out_dig; } if(k & 1) { pipe->uid_right_connector = connector->uid[k]; /* odd */ } else { pipe->uid_left_connector = connector->uid[k]; /* even */ } /* snd_printk(KERN_DEBUG "playback connector[%d].object_id = %x\n", k, connector->uid[k].object_id); */ /* TODO: really need send_msg MSG_CONNECTOR_GET_AUDIO_INFO for each connector ? perhaps for analog level caps ? 
*/ request.message_id = MSG_CONNECTOR_GET_AUDIO_INFO; request.uid = connector->uid[k]; request.data = audio_info_req; request.size = sizeof(*audio_info_req); err = snd_mixart_send_msg(mgr, &request, sizeof(*audio_info), audio_info); if( err < 0 ) { snd_printk(KERN_ERR "error MSG_CONNECTOR_GET_AUDIO_INFO\n"); goto __error; } /*snd_printk(KERN_DEBUG "play analog_info.analog_level_present = %x\n", audio_info->info.analog_info.analog_level_present);*/ } request.message_id = MSG_SYSTEM_ENUM_RECORD_CONNECTOR; request.uid = (struct mixart_uid){0,0}; /* board num = 0 */ request.data = NULL; request.size = 0; err = snd_mixart_send_msg(mgr, &request, sizeof(*connector), connector); if((err < 0) || (connector->error_code) || (connector->uid_count > MIXART_MAX_PHYS_CONNECTORS)) { snd_printk(KERN_ERR "error MSG_SYSTEM_ENUM_RECORD_CONNECTOR\n"); err = -EINVAL; goto __error; } for(k=0; k < connector->uid_count; k++) { struct mixart_pipe *pipe; if(k < MIXART_FIRST_DIG_AUDIO_ID) { pipe = &mgr->chip[k/2]->pipe_in_ana; } else { pipe = &mgr->chip[(k-MIXART_FIRST_DIG_AUDIO_ID)/2]->pipe_in_dig; } if(k & 1) { pipe->uid_right_connector = connector->uid[k]; /* odd */ } else { pipe->uid_left_connector = connector->uid[k]; /* even */ } /* snd_printk(KERN_DEBUG "capture connector[%d].object_id = %x\n", k, connector->uid[k].object_id); */ /* TODO: really need send_msg MSG_CONNECTOR_GET_AUDIO_INFO for each connector ? perhaps for analog level caps ? 
*/ request.message_id = MSG_CONNECTOR_GET_AUDIO_INFO; request.uid = connector->uid[k]; request.data = audio_info_req; request.size = sizeof(*audio_info_req); err = snd_mixart_send_msg(mgr, &request, sizeof(*audio_info), audio_info); if( err < 0 ) { snd_printk(KERN_ERR "error MSG_CONNECTOR_GET_AUDIO_INFO\n"); goto __error; } /*snd_printk(KERN_DEBUG "rec analog_info.analog_level_present = %x\n", audio_info->info.analog_info.analog_level_present);*/ } err = 0; __error: kfree(connector); kfree(audio_info_req); kfree(audio_info); return err; } static int mixart_enum_physio(struct mixart_mgr *mgr) { u32 k; int err; struct mixart_msg request; struct mixart_uid get_console_mgr; struct mixart_return_uid console_mgr; struct mixart_uid_enumeration phys_io; /* get the uid for the console manager */ get_console_mgr.object_id = 0; get_console_mgr.desc = MSG_CONSOLE_MANAGER | 0; /* cardindex = 0 */ request.message_id = MSG_CONSOLE_GET_CLOCK_UID; request.uid = get_console_mgr; request.data = &get_console_mgr; request.size = sizeof(get_console_mgr); err = snd_mixart_send_msg(mgr, &request, sizeof(console_mgr), &console_mgr); if( (err < 0) || (console_mgr.error_code != 0) ) { snd_printk(KERN_DEBUG "error MSG_CONSOLE_GET_CLOCK_UID : err=%x\n", console_mgr.error_code); return -EINVAL; } /* used later for clock issues ! 
*/ mgr->uid_console_manager = console_mgr.uid; request.message_id = MSG_SYSTEM_ENUM_PHYSICAL_IO; request.uid = (struct mixart_uid){0,0}; request.data = &console_mgr.uid; request.size = sizeof(console_mgr.uid); err = snd_mixart_send_msg(mgr, &request, sizeof(phys_io), &phys_io); if( (err < 0) || ( phys_io.error_code != 0 ) ) { snd_printk(KERN_ERR "error MSG_SYSTEM_ENUM_PHYSICAL_IO err(%x) error_code(%x)\n", err, phys_io.error_code ); return -EINVAL; } /* min 2 phys io per card (analog in + analog out) */ if (phys_io.nb_uid < MIXART_MAX_CARDS * 2) return -EINVAL; for(k=0; k<mgr->num_cards; k++) { mgr->chip[k]->uid_in_analog_physio = phys_io.uid[k]; mgr->chip[k]->uid_out_analog_physio = phys_io.uid[phys_io.nb_uid/2 + k]; } return 0; } static int mixart_first_init(struct mixart_mgr *mgr) { u32 k; int err; struct mixart_msg request; if((err = mixart_enum_connectors(mgr)) < 0) return err; if((err = mixart_enum_physio(mgr)) < 0) return err; /* send a synchro command to card (necessary to do this before first MSG_STREAM_START_STREAM_GRP_PACKET) */ /* though why not here */ request.message_id = MSG_SYSTEM_SEND_SYNCHRO_CMD; request.uid = (struct mixart_uid){0,0}; request.data = NULL; request.size = 0; /* this command has no data. response is a 32 bit status */ err = snd_mixart_send_msg(mgr, &request, sizeof(k), &k); if( (err < 0) || (k != 0) ) { snd_printk(KERN_ERR "error MSG_SYSTEM_SEND_SYNCHRO_CMD\n"); return err == 0 ? 
-EINVAL : err; } return 0; } /* firmware base addresses (when hard coded) */ #define MIXART_MOTHERBOARD_XLX_BASE_ADDRESS 0x00600000 static int mixart_dsp_load(struct mixart_mgr* mgr, int index, const struct firmware *dsp) { int err, card_index; u32 status_xilinx, status_elf, status_daught; u32 val; /* read motherboard xilinx status */ status_xilinx = readl_be( MIXART_MEM( mgr,MIXART_PSEUDOREG_MXLX_STATUS_OFFSET )); /* read elf status */ status_elf = readl_be( MIXART_MEM( mgr,MIXART_PSEUDOREG_ELF_STATUS_OFFSET )); /* read daughterboard xilinx status */ status_daught = readl_be( MIXART_MEM( mgr,MIXART_PSEUDOREG_DXLX_STATUS_OFFSET )); /* motherboard xilinx status 5 will say that the board is performing a reset */ if (status_xilinx == 5) { snd_printk(KERN_ERR "miXart is resetting !\n"); return -EAGAIN; /* try again later */ } switch (index) { case MIXART_MOTHERBOARD_XLX_INDEX: /* xilinx already loaded ? */ if (status_xilinx == 4) { snd_printk(KERN_DEBUG "xilinx is already loaded !\n"); return 0; } /* the status should be 0 == "idle" */ if (status_xilinx != 0) { snd_printk(KERN_ERR "xilinx load error ! status = %d\n", status_xilinx); return -EIO; /* modprob -r may help ? 
*/ } /* check xilinx validity */ if (((u32*)(dsp->data))[0] == 0xffffffff) return -EINVAL; if (dsp->size % 4) return -EINVAL; /* set xilinx status to copying */ writel_be( 1, MIXART_MEM( mgr, MIXART_PSEUDOREG_MXLX_STATUS_OFFSET )); /* setup xilinx base address */ writel_be( MIXART_MOTHERBOARD_XLX_BASE_ADDRESS, MIXART_MEM( mgr,MIXART_PSEUDOREG_MXLX_BASE_ADDR_OFFSET )); /* setup code size for xilinx file */ writel_be( dsp->size, MIXART_MEM( mgr, MIXART_PSEUDOREG_MXLX_SIZE_OFFSET )); /* copy xilinx code */ memcpy_toio( MIXART_MEM( mgr, MIXART_MOTHERBOARD_XLX_BASE_ADDRESS), dsp->data, dsp->size); /* set xilinx status to copy finished */ writel_be( 2, MIXART_MEM( mgr, MIXART_PSEUDOREG_MXLX_STATUS_OFFSET )); /* return, because no further processing needed */ return 0; case MIXART_MOTHERBOARD_ELF_INDEX: if (status_elf == 4) { snd_printk(KERN_DEBUG "elf file already loaded !\n"); return 0; } /* the status should be 0 == "idle" */ if (status_elf != 0) { snd_printk(KERN_ERR "elf load error ! status = %d\n", status_elf); return -EIO; /* modprob -r may help ? 
*/ } /* wait for xilinx status == 4 */ err = mixart_wait_nice_for_register_value( mgr, MIXART_PSEUDOREG_MXLX_STATUS_OFFSET, 1, 4, 500); /* 5sec */ if (err < 0) { snd_printk(KERN_ERR "xilinx was not loaded or " "could not be started\n"); return err; } /* init some data on the card */ writel_be( 0, MIXART_MEM( mgr, MIXART_PSEUDOREG_BOARDNUMBER ) ); /* set miXart boardnumber to 0 */ writel_be( 0, MIXART_MEM( mgr, MIXART_FLOWTABLE_PTR ) ); /* reset pointer to flow table on miXart */ /* set elf status to copying */ writel_be( 1, MIXART_MEM( mgr, MIXART_PSEUDOREG_ELF_STATUS_OFFSET )); /* process the copying of the elf packets */ err = mixart_load_elf( mgr, dsp ); if (err < 0) return err; /* set elf status to copy finished */ writel_be( 2, MIXART_MEM( mgr, MIXART_PSEUDOREG_ELF_STATUS_OFFSET )); /* wait for elf status == 4 */ err = mixart_wait_nice_for_register_value( mgr, MIXART_PSEUDOREG_ELF_STATUS_OFFSET, 1, 4, 300); /* 3sec */ if (err < 0) { snd_printk(KERN_ERR "elf could not be started\n"); return err; } /* miXart waits at this point on the pointer to the flow table */ writel_be( (u32)mgr->flowinfo.addr, MIXART_MEM( mgr, MIXART_FLOWTABLE_PTR ) ); /* give pointer of flow table to miXart */ return 0; /* return, another xilinx file has to be loaded before */ case MIXART_AESEBUBOARD_XLX_INDEX: default: /* elf and xilinx should be loaded */ if (status_elf != 4 || status_xilinx != 4) { printk(KERN_ERR "xilinx or elf not " "successfully loaded\n"); return -EIO; /* modprob -r may help ? 
*/ } /* wait for daughter detection != 0 */ err = mixart_wait_nice_for_register_value( mgr, MIXART_PSEUDOREG_DBRD_PRESENCE_OFFSET, 0, 0, 30); /* 300msec */ if (err < 0) { snd_printk(KERN_ERR "error starting elf file\n"); return err; } /* the board type can now be retrieved */ mgr->board_type = (DAUGHTER_TYPE_MASK & readl_be( MIXART_MEM( mgr, MIXART_PSEUDOREG_DBRD_TYPE_OFFSET))); if (mgr->board_type == MIXART_DAUGHTER_TYPE_NONE) break; /* no daughter board; the file does not have to be loaded, continue after the switch */ /* only if aesebu daughter board presence (elf code must run) */ if (mgr->board_type != MIXART_DAUGHTER_TYPE_AES ) return -EINVAL; /* daughter should be idle */ if (status_daught != 0) { printk(KERN_ERR "daughter load error ! status = %d\n", status_daught); return -EIO; /* modprob -r may help ? */ } /* check daughterboard xilinx validity */ if (((u32*)(dsp->data))[0] == 0xffffffff) return -EINVAL; if (dsp->size % 4) return -EINVAL; /* inform mixart about the size of the file */ writel_be( dsp->size, MIXART_MEM( mgr, MIXART_PSEUDOREG_DXLX_SIZE_OFFSET )); /* set daughterboard status to 1 */ writel_be( 1, MIXART_MEM( mgr, MIXART_PSEUDOREG_DXLX_STATUS_OFFSET )); /* wait for status == 2 */ err = mixart_wait_nice_for_register_value( mgr, MIXART_PSEUDOREG_DXLX_STATUS_OFFSET, 1, 2, 30); /* 300msec */ if (err < 0) { snd_printk(KERN_ERR "daughter board load error\n"); return err; } /* get the address where to write the file */ val = readl_be( MIXART_MEM( mgr, MIXART_PSEUDOREG_DXLX_BASE_ADDR_OFFSET )); if (!val) return -EINVAL; /* copy daughterboard xilinx code */ memcpy_toio( MIXART_MEM( mgr, val), dsp->data, dsp->size); /* set daughterboard status to 4 */ writel_be( 4, MIXART_MEM( mgr, MIXART_PSEUDOREG_DXLX_STATUS_OFFSET )); /* continue with init */ break; } /* end of switch file index*/ /* wait for daughter status == 3 */ err = mixart_wait_nice_for_register_value( mgr, MIXART_PSEUDOREG_DXLX_STATUS_OFFSET, 1, 3, 300); /* 3sec */ if (err < 0) { 
snd_printk(KERN_ERR "daughter board could not be initialised\n"); return err; } /* init mailbox (communication with embedded) */ snd_mixart_init_mailbox(mgr); /* first communication with embedded */ err = mixart_first_init(mgr); if (err < 0) { snd_printk(KERN_ERR "miXart could not be set up\n"); return err; } /* create devices and mixer in accordance with HW options*/ for (card_index = 0; card_index < mgr->num_cards; card_index++) { struct snd_mixart *chip = mgr->chip[card_index]; if ((err = snd_mixart_create_pcm(chip)) < 0) return err; if (card_index == 0) { if ((err = snd_mixart_create_mixer(chip->mgr)) < 0) return err; } if ((err = snd_card_register(chip->card)) < 0) return err; }; snd_printdd("miXart firmware downloaded and successfully set up\n"); return 0; } #if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE) #if !defined(CONFIG_USE_MIXARTLOADER) && !defined(CONFIG_SND_MIXART) /* built-in kernel */ #define SND_MIXART_FW_LOADER /* use the standard firmware loader */ #endif #endif #ifdef SND_MIXART_FW_LOADER int snd_mixart_setup_firmware(struct mixart_mgr *mgr) { static char *fw_files[3] = { "miXart8.xlx", "miXart8.elf", "miXart8AES.xlx" }; char path[32]; const struct firmware *fw_entry; int i, err; for (i = 0; i < 3; i++) { sprintf(path, "mixart/%s", fw_files[i]); if (request_firmware(&fw_entry, path, &mgr->pci->dev)) { snd_printk(KERN_ERR "miXart: can't load firmware %s\n", path); return -ENOENT; } /* fake hwdep dsp record */ err = mixart_dsp_load(mgr, i, fw_entry); release_firmware(fw_entry); if (err < 0) return err; mgr->dsp_loaded |= 1 << i; } return 0; } MODULE_FIRMWARE("mixart/miXart8.xlx"); MODULE_FIRMWARE("mixart/miXart8.elf"); MODULE_FIRMWARE("mixart/miXart8AES.xlx"); #else /* old style firmware loading */ /* miXart hwdep interface id string */ #define SND_MIXART_HWDEP_ID "miXart Loader" static int mixart_hwdep_dsp_status(struct snd_hwdep *hw, struct snd_hwdep_dsp_status *info) { struct mixart_mgr *mgr = hw->private_data; 
strcpy(info->id, "miXart"); info->num_dsps = MIXART_HARDW_FILES_MAX_INDEX; if (mgr->dsp_loaded & (1 << MIXART_MOTHERBOARD_ELF_INDEX)) info->chip_ready = 1; info->version = MIXART_DRIVER_VERSION; return 0; } static int mixart_hwdep_dsp_load(struct snd_hwdep *hw, struct snd_hwdep_dsp_image *dsp) { struct mixart_mgr* mgr = hw->private_data; struct firmware fw; int err; fw.size = dsp->length; fw.data = vmalloc(dsp->length); if (! fw.data) { snd_printk(KERN_ERR "miXart: cannot allocate image size %d\n", (int)dsp->length); return -ENOMEM; } if (copy_from_user((void *) fw.data, dsp->image, dsp->length)) { vfree(fw.data); return -EFAULT; } err = mixart_dsp_load(mgr, dsp->index, &fw); vfree(fw.data); if (err < 0) return err; mgr->dsp_loaded |= 1 << dsp->index; return err; } int snd_mixart_setup_firmware(struct mixart_mgr *mgr) { int err; struct snd_hwdep *hw; /* only create hwdep interface for first cardX (see "index" module parameter)*/ if ((err = snd_hwdep_new(mgr->chip[0]->card, SND_MIXART_HWDEP_ID, 0, &hw)) < 0) return err; hw->iface = SNDRV_HWDEP_IFACE_MIXART; hw->private_data = mgr; hw->ops.dsp_status = mixart_hwdep_dsp_status; hw->ops.dsp_load = mixart_hwdep_dsp_load; hw->exclusive = 1; sprintf(hw->name, SND_MIXART_HWDEP_ID); mgr->dsp_loaded = 0; return snd_card_register(mgr->chip[0]->card); } #endif /* SND_MIXART_FW_LOADER */
gpl-2.0
boa19861105/BOA_Eye_M6.0_Kernel
net/decnet/dn_fib.c
5445
18541
/* * DECnet An implementation of the DECnet protocol suite for the LINUX * operating system. DECnet is implemented using the BSD Socket * interface as the means of communication with the user level. * * DECnet Routing Forwarding Information Base (Glue/Info List) * * Author: Steve Whitehouse <SteveW@ACM.org> * * * Changes: * Alexey Kuznetsov : SMP locking changes * Steve Whitehouse : Rewrote it... Well to be more correct, I * copied most of it from the ipv4 fib code. * Steve Whitehouse : Updated it in style and fixed a few bugs * which were fixed in the ipv4 code since * this code was copied from it. * */ #include <linux/string.h> #include <linux/net.h> #include <linux/socket.h> #include <linux/slab.h> #include <linux/sockios.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/netlink.h> #include <linux/rtnetlink.h> #include <linux/proc_fs.h> #include <linux/netdevice.h> #include <linux/timer.h> #include <linux/spinlock.h> #include <linux/atomic.h> #include <asm/uaccess.h> #include <net/neighbour.h> #include <net/dst.h> #include <net/flow.h> #include <net/fib_rules.h> #include <net/dn.h> #include <net/dn_route.h> #include <net/dn_fib.h> #include <net/dn_neigh.h> #include <net/dn_dev.h> #define RT_MIN_TABLE 1 #define for_fib_info() { struct dn_fib_info *fi;\ for(fi = dn_fib_info_list; fi; fi = fi->fib_next) #define endfor_fib_info() } #define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\ for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++) #define change_nexthops(fi) { int nhsel; struct dn_fib_nh *nh;\ for(nhsel = 0, nh = (struct dn_fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++) #define endfor_nexthops(fi) } static DEFINE_SPINLOCK(dn_fib_multipath_lock); static struct dn_fib_info *dn_fib_info_list; static DEFINE_SPINLOCK(dn_fib_info_lock); static struct { int error; u8 scope; } dn_fib_props[RTN_MAX+1] = { [RTN_UNSPEC] = { .error = 0, .scope = RT_SCOPE_NOWHERE }, [RTN_UNICAST] = { .error = 0, .scope = 
RT_SCOPE_UNIVERSE }, [RTN_LOCAL] = { .error = 0, .scope = RT_SCOPE_HOST }, [RTN_BROADCAST] = { .error = -EINVAL, .scope = RT_SCOPE_NOWHERE }, [RTN_ANYCAST] = { .error = -EINVAL, .scope = RT_SCOPE_NOWHERE }, [RTN_MULTICAST] = { .error = -EINVAL, .scope = RT_SCOPE_NOWHERE }, [RTN_BLACKHOLE] = { .error = -EINVAL, .scope = RT_SCOPE_UNIVERSE }, [RTN_UNREACHABLE] = { .error = -EHOSTUNREACH, .scope = RT_SCOPE_UNIVERSE }, [RTN_PROHIBIT] = { .error = -EACCES, .scope = RT_SCOPE_UNIVERSE }, [RTN_THROW] = { .error = -EAGAIN, .scope = RT_SCOPE_UNIVERSE }, [RTN_NAT] = { .error = 0, .scope = RT_SCOPE_NOWHERE }, [RTN_XRESOLVE] = { .error = -EINVAL, .scope = RT_SCOPE_NOWHERE }, }; static int dn_fib_sync_down(__le16 local, struct net_device *dev, int force); static int dn_fib_sync_up(struct net_device *dev); void dn_fib_free_info(struct dn_fib_info *fi) { if (fi->fib_dead == 0) { printk(KERN_DEBUG "DECnet: BUG! Attempt to free alive dn_fib_info\n"); return; } change_nexthops(fi) { if (nh->nh_dev) dev_put(nh->nh_dev); nh->nh_dev = NULL; } endfor_nexthops(fi); kfree(fi); } void dn_fib_release_info(struct dn_fib_info *fi) { spin_lock(&dn_fib_info_lock); if (fi && --fi->fib_treeref == 0) { if (fi->fib_next) fi->fib_next->fib_prev = fi->fib_prev; if (fi->fib_prev) fi->fib_prev->fib_next = fi->fib_next; if (fi == dn_fib_info_list) dn_fib_info_list = fi->fib_next; fi->fib_dead = 1; dn_fib_info_put(fi); } spin_unlock(&dn_fib_info_lock); } static inline int dn_fib_nh_comp(const struct dn_fib_info *fi, const struct dn_fib_info *ofi) { const struct dn_fib_nh *onh = ofi->fib_nh; for_nexthops(fi) { if (nh->nh_oif != onh->nh_oif || nh->nh_gw != onh->nh_gw || nh->nh_scope != onh->nh_scope || nh->nh_weight != onh->nh_weight || ((nh->nh_flags^onh->nh_flags)&~RTNH_F_DEAD)) return -1; onh++; } endfor_nexthops(fi); return 0; } static inline struct dn_fib_info *dn_fib_find_info(const struct dn_fib_info *nfi) { for_fib_info() { if (fi->fib_nhs != nfi->fib_nhs) continue; if (nfi->fib_protocol == 
fi->fib_protocol && nfi->fib_prefsrc == fi->fib_prefsrc && nfi->fib_priority == fi->fib_priority && memcmp(nfi->fib_metrics, fi->fib_metrics, sizeof(fi->fib_metrics)) == 0 && ((nfi->fib_flags^fi->fib_flags)&~RTNH_F_DEAD) == 0 && (nfi->fib_nhs == 0 || dn_fib_nh_comp(fi, nfi) == 0)) return fi; } endfor_fib_info(); return NULL; } __le16 dn_fib_get_attr16(struct rtattr *attr, int attrlen, int type) { while(RTA_OK(attr,attrlen)) { if (attr->rta_type == type) return *(__le16*)RTA_DATA(attr); attr = RTA_NEXT(attr, attrlen); } return 0; } static int dn_fib_count_nhs(struct rtattr *rta) { int nhs = 0; struct rtnexthop *nhp = RTA_DATA(rta); int nhlen = RTA_PAYLOAD(rta); while(nhlen >= (int)sizeof(struct rtnexthop)) { if ((nhlen -= nhp->rtnh_len) < 0) return 0; nhs++; nhp = RTNH_NEXT(nhp); } return nhs; } static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct rtattr *rta, const struct rtmsg *r) { struct rtnexthop *nhp = RTA_DATA(rta); int nhlen = RTA_PAYLOAD(rta); change_nexthops(fi) { int attrlen = nhlen - sizeof(struct rtnexthop); if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0) return -EINVAL; nh->nh_flags = (r->rtm_flags&~0xFF) | nhp->rtnh_flags; nh->nh_oif = nhp->rtnh_ifindex; nh->nh_weight = nhp->rtnh_hops + 1; if (attrlen) { nh->nh_gw = dn_fib_get_attr16(RTNH_DATA(nhp), attrlen, RTA_GATEWAY); } nhp = RTNH_NEXT(nhp); } endfor_nexthops(fi); return 0; } static int dn_fib_check_nh(const struct rtmsg *r, struct dn_fib_info *fi, struct dn_fib_nh *nh) { int err; if (nh->nh_gw) { struct flowidn fld; struct dn_fib_res res; if (nh->nh_flags&RTNH_F_ONLINK) { struct net_device *dev; if (r->rtm_scope >= RT_SCOPE_LINK) return -EINVAL; if (dnet_addr_type(nh->nh_gw) != RTN_UNICAST) return -EINVAL; if ((dev = __dev_get_by_index(&init_net, nh->nh_oif)) == NULL) return -ENODEV; if (!(dev->flags&IFF_UP)) return -ENETDOWN; nh->nh_dev = dev; dev_hold(dev); nh->nh_scope = RT_SCOPE_LINK; return 0; } memset(&fld, 0, sizeof(fld)); fld.daddr = nh->nh_gw; fld.flowidn_oif = nh->nh_oif; 
fld.flowidn_scope = r->rtm_scope + 1; if (fld.flowidn_scope < RT_SCOPE_LINK) fld.flowidn_scope = RT_SCOPE_LINK; if ((err = dn_fib_lookup(&fld, &res)) != 0) return err; err = -EINVAL; if (res.type != RTN_UNICAST && res.type != RTN_LOCAL) goto out; nh->nh_scope = res.scope; nh->nh_oif = DN_FIB_RES_OIF(res); nh->nh_dev = DN_FIB_RES_DEV(res); if (nh->nh_dev == NULL) goto out; dev_hold(nh->nh_dev); err = -ENETDOWN; if (!(nh->nh_dev->flags & IFF_UP)) goto out; err = 0; out: dn_fib_res_put(&res); return err; } else { struct net_device *dev; if (nh->nh_flags&(RTNH_F_PERVASIVE|RTNH_F_ONLINK)) return -EINVAL; dev = __dev_get_by_index(&init_net, nh->nh_oif); if (dev == NULL || dev->dn_ptr == NULL) return -ENODEV; if (!(dev->flags&IFF_UP)) return -ENETDOWN; nh->nh_dev = dev; dev_hold(nh->nh_dev); nh->nh_scope = RT_SCOPE_HOST; } return 0; } struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct dn_kern_rta *rta, const struct nlmsghdr *nlh, int *errp) { int err; struct dn_fib_info *fi = NULL; struct dn_fib_info *ofi; int nhs = 1; if (r->rtm_type > RTN_MAX) goto err_inval; if (dn_fib_props[r->rtm_type].scope > r->rtm_scope) goto err_inval; if (rta->rta_mp) { nhs = dn_fib_count_nhs(rta->rta_mp); if (nhs == 0) goto err_inval; } fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL); err = -ENOBUFS; if (fi == NULL) goto failure; fi->fib_protocol = r->rtm_protocol; fi->fib_nhs = nhs; fi->fib_flags = r->rtm_flags; if (rta->rta_priority) fi->fib_priority = *rta->rta_priority; if (rta->rta_mx) { int attrlen = RTA_PAYLOAD(rta->rta_mx); struct rtattr *attr = RTA_DATA(rta->rta_mx); while(RTA_OK(attr, attrlen)) { unsigned flavour = attr->rta_type; if (flavour) { if (flavour > RTAX_MAX) goto err_inval; fi->fib_metrics[flavour-1] = *(unsigned*)RTA_DATA(attr); } attr = RTA_NEXT(attr, attrlen); } } if (rta->rta_prefsrc) memcpy(&fi->fib_prefsrc, rta->rta_prefsrc, 2); if (rta->rta_mp) { if ((err = dn_fib_get_nhs(fi, rta->rta_mp, r)) != 0) goto failure; if (rta->rta_oif 
&& fi->fib_nh->nh_oif != *rta->rta_oif) goto err_inval; if (rta->rta_gw && memcmp(&fi->fib_nh->nh_gw, rta->rta_gw, 2)) goto err_inval; } else { struct dn_fib_nh *nh = fi->fib_nh; if (rta->rta_oif) nh->nh_oif = *rta->rta_oif; if (rta->rta_gw) memcpy(&nh->nh_gw, rta->rta_gw, 2); nh->nh_flags = r->rtm_flags; nh->nh_weight = 1; } if (r->rtm_type == RTN_NAT) { if (rta->rta_gw == NULL || nhs != 1 || rta->rta_oif) goto err_inval; memcpy(&fi->fib_nh->nh_gw, rta->rta_gw, 2); goto link_it; } if (dn_fib_props[r->rtm_type].error) { if (rta->rta_gw || rta->rta_oif || rta->rta_mp) goto err_inval; goto link_it; } if (r->rtm_scope > RT_SCOPE_HOST) goto err_inval; if (r->rtm_scope == RT_SCOPE_HOST) { struct dn_fib_nh *nh = fi->fib_nh; /* Local address is added */ if (nhs != 1 || nh->nh_gw) goto err_inval; nh->nh_scope = RT_SCOPE_NOWHERE; nh->nh_dev = dev_get_by_index(&init_net, fi->fib_nh->nh_oif); err = -ENODEV; if (nh->nh_dev == NULL) goto failure; } else { change_nexthops(fi) { if ((err = dn_fib_check_nh(r, fi, nh)) != 0) goto failure; } endfor_nexthops(fi) } if (fi->fib_prefsrc) { if (r->rtm_type != RTN_LOCAL || rta->rta_dst == NULL || memcmp(&fi->fib_prefsrc, rta->rta_dst, 2)) if (dnet_addr_type(fi->fib_prefsrc) != RTN_LOCAL) goto err_inval; } link_it: if ((ofi = dn_fib_find_info(fi)) != NULL) { fi->fib_dead = 1; dn_fib_free_info(fi); ofi->fib_treeref++; return ofi; } fi->fib_treeref++; atomic_inc(&fi->fib_clntref); spin_lock(&dn_fib_info_lock); fi->fib_next = dn_fib_info_list; fi->fib_prev = NULL; if (dn_fib_info_list) dn_fib_info_list->fib_prev = fi; dn_fib_info_list = fi; spin_unlock(&dn_fib_info_lock); return fi; err_inval: err = -EINVAL; failure: *errp = err; if (fi) { fi->fib_dead = 1; dn_fib_free_info(fi); } return NULL; } int dn_fib_semantic_match(int type, struct dn_fib_info *fi, const struct flowidn *fld, struct dn_fib_res *res) { int err = dn_fib_props[type].error; if (err == 0) { if (fi->fib_flags & RTNH_F_DEAD) return 1; res->fi = fi; switch (type) { case RTN_NAT: 
DN_FIB_RES_RESET(*res); atomic_inc(&fi->fib_clntref); return 0; case RTN_UNICAST: case RTN_LOCAL: for_nexthops(fi) { if (nh->nh_flags & RTNH_F_DEAD) continue; if (!fld->flowidn_oif || fld->flowidn_oif == nh->nh_oif) break; } if (nhsel < fi->fib_nhs) { res->nh_sel = nhsel; atomic_inc(&fi->fib_clntref); return 0; } endfor_nexthops(fi); res->fi = NULL; return 1; default: if (net_ratelimit()) printk("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n", type); res->fi = NULL; return -EINVAL; } } return err; } void dn_fib_select_multipath(const struct flowidn *fld, struct dn_fib_res *res) { struct dn_fib_info *fi = res->fi; int w; spin_lock_bh(&dn_fib_multipath_lock); if (fi->fib_power <= 0) { int power = 0; change_nexthops(fi) { if (!(nh->nh_flags&RTNH_F_DEAD)) { power += nh->nh_weight; nh->nh_power = nh->nh_weight; } } endfor_nexthops(fi); fi->fib_power = power; if (power < 0) { spin_unlock_bh(&dn_fib_multipath_lock); res->nh_sel = 0; return; } } w = jiffies % fi->fib_power; change_nexthops(fi) { if (!(nh->nh_flags&RTNH_F_DEAD) && nh->nh_power) { if ((w -= nh->nh_power) <= 0) { nh->nh_power--; fi->fib_power--; res->nh_sel = nhsel; spin_unlock_bh(&dn_fib_multipath_lock); return; } } } endfor_nexthops(fi); res->nh_sel = 0; spin_unlock_bh(&dn_fib_multipath_lock); } static int dn_fib_check_attr(struct rtmsg *r, struct rtattr **rta) { int i; for(i = 1; i <= RTA_MAX; i++) { struct rtattr *attr = rta[i-1]; if (attr) { if (RTA_PAYLOAD(attr) < 4 && RTA_PAYLOAD(attr) != 2) return -EINVAL; if (i != RTA_MULTIPATH && i != RTA_METRICS && i != RTA_TABLE) rta[i-1] = (struct rtattr *)RTA_DATA(attr); } } return 0; } static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(skb->sk); struct dn_fib_table *tb; struct rtattr **rta = arg; struct rtmsg *r = NLMSG_DATA(nlh); if (!net_eq(net, &init_net)) return -EINVAL; if (dn_fib_check_attr(r, rta)) return -EINVAL; tb = dn_fib_get_table(rtm_get_table(rta, r->rtm_table), 
0); if (tb) return tb->delete(tb, r, (struct dn_kern_rta *)rta, nlh, &NETLINK_CB(skb)); return -ESRCH; } static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(skb->sk); struct dn_fib_table *tb; struct rtattr **rta = arg; struct rtmsg *r = NLMSG_DATA(nlh); if (!net_eq(net, &init_net)) return -EINVAL; if (dn_fib_check_attr(r, rta)) return -EINVAL; tb = dn_fib_get_table(rtm_get_table(rta, r->rtm_table), 1); if (tb) return tb->insert(tb, r, (struct dn_kern_rta *)rta, nlh, &NETLINK_CB(skb)); return -ENOBUFS; } static void fib_magic(int cmd, int type, __le16 dst, int dst_len, struct dn_ifaddr *ifa) { struct dn_fib_table *tb; struct { struct nlmsghdr nlh; struct rtmsg rtm; } req; struct dn_kern_rta rta; memset(&req.rtm, 0, sizeof(req.rtm)); memset(&rta, 0, sizeof(rta)); if (type == RTN_UNICAST) tb = dn_fib_get_table(RT_MIN_TABLE, 1); else tb = dn_fib_get_table(RT_TABLE_LOCAL, 1); if (tb == NULL) return; req.nlh.nlmsg_len = sizeof(req); req.nlh.nlmsg_type = cmd; req.nlh.nlmsg_flags = NLM_F_REQUEST|NLM_F_CREATE|NLM_F_APPEND; req.nlh.nlmsg_pid = 0; req.nlh.nlmsg_seq = 0; req.rtm.rtm_dst_len = dst_len; req.rtm.rtm_table = tb->n; req.rtm.rtm_protocol = RTPROT_KERNEL; req.rtm.rtm_scope = (type != RTN_LOCAL ? 
RT_SCOPE_LINK : RT_SCOPE_HOST); req.rtm.rtm_type = type; rta.rta_dst = &dst; rta.rta_prefsrc = &ifa->ifa_local; rta.rta_oif = &ifa->ifa_dev->dev->ifindex; if (cmd == RTM_NEWROUTE) tb->insert(tb, &req.rtm, &rta, &req.nlh, NULL); else tb->delete(tb, &req.rtm, &rta, &req.nlh, NULL); } static void dn_fib_add_ifaddr(struct dn_ifaddr *ifa) { fib_magic(RTM_NEWROUTE, RTN_LOCAL, ifa->ifa_local, 16, ifa); #if 0 if (!(dev->flags&IFF_UP)) return; /* In the future, we will want to add default routes here */ #endif } static void dn_fib_del_ifaddr(struct dn_ifaddr *ifa) { int found_it = 0; struct net_device *dev; struct dn_dev *dn_db; struct dn_ifaddr *ifa2; ASSERT_RTNL(); /* Scan device list */ rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { dn_db = rcu_dereference(dev->dn_ptr); if (dn_db == NULL) continue; for (ifa2 = rcu_dereference(dn_db->ifa_list); ifa2 != NULL; ifa2 = rcu_dereference(ifa2->ifa_next)) { if (ifa2->ifa_local == ifa->ifa_local) { found_it = 1; break; } } } rcu_read_unlock(); if (found_it == 0) { fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 16, ifa); if (dnet_addr_type(ifa->ifa_local) != RTN_LOCAL) { if (dn_fib_sync_down(ifa->ifa_local, NULL, 0)) dn_fib_flush(); } } } static void dn_fib_disable_addr(struct net_device *dev, int force) { if (dn_fib_sync_down(0, dev, force)) dn_fib_flush(); dn_rt_cache_flush(0); neigh_ifdown(&dn_neigh_table, dev); } static int dn_fib_dnaddr_event(struct notifier_block *this, unsigned long event, void *ptr) { struct dn_ifaddr *ifa = (struct dn_ifaddr *)ptr; switch (event) { case NETDEV_UP: dn_fib_add_ifaddr(ifa); dn_fib_sync_up(ifa->ifa_dev->dev); dn_rt_cache_flush(-1); break; case NETDEV_DOWN: dn_fib_del_ifaddr(ifa); if (ifa->ifa_dev && ifa->ifa_dev->ifa_list == NULL) { dn_fib_disable_addr(ifa->ifa_dev->dev, 1); } else { dn_rt_cache_flush(-1); } break; } return NOTIFY_DONE; } static int dn_fib_sync_down(__le16 local, struct net_device *dev, int force) { int ret = 0; int scope = RT_SCOPE_NOWHERE; if (force) scope = -1; 
for_fib_info() { /* * This makes no sense for DECnet.... we will almost * certainly have more than one local address the same * over all our interfaces. It needs thinking about * some more. */ if (local && fi->fib_prefsrc == local) { fi->fib_flags |= RTNH_F_DEAD; ret++; } else if (dev && fi->fib_nhs) { int dead = 0; change_nexthops(fi) { if (nh->nh_flags&RTNH_F_DEAD) dead++; else if (nh->nh_dev == dev && nh->nh_scope != scope) { spin_lock_bh(&dn_fib_multipath_lock); nh->nh_flags |= RTNH_F_DEAD; fi->fib_power -= nh->nh_power; nh->nh_power = 0; spin_unlock_bh(&dn_fib_multipath_lock); dead++; } } endfor_nexthops(fi) if (dead == fi->fib_nhs) { fi->fib_flags |= RTNH_F_DEAD; ret++; } } } endfor_fib_info(); return ret; } static int dn_fib_sync_up(struct net_device *dev) { int ret = 0; if (!(dev->flags&IFF_UP)) return 0; for_fib_info() { int alive = 0; change_nexthops(fi) { if (!(nh->nh_flags&RTNH_F_DEAD)) { alive++; continue; } if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP)) continue; if (nh->nh_dev != dev || dev->dn_ptr == NULL) continue; alive++; spin_lock_bh(&dn_fib_multipath_lock); nh->nh_power = 0; nh->nh_flags &= ~RTNH_F_DEAD; spin_unlock_bh(&dn_fib_multipath_lock); } endfor_nexthops(fi); if (alive > 0) { fi->fib_flags &= ~RTNH_F_DEAD; ret++; } } endfor_fib_info(); return ret; } static struct notifier_block dn_fib_dnaddr_notifier = { .notifier_call = dn_fib_dnaddr_event, }; void __exit dn_fib_cleanup(void) { dn_fib_table_cleanup(); dn_fib_rules_cleanup(); unregister_dnaddr_notifier(&dn_fib_dnaddr_notifier); } void __init dn_fib_init(void) { dn_fib_table_init(); dn_fib_rules_init(); register_dnaddr_notifier(&dn_fib_dnaddr_notifier); rtnl_register(PF_DECnet, RTM_NEWROUTE, dn_fib_rtm_newroute, NULL, NULL); rtnl_register(PF_DECnet, RTM_DELROUTE, dn_fib_rtm_delroute, NULL, NULL); }
gpl-2.0
trader418/android_kernel_samsung_hlte_N
arch/mips/loongson/common/pci.c
8773
2775
/* * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/pci.h> #include <pci.h> #include <loongson.h> static struct resource loongson_pci_mem_resource = { .name = "pci memory space", .start = LOONGSON_PCI_MEM_START, .end = LOONGSON_PCI_MEM_END, .flags = IORESOURCE_MEM, }; static struct resource loongson_pci_io_resource = { .name = "pci io space", .start = LOONGSON_PCI_IO_START, .end = IO_SPACE_LIMIT, .flags = IORESOURCE_IO, }; static struct pci_controller loongson_pci_controller = { .pci_ops = &loongson_pci_ops, .io_resource = &loongson_pci_io_resource, .mem_resource = &loongson_pci_mem_resource, .mem_offset = 0x00000000UL, .io_offset = 0x00000000UL, }; static void __init setup_pcimap(void) { /* * local to PCI mapping for CPU accessing PCI space * CPU address space [256M,448M] is window for accessing pci space * we set pcimap_lo[0,1,2] to map it to pci space[0M,64M], [320M,448M] * * pcimap: PCI_MAP2 PCI_Mem_Lo2 PCI_Mem_Lo1 PCI_Mem_Lo0 * [<2G] [384M,448M] [320M,384M] [0M,64M] */ LOONGSON_PCIMAP = LOONGSON_PCIMAP_PCIMAP_2 | LOONGSON_PCIMAP_WIN(2, LOONGSON_PCILO2_BASE) | LOONGSON_PCIMAP_WIN(1, LOONGSON_PCILO1_BASE) | LOONGSON_PCIMAP_WIN(0, 0); /* * PCI-DMA to local mapping: [2G,2G+256M] -> [0M,256M] */ LOONGSON_PCIBASE0 = 0x80000000ul; /* base: 2G -> mmap: 0M */ /* size: 256M, burst transmission, pre-fetch enable, 64bit */ LOONGSON_PCI_HIT0_SEL_L = 0xc000000cul; LOONGSON_PCI_HIT0_SEL_H = 0xfffffffful; LOONGSON_PCI_HIT1_SEL_L = 0x00000006ul; /* set this BAR as invalid */ LOONGSON_PCI_HIT1_SEL_H = 0x00000000ul; LOONGSON_PCI_HIT2_SEL_L = 0x00000006ul; /* set this BAR as invalid */ LOONGSON_PCI_HIT2_SEL_H = 0x00000000ul; /* avoid deadlock of PCI 
reading/writing lock operation */ LOONGSON_PCI_ISR4C = 0xd2000001ul; /* can not change gnt to break pci transfer when device's gnt not deassert for some broken device */ LOONGSON_PXARB_CFG = 0x00fe0105ul; #ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG /* * set cpu addr window2 to map CPU address space to PCI address space */ LOONGSON_ADDRWIN_CPUTOPCI(ADDRWIN_WIN2, LOONGSON_CPU_MEM_SRC, LOONGSON_PCI_MEM_DST, MMAP_CPUTOPCI_SIZE); #endif } static int __init pcibios_init(void) { setup_pcimap(); loongson_pci_controller.io_map_base = mips_io_port_base; register_pci_controller(&loongson_pci_controller); return 0; } arch_initcall(pcibios_init);
gpl-2.0
yank555-lu/SGS3-JB
arch/mips/loongson/common/pci.c
8773
2775
/* * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/pci.h> #include <pci.h> #include <loongson.h> static struct resource loongson_pci_mem_resource = { .name = "pci memory space", .start = LOONGSON_PCI_MEM_START, .end = LOONGSON_PCI_MEM_END, .flags = IORESOURCE_MEM, }; static struct resource loongson_pci_io_resource = { .name = "pci io space", .start = LOONGSON_PCI_IO_START, .end = IO_SPACE_LIMIT, .flags = IORESOURCE_IO, }; static struct pci_controller loongson_pci_controller = { .pci_ops = &loongson_pci_ops, .io_resource = &loongson_pci_io_resource, .mem_resource = &loongson_pci_mem_resource, .mem_offset = 0x00000000UL, .io_offset = 0x00000000UL, }; static void __init setup_pcimap(void) { /* * local to PCI mapping for CPU accessing PCI space * CPU address space [256M,448M] is window for accessing pci space * we set pcimap_lo[0,1,2] to map it to pci space[0M,64M], [320M,448M] * * pcimap: PCI_MAP2 PCI_Mem_Lo2 PCI_Mem_Lo1 PCI_Mem_Lo0 * [<2G] [384M,448M] [320M,384M] [0M,64M] */ LOONGSON_PCIMAP = LOONGSON_PCIMAP_PCIMAP_2 | LOONGSON_PCIMAP_WIN(2, LOONGSON_PCILO2_BASE) | LOONGSON_PCIMAP_WIN(1, LOONGSON_PCILO1_BASE) | LOONGSON_PCIMAP_WIN(0, 0); /* * PCI-DMA to local mapping: [2G,2G+256M] -> [0M,256M] */ LOONGSON_PCIBASE0 = 0x80000000ul; /* base: 2G -> mmap: 0M */ /* size: 256M, burst transmission, pre-fetch enable, 64bit */ LOONGSON_PCI_HIT0_SEL_L = 0xc000000cul; LOONGSON_PCI_HIT0_SEL_H = 0xfffffffful; LOONGSON_PCI_HIT1_SEL_L = 0x00000006ul; /* set this BAR as invalid */ LOONGSON_PCI_HIT1_SEL_H = 0x00000000ul; LOONGSON_PCI_HIT2_SEL_L = 0x00000006ul; /* set this BAR as invalid */ LOONGSON_PCI_HIT2_SEL_H = 0x00000000ul; /* avoid deadlock of PCI 
reading/writing lock operation */ LOONGSON_PCI_ISR4C = 0xd2000001ul; /* can not change gnt to break pci transfer when device's gnt not deassert for some broken device */ LOONGSON_PXARB_CFG = 0x00fe0105ul; #ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG /* * set cpu addr window2 to map CPU address space to PCI address space */ LOONGSON_ADDRWIN_CPUTOPCI(ADDRWIN_WIN2, LOONGSON_CPU_MEM_SRC, LOONGSON_PCI_MEM_DST, MMAP_CPUTOPCI_SIZE); #endif } static int __init pcibios_init(void) { setup_pcimap(); loongson_pci_controller.io_map_base = mips_io_port_base; register_pci_controller(&loongson_pci_controller); return 0; } arch_initcall(pcibios_init);
gpl-2.0
WhiteDawn/Whatever-Flo-Android-Kernel
arch/mips/lasat/sysctl.c
8773
6089
/*
 * Thomas Horsten <thh@lasat.com>
 * Copyright (C) 2000 LASAT Networks A/S.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Routines specific to the LASAT boards
 */
#include <linux/types.h>
#include <asm/lasat/lasat.h>

#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/uaccess.h>

#include <asm/time.h>

#ifdef CONFIG_DS1603
#include "ds1603.h"
#endif

/*
 * proc handler for string entries: delegate to proc_dostring(), and on a
 * successful write also persist the board info to EEPROM.
 */
int proc_dolasatstring(ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	int r;

	r = proc_dostring(table, write, buffer, lenp, ppos);
	if ((!write) || r)
		return r;

	lasat_write_eeprom_info();

	return 0;
}

/* proc function to write EEPROM after changing int entry */
int proc_dolasatint(ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	int r;

	r = proc_dointvec(table, write, buffer, lenp, ppos);
	if ((!write) || r)
		return r;

	lasat_write_eeprom_info();

	return 0;
}

#ifdef CONFIG_DS1603
/* Staging variable the proc int handler reads/writes the RTC through. */
static int rtctmp;

/* proc function to read/write RealTime Clock */
int proc_dolasatrtc(ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	struct timespec ts;
	int r;

	if (!write) {
		/* Snapshot the persistent clock so proc_dointvec shows it. */
		read_persistent_clock(&ts);
		rtctmp = ts.tv_sec;
		/* check for time < 0 and set to 0 */
		if (rtctmp < 0)
			rtctmp = 0;
	}
	r = proc_dointvec(table, write, buffer, lenp, ppos);
	if (r)
		return r;

	if (write)
		rtc_mips_set_mmss(rtctmp);

	return 0;
}
#endif

#ifdef CONFIG_INET
/*
 * proc handler for IP-address entries: accepts dotted-quad text on write
 * (stored as a 32-bit value and saved to EEPROM) and renders dotted-quad
 * text on read.  Buffer pointers are userspace pointers — note the
 * get_user/copy_from_user/copy_to_user accesses throughout.
 */
int proc_lasat_ip(ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int ip;
	char *p, c;
	int len;
	char ipbuf[32];

	if (!table->data || !table->maxlen || !*lenp ||
	    (*ppos && !write)) {
		*lenp = 0;
		return 0;
	}

	if (write) {
		/* Scan (via get_user) up to NUL/newline to find the length. */
		len = 0;
		p = buffer;
		while (len < *lenp) {
			if (get_user(c, p++))
				return -EFAULT;
			if (c == 0 || c == '\n')
				break;
			len++;
		}
		/* Clamp so the copy below always leaves room for the NUL. */
		if (len >= sizeof(ipbuf)-1)
			len = sizeof(ipbuf) - 1;
		if (copy_from_user(ipbuf, buffer, len))
			return -EFAULT;
		ipbuf[len] = 0;
		*ppos += *lenp;
		/* Now see if we can convert it to a valid IP */
		ip = in_aton(ipbuf);
		*(unsigned int *)(table->data) = ip;
		lasat_write_eeprom_info();
	} else {
		/* Byte order: low byte first — matches in_aton() storage. */
		ip = *(unsigned int *)(table->data);
		sprintf(ipbuf, "%d.%d.%d.%d",
			(ip)       & 0xff,
			(ip >>  8) & 0xff,
			(ip >> 16) & 0xff,
			(ip >> 24) & 0xff);
		len = strlen(ipbuf);
		if (len > *lenp)
			len = *lenp;
		if (len)
			if (copy_to_user(buffer, ipbuf, len))
				return -EFAULT;
		if (len < *lenp) {
			if (put_user('\n', ((char *) buffer) + len))
				return -EFAULT;
			len++;
		}
		*lenp = len;
		*ppos += len;
	}
	return 0;
}
#endif

/*
 * proc handler for the product id: after a write, mirror the new value
 * into the EEPROM copy, persist it, and re-derive the board info.
 */
int proc_lasat_prid(ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	int r;

	r = proc_dointvec(table, write, buffer, lenp, ppos);
	if (r < 0)
		return r;
	if (write) {
		lasat_board_info.li_eeprom_info.prid =
			lasat_board_info.li_prid;
		lasat_write_eeprom_info();
		lasat_init_board_info();
	}
	return 0;
}

extern int lasat_boot_to_service;

/* /proc/sys/lasat/* entries. */
static ctl_table lasat_table[] = {
	{
		.procname	= "cpu-hz",
		.data		= &lasat_board_info.li_cpu_hz,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "bus-hz",
		.data		= &lasat_board_info.li_bus_hz,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "bmid",
		.data		= &lasat_board_info.li_bmid,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "prid",
		.data		= &lasat_board_info.li_prid,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_lasat_prid,
	},
#ifdef CONFIG_INET
	{
		.procname	= "ipaddr",
		.data		= &lasat_board_info.li_eeprom_info.ipaddr,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_lasat_ip,
	},
	{
		.procname	= "netmask",
		.data		= &lasat_board_info.li_eeprom_info.netmask,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_lasat_ip,
	},
#endif
	{
		.procname	= "passwd_hash",
		.data		= &lasat_board_info.li_eeprom_info.passwd_hash,
		.maxlen		=
			sizeof(lasat_board_info.li_eeprom_info.passwd_hash),
		.mode		= 0600,
		.proc_handler	= proc_dolasatstring,
	},
	{
		.procname	= "boot-service",
		.data		= &lasat_boot_to_service,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#ifdef CONFIG_DS1603
	{
		.procname	= "rtc",
		.data		= &rtctmp,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dolasatrtc,
	},
#endif
	{
		.procname	= "namestr",
		.data		= &lasat_board_info.li_namestr,
		.maxlen		= sizeof(lasat_board_info.li_namestr),
		.mode		= 0444,
		.proc_handler	= proc_dostring,
	},
	{
		.procname	= "typestr",
		.data		= &lasat_board_info.li_typestr,
		.maxlen		= sizeof(lasat_board_info.li_typestr),
		.mode		= 0444,
		.proc_handler	= proc_dostring,
	},
	{}
};

/* Parent directory: /proc/sys/lasat. */
static ctl_table lasat_root_table[] = {
	{
		.procname	= "lasat",
		.mode		= 0555,
		.child		= lasat_table
	},
	{}
};

/* Register the sysctl tree at boot; header is intentionally never freed. */
static int __init lasat_register_sysctl(void)
{
	struct ctl_table_header *lasat_table_header;

	lasat_table_header = register_sysctl_table(lasat_root_table);
	if (!lasat_table_header) {
		printk(KERN_ERR "Unable to register LASAT sysctl\n");
		return -ENOMEM;
	}

	return 0;
}

__initcall(lasat_register_sysctl);
gpl-2.0
Tekcafe/Test-kernel
net/ieee802154/wpan-class.c
9285
5219
/*
 * Copyright (C) 2007, 2008, 2009 Siemens AG
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>

#include <net/wpan-phy.h>

#include "ieee802154.h"

/*
 * Generate a sysfs "show" callback that formats PIB attributes of a
 * wpan_phy while holding phy->pib_lock.
 */
#define MASTER_SHOW_COMPLEX(name, format_string, args...)		\
static ssize_t name ## _show(struct device *dev,			\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);	\
	int ret;							\
									\
	mutex_lock(&phy->pib_lock);					\
	ret = snprintf(buf, PAGE_SIZE, format_string "\n", args);	\
	mutex_unlock(&phy->pib_lock);					\
	return ret;							\
}

/* Common case: show a single struct wpan_phy field. */
#define MASTER_SHOW(field, format_string)				\
	MASTER_SHOW_COMPLEX(field, format_string, phy->field)

MASTER_SHOW(current_channel, "%d");
MASTER_SHOW(current_page, "%d");
/*
 * transmit_power packs a 6-bit signed value (low bits, sign-extended via
 * the shift pair) and a 2-bit tolerance code (high bits) in one byte.
 */
MASTER_SHOW_COMPLEX(transmit_power, "%d +- %d dB",
	((signed char) (phy->transmit_power << 2)) >> 2,
	(phy->transmit_power >> 6) ? (phy->transmit_power >> 6) * 3 : 1);
MASTER_SHOW(cca_mode, "%d");

/* Dump all 32 words of the channels_supported bitmap, one per line. */
static ssize_t channels_supported_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
	int ret;
	int i, len = 0;

	mutex_lock(&phy->pib_lock);
	for (i = 0; i < 32; i++) {
		ret = snprintf(buf + len, PAGE_SIZE - len,
				"%#09x\n", phy->channels_supported[i]);
		if (ret < 0)
			break;
		len += ret;
	}
	mutex_unlock(&phy->pib_lock);
	return len;
}

/* Attributes instantiated for every phy device; NULL-terminated. */
static struct device_attribute pmib_attrs[] = {
	__ATTR_RO(current_channel),
	__ATTR_RO(current_page),
	__ATTR_RO(channels_supported),
	__ATTR_RO(transmit_power),
	__ATTR_RO(cca_mode),
	{},
};

/* Final put_device() lands here: free the phy (priv area included). */
static void wpan_phy_release(struct device *d)
{
	struct wpan_phy *phy = container_of(d, struct wpan_phy, dev);
	kfree(phy);
}

static struct class wpan_phy_class = {
	.name = "ieee802154",
	.dev_release = wpan_phy_release,
	.dev_attrs = pmib_attrs,
};

static DEFINE_MUTEX(wpan_phy_mutex);	/* serializes wpan_phy_idx updates */
static int wpan_phy_idx;

/* class_find_device() match callback: compare by device name. */
static int wpan_phy_match(struct device *dev, void *data)
{
	return !strcmp(dev_name(dev), (const char *)data);
}

/*
 * Look up a registered phy by its device name ("wpan-phyN").
 * NOTE(review): class_find_device() presumably holds a reference on the
 * returned device — callers are expected to drop it; confirm against the
 * driver-core documentation.
 */
struct wpan_phy *wpan_phy_find(const char *str)
{
	struct device *dev;

	if (WARN_ON(!str))
		return NULL;

	dev = class_find_device(&wpan_phy_class, NULL,
			(void *)str, wpan_phy_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct wpan_phy, dev);
}
EXPORT_SYMBOL(wpan_phy_find);

/* Closure passed through class_for_each_device() to wpan_phy_iter(). */
struct wpan_phy_iter_data {
	int (*fn)(struct wpan_phy *phy, void *data);
	void *data;
};

static int wpan_phy_iter(struct device *dev, void *_data)
{
	struct wpan_phy_iter_data *wpid = _data;
	struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
	return wpid->fn(phy, wpid->data);
}

/* Invoke fn(phy, data) for each registered phy in the class. */
int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data),
		void *data)
{
	struct wpan_phy_iter_data wpid = {
		.fn = fn,
		.data = data,
	};

	return class_for_each_device(&wpan_phy_class, NULL,
			&wpid, wpan_phy_iter);
}
EXPORT_SYMBOL(wpan_phy_for_each);

/* Index is valid until the monotonic counter wraps negative. */
static int wpan_phy_idx_valid(int idx)
{
	return idx >= 0;
}

/*
 * Allocate a wpan_phy plus priv_size bytes of driver private data.
 * The device is initialized but not added; returns NULL on allocation
 * failure or phy-index overflow.
 */
struct wpan_phy *wpan_phy_alloc(size_t priv_size)
{
	struct wpan_phy *phy = kzalloc(sizeof(*phy) + priv_size,
			GFP_KERNEL);

	if (!phy)
		goto out;
	mutex_lock(&wpan_phy_mutex);
	phy->idx = wpan_phy_idx++;
	if (unlikely(!wpan_phy_idx_valid(phy->idx))) {
		/* Counter wrapped: undo the increment and give up. */
		wpan_phy_idx--;
		mutex_unlock(&wpan_phy_mutex);
		kfree(phy);
		goto out;
	}
	mutex_unlock(&wpan_phy_mutex);

	mutex_init(&phy->pib_lock);

	device_initialize(&phy->dev);
	dev_set_name(&phy->dev, "wpan-phy%d", phy->idx);

	phy->dev.class = &wpan_phy_class;

	phy->current_channel = -1; /* not initialised */
	phy->current_page = 0; /* for compatibility */

	return phy;

out:
	return NULL;
}
EXPORT_SYMBOL(wpan_phy_alloc);

/* Make the phy visible in sysfs. */
int wpan_phy_register(struct wpan_phy *phy)
{
	return device_add(&phy->dev);
}
EXPORT_SYMBOL(wpan_phy_register);

void wpan_phy_unregister(struct wpan_phy *phy)
{
	device_del(&phy->dev);
}
EXPORT_SYMBOL(wpan_phy_unregister);

/* Drop the caller's reference; release callback frees the memory. */
void wpan_phy_free(struct wpan_phy *phy)
{
	put_device(&phy->dev);
}
EXPORT_SYMBOL(wpan_phy_free);

/* Register the class, then the netlink interface; unwind on failure. */
static int __init wpan_phy_class_init(void)
{
	int rc;
	rc = class_register(&wpan_phy_class);
	if (rc)
		goto err;

	rc = ieee802154_nl_init();
	if (rc)
		goto err_nl;

	return 0;
err_nl:
	class_unregister(&wpan_phy_class);
err:
	return rc;
}
subsys_initcall(wpan_phy_class_init);

static void __exit wpan_phy_class_exit(void)
{
	ieee802154_nl_exit();
	class_unregister(&wpan_phy_class);
}
module_exit(wpan_phy_class_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IEEE 802.15.4 configuration interface");
MODULE_AUTHOR("Dmitry Eremin-Solenikov");
gpl-2.0
TheTypoMaster/SM-G360T1_kernel
arch/parisc/math-emu/sfmpy.c
14149
10577
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC * * File: * @(#) pa/spmath/sfmpy.c $Revision: 1.1 $ * * Purpose: * Single Precision Floating-point Multiply * * External Interfaces: * sgl_fmpy(srcptr1,srcptr2,dstptr,status) * * Internal Interfaces: * * Theory: * <<please update with a overview of the operation of this file>> * * END_DESC */ #include "float.h" #include "sgl_float.h" /* * Single Precision Floating-point Multiply */ int sgl_fmpy( sgl_floating_point *srcptr1, sgl_floating_point *srcptr2, sgl_floating_point *dstptr, unsigned int *status) { register unsigned int opnd1, opnd2, opnd3, result; register int dest_exponent, count; register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE; boolean is_tiny; opnd1 = *srcptr1; opnd2 = *srcptr2; /* * set sign bit of result */ if (Sgl_sign(opnd1) ^ Sgl_sign(opnd2)) Sgl_setnegativezero(result); else Sgl_setzero(result); /* * check first operand for NaN's or infinity */ if (Sgl_isinfinity_exponent(opnd1)) { if (Sgl_iszero_mantissa(opnd1)) { if (Sgl_isnotnan(opnd2)) { if (Sgl_iszero_exponentmantissa(opnd2)) { /* * invalid since operands are infinity * and zero */ if (Is_invalidtrap_enabled()) 
return(INVALIDEXCEPTION); Set_invalidflag(); Sgl_makequietnan(result); *dstptr = result; return(NOEXCEPTION); } /* * return infinity */ Sgl_setinfinity_exponentmantissa(result); *dstptr = result; return(NOEXCEPTION); } } else { /* * is NaN; signaling or quiet? */ if (Sgl_isone_signaling(opnd1)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Sgl_set_quiet(opnd1); } /* * is second operand a signaling NaN? */ else if (Sgl_is_signalingnan(opnd2)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Sgl_set_quiet(opnd2); *dstptr = opnd2; return(NOEXCEPTION); } /* * return quiet NaN */ *dstptr = opnd1; return(NOEXCEPTION); } } /* * check second operand for NaN's or infinity */ if (Sgl_isinfinity_exponent(opnd2)) { if (Sgl_iszero_mantissa(opnd2)) { if (Sgl_iszero_exponentmantissa(opnd1)) { /* invalid since operands are zero & infinity */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); Set_invalidflag(); Sgl_makequietnan(opnd2); *dstptr = opnd2; return(NOEXCEPTION); } /* * return infinity */ Sgl_setinfinity_exponentmantissa(result); *dstptr = result; return(NOEXCEPTION); } /* * is NaN; signaling or quiet? 
*/ if (Sgl_isone_signaling(opnd2)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Sgl_set_quiet(opnd2); } /* * return quiet NaN */ *dstptr = opnd2; return(NOEXCEPTION); } /* * Generate exponent */ dest_exponent = Sgl_exponent(opnd1) + Sgl_exponent(opnd2) - SGL_BIAS; /* * Generate mantissa */ if (Sgl_isnotzero_exponent(opnd1)) { /* set hidden bit */ Sgl_clear_signexponent_set_hidden(opnd1); } else { /* check for zero */ if (Sgl_iszero_mantissa(opnd1)) { Sgl_setzero_exponentmantissa(result); *dstptr = result; return(NOEXCEPTION); } /* is denormalized, adjust exponent */ Sgl_clear_signexponent(opnd1); Sgl_leftshiftby1(opnd1); Sgl_normalize(opnd1,dest_exponent); } /* opnd2 needs to have hidden bit set with msb in hidden bit */ if (Sgl_isnotzero_exponent(opnd2)) { Sgl_clear_signexponent_set_hidden(opnd2); } else { /* check for zero */ if (Sgl_iszero_mantissa(opnd2)) { Sgl_setzero_exponentmantissa(result); *dstptr = result; return(NOEXCEPTION); } /* is denormalized; want to normalize */ Sgl_clear_signexponent(opnd2); Sgl_leftshiftby1(opnd2); Sgl_normalize(opnd2,dest_exponent); } /* Multiply two source mantissas together */ Sgl_leftshiftby4(opnd2); /* make room for guard bits */ Sgl_setzero(opnd3); /* * Four bits at a time are inspected in each loop, and a * simple shift and add multiply algorithm is used. */ for (count=1;count<SGL_P;count+=4) { stickybit |= Slow4(opnd3); Sgl_rightshiftby4(opnd3); if (Sbit28(opnd1)) Sall(opnd3) += (Sall(opnd2) << 3); if (Sbit29(opnd1)) Sall(opnd3) += (Sall(opnd2) << 2); if (Sbit30(opnd1)) Sall(opnd3) += (Sall(opnd2) << 1); if (Sbit31(opnd1)) Sall(opnd3) += Sall(opnd2); Sgl_rightshiftby4(opnd1); } /* make sure result is left-justified */ if (Sgl_iszero_sign(opnd3)) { Sgl_leftshiftby1(opnd3); } else { /* result mantissa >= 2. 
*/ dest_exponent++; } /* check for denormalized result */ while (Sgl_iszero_sign(opnd3)) { Sgl_leftshiftby1(opnd3); dest_exponent--; } /* * check for guard, sticky and inexact bits */ stickybit |= Sgl_all(opnd3) << (SGL_BITLENGTH - SGL_EXP_LENGTH + 1); guardbit = Sbit24(opnd3); inexact = guardbit | stickybit; /* re-align mantissa */ Sgl_rightshiftby8(opnd3); /* * round result */ if (inexact && (dest_exponent>0 || Is_underflowtrap_enabled())) { Sgl_clear_signexponent(opnd3); switch (Rounding_mode()) { case ROUNDPLUS: if (Sgl_iszero_sign(result)) Sgl_increment(opnd3); break; case ROUNDMINUS: if (Sgl_isone_sign(result)) Sgl_increment(opnd3); break; case ROUNDNEAREST: if (guardbit) { if (stickybit || Sgl_isone_lowmantissa(opnd3)) Sgl_increment(opnd3); } } if (Sgl_isone_hidden(opnd3)) dest_exponent++; } Sgl_set_mantissa(result,opnd3); /* * Test for overflow */ if (dest_exponent >= SGL_INFINITY_EXPONENT) { /* trap if OVERFLOWTRAP enabled */ if (Is_overflowtrap_enabled()) { /* * Adjust bias of result */ Sgl_setwrapped_exponent(result,dest_exponent,ovfl); *dstptr = result; if (inexact) if (Is_inexacttrap_enabled()) return(OVERFLOWEXCEPTION | INEXACTEXCEPTION); else Set_inexactflag(); return(OVERFLOWEXCEPTION); } inexact = TRUE; Set_overflowflag(); /* set result to infinity or largest number */ Sgl_setoverflow(result); } /* * Test for underflow */ else if (dest_exponent <= 0) { /* trap if UNDERFLOWTRAP enabled */ if (Is_underflowtrap_enabled()) { /* * Adjust bias of result */ Sgl_setwrapped_exponent(result,dest_exponent,unfl); *dstptr = result; if (inexact) if (Is_inexacttrap_enabled()) return(UNDERFLOWEXCEPTION | INEXACTEXCEPTION); else Set_inexactflag(); return(UNDERFLOWEXCEPTION); } /* Determine if should set underflow flag */ is_tiny = TRUE; if (dest_exponent == 0 && inexact) { switch (Rounding_mode()) { case ROUNDPLUS: if (Sgl_iszero_sign(result)) { Sgl_increment(opnd3); if (Sgl_isone_hiddenoverflow(opnd3)) is_tiny = FALSE; Sgl_decrement(opnd3); } break; case 
ROUNDMINUS: if (Sgl_isone_sign(result)) { Sgl_increment(opnd3); if (Sgl_isone_hiddenoverflow(opnd3)) is_tiny = FALSE; Sgl_decrement(opnd3); } break; case ROUNDNEAREST: if (guardbit && (stickybit || Sgl_isone_lowmantissa(opnd3))) { Sgl_increment(opnd3); if (Sgl_isone_hiddenoverflow(opnd3)) is_tiny = FALSE; Sgl_decrement(opnd3); } break; } } /* * denormalize result or set to signed zero */ stickybit = inexact; Sgl_denormalize(opnd3,dest_exponent,guardbit,stickybit,inexact); /* return zero or smallest number */ if (inexact) { switch (Rounding_mode()) { case ROUNDPLUS: if (Sgl_iszero_sign(result)) { Sgl_increment(opnd3); } break; case ROUNDMINUS: if (Sgl_isone_sign(result)) { Sgl_increment(opnd3); } break; case ROUNDNEAREST: if (guardbit && (stickybit || Sgl_isone_lowmantissa(opnd3))) { Sgl_increment(opnd3); } break; } if (is_tiny) Set_underflowflag(); } Sgl_set_exponentmantissa(result,opnd3); } else Sgl_set_exponent(result,dest_exponent); *dstptr = result; /* check for inexact */ if (inexact) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } return(NOEXCEPTION); }
gpl-2.0
ISTweak/android_kernel_sony_blue_hayabusa
arch/parisc/math-emu/dfdiv.c
14149
12636
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC * * File: * @(#) pa/spmath/dfdiv.c $Revision: 1.1 $ * * Purpose: * Double Precision Floating-point Divide * * External Interfaces: * dbl_fdiv(srcptr1,srcptr2,dstptr,status) * * Internal Interfaces: * * Theory: * <<please update with a overview of the operation of this file>> * * END_DESC */ #include "float.h" #include "dbl_float.h" /* * Double Precision Floating-point Divide */ int dbl_fdiv (dbl_floating_point * srcptr1, dbl_floating_point * srcptr2, dbl_floating_point * dstptr, unsigned int *status) { register unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2; register unsigned int opnd3p1, opnd3p2, resultp1, resultp2; register int dest_exponent, count; register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE; boolean is_tiny; Dbl_copyfromptr(srcptr1,opnd1p1,opnd1p2); Dbl_copyfromptr(srcptr2,opnd2p1,opnd2p2); /* * set sign bit of result */ if (Dbl_sign(opnd1p1) ^ Dbl_sign(opnd2p1)) Dbl_setnegativezerop1(resultp1); else Dbl_setzerop1(resultp1); /* * check first operand for NaN's or infinity */ if (Dbl_isinfinity_exponent(opnd1p1)) { if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) { if 
(Dbl_isnotnan(opnd2p1,opnd2p2)) { if (Dbl_isinfinity(opnd2p1,opnd2p2)) { /* * invalid since both operands * are infinity */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); Set_invalidflag(); Dbl_makequietnan(resultp1,resultp2); Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } /* * return infinity */ Dbl_setinfinity_exponentmantissa(resultp1,resultp2); Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } } else { /* * is NaN; signaling or quiet? */ if (Dbl_isone_signaling(opnd1p1)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Dbl_set_quiet(opnd1p1); } /* * is second operand a signaling NaN? */ else if (Dbl_is_signalingnan(opnd2p1)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Dbl_set_quiet(opnd2p1); Dbl_copytoptr(opnd2p1,opnd2p2,dstptr); return(NOEXCEPTION); } /* * return quiet NaN */ Dbl_copytoptr(opnd1p1,opnd1p2,dstptr); return(NOEXCEPTION); } } /* * check second operand for NaN's or infinity */ if (Dbl_isinfinity_exponent(opnd2p1)) { if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) { /* * return zero */ Dbl_setzero_exponentmantissa(resultp1,resultp2); Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } /* * is NaN; signaling or quiet? 
*/ if (Dbl_isone_signaling(opnd2p1)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Dbl_set_quiet(opnd2p1); } /* * return quiet NaN */ Dbl_copytoptr(opnd2p1,opnd2p2,dstptr); return(NOEXCEPTION); } /* * check for division by zero */ if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) { if (Dbl_iszero_exponentmantissa(opnd1p1,opnd1p2)) { /* invalid since both operands are zero */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); Set_invalidflag(); Dbl_makequietnan(resultp1,resultp2); Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } if (Is_divisionbyzerotrap_enabled()) return(DIVISIONBYZEROEXCEPTION); Set_divisionbyzeroflag(); Dbl_setinfinity_exponentmantissa(resultp1,resultp2); Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } /* * Generate exponent */ dest_exponent = Dbl_exponent(opnd1p1) - Dbl_exponent(opnd2p1) + DBL_BIAS; /* * Generate mantissa */ if (Dbl_isnotzero_exponent(opnd1p1)) { /* set hidden bit */ Dbl_clear_signexponent_set_hidden(opnd1p1); } else { /* check for zero */ if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) { Dbl_setzero_exponentmantissa(resultp1,resultp2); Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } /* is denormalized, want to normalize */ Dbl_clear_signexponent(opnd1p1); Dbl_leftshiftby1(opnd1p1,opnd1p2); Dbl_normalize(opnd1p1,opnd1p2,dest_exponent); } /* opnd2 needs to have hidden bit set with msb in hidden bit */ if (Dbl_isnotzero_exponent(opnd2p1)) { Dbl_clear_signexponent_set_hidden(opnd2p1); } else { /* is denormalized; want to normalize */ Dbl_clear_signexponent(opnd2p1); Dbl_leftshiftby1(opnd2p1,opnd2p2); while (Dbl_iszero_hiddenhigh7mantissa(opnd2p1)) { dest_exponent+=8; Dbl_leftshiftby8(opnd2p1,opnd2p2); } if (Dbl_iszero_hiddenhigh3mantissa(opnd2p1)) { dest_exponent+=4; Dbl_leftshiftby4(opnd2p1,opnd2p2); } while (Dbl_iszero_hidden(opnd2p1)) { dest_exponent++; Dbl_leftshiftby1(opnd2p1,opnd2p2); } } /* 
Divide the source mantissas */ /* * A non-restoring divide algorithm is used. */ Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2); Dbl_setzero(opnd3p1,opnd3p2); for (count=1; count <= DBL_P && (opnd1p1 || opnd1p2); count++) { Dbl_leftshiftby1(opnd1p1,opnd1p2); Dbl_leftshiftby1(opnd3p1,opnd3p2); if (Dbl_iszero_sign(opnd1p1)) { Dbl_setone_lowmantissap2(opnd3p2); Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2); } else { Twoword_add(opnd1p1, opnd1p2, opnd2p1, opnd2p2); } } if (count <= DBL_P) { Dbl_leftshiftby1(opnd3p1,opnd3p2); Dbl_setone_lowmantissap2(opnd3p2); Dbl_leftshift(opnd3p1,opnd3p2,(DBL_P-count)); if (Dbl_iszero_hidden(opnd3p1)) { Dbl_leftshiftby1(opnd3p1,opnd3p2); dest_exponent--; } } else { if (Dbl_iszero_hidden(opnd3p1)) { /* need to get one more bit of result */ Dbl_leftshiftby1(opnd1p1,opnd1p2); Dbl_leftshiftby1(opnd3p1,opnd3p2); if (Dbl_iszero_sign(opnd1p1)) { Dbl_setone_lowmantissap2(opnd3p2); Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2); } else { Twoword_add(opnd1p1,opnd1p2,opnd2p1,opnd2p2); } dest_exponent--; } if (Dbl_iszero_sign(opnd1p1)) guardbit = TRUE; stickybit = Dbl_allp1(opnd1p1) || Dbl_allp2(opnd1p2); } inexact = guardbit | stickybit; /* * round result */ if (inexact && (dest_exponent > 0 || Is_underflowtrap_enabled())) { Dbl_clear_signexponent(opnd3p1); switch (Rounding_mode()) { case ROUNDPLUS: if (Dbl_iszero_sign(resultp1)) Dbl_increment(opnd3p1,opnd3p2); break; case ROUNDMINUS: if (Dbl_isone_sign(resultp1)) Dbl_increment(opnd3p1,opnd3p2); break; case ROUNDNEAREST: if (guardbit && (stickybit || Dbl_isone_lowmantissap2(opnd3p2))) { Dbl_increment(opnd3p1,opnd3p2); } } if (Dbl_isone_hidden(opnd3p1)) dest_exponent++; } Dbl_set_mantissa(resultp1,resultp2,opnd3p1,opnd3p2); /* * Test for overflow */ if (dest_exponent >= DBL_INFINITY_EXPONENT) { /* trap if OVERFLOWTRAP enabled */ if (Is_overflowtrap_enabled()) { /* * Adjust bias of result */ Dbl_setwrapped_exponent(resultp1,dest_exponent,ovfl); Dbl_copytoptr(resultp1,resultp2,dstptr); if 
(inexact) if (Is_inexacttrap_enabled()) return(OVERFLOWEXCEPTION | INEXACTEXCEPTION); else Set_inexactflag(); return(OVERFLOWEXCEPTION); } Set_overflowflag(); /* set result to infinity or largest number */ Dbl_setoverflow(resultp1,resultp2); inexact = TRUE; } /* * Test for underflow */ else if (dest_exponent <= 0) { /* trap if UNDERFLOWTRAP enabled */ if (Is_underflowtrap_enabled()) { /* * Adjust bias of result */ Dbl_setwrapped_exponent(resultp1,dest_exponent,unfl); Dbl_copytoptr(resultp1,resultp2,dstptr); if (inexact) if (Is_inexacttrap_enabled()) return(UNDERFLOWEXCEPTION | INEXACTEXCEPTION); else Set_inexactflag(); return(UNDERFLOWEXCEPTION); } /* Determine if should set underflow flag */ is_tiny = TRUE; if (dest_exponent == 0 && inexact) { switch (Rounding_mode()) { case ROUNDPLUS: if (Dbl_iszero_sign(resultp1)) { Dbl_increment(opnd3p1,opnd3p2); if (Dbl_isone_hiddenoverflow(opnd3p1)) is_tiny = FALSE; Dbl_decrement(opnd3p1,opnd3p2); } break; case ROUNDMINUS: if (Dbl_isone_sign(resultp1)) { Dbl_increment(opnd3p1,opnd3p2); if (Dbl_isone_hiddenoverflow(opnd3p1)) is_tiny = FALSE; Dbl_decrement(opnd3p1,opnd3p2); } break; case ROUNDNEAREST: if (guardbit && (stickybit || Dbl_isone_lowmantissap2(opnd3p2))) { Dbl_increment(opnd3p1,opnd3p2); if (Dbl_isone_hiddenoverflow(opnd3p1)) is_tiny = FALSE; Dbl_decrement(opnd3p1,opnd3p2); } break; } } /* * denormalize result or set to signed zero */ stickybit = inexact; Dbl_denormalize(opnd3p1,opnd3p2,dest_exponent,guardbit, stickybit,inexact); /* return rounded number */ if (inexact) { switch (Rounding_mode()) { case ROUNDPLUS: if (Dbl_iszero_sign(resultp1)) { Dbl_increment(opnd3p1,opnd3p2); } break; case ROUNDMINUS: if (Dbl_isone_sign(resultp1)) { Dbl_increment(opnd3p1,opnd3p2); } break; case ROUNDNEAREST: if (guardbit && (stickybit || Dbl_isone_lowmantissap2(opnd3p2))) { Dbl_increment(opnd3p1,opnd3p2); } break; } if (is_tiny) Set_underflowflag(); } Dbl_set_exponentmantissa(resultp1,resultp2,opnd3p1,opnd3p2); } else 
Dbl_set_exponent(resultp1,dest_exponent); Dbl_copytoptr(resultp1,resultp2,dstptr); /* check for inexact */ if (inexact) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } return(NOEXCEPTION); }
gpl-2.0
MaccKing/CBx
arch/parisc/math-emu/sfdiv.c
14149
11205
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC * * File: * @(#) pa/spmath/sfdiv.c $Revision: 1.1 $ * * Purpose: * Single Precision Floating-point Divide * * External Interfaces: * sgl_fdiv(srcptr1,srcptr2,dstptr,status) * * Internal Interfaces: * * Theory: * <<please update with a overview of the operation of this file>> * * END_DESC */ #include "float.h" #include "sgl_float.h" /* * Single Precision Floating-point Divide */ int sgl_fdiv (sgl_floating_point * srcptr1, sgl_floating_point * srcptr2, sgl_floating_point * dstptr, unsigned int *status) { register unsigned int opnd1, opnd2, opnd3, result; register int dest_exponent, count; register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE; boolean is_tiny; opnd1 = *srcptr1; opnd2 = *srcptr2; /* * set sign bit of result */ if (Sgl_sign(opnd1) ^ Sgl_sign(opnd2)) Sgl_setnegativezero(result); else Sgl_setzero(result); /* * check first operand for NaN's or infinity */ if (Sgl_isinfinity_exponent(opnd1)) { if (Sgl_iszero_mantissa(opnd1)) { if (Sgl_isnotnan(opnd2)) { if (Sgl_isinfinity(opnd2)) { /* * invalid since both operands * are infinity */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); 
Set_invalidflag(); Sgl_makequietnan(result); *dstptr = result; return(NOEXCEPTION); } /* * return infinity */ Sgl_setinfinity_exponentmantissa(result); *dstptr = result; return(NOEXCEPTION); } } else { /* * is NaN; signaling or quiet? */ if (Sgl_isone_signaling(opnd1)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Sgl_set_quiet(opnd1); } /* * is second operand a signaling NaN? */ else if (Sgl_is_signalingnan(opnd2)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Sgl_set_quiet(opnd2); *dstptr = opnd2; return(NOEXCEPTION); } /* * return quiet NaN */ *dstptr = opnd1; return(NOEXCEPTION); } } /* * check second operand for NaN's or infinity */ if (Sgl_isinfinity_exponent(opnd2)) { if (Sgl_iszero_mantissa(opnd2)) { /* * return zero */ Sgl_setzero_exponentmantissa(result); *dstptr = result; return(NOEXCEPTION); } /* * is NaN; signaling or quiet? 
*/ if (Sgl_isone_signaling(opnd2)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Sgl_set_quiet(opnd2); } /* * return quiet NaN */ *dstptr = opnd2; return(NOEXCEPTION); } /* * check for division by zero */ if (Sgl_iszero_exponentmantissa(opnd2)) { if (Sgl_iszero_exponentmantissa(opnd1)) { /* invalid since both operands are zero */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); Set_invalidflag(); Sgl_makequietnan(result); *dstptr = result; return(NOEXCEPTION); } if (Is_divisionbyzerotrap_enabled()) return(DIVISIONBYZEROEXCEPTION); Set_divisionbyzeroflag(); Sgl_setinfinity_exponentmantissa(result); *dstptr = result; return(NOEXCEPTION); } /* * Generate exponent */ dest_exponent = Sgl_exponent(opnd1) - Sgl_exponent(opnd2) + SGL_BIAS; /* * Generate mantissa */ if (Sgl_isnotzero_exponent(opnd1)) { /* set hidden bit */ Sgl_clear_signexponent_set_hidden(opnd1); } else { /* check for zero */ if (Sgl_iszero_mantissa(opnd1)) { Sgl_setzero_exponentmantissa(result); *dstptr = result; return(NOEXCEPTION); } /* is denormalized; want to normalize */ Sgl_clear_signexponent(opnd1); Sgl_leftshiftby1(opnd1); Sgl_normalize(opnd1,dest_exponent); } /* opnd2 needs to have hidden bit set with msb in hidden bit */ if (Sgl_isnotzero_exponent(opnd2)) { Sgl_clear_signexponent_set_hidden(opnd2); } else { /* is denormalized; want to normalize */ Sgl_clear_signexponent(opnd2); Sgl_leftshiftby1(opnd2); while(Sgl_iszero_hiddenhigh7mantissa(opnd2)) { Sgl_leftshiftby8(opnd2); dest_exponent += 8; } if(Sgl_iszero_hiddenhigh3mantissa(opnd2)) { Sgl_leftshiftby4(opnd2); dest_exponent += 4; } while(Sgl_iszero_hidden(opnd2)) { Sgl_leftshiftby1(opnd2); dest_exponent += 1; } } /* Divide the source mantissas */ /* * A non_restoring divide algorithm is used. 
*/ Sgl_subtract(opnd1,opnd2,opnd1); Sgl_setzero(opnd3); for (count=1;count<=SGL_P && Sgl_all(opnd1);count++) { Sgl_leftshiftby1(opnd1); Sgl_leftshiftby1(opnd3); if (Sgl_iszero_sign(opnd1)) { Sgl_setone_lowmantissa(opnd3); Sgl_subtract(opnd1,opnd2,opnd1); } else Sgl_addition(opnd1,opnd2,opnd1); } if (count <= SGL_P) { Sgl_leftshiftby1(opnd3); Sgl_setone_lowmantissa(opnd3); Sgl_leftshift(opnd3,SGL_P-count); if (Sgl_iszero_hidden(opnd3)) { Sgl_leftshiftby1(opnd3); dest_exponent--; } } else { if (Sgl_iszero_hidden(opnd3)) { /* need to get one more bit of result */ Sgl_leftshiftby1(opnd1); Sgl_leftshiftby1(opnd3); if (Sgl_iszero_sign(opnd1)) { Sgl_setone_lowmantissa(opnd3); Sgl_subtract(opnd1,opnd2,opnd1); } else Sgl_addition(opnd1,opnd2,opnd1); dest_exponent--; } if (Sgl_iszero_sign(opnd1)) guardbit = TRUE; stickybit = Sgl_all(opnd1); } inexact = guardbit | stickybit; /* * round result */ if (inexact && (dest_exponent > 0 || Is_underflowtrap_enabled())) { Sgl_clear_signexponent(opnd3); switch (Rounding_mode()) { case ROUNDPLUS: if (Sgl_iszero_sign(result)) Sgl_increment_mantissa(opnd3); break; case ROUNDMINUS: if (Sgl_isone_sign(result)) Sgl_increment_mantissa(opnd3); break; case ROUNDNEAREST: if (guardbit) { if (stickybit || Sgl_isone_lowmantissa(opnd3)) Sgl_increment_mantissa(opnd3); } } if (Sgl_isone_hidden(opnd3)) dest_exponent++; } Sgl_set_mantissa(result,opnd3); /* * Test for overflow */ if (dest_exponent >= SGL_INFINITY_EXPONENT) { /* trap if OVERFLOWTRAP enabled */ if (Is_overflowtrap_enabled()) { /* * Adjust bias of result */ Sgl_setwrapped_exponent(result,dest_exponent,ovfl); *dstptr = result; if (inexact) if (Is_inexacttrap_enabled()) return(OVERFLOWEXCEPTION | INEXACTEXCEPTION); else Set_inexactflag(); return(OVERFLOWEXCEPTION); } Set_overflowflag(); /* set result to infinity or largest number */ Sgl_setoverflow(result); inexact = TRUE; } /* * Test for underflow */ else if (dest_exponent <= 0) { /* trap if UNDERFLOWTRAP enabled */ if 
(Is_underflowtrap_enabled()) { /* * Adjust bias of result */ Sgl_setwrapped_exponent(result,dest_exponent,unfl); *dstptr = result; if (inexact) if (Is_inexacttrap_enabled()) return(UNDERFLOWEXCEPTION | INEXACTEXCEPTION); else Set_inexactflag(); return(UNDERFLOWEXCEPTION); } /* Determine if should set underflow flag */ is_tiny = TRUE; if (dest_exponent == 0 && inexact) { switch (Rounding_mode()) { case ROUNDPLUS: if (Sgl_iszero_sign(result)) { Sgl_increment(opnd3); if (Sgl_isone_hiddenoverflow(opnd3)) is_tiny = FALSE; Sgl_decrement(opnd3); } break; case ROUNDMINUS: if (Sgl_isone_sign(result)) { Sgl_increment(opnd3); if (Sgl_isone_hiddenoverflow(opnd3)) is_tiny = FALSE; Sgl_decrement(opnd3); } break; case ROUNDNEAREST: if (guardbit && (stickybit || Sgl_isone_lowmantissa(opnd3))) { Sgl_increment(opnd3); if (Sgl_isone_hiddenoverflow(opnd3)) is_tiny = FALSE; Sgl_decrement(opnd3); } break; } } /* * denormalize result or set to signed zero */ stickybit = inexact; Sgl_denormalize(opnd3,dest_exponent,guardbit,stickybit,inexact); /* return rounded number */ if (inexact) { switch (Rounding_mode()) { case ROUNDPLUS: if (Sgl_iszero_sign(result)) { Sgl_increment(opnd3); } break; case ROUNDMINUS: if (Sgl_isone_sign(result)) { Sgl_increment(opnd3); } break; case ROUNDNEAREST: if (guardbit && (stickybit || Sgl_isone_lowmantissa(opnd3))) { Sgl_increment(opnd3); } break; } if (is_tiny) Set_underflowflag(); } Sgl_set_exponentmantissa(result,opnd3); } else Sgl_set_exponent(result,dest_exponent); *dstptr = result; /* check for inexact */ if (inexact) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } return(NOEXCEPTION); }
gpl-2.0
CyanogenMod/htc-kernel-supersonic
arch/parisc/math-emu/fcnvfxt.c
14149
8658
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC * * File: * @(#) pa/spmath/fcnvfxt.c $Revision: 1.1 $ * * Purpose: * Single Floating-point to Single Fixed-point /w truncated result * Single Floating-point to Double Fixed-point /w truncated result * Double Floating-point to Single Fixed-point /w truncated result * Double Floating-point to Double Fixed-point /w truncated result * * External Interfaces: * dbl_to_dbl_fcnvfxt(srcptr,nullptr,dstptr,status) * dbl_to_sgl_fcnvfxt(srcptr,nullptr,dstptr,status) * sgl_to_dbl_fcnvfxt(srcptr,nullptr,dstptr,status) * sgl_to_sgl_fcnvfxt(srcptr,nullptr,dstptr,status) * * Internal Interfaces: * * Theory: * <<please update with a overview of the operation of this file>> * * END_DESC */ #include "float.h" #include "sgl_float.h" #include "dbl_float.h" #include "cnv_float.h" /* * Convert single floating-point to single fixed-point format * with truncated result */ /*ARGSUSED*/ int sgl_to_sgl_fcnvfxt( sgl_floating_point *srcptr, unsigned int *nullptr, int *dstptr, unsigned int *status) { register unsigned int src, temp; register int src_exponent, result; src = *srcptr; src_exponent = Sgl_exponent(src) - SGL_BIAS; /* * Test for overflow */ 
if (src_exponent > SGL_FX_MAX_EXP) { /* check for MININT */ if ((src_exponent > SGL_FX_MAX_EXP + 1) || Sgl_isnotzero_mantissa(src) || Sgl_iszero_sign(src)) { if (Sgl_iszero_sign(src)) result = 0x7fffffff; else result = 0x80000000; if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); *dstptr = result; return(NOEXCEPTION); } } /* * Generate result */ if (src_exponent >= 0) { temp = src; Sgl_clear_signexponent_set_hidden(temp); Int_from_sgl_mantissa(temp,src_exponent); if (Sgl_isone_sign(src)) result = -Sgl_all(temp); else result = Sgl_all(temp); *dstptr = result; /* check for inexact */ if (Sgl_isinexact_to_fix(src,src_exponent)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } else { *dstptr = 0; /* check for inexact */ if (Sgl_isnotzero_exponentmantissa(src)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } return(NOEXCEPTION); } /* * Single Floating-point to Double Fixed-point */ /*ARGSUSED*/ int sgl_to_dbl_fcnvfxt( sgl_floating_point *srcptr, unsigned int *nullptr, dbl_integer *dstptr, unsigned int *status) { register int src_exponent, resultp1; register unsigned int src, temp, resultp2; src = *srcptr; src_exponent = Sgl_exponent(src) - SGL_BIAS; /* * Test for overflow */ if (src_exponent > DBL_FX_MAX_EXP) { /* check for MININT */ if ((src_exponent > DBL_FX_MAX_EXP + 1) || Sgl_isnotzero_mantissa(src) || Sgl_iszero_sign(src)) { if (Sgl_iszero_sign(src)) { resultp1 = 0x7fffffff; resultp2 = 0xffffffff; } else { resultp1 = 0x80000000; resultp2 = 0; } if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); Dint_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } Dint_set_minint(resultp1,resultp2); Dint_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } /* * Generate result */ if (src_exponent >= 0) { temp = src; Sgl_clear_signexponent_set_hidden(temp); Dint_from_sgl_mantissa(temp,src_exponent,resultp1,resultp2); if 
(Sgl_isone_sign(src)) { Dint_setone_sign(resultp1,resultp2); } Dint_copytoptr(resultp1,resultp2,dstptr); /* check for inexact */ if (Sgl_isinexact_to_fix(src,src_exponent)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } else { Dint_setzero(resultp1,resultp2); Dint_copytoptr(resultp1,resultp2,dstptr); /* check for inexact */ if (Sgl_isnotzero_exponentmantissa(src)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } return(NOEXCEPTION); } /* * Double Floating-point to Single Fixed-point */ /*ARGSUSED*/ int dbl_to_sgl_fcnvfxt( dbl_floating_point *srcptr, unsigned int *nullptr, int *dstptr, unsigned int *status) { register unsigned int srcp1, srcp2, tempp1, tempp2; register int src_exponent, result; Dbl_copyfromptr(srcptr,srcp1,srcp2); src_exponent = Dbl_exponent(srcp1) - DBL_BIAS; /* * Test for overflow */ if (src_exponent > SGL_FX_MAX_EXP) { /* check for MININT */ if (Dbl_isoverflow_to_int(src_exponent,srcp1,srcp2)) { if (Dbl_iszero_sign(srcp1)) result = 0x7fffffff; else result = 0x80000000; if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); *dstptr = result; return(NOEXCEPTION); } } /* * Generate result */ if (src_exponent >= 0) { tempp1 = srcp1; tempp2 = srcp2; Dbl_clear_signexponent_set_hidden(tempp1); Int_from_dbl_mantissa(tempp1,tempp2,src_exponent); if (Dbl_isone_sign(srcp1) && (src_exponent <= SGL_FX_MAX_EXP)) result = -Dbl_allp1(tempp1); else result = Dbl_allp1(tempp1); *dstptr = result; /* check for inexact */ if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } else { *dstptr = 0; /* check for inexact */ if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } return(NOEXCEPTION); } /* * Double Floating-point to Double Fixed-point */ /*ARGSUSED*/ int dbl_to_dbl_fcnvfxt( dbl_floating_point *srcptr, 
unsigned int *nullptr, dbl_integer *dstptr, unsigned int *status) { register int src_exponent, resultp1; register unsigned int srcp1, srcp2, tempp1, tempp2, resultp2; Dbl_copyfromptr(srcptr,srcp1,srcp2); src_exponent = Dbl_exponent(srcp1) - DBL_BIAS; /* * Test for overflow */ if (src_exponent > DBL_FX_MAX_EXP) { /* check for MININT */ if ((src_exponent > DBL_FX_MAX_EXP + 1) || Dbl_isnotzero_mantissa(srcp1,srcp2) || Dbl_iszero_sign(srcp1)) { if (Dbl_iszero_sign(srcp1)) { resultp1 = 0x7fffffff; resultp2 = 0xffffffff; } else { resultp1 = 0x80000000; resultp2 = 0; } if (Is_invalidtrap_enabled()) { return(INVALIDEXCEPTION); } Set_invalidflag(); Dint_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } } /* * Generate result */ if (src_exponent >= 0) { tempp1 = srcp1; tempp2 = srcp2; Dbl_clear_signexponent_set_hidden(tempp1); Dint_from_dbl_mantissa(tempp1,tempp2,src_exponent, resultp1,resultp2); if (Dbl_isone_sign(srcp1)) { Dint_setone_sign(resultp1,resultp2); } Dint_copytoptr(resultp1,resultp2,dstptr); /* check for inexact */ if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } else { Dint_setzero(resultp1,resultp2); Dint_copytoptr(resultp1,resultp2,dstptr); /* check for inexact */ if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) { if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); } } return(NOEXCEPTION); }
gpl-2.0
rbauduin/mptcp
arch/parisc/math-emu/sfmpy.c
14149
10577
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * Floating-point emulation code
 *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2, or (at your option)
 *    any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * BEGIN_DESC
 *
 *  File:
 *	@(#)	pa/spmath/sfmpy.c		$Revision: 1.1 $
 *
 *  Purpose:
 *	Single Precision Floating-point Multiply
 *
 *  External Interfaces:
 *	sgl_fmpy(srcptr1,srcptr2,dstptr,status)
 *
 *  Internal Interfaces:
 *
 *  Theory:
 *	<<please update with a overview of the operation of this file>>
 *
 * END_DESC
 */

#include "float.h"
#include "sgl_float.h"

/*
 * Single Precision Floating-point Multiply
 *
 * Multiplies *srcptr1 by *srcptr2 and stores the rounded product in
 * *dstptr.  Returns an exception mask (NOEXCEPTION, or INVALID/
 * OVERFLOW/UNDERFLOW/INEXACT combinations) for the trap dispatcher.
 * All Sgl_* operations are macros from sgl_float.h that manipulate the
 * operand words in place, so statement order is significant throughout.
 */
int
sgl_fmpy(
    sgl_floating_point *srcptr1,
    sgl_floating_point *srcptr2,
    sgl_floating_point *dstptr,
    unsigned int *status)
{
	register unsigned int opnd1, opnd2, opnd3, result;
	register int dest_exponent, count;
	register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
	boolean is_tiny;

	opnd1 = *srcptr1;
	opnd2 = *srcptr2;
	/*
	 * set sign bit of result
	 */
	if (Sgl_sign(opnd1) ^ Sgl_sign(opnd2))
		Sgl_setnegativezero(result);
	else Sgl_setzero(result);
	/*
	 * check first operand for NaN's or infinity
	 */
	if (Sgl_isinfinity_exponent(opnd1)) {
		if (Sgl_iszero_mantissa(opnd1)) {
			/* opnd1 is infinity */
			if (Sgl_isnotnan(opnd2)) {
				if (Sgl_iszero_exponentmantissa(opnd2)) {
					/*
					 * invalid since operands are infinity
					 * and zero
					 */
					if (Is_invalidtrap_enabled())
						return(INVALIDEXCEPTION);
					Set_invalidflag();
					Sgl_makequietnan(result);
					*dstptr = result;
					return(NOEXCEPTION);
				}
				/*
				 * return infinity
				 */
				Sgl_setinfinity_exponentmantissa(result);
				*dstptr = result;
				return(NOEXCEPTION);
			}
		}
		else {
			/*
			 * is NaN; signaling or quiet?
			 */
			if (Sgl_isone_signaling(opnd1)) {
				/* trap if INVALIDTRAP enabled */
				if (Is_invalidtrap_enabled())
					return(INVALIDEXCEPTION);
				/* make NaN quiet */
				Set_invalidflag();
				Sgl_set_quiet(opnd1);
			}
			/*
			 * is second operand a signaling NaN?
			 */
			else if (Sgl_is_signalingnan(opnd2)) {
				/* trap if INVALIDTRAP enabled */
				if (Is_invalidtrap_enabled())
					return(INVALIDEXCEPTION);
				/* make NaN quiet */
				Set_invalidflag();
				Sgl_set_quiet(opnd2);
				*dstptr = opnd2;
				return(NOEXCEPTION);
			}
			/*
			 * return quiet NaN
			 */
			*dstptr = opnd1;
			return(NOEXCEPTION);
		}
	}
	/*
	 * check second operand for NaN's or infinity
	 */
	if (Sgl_isinfinity_exponent(opnd2)) {
		if (Sgl_iszero_mantissa(opnd2)) {
			/* opnd2 is infinity; opnd1 is finite here */
			if (Sgl_iszero_exponentmantissa(opnd1)) {
				/* invalid since operands are zero & infinity */
				if (Is_invalidtrap_enabled())
					return(INVALIDEXCEPTION);
				Set_invalidflag();
				Sgl_makequietnan(opnd2);
				*dstptr = opnd2;
				return(NOEXCEPTION);
			}
			/*
			 * return infinity
			 */
			Sgl_setinfinity_exponentmantissa(result);
			*dstptr = result;
			return(NOEXCEPTION);
		}
		/*
		 * is NaN; signaling or quiet?
		 */
		if (Sgl_isone_signaling(opnd2)) {
			/* trap if INVALIDTRAP enabled */
			if (Is_invalidtrap_enabled())
				return(INVALIDEXCEPTION);
			/* make NaN quiet */
			Set_invalidflag();
			Sgl_set_quiet(opnd2);
		}
		/*
		 * return quiet NaN
		 */
		*dstptr = opnd2;
		return(NOEXCEPTION);
	}
	/*
	 * Generate exponent
	 */
	dest_exponent = Sgl_exponent(opnd1) + Sgl_exponent(opnd2) - SGL_BIAS;

	/*
	 * Generate mantissa
	 */
	if (Sgl_isnotzero_exponent(opnd1)) {
		/* set hidden bit */
		Sgl_clear_signexponent_set_hidden(opnd1);
	}
	else {
		/* check for zero */
		if (Sgl_iszero_mantissa(opnd1)) {
			/* zero * finite = (signed) zero */
			Sgl_setzero_exponentmantissa(result);
			*dstptr = result;
			return(NOEXCEPTION);
		}
		/* is denormalized, adjust exponent */
		Sgl_clear_signexponent(opnd1);
		Sgl_leftshiftby1(opnd1);
		Sgl_normalize(opnd1,dest_exponent);
	}
	/* opnd2 needs to have hidden bit set with msb in hidden bit */
	if (Sgl_isnotzero_exponent(opnd2)) {
		Sgl_clear_signexponent_set_hidden(opnd2);
	}
	else {
		/* check for zero */
		if (Sgl_iszero_mantissa(opnd2)) {
			Sgl_setzero_exponentmantissa(result);
			*dstptr = result;
			return(NOEXCEPTION);
		}
		/* is denormalized; want to normalize */
		Sgl_clear_signexponent(opnd2);
		Sgl_leftshiftby1(opnd2);
		Sgl_normalize(opnd2,dest_exponent);
	}

	/* Multiply two source mantissas together */

	Sgl_leftshiftby4(opnd2);	/* make room for guard bits */
	Sgl_setzero(opnd3);
	/*
	 * Four bits at a time are inspected in each loop, and a
	 * simple shift and add multiply algorithm is used.
	 */
	for (count=1;count<SGL_P;count+=4) {
		/* bits shifted out of the partial product feed sticky */
		stickybit |= Slow4(opnd3);
		Sgl_rightshiftby4(opnd3);
		if (Sbit28(opnd1)) Sall(opnd3) += (Sall(opnd2) << 3);
		if (Sbit29(opnd1)) Sall(opnd3) += (Sall(opnd2) << 2);
		if (Sbit30(opnd1)) Sall(opnd3) += (Sall(opnd2) << 1);
		if (Sbit31(opnd1)) Sall(opnd3) += Sall(opnd2);
		Sgl_rightshiftby4(opnd1);
	}
	/* make sure result is left-justified */
	if (Sgl_iszero_sign(opnd3)) {
		Sgl_leftshiftby1(opnd3);
	}
	else {
		/* result mantissa >= 2. */
		dest_exponent++;
	}
	/* check for denormalized result */
	while (Sgl_iszero_sign(opnd3)) {
		Sgl_leftshiftby1(opnd3);
		dest_exponent--;
	}
	/*
	 * check for guard, sticky and inexact bits
	 */
	stickybit |= Sgl_all(opnd3) << (SGL_BITLENGTH - SGL_EXP_LENGTH + 1);
	guardbit = Sbit24(opnd3);
	inexact = guardbit | stickybit;

	/* re-align mantissa */
	Sgl_rightshiftby8(opnd3);

	/*
	 * round result
	 */
	if (inexact && (dest_exponent>0 || Is_underflowtrap_enabled())) {
		Sgl_clear_signexponent(opnd3);
		switch (Rounding_mode()) {
			case ROUNDPLUS:
				if (Sgl_iszero_sign(result))
					Sgl_increment(opnd3);
				break;
			case ROUNDMINUS:
				if (Sgl_isone_sign(result))
					Sgl_increment(opnd3);
				break;
			case ROUNDNEAREST:
				/* round-to-nearest-even */
				if (guardbit) {
					if (stickybit || Sgl_isone_lowmantissa(opnd3))
						Sgl_increment(opnd3);
				}
		}
		/* rounding may have carried into the hidden bit */
		if (Sgl_isone_hidden(opnd3)) dest_exponent++;
	}
	Sgl_set_mantissa(result,opnd3);

	/*
	 * Test for overflow
	 */
	if (dest_exponent >= SGL_INFINITY_EXPONENT) {
		/* trap if OVERFLOWTRAP enabled */
		if (Is_overflowtrap_enabled()) {
			/*
			 * Adjust bias of result
			 */
			Sgl_setwrapped_exponent(result,dest_exponent,ovfl);
			*dstptr = result;
			if (inexact)
			    if (Is_inexacttrap_enabled())
				return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
			    else Set_inexactflag();
			return(OVERFLOWEXCEPTION);
		}
		inexact = TRUE;
		Set_overflowflag();
		/* set result to infinity or largest number */
		Sgl_setoverflow(result);
	}
	/*
	 * Test for underflow
	 */
	else if (dest_exponent <= 0) {
		/* trap if UNDERFLOWTRAP enabled */
		if (Is_underflowtrap_enabled()) {
			/*
			 * Adjust bias of result
			 */
			Sgl_setwrapped_exponent(result,dest_exponent,unfl);
			*dstptr = result;
			if (inexact)
			    if (Is_inexacttrap_enabled())
				return(UNDERFLOWEXCEPTION | INEXACTEXCEPTION);
			    else Set_inexactflag();
			return(UNDERFLOWEXCEPTION);
		}

		/* Determine if should set underflow flag: tininess is
		 * checked after rounding by probing whether a rounded-up
		 * mantissa would overflow back into the normal range */
		is_tiny = TRUE;
		if (dest_exponent == 0 && inexact) {
			switch (Rounding_mode()) {
			case ROUNDPLUS:
				if (Sgl_iszero_sign(result)) {
					Sgl_increment(opnd3);
					if (Sgl_isone_hiddenoverflow(opnd3))
						is_tiny = FALSE;
					Sgl_decrement(opnd3);
				}
				break;
			case ROUNDMINUS:
				if (Sgl_isone_sign(result)) {
					Sgl_increment(opnd3);
					if (Sgl_isone_hiddenoverflow(opnd3))
						is_tiny = FALSE;
					Sgl_decrement(opnd3);
				}
				break;
			case ROUNDNEAREST:
				if (guardbit && (stickybit ||
				    Sgl_isone_lowmantissa(opnd3))) {
					Sgl_increment(opnd3);
					if (Sgl_isone_hiddenoverflow(opnd3))
						is_tiny = FALSE;
					Sgl_decrement(opnd3);
				}
				break;
			}
		}

		/*
		 * denormalize result or set to signed zero
		 */
		stickybit = inexact;
		Sgl_denormalize(opnd3,dest_exponent,guardbit,stickybit,inexact);

		/* return zero or smallest number */
		if (inexact) {
			switch (Rounding_mode()) {
			case ROUNDPLUS:
				if (Sgl_iszero_sign(result)) {
					Sgl_increment(opnd3);
				}
				break;
			case ROUNDMINUS:
				if (Sgl_isone_sign(result)) {
					Sgl_increment(opnd3);
				}
				break;
			case ROUNDNEAREST:
				if (guardbit && (stickybit ||
				    Sgl_isone_lowmantissa(opnd3))) {
					Sgl_increment(opnd3);
				}
				break;
			}
			if (is_tiny) Set_underflowflag();
		}
		Sgl_set_exponentmantissa(result,opnd3);
	}
	else Sgl_set_exponent(result,dest_exponent);
	*dstptr = result;

	/* check for inexact */
	if (inexact) {
		if (Is_inexacttrap_enabled())
			return(INEXACTEXCEPTION);
		else Set_inexactflag();
	}
	return(NOEXCEPTION);
}
gpl-2.0
aznrice/KinGk_msm8960
fs/jbd2/transaction.c
70
68366
/*
 * linux/fs/jbd2/transaction.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Generic filesystem transaction handling code; part of the ext2fs
 * journaling system.
 *
 * This file manages transactions (compound commits managed by the
 * journaling code) and handles (individual atomic operations by the
 * filesystem).
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/backing-dev.h>
#include <linux/module.h>

static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
static void __jbd2_journal_unfile_buffer(struct journal_head *jh);

/*
 * jbd2_get_transaction: obtain a new transaction_t object.
 *
 * Simply allocate and initialise a new transaction.  Create it in
 * RUNNING state and add it to the current journal (which should not
 * have an existing running transaction: we only make a new transaction
 * once we have started to commit the old one).
 *
 * Preconditions:
 *	The journal MUST be locked.  We don't perform atomic mallocs on the
 *	new transaction	and we can't block without protecting against other
 *	processes trying to touch the journal while it is in transition.
 *
 */

static transaction_t *
jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
{
	transaction->t_journal = journal;
	transaction->t_state = T_RUNNING;
	transaction->t_start_time = ktime_get();
	transaction->t_tid = journal->j_transaction_sequence++;
	transaction->t_expires = jiffies + journal->j_commit_interval;
	spin_lock_init(&transaction->t_handle_lock);
	atomic_set(&transaction->t_updates, 0);
	atomic_set(&transaction->t_outstanding_credits, 0);
	atomic_set(&transaction->t_handle_count, 0);
	INIT_LIST_HEAD(&transaction->t_inode_list);
	INIT_LIST_HEAD(&transaction->t_private_list);

	/* Set up the commit timer for the new transaction. */
	journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires);
	add_timer(&journal->j_commit_timer);

	J_ASSERT(journal->j_running_transaction == NULL);
	journal->j_running_transaction = transaction;
	transaction->t_max_wait = 0;
	transaction->t_start = jiffies;

	return transaction;
}

/*
 * Handle management.
 *
 * A handle_t is an object which represents a single atomic update to a
 * filesystem, and which tracks all of the modifications which form part
 * of that one update.
 */

/*
 * Update transaction's maximum wait time, if debugging is enabled.
 *
 * In order for t_max_wait to be reliable, it must be protected by a
 * lock.  But doing so will mean that start_this_handle() can not be
 * run in parallel on SMP systems, which limits our scalability.  So
 * unless debugging is enabled, we no longer update t_max_wait, which
 * means that maximum wait time reported by the jbd2_run_stats
 * tracepoint will always be zero.
 */
static inline void update_t_max_wait(transaction_t *transaction,
				     unsigned long ts)
{
#ifdef CONFIG_JBD2_DEBUG
	if (jbd2_journal_enable_debug &&
	    time_after(transaction->t_start, ts)) {
		ts = jbd2_time_diff(ts, transaction->t_start);
		spin_lock(&transaction->t_handle_lock);
		if (ts > transaction->t_max_wait)
			transaction->t_max_wait = ts;
		spin_unlock(&transaction->t_handle_lock);
	}
#endif
}

/*
 * start_this_handle: Given a handle, deal with any locking or stalling
 * needed to make sure that there is enough journal space for the handle
 * to begin.  Attach the handle to a transaction and set up the
 * transaction's buffer credits.
 *
 * Returns 0 on success with the handle attached to the running
 * transaction, or a negative errno (-ENOSPC, -ENOMEM, -EROFS).  May
 * block waiting for journal space or a transaction barrier.
 */

static int start_this_handle(journal_t *journal, handle_t *handle,
			     int gfp_mask)
{
	transaction_t	*transaction, *new_transaction = NULL;
	tid_t		tid;
	int		needed, need_to_start;
	int		nblocks = handle->h_buffer_credits;
	unsigned long ts = jiffies;

	/* A single handle can never need more than the whole journal. */
	if (nblocks > journal->j_max_transaction_buffers) {
		printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
		       current->comm, nblocks,
		       journal->j_max_transaction_buffers);
		return -ENOSPC;
	}

alloc_transaction:
	if (!journal->j_running_transaction) {
		/* Allocate outside any journal lock; may be freed unused
		 * if another CPU installs a transaction first. */
		new_transaction = kzalloc(sizeof(*new_transaction), gfp_mask);
		if (!new_transaction) {
			/*
			 * If __GFP_FS is not present, then we may be
			 * being called from inside the fs writeback
			 * layer, so we MUST NOT fail.  Since
			 * __GFP_NOFAIL is going away, we will arrange
			 * to retry the allocation ourselves.
			 */
			if ((gfp_mask & __GFP_FS) == 0) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto alloc_transaction;
			}
			return -ENOMEM;
		}
	}

	jbd_debug(3, "New handle %p going live.\n", handle);

	/*
	 * We need to hold j_state_lock until t_updates has been incremented,
	 * for proper journal barrier handling
	 */
repeat:
	read_lock(&journal->j_state_lock);
	BUG_ON(journal->j_flags & JBD2_UNMOUNT);
	if (is_journal_aborted(journal) ||
	    (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
		read_unlock(&journal->j_state_lock);
		kfree(new_transaction);
		return -EROFS;
	}

	/* Wait on the journal's transaction barrier if necessary */
	if (journal->j_barrier_count) {
		read_unlock(&journal->j_state_lock);
		wait_event(journal->j_wait_transaction_locked,
				journal->j_barrier_count == 0);
		goto repeat;
	}

	if (!journal->j_running_transaction) {
		read_unlock(&journal->j_state_lock);
		if (!new_transaction)
			goto alloc_transaction;
		write_lock(&journal->j_state_lock);
		/* Wait on the journal's transaction barrier if necessary:
		 * recheck under the write lock since it may have been
		 * raised after we dropped the read lock. */
		if (journal->j_barrier_count) {
			printk(KERN_WARNING "JBD: %s: wait for transaction barrier\n", __func__);
			write_unlock(&journal->j_state_lock);
			goto repeat;
		}
		if (!journal->j_running_transaction) {
			jbd2_get_transaction(journal, new_transaction);
			/* ownership transferred to the journal */
			new_transaction = NULL;
		}
		write_unlock(&journal->j_state_lock);
		goto repeat;
	}

	transaction = journal->j_running_transaction;

	/*
	 * If the current transaction is locked down for commit, wait for the
	 * lock to be released.
	 */
	if (transaction->t_state == T_LOCKED) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_transaction_locked,
					&wait, TASK_UNINTERRUPTIBLE);
		read_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_transaction_locked, &wait);
		goto repeat;
	}

	/*
	 * If there is not enough space left in the log to write all potential
	 * buffers requested by this operation, we need to stall pending a log
	 * checkpoint to free some more log space.
	 */
	needed = atomic_add_return(nblocks,
				   &transaction->t_outstanding_credits);

	if (needed > journal->j_max_transaction_buffers) {
		/*
		 * If the current transaction is already too large, then start
		 * to commit it: we can then go back and attach this handle to
		 * a new transaction.
		 */
		DEFINE_WAIT(wait);

		jbd_debug(2, "Handle %p starting new commit...\n", handle);
		atomic_sub(nblocks, &transaction->t_outstanding_credits);
		prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
				TASK_UNINTERRUPTIBLE);
		tid = transaction->t_tid;
		need_to_start = !tid_geq(journal->j_commit_request, tid);
		read_unlock(&journal->j_state_lock);
		if (need_to_start)
			jbd2_log_start_commit(journal, tid);
		schedule();
		finish_wait(&journal->j_wait_transaction_locked, &wait);
		goto repeat;
	}

	/*
	 * The commit code assumes that it can get enough log space
	 * without forcing a checkpoint.  This is *critical* for
	 * correctness: a checkpoint of a buffer which is also
	 * associated with a committing transaction creates a deadlock,
	 * so commit simply cannot force through checkpoints.
	 *
	 * We must therefore ensure the necessary space in the journal
	 * *before* starting to dirty potentially checkpointed buffers
	 * in the new transaction.
	 *
	 * The worst part is, any transaction currently committing can
	 * reduce the free space arbitrarily.  Be careful to account for
	 * those buffers when checkpointing.
	 */

	/*
	 * @@@ AKPM: This seems rather over-defensive.  We're giving commit
	 * a _lot_ of headroom: 1/4 of the journal plus the size of
	 * the committing transaction.  Really, we only need to give it
	 * committing_transaction->t_outstanding_credits plus "enough" for
	 * the log control blocks.
	 * Also, this test is inconsistent with the matching one in
	 * jbd2_journal_extend().
	 */
	if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
		jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
		atomic_sub(nblocks, &transaction->t_outstanding_credits);
		read_unlock(&journal->j_state_lock);
		write_lock(&journal->j_state_lock);
		/* recheck under the write lock before waiting */
		if (__jbd2_log_space_left(journal) < jbd_space_needed(journal))
			__jbd2_log_wait_for_space(journal);
		write_unlock(&journal->j_state_lock);
		goto repeat;
	}

	/* OK, account for the buffers that this operation expects to
	 * use and add the handle to the running transaction.
	 */
	update_t_max_wait(transaction, ts);
	handle->h_transaction = transaction;
	atomic_inc(&transaction->t_updates);
	atomic_inc(&transaction->t_handle_count);
	jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
		  handle, nblocks,
		  atomic_read(&transaction->t_outstanding_credits),
		  __jbd2_log_space_left(journal));
	read_unlock(&journal->j_state_lock);

	lock_map_acquire(&handle->h_lockdep_map);
	/* kfree(NULL) is a no-op if the preallocation went unused */
	kfree(new_transaction);
	return 0;
}

static struct lock_class_key jbd2_handle_key;

/* Allocate a new handle.  This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
	handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
	if (!handle)
		return NULL;
	memset(handle, 0, sizeof(*handle));
	handle->h_buffer_credits = nblocks;
	handle->h_ref = 1;

	lockdep_init_map(&handle->h_lockdep_map, "jbd2_handle",
						&jbd2_handle_key, 0);

	return handle;
}

/**
 * handle_t *jbd2_journal_start() - Obtain a new handle.
 * @journal: Journal to start transaction on.
 * @nblocks: number of block buffer we might modify
 *
 * We make sure that the transaction can guarantee at least nblocks of
 * modified buffers in the log.  We block until the log can guarantee
 * that much space.
 *
 * This function is visible to journal users (like ext3fs), so is not
 * called with the journal already locked.
 *
 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
 * on failure.
 */
handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int gfp_mask)
{
	handle_t *handle = journal_current_handle();
	int err;

	if (!journal)
		return ERR_PTR(-EROFS);

	if (handle) {
		/* Nested call from the same task: just take a reference
		 * on the already-running handle. */
		J_ASSERT(handle->h_transaction->t_journal == journal);
		handle->h_ref++;
		return handle;
	}

	handle = new_handle(nblocks);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	current->journal_info = handle;

	err = start_this_handle(journal, handle, gfp_mask);
	if (err < 0) {
		jbd2_free_handle(handle);
		current->journal_info = NULL;
		handle = ERR_PTR(err);
	}
	return handle;
}
EXPORT_SYMBOL(jbd2__journal_start);


/* Convenience wrapper: jbd2__journal_start() with GFP_NOFS. */
handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
	return jbd2__journal_start(journal, nblocks, GFP_NOFS);
}
EXPORT_SYMBOL(jbd2_journal_start);


/**
 * int jbd2_journal_extend() - extend buffer credits.
 * @handle:  handle to 'extend'
 * @nblocks: nr blocks to try to extend by.
 *
 * Some transactions, such as large extends and truncates, can be done
 * atomically all at once or in several stages.  The operation requests
 * a credit for a number of buffer modications in advance, but can
 * extend its credit if it needs more.
 *
 * jbd2_journal_extend tries to give the running handle more buffer credits.
 * It does not guarantee that allocation - this is a best-effort only.
 * The calling process MUST be able to deal cleanly with a failure to
 * extend here.
 *
 * Return 0 on success, non-zero on failure.
 *
 * return code < 0 implies an error
 * return code > 0 implies normal transaction-full status.
 */
int jbd2_journal_extend(handle_t *handle, int nblocks)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int result;
	int wanted;

	result = -EIO;
	if (is_handle_aborted(handle))
		goto out;

	result = 1;

	read_lock(&journal->j_state_lock);

	/* Don't extend a locked-down transaction! */
	if (handle->h_transaction->t_state != T_RUNNING) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "transaction not running\n", handle, nblocks);
		goto error_out;
	}

	spin_lock(&transaction->t_handle_lock);
	wanted = atomic_read(&transaction->t_outstanding_credits) + nblocks;

	if (wanted > journal->j_max_transaction_buffers) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "transaction too large\n", handle, nblocks);
		goto unlock;
	}

	if (wanted > __jbd2_log_space_left(journal)) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "insufficient log space\n", handle, nblocks);
		goto unlock;
	}

	handle->h_buffer_credits += nblocks;
	atomic_add(nblocks, &transaction->t_outstanding_credits);
	result = 0;

	jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
unlock:
	spin_unlock(&transaction->t_handle_lock);
error_out:
	read_unlock(&journal->j_state_lock);
out:
	return result;
}


/**
 * int jbd2_journal_restart() - restart a handle .
 * @handle:  handle to restart
 * @nblocks: nr credits requested
 *
 * Restart a handle for a multi-transaction filesystem
 * operation.
 *
 * If the jbd2_journal_extend() call above fails to grant new buffer credits
 * to a running handle, a call to jbd2_journal_restart will commit the
 * handle's transaction so far and reattach the handle to a new
 * transaction capabable of guaranteeing the requested number of
 * credits.
 */
int jbd2__journal_restart(handle_t *handle, int nblocks, int gfp_mask)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	tid_t		tid;
	int		need_to_start, ret;

	/* If we've had an abort of any type, don't even think about
	 * actually doing the restart! */
	if (is_handle_aborted(handle))
		return 0;

	/*
	 * First unlink the handle from its current transaction, and start the
	 * commit on that.
	 */
	J_ASSERT(atomic_read(&transaction->t_updates) > 0);
	J_ASSERT(journal_current_handle() == handle);

	read_lock(&journal->j_state_lock);
	spin_lock(&transaction->t_handle_lock);
	atomic_sub(handle->h_buffer_credits,
		   &transaction->t_outstanding_credits);
	/* Dropping the last update may allow a pending commit to proceed */
	if (atomic_dec_and_test(&transaction->t_updates))
		wake_up(&journal->j_wait_updates);
	spin_unlock(&transaction->t_handle_lock);

	jbd_debug(2, "restarting handle %p\n", handle);
	tid = transaction->t_tid;
	need_to_start = !tid_geq(journal->j_commit_request, tid);
	read_unlock(&journal->j_state_lock);
	if (need_to_start)
		jbd2_log_start_commit(journal, tid);

	lock_map_release(&handle->h_lockdep_map);
	handle->h_buffer_credits = nblocks;
	ret = start_this_handle(journal, handle, gfp_mask);
	return ret;
}
EXPORT_SYMBOL(jbd2__journal_restart);


/* Convenience wrapper: jbd2__journal_restart() with GFP_NOFS. */
int jbd2_journal_restart(handle_t *handle, int nblocks)
{
	return jbd2__journal_restart(handle, nblocks, GFP_NOFS);
}
EXPORT_SYMBOL(jbd2_journal_restart);

/**
 * void jbd2_journal_lock_updates () - establish a transaction barrier.
 * @journal:  Journal to establish a barrier on.
 *
 * This locks out any further updates from being started, and blocks
 * until all existing updates have completed, returning only once the
 * journal is in a quiescent state with no updates running.
 *
 * The journal lock should not be held on entry.
 */
void jbd2_journal_lock_updates(journal_t *journal)
{
	DEFINE_WAIT(wait);

	write_lock(&journal->j_state_lock);
	++journal->j_barrier_count;

	/* Wait until there are no running updates */
	while (1) {
		transaction_t *transaction = journal->j_running_transaction;

		if (!transaction)
			break;

		spin_lock(&transaction->t_handle_lock);
		if (!atomic_read(&transaction->t_updates)) {
			spin_unlock(&transaction->t_handle_lock);
			break;
		}
		prepare_to_wait(&journal->j_wait_updates, &wait,
				TASK_UNINTERRUPTIBLE);
		spin_unlock(&transaction->t_handle_lock);
		write_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_updates, &wait);
		write_lock(&journal->j_state_lock);
	}
	write_unlock(&journal->j_state_lock);

	/*
	 * We have now established a barrier against other normal updates, but
	 * we also need to barrier against other jbd2_journal_lock_updates() calls
	 * to make sure that we serialise special journal-locked operations
	 * too.
	 */
	mutex_lock(&journal->j_barrier);
}

/**
 * void jbd2_journal_unlock_updates (journal_t* journal) - release barrier
 * @journal:  Journal to release the barrier on.
 *
 * Release a transaction barrier obtained with jbd2_journal_lock_updates().
 *
 * Should be called without the journal lock held.
 */
void jbd2_journal_unlock_updates (journal_t *journal)
{
	J_ASSERT(journal->j_barrier_count != 0);

	mutex_unlock(&journal->j_barrier);
	write_lock(&journal->j_state_lock);
	--journal->j_barrier_count;
	write_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_transaction_locked);
}

/* Warn (once per sighting) about a buffer found dirty where only
 * jbddirty metadata state is expected; indicates a bypass of the
 * journal and a corruption risk on crash. */
static void warn_dirty_buffer(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_WARNING
	       "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
	       "There's a risk of filesystem corruption in case of system "
	       "crash.\n",
	       bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
}

/*
 * If the buffer is already part of the current transaction, then there
 * is nothing we need to do.
 If it is already part of a prior
 * transaction which we are still committing to disk, then we need to
 * make sure that we do not overwrite the old copy: we do copy-out to
 * preserve the copy going to disk.  We also account the buffer against
 * the handle's metadata buffer credits (unless the buffer is already
 * part of the transaction, that is).
 *
 * @handle:     handle whose transaction wants write access to @jh
 * @jh:         journal_head of the buffer being claimed
 * @force_copy: non-zero to force a frozen-data copy even for BJ_Forget
 *              buffers (needed by jbd2_journal_get_undo_access())
 *
 * Returns 0 on success, -EROFS if the handle is aborted, -ENOMEM if the
 * frozen-data allocation fails.
 */
static int
do_get_write_access(handle_t *handle, struct journal_head *jh,
		    int force_copy)
{
	struct buffer_head *bh;
	transaction_t *transaction;
	journal_t *journal;
	int error;
	char *frozen_buffer = NULL;
	int need_copy = 0;

	if (is_handle_aborted(handle))
		return -EROFS;

	transaction = handle->h_transaction;
	journal = transaction->t_journal;

	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);

	JBUFFER_TRACE(jh, "entry");
repeat:
	bh = jh2bh(jh);

	/* @@@ Need to check for errors here at some point. */
	lock_buffer(bh);
	jbd_lock_bh_state(bh);

	/* We now hold the buffer lock so it is safe to query the buffer
	 * state.  Is the buffer dirty?
	 *
	 * If so, there are two possibilities.  The buffer may be
	 * non-journaled, and undergoing a quite legitimate writeback.
	 * Otherwise, it is journaled, and we don't expect dirty buffers
	 * in that state (the buffers should be marked JBD_Dirty
	 * instead.)  So either the IO is being done under our own
	 * control and this is a bug, or it's a third party IO such as
	 * dump(8) (which may leave the buffer scheduled for read ---
	 * ie. locked but not dirty) or tune2fs (which may actually have
	 * the buffer dirtied, ugh.)  */

	if (buffer_dirty(bh)) {
		/*
		 * First question: is this buffer already part of the current
		 * transaction or the existing committing transaction?
		 */
		if (jh->b_transaction) {
			J_ASSERT_JH(jh,
				jh->b_transaction == transaction ||
				jh->b_transaction ==
					journal->j_committing_transaction);
			if (jh->b_next_transaction)
				J_ASSERT_JH(jh, jh->b_next_transaction ==
							transaction);
			warn_dirty_buffer(bh);
		}
		/*
		 * In any case we need to clean the dirty flag and we must
		 * do it under the buffer lock to be sure we don't race
		 * with running write-out.
		 */
		JBUFFER_TRACE(jh, "Journalling dirty buffer");
		clear_buffer_dirty(bh);
		set_buffer_jbddirty(bh);
	}

	unlock_buffer(bh);

	error = -EROFS;
	if (is_handle_aborted(handle)) {
		jbd_unlock_bh_state(bh);
		goto out;
	}
	error = 0;

	/*
	 * The buffer is already part of this transaction if b_transaction or
	 * b_next_transaction points to it
	 */
	if (jh->b_transaction == transaction ||
	    jh->b_next_transaction == transaction)
		goto done;

	/*
	 * this is the first time this transaction is touching this buffer,
	 * reset the modified flag
	 */
	jh->b_modified = 0;

	/*
	 * If there is already a copy-out version of this buffer, then we don't
	 * need to make another one
	 */
	if (jh->b_frozen_data) {
		JBUFFER_TRACE(jh, "has frozen data");
		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
		jh->b_next_transaction = transaction;
		goto done;
	}

	/* Is there data here we need to preserve? */

	if (jh->b_transaction && jh->b_transaction != transaction) {
		JBUFFER_TRACE(jh, "owned by older transaction");
		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
		J_ASSERT_JH(jh, jh->b_transaction ==
					journal->j_committing_transaction);

		/* There is one case we have to be very careful about.
		 * If the committing transaction is currently writing
		 * this buffer out to disk and has NOT made a copy-out,
		 * then we cannot modify the buffer contents at all
		 * right now.  The essence of copy-out is that it is the
		 * extra copy, not the primary copy, which gets
		 * journaled.  If the primary copy is already going to
		 * disk then we cannot do copy-out here. */

		if (jh->b_jlist == BJ_Shadow) {
			DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
			wait_queue_head_t *wqh;

			wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);

			JBUFFER_TRACE(jh, "on shadow: sleep");
			jbd_unlock_bh_state(bh);
			/* commit wakes up all shadow buffers after IO */
			for ( ; ; ) {
				prepare_to_wait(wqh, &wait.wait,
						TASK_UNINTERRUPTIBLE);
				if (jh->b_jlist != BJ_Shadow)
					break;
				schedule();
			}
			finish_wait(wqh, &wait.wait);
			/* State may have changed while we slept: retry. */
			goto repeat;
		}

		/* Only do the copy if the currently-owning transaction
		 * still needs it.  If it is on the Forget list, the
		 * committing transaction is past that stage.  The
		 * buffer had better remain locked during the kmalloc,
		 * but that should be true --- we hold the journal lock
		 * still and the buffer is already on the BUF_JOURNAL
		 * list so won't be flushed.
		 *
		 * Subtle point, though: if this is a get_undo_access,
		 * then we will be relying on the frozen_data to contain
		 * the new value of the committed_data record after the
		 * transaction, so we HAVE to force the frozen_data copy
		 * in that case. */

		if (jh->b_jlist != BJ_Forget || force_copy) {
			JBUFFER_TRACE(jh, "generate frozen data");
			if (!frozen_buffer) {
				JBUFFER_TRACE(jh, "allocate memory for buffer");
				/* Drop the state lock across the allocation;
				 * everything is re-validated after "repeat". */
				jbd_unlock_bh_state(bh);
				frozen_buffer =
					jbd2_alloc(jh2bh(jh)->b_size,
							 GFP_NOFS);
				if (!frozen_buffer) {
					printk(KERN_EMERG
					       "%s: OOM for frozen_buffer\n",
					       __func__);
					JBUFFER_TRACE(jh, "oom!");
					error = -ENOMEM;
					jbd_lock_bh_state(bh);
					goto done;
				}
				goto repeat;
			}
			jh->b_frozen_data = frozen_buffer;
			frozen_buffer = NULL;
			need_copy = 1;
		}
		jh->b_next_transaction = transaction;
	}


	/*
	 * Finally, if the buffer is not journaled right now, we need to make
	 * sure it doesn't get written to disk before the caller actually
	 * commits the new data
	 */
	if (!jh->b_transaction) {
		JBUFFER_TRACE(jh, "no transaction");
		J_ASSERT_JH(jh, !jh->b_next_transaction);
		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		spin_lock(&journal->j_list_lock);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
		spin_unlock(&journal->j_list_lock);
	}

done:
	if (need_copy) {
		struct page *page;
		int offset;
		char *source;

		J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
			    "Possible IO failure.\n");
		page = jh2bh(jh)->b_page;
		offset = offset_in_page(jh2bh(jh)->b_data);
		source = kmap_atomic(page, KM_USER0);
		/* Fire data frozen trigger just before we copy the data */
		jbd2_buffer_frozen_trigger(jh, source + offset,
					   jh->b_triggers);
		memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
		kunmap_atomic(source, KM_USER0);

		/*
		 * Now that the frozen data is saved off, we need to store
		 * any matching triggers.
		 */
		jh->b_frozen_triggers = jh->b_triggers;
	}
	jbd_unlock_bh_state(bh);

	/*
	 * If we are about to journal a buffer, then any revoke pending on it is
	 * no longer valid
	 */
	jbd2_journal_cancel_revoke(handle, jh);

out:
	if (unlikely(frozen_buffer))	/* It's usually NULL */
		jbd2_free(frozen_buffer, bh->b_size);

	JBUFFER_TRACE(jh, "exit");
	return error;
}

/**
 * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
 * @handle: transaction to add buffer modifications to
 * @bh:     bh to be used for metadata writes
 *
 * Returns an error code or 0 on success.
 *
 * In full data journalling mode the buffer may be of type BJ_AsyncData,
 * because we're write()ing a buffer which is also part of a shared mapping.
 */

int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
	/* Takes a temporary journal_head reference for the duration of
	 * the call; do_get_write_access() does the real work. */
	struct journal_head *jh = jbd2_journal_add_journal_head(bh);
	int rc;

	/* We do not want to get caught playing with fields which the
	 * log thread also manipulates.  Make sure that the buffer
	 * completes any outstanding IO before proceeding. */
	rc = do_get_write_access(handle, jh, 0);
	jbd2_journal_put_journal_head(jh);
	return rc;
}


/*
 * When the user wants to journal a newly created buffer_head
 * (ie. getblk() returned a new buffer and we are going to populate it
 * manually rather than reading off disk), then we need to keep the
 * buffer_head locked until it has been completely filled with new
 * data.  In this case, we should be able to make the assertion that
 * the bh is not already part of an existing transaction.
 *
 * The buffer should already be locked by the caller by this point.
 * There is no lock ranking violation: it was a newly created,
 * unlocked buffer beforehand. */

/**
 * int jbd2_journal_get_create_access () - notify intent to use newly created bh
 * @handle: transaction to new buffer to
 * @bh: new buffer.
 *
 * Call this if you create a new bh.
 */
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	struct journal_head *jh = jbd2_journal_add_journal_head(bh);
	int err;

	jbd_debug(5, "journal_head %p\n", jh);
	err = -EROFS;
	if (is_handle_aborted(handle))
		goto out;
	err = 0;

	JBUFFER_TRACE(jh, "entry");
	/*
	 * The buffer may already belong to this transaction due to pre-zeroing
	 * in the filesystem's new_block code.  It may also be on the previous,
	 * committing transaction's lists, but it HAS to be in Forget state in
	 * that case: the transaction must have deleted the buffer for it to be
	 * reused here.
	 */
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);
	J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
		jh->b_transaction == NULL ||
		(jh->b_transaction == journal->j_committing_transaction &&
			  jh->b_jlist == BJ_Forget)));

	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
	J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));

	if (jh->b_transaction == NULL) {
		/*
		 * Previous jbd2_journal_forget() could have left the buffer
		 * with jbddirty bit set because it was being committed. When
		 * the commit finished, we've filed the buffer for
		 * checkpointing and marked it dirty. Now we are reallocating
		 * the buffer so the transaction freeing it must have
		 * committed and so it's safe to clear the dirty bit.
		 */
		clear_buffer_dirty(jh2bh(jh));
		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
	} else if (jh->b_transaction == journal->j_committing_transaction) {
		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "set next transaction");
		jh->b_next_transaction = transaction;
	}
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);

	/*
	 * akpm: I added this.  ext3_alloc_branch can pick up new indirect
	 * blocks which contain freed but then revoked metadata.  We need
	 * to cancel the revoke in case we end up freeing it yet again
	 * and the reallocating as data - this would cause a second revoke,
	 * which hits an assertion error.
	 */
	JBUFFER_TRACE(jh, "cancelling revoke");
	jbd2_journal_cancel_revoke(handle, jh);
out:
	jbd2_journal_put_journal_head(jh);
	return err;
}

/**
 * int jbd2_journal_get_undo_access() -  Notify intent to modify metadata with
 *     non-rewindable consequences
 * @handle: transaction
 * @bh: buffer to undo
 *
 * Sometimes there is a need to distinguish between metadata which has
 * been committed to disk and that which has not.  The ext3fs code uses
 * this for freeing and allocating space, we have to make sure that we
 * do not reuse freed space until the deallocation has been committed,
 * since if we overwrote that space we would make the delete
 * un-rewindable in case of a crash.
 *
 * To deal with that, jbd2_journal_get_undo_access requests write access to a
 * buffer for parts of non-rewindable operations such as delete
 * operations on the bitmaps.  The journaling code must keep a copy of
 * the buffer's contents prior to the undo_access call until such time
 * as we know that the buffer has definitely been committed to disk.
 *
 * We never need to know which transaction the committed data is part
 * of, buffers touched here are guaranteed to be dirtied later and so
 * will be committed to a new transaction in due course, at which point
 * we can discard the old committed data pointer.
 *
 * Returns error number or 0 on success.
 */
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
	int err;
	struct journal_head *jh = jbd2_journal_add_journal_head(bh);
	char *committed_data = NULL;

	JBUFFER_TRACE(jh, "entry");

	/*
	 * Do this first --- it can drop the journal lock, so we want to
	 * make sure that obtaining the committed_data is done
	 * atomically wrt. completion of any outstanding commits.
	 * (force_copy=1: see the comment in do_get_write_access().)
	 */
	err = do_get_write_access(handle, jh, 1);
	if (err)
		goto out;

repeat:
	/* Allocate outside the bh-state lock, then re-check under it. */
	if (!jh->b_committed_data) {
		committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
		if (!committed_data) {
			printk(KERN_EMERG "%s: No memory for committed data\n",
				__func__);
			err = -ENOMEM;
			goto out;
		}
	}

	jbd_lock_bh_state(bh);
	if (!jh->b_committed_data) {
		/* Copy out the current buffer contents into the
		 * preserved, committed copy. */
		JBUFFER_TRACE(jh, "generate b_committed data");
		if (!committed_data) {
			jbd_unlock_bh_state(bh);
			goto repeat;
		}

		jh->b_committed_data = committed_data;
		committed_data = NULL;
		memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
	}
	jbd_unlock_bh_state(bh);
out:
	jbd2_journal_put_journal_head(jh);
	if (unlikely(committed_data))
		jbd2_free(committed_data, bh->b_size);
	return err;
}

/**
 * void jbd2_journal_set_triggers() - Add triggers for commit writeout
 * @bh: buffer to trigger on
 * @type: struct jbd2_buffer_trigger_type containing the trigger(s).
 *
 * Set any triggers on this journal_head.  This is always safe, because
 * triggers for a committing buffer will be saved off, and triggers for
 * a running transaction will match the buffer in that transaction.
 *
 * Call with NULL to clear the triggers.
 */
void jbd2_journal_set_triggers(struct buffer_head *bh,
			       struct jbd2_buffer_trigger_type *type)
{
	struct journal_head *jh = bh2jh(bh);

	jh->b_triggers = type;
}

/* Invoke the t_frozen trigger (if any) on the mapped buffer data just
 * before it is copied to the frozen (journalled) image. */
void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
				struct jbd2_buffer_trigger_type *triggers)
{
	struct buffer_head *bh = jh2bh(jh);

	if (!triggers || !triggers->t_frozen)
		return;

	triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
}

/* Invoke the t_abort trigger (if any) when the journal is aborted. */
void jbd2_buffer_abort_trigger(struct journal_head *jh,
			       struct jbd2_buffer_trigger_type *triggers)
{
	if (!triggers || !triggers->t_abort)
		return;

	triggers->t_abort(triggers, jh2bh(jh));
}



/**
 * int jbd2_journal_dirty_metadata() -  mark a buffer as containing dirty metadata
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
 * mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * NOTE(review): as written this function always returns 0, even when the
 * handle is aborted — verify callers do not rely on an error here.
 *
 * Special care needs to be taken if the buffer already belongs to the
 * current committing transaction (in which case we should have frozen
 * data present for that commit).  In that case, we don't relink the
 * buffer: that only gets done when the old transaction finally
 * completes its commit.
 */
int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	struct journal_head *jh = bh2jh(bh);

	jbd_debug(5, "journal_head %p\n", jh);
	JBUFFER_TRACE(jh, "entry");
	if (is_handle_aborted(handle))
		goto out;

	jbd_lock_bh_state(bh);

	if (jh->b_modified == 0) {
		/*
		 * This buffer's got modified and becoming part
		 * of the transaction. This needs to be done
		 * once a transaction -bzzz
		 */
		jh->b_modified = 1;
		J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
		handle->h_buffer_credits--;
	}

	/*
	 * fastpath, to avoid expensive locking.  If this buffer is already
	 * on the running transaction's metadata list there is nothing to do.
	 * Nobody can take it off again because there is a handle open.
	 * I _think_ we're OK here with SMP barriers - a mistaken decision will
	 * result in this test being false, so we go in and take the locks.
	 */
	if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
		JBUFFER_TRACE(jh, "fastpath");
		J_ASSERT_JH(jh, jh->b_transaction ==
					journal->j_running_transaction);
		goto out_unlock_bh;
	}

	set_buffer_jbddirty(bh);

	/*
	 * Metadata already on the current transaction list doesn't
	 * need to be filed.  Metadata on another transaction's list must
	 * be committing, and will be refiled once the commit completes:
	 * leave it alone for now.
	 */
	if (jh->b_transaction != transaction) {
		JBUFFER_TRACE(jh, "already on other transaction");
		J_ASSERT_JH(jh, jh->b_transaction ==
					journal->j_committing_transaction);
		J_ASSERT_JH(jh, jh->b_next_transaction == transaction);
		/* And this case is illegal: we can't reuse another
		 * transaction's data buffer, ever. */
		goto out_unlock_bh;
	}

	/* That test should have eliminated the following case: */
	J_ASSERT_JH(jh, jh->b_frozen_data == NULL);

	JBUFFER_TRACE(jh, "file as BJ_Metadata");
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
	spin_unlock(&journal->j_list_lock);
out_unlock_bh:
	jbd_unlock_bh_state(bh);
out:
	JBUFFER_TRACE(jh, "exit");
	return 0;
}

/*
 * jbd2_journal_release_buffer: undo a get_write_access without any buffer
 * updates, if the update decided in the end that it didn't need access.
 *
 */
void
jbd2_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
{
	BUFFER_TRACE(bh, "entry");
}

/**
 * void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
 * @handle: transaction handle
 * @bh:     bh to 'forget'
 *
 * We can only do the bforget if there are no commits pending against the
 * buffer.  If the buffer is dirty in the current running transaction we
 * can safely unlink it.
 *
 * bh may not be a journalled buffer at all - it may be a non-JBD
 * buffer which came off the hashtable.  Check for this.
 *
 * Decrements bh->b_count by one.
 *
 * Allow this call even if the handle has aborted --- it may be part of
 * the caller's cleanup after an abort.
 */
int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	struct journal_head *jh;
	int drop_reserve = 0;
	int err = 0;
	int was_modified = 0;

	BUFFER_TRACE(bh, "entry");

	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);

	if (!buffer_jbd(bh))
		goto not_jbd;
	jh = bh2jh(bh);

	/* Critical error: attempting to delete a bitmap buffer, maybe?
	 * Don't do any jbd operations, and return an error. */
	if (!J_EXPECT_JH(jh, !jh->b_committed_data,
			 "inconsistent data on disk")) {
		err = -EIO;
		goto not_jbd;
	}

	/* keep track of whether or not this transaction modified us */
	was_modified = jh->b_modified;

	/*
	 * The buffer's going from the transaction, we must drop
	 * all references -bzzz
	 */
	jh->b_modified = 0;

	if (jh->b_transaction == handle->h_transaction) {
		J_ASSERT_JH(jh, !jh->b_frozen_data);

		/* If we are forgetting a buffer which is already part
		 * of this transaction, then we can just drop it from
		 * the transaction immediately. */
		clear_buffer_dirty(bh);
		clear_buffer_jbddirty(bh);

		JBUFFER_TRACE(jh, "belongs to current transaction: unfile");

		/*
		 * we only want to drop a reference if this transaction
		 * modified the buffer
		 */
		if (was_modified)
			drop_reserve = 1;

		/*
		 * We are no longer going to journal this buffer.
		 * However, the commit of this transaction is still
		 * important to the buffer: the delete that we are now
		 * processing might obsolete an old log entry, so by
		 * committing, we can satisfy the buffer's checkpoint.
		 *
		 * So, if we have a checkpoint on the buffer, we should
		 * now refile the buffer on our BJ_Forget list so that
		 * we know to remove the checkpoint after we commit.
		 */
		if (jh->b_cp_transaction) {
			__jbd2_journal_temp_unlink_buffer(jh);
			__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
		} else {
			__jbd2_journal_unfile_buffer(jh);
			if (!buffer_jbd(bh)) {
				spin_unlock(&journal->j_list_lock);
				jbd_unlock_bh_state(bh);
				__bforget(bh);
				goto drop;
			}
		}
	} else if (jh->b_transaction) {
		J_ASSERT_JH(jh, (jh->b_transaction ==
				 journal->j_committing_transaction));
		/* However, if the buffer is still owned by a prior
		 * (committing) transaction, we can't drop it yet... */
		JBUFFER_TRACE(jh, "belongs to older transaction");
		/* ... but we CAN drop it from the new transaction if we
		 * have also modified it since the original commit. */

		if (jh->b_next_transaction) {
			J_ASSERT(jh->b_next_transaction == transaction);
			jh->b_next_transaction = NULL;

			/*
			 * only drop a reference if this transaction modified
			 * the buffer
			 */
			if (was_modified)
				drop_reserve = 1;
		}
	}

not_jbd:
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	__brelse(bh);
drop:
	if (drop_reserve) {
		/* no need to reserve log space for this block -bzzz */
		handle->h_buffer_credits++;
	}
	return err;
}

/**
 * int jbd2_journal_stop() - complete a transaction
 * @handle: transaction to complete.
 *
 * All done for a particular handle.
 *
 * There is not much action needed here.  We just return any remaining
 * buffer credits to the transaction and remove the handle.  The only
 * complication is that we need to start a commit operation if the
 * filesystem is marked for synchronous update.
 *
 * jbd2_journal_stop itself will not usually return an error, but it may
 * do so in unusual circumstances.  In particular, expect it to
 * return -EIO if a jbd2_journal_abort has been executed since the
 * transaction began.
 */
int jbd2_journal_stop(handle_t *handle)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int err, wait_for_commit = 0;
	tid_t tid;
	pid_t pid;

	J_ASSERT(journal_current_handle() == handle);

	if (is_handle_aborted(handle))
		err = -EIO;
	else {
		J_ASSERT(atomic_read(&transaction->t_updates) > 0);
		err = 0;
	}

	/* Nested handle use: only the outermost stop does the real work. */
	if (--handle->h_ref > 0) {
		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
			  handle->h_ref);
		return err;
	}

	jbd_debug(4, "Handle %p going down\n", handle);

	/*
	 * Implement synchronous transaction batching.  If the handle
	 * was synchronous, don't force a commit immediately.  Let's
	 * yield and let another thread piggyback onto this
	 * transaction.  Keep doing that while new threads continue to
	 * arrive.  It doesn't cost much - we're about to run a commit
	 * and sleep on IO anyway.  Speeds up many-threaded, many-dir
	 * operations by 30x or more...
	 *
	 * We try and optimize the sleep time against what the
	 * underlying disk can do, instead of having a static sleep
	 * time.  This is useful for the case where our storage is so
	 * fast that it is more optimal to go ahead and force a flush
	 * and wait for the transaction to be committed than it is to
	 * wait for an arbitrary amount of time for new writers to
	 * join the transaction.  We achieve this by measuring how
	 * long it takes to commit a transaction, and compare it with
	 * how long this transaction has been running, and if run time
	 * < commit time then we sleep for the delta and commit.  This
	 * greatly helps super fast disks that would see slowdowns as
	 * more threads started doing fsyncs.
	 *
	 * But don't do this if this process was the most recent one
	 * to perform a synchronous write.  We do this to detect the
	 * case where a single process is doing a stream of sync
	 * writes.  No point in waiting for joiners in that case.
	 */
	pid = current->pid;
	if (handle->h_sync && journal->j_last_sync_writer != pid) {
		u64 commit_time, trans_time;

		journal->j_last_sync_writer = pid;

		read_lock(&journal->j_state_lock);
		commit_time = journal->j_average_commit_time;
		read_unlock(&journal->j_state_lock);

		trans_time = ktime_to_ns(ktime_sub(ktime_get(),
						   transaction->t_start_time));

		/* Clamp the batching sleep between the configured
		 * min/max batch times (stored in microseconds). */
		commit_time = max_t(u64, commit_time,
				    1000*journal->j_min_batch_time);
		commit_time = min_t(u64, commit_time,
				    1000*journal->j_max_batch_time);

		if (trans_time < commit_time) {
			ktime_t expires = ktime_add_ns(ktime_get(),
						       commit_time);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
		}
	}

	if (handle->h_sync)
		transaction->t_synchronous_commit = 1;
	current->journal_info = NULL;
	atomic_sub(handle->h_buffer_credits,
		   &transaction->t_outstanding_credits);

	/*
	 * If the handle is marked SYNC, we need to set another commit
	 * going!  We also want to force a commit if the current
	 * transaction is occupying too much of the log, or if the
	 * transaction is too old now.
	 */
	if (handle->h_sync ||
	    (atomic_read(&transaction->t_outstanding_credits) >
	     journal->j_max_transaction_buffers) ||
	    time_after_eq(jiffies, transaction->t_expires)) {
		/* Do this even for aborted journals: an abort still
		 * completes the commit thread, it just doesn't write
		 * anything to disk. */

		jbd_debug(2, "transaction too old, requesting commit for "
					"handle %p\n", handle);
		/* This is non-blocking */
		jbd2_log_start_commit(journal, transaction->t_tid);

		/*
		 * Special case: JBD2_SYNC synchronous updates require us
		 * to wait for the commit to complete.
		 */
		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
			wait_for_commit = 1;
	}

	/*
	 * Once we drop t_updates, if it goes to zero the transaction
	 * could start committing on us and eventually disappear.  So
	 * once we do this, we must not dereference transaction
	 * pointer again.
	 */
	tid = transaction->t_tid;
	if (atomic_dec_and_test(&transaction->t_updates)) {
		wake_up(&journal->j_wait_updates);
		if (journal->j_barrier_count)
			wake_up(&journal->j_wait_transaction_locked);
	}

	if (wait_for_commit)
		err = jbd2_log_wait_commit(journal, tid);

	lock_map_release(&handle->h_lockdep_map);

	jbd2_free_handle(handle);
	return err;
}

/**
 * int jbd2_journal_force_commit() - force any uncommitted transactions
 * @journal: journal to force
 *
 * For synchronous operations: force any uncommitted transactions
 * to disk.  May seem kludgy, but it reuses all the handle batching
 * code in a very simple manner.
 */
int jbd2_journal_force_commit(journal_t *journal)
{
	handle_t *handle;
	int ret;

	handle = jbd2_journal_start(journal, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
	} else {
		/* h_sync makes jbd2_journal_stop() kick and wait for commit. */
		handle->h_sync = 1;
		ret = jbd2_journal_stop(handle);
	}
	return ret;
}

/*
 *
 * List management code snippets: various functions for manipulating the
 * transaction buffer lists.
 *
 */

/*
 * Append a buffer to a transaction list, given the transaction's list head
 * pointer.
 *
 * j_list_lock is held.
 *
 * jbd_lock_bh_state(jh2bh(jh)) is held.
*/ static inline void __blist_add_buffer(struct journal_head **list, struct journal_head *jh) { if (!*list) { jh->b_tnext = jh->b_tprev = jh; *list = jh; } else { /* Insert at the tail of the list to preserve order */ struct journal_head *first = *list, *last = first->b_tprev; jh->b_tprev = last; jh->b_tnext = first; last->b_tnext = first->b_tprev = jh; } } /* * Remove a buffer from a transaction list, given the transaction's list * head pointer. * * Called with j_list_lock held, and the journal may not be locked. * * jbd_lock_bh_state(jh2bh(jh)) is held. */ static inline void __blist_del_buffer(struct journal_head **list, struct journal_head *jh) { if (*list == jh) { *list = jh->b_tnext; if (*list == jh) *list = NULL; } jh->b_tprev->b_tnext = jh->b_tnext; jh->b_tnext->b_tprev = jh->b_tprev; } /* * Remove a buffer from the appropriate transaction list. * * Note that this function can *change* the value of * bh->b_transaction->t_buffers, t_forget, t_iobuf_list, t_shadow_list, * t_log_list or t_reserved_list. If the caller is holding onto a copy of one * of these pointers, it could go bad. Generally the caller needs to re-read * the pointer from the transaction_t. * * Called under j_list_lock. The journal may not be locked. 
 */
void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
{
	struct journal_head **list = NULL;
	transaction_t *transaction;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	transaction = jh->b_transaction;
	if (transaction)
		assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	if (jh->b_jlist != BJ_None)
		J_ASSERT_JH(jh, transaction != NULL);

	/* Pick the per-transaction list the buffer currently lives on. */
	switch (jh->b_jlist) {
	case BJ_None:
		return;
	case BJ_Metadata:
		transaction->t_nr_buffers--;
		J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_IO:
		list = &transaction->t_iobuf_list;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_LogCtl:
		list = &transaction->t_log_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	}

	__blist_del_buffer(list, jh);
	jh->b_jlist = BJ_None;
	if (test_clear_buffer_jbddirty(bh))
		mark_buffer_dirty(bh);	/* Expose it to the VM */
}

/*
 * Remove buffer from all transactions.
 *
 * Called with bh_state lock and j_list_lock
 *
 * jh and bh may be already freed when this function returns.
 */
static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
{
	__jbd2_journal_temp_unlink_buffer(jh);
	jh->b_transaction = NULL;
	/* Drops the list's reference; jh may be freed here. */
	jbd2_journal_put_journal_head(jh);
}

void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);

	/* Get reference so that buffer cannot be freed before we unlock it */
	get_bh(bh);
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_unfile_buffer(jh);
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	__brelse(bh);
}

/*
 * Called from jbd2_journal_try_to_free_buffers().
* * Called under jbd_lock_bh_state(bh) */ static void __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh) { struct journal_head *jh; jh = bh2jh(bh); if (buffer_locked(bh) || buffer_dirty(bh)) goto out; if (jh->b_next_transaction != NULL) goto out; spin_lock(&journal->j_list_lock); if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) { /* written-back checkpointed metadata buffer */ if (jh->b_jlist == BJ_None) { JBUFFER_TRACE(jh, "remove from checkpoint list"); __jbd2_journal_remove_checkpoint(jh); } } spin_unlock(&journal->j_list_lock); out: return; } /** * int jbd2_journal_try_to_free_buffers() - try to free page buffers. * @journal: journal for operation * @page: to try and free * @gfp_mask: we use the mask to detect how hard should we try to release * buffers. If __GFP_WAIT and __GFP_FS is set, we wait for commit code to * release the buffers. * * * For all the buffers on this page, * if they are fully written out ordered data, move them onto BUF_CLEAN * so try_to_free_buffers() can reap them. * * This function returns non-zero if we wish try_to_free_buffers() * to be called. We do this if the page is releasable by try_to_free_buffers(). * We also do it if the page has locked or dirty buffers and the caller wants * us to perform sync or async writeout. * * This complicates JBD locking somewhat. We aren't protected by the * BKL here. We wish to remove the buffer from its committing or * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer. * * This may *change* the value of transaction_t->t_datalist, so anyone * who looks at t_datalist needs to lock against this function. * * Even worse, someone may be doing a jbd2_journal_dirty_data on this * buffer. So we need to lock against that. jbd2_journal_dirty_data() * will come out of the lock with the buffer dirty, which makes it * ineligible for release here. * * Who else is affected by this? hmm... 
 Really the only contender
 * is do_get_write_access() - it could be looking at the buffer while
 * journal_try_to_free_buffer() is changing its state.  But that
 * cannot happen because we never reallocate freed data as metadata
 * while the data is part of a transaction.  Yes?
 *
 * Return 0 on failure, 1 on success
 */
int jbd2_journal_try_to_free_buffers(journal_t *journal,
				struct page *page, gfp_t gfp_mask)
{
	struct buffer_head *head;
	struct buffer_head *bh;
	int ret = 0;

	/* NOTE(review): gfp_mask is not consulted anywhere in this body,
	 * despite the kernel-doc above — confirm against callers. */
	J_ASSERT(PageLocked(page));

	head = page_buffers(page);
	bh = head;
	/* Walk the page's buffer ring; any buffer still tied to the
	 * journal makes the whole page non-releasable. */
	do {
		struct journal_head *jh;

		/*
		 * We take our own ref against the journal_head here to avoid
		 * having to add tons of locking around each instance of
		 * jbd2_journal_put_journal_head().
		 */
		jh = jbd2_journal_grab_journal_head(bh);
		if (!jh)
			continue;

		jbd_lock_bh_state(bh);
		__journal_try_to_free_buffer(journal, bh);
		jbd2_journal_put_journal_head(jh);
		jbd_unlock_bh_state(bh);
		if (buffer_jbd(bh))
			goto busy;
	} while ((bh = bh->b_this_page) != head);

	ret = try_to_free_buffers(page);

busy:
	return ret;
}

/*
 * This buffer is no longer needed.  If it is on an older transaction's
 * checkpoint list we need to record it on this transaction's forget list
 * to pin this buffer (and hence its checkpointing transaction) down until
 * this transaction commits.  If the buffer isn't on a checkpoint list, we
 * release it.
 * Returns non-zero if JBD no longer has an interest in the buffer.
 *
 * Called under j_list_lock.
 *
 * Called under jbd_lock_bh_state(bh).
 */
static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
{
	int may_free = 1;
	struct buffer_head *bh = jh2bh(jh);

	if (jh->b_cp_transaction) {
		/* Checkpoint still pending: park the buffer on the given
		 * transaction's BJ_Forget list so it stays pinned until
		 * that transaction commits. */
		JBUFFER_TRACE(jh, "on running+cp transaction");
		__jbd2_journal_temp_unlink_buffer(jh);
		/*
		 * We don't want to write the buffer anymore, clear the
		 * bit so that we don't confuse checks in
		 * __journal_file_buffer
		 */
		clear_buffer_dirty(bh);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
		may_free = 0;
	} else {
		JBUFFER_TRACE(jh, "on running transaction");
		__jbd2_journal_unfile_buffer(jh);
	}
	return may_free;
}

/*
 * jbd2_journal_invalidatepage
 *
 * This code is tricky.  It has a number of cases to deal with.
 *
 * There are two invariants which this code relies on:
 *
 * i_size must be updated on disk before we start calling invalidatepage on the
 * data.
 *
 *  This is done in ext3 by defining an ext3_setattr method which
 *  updates i_size before truncate gets going.  By maintaining this
 *  invariant, we can be sure that it is safe to throw away any buffers
 *  attached to the current transaction: once the transaction commits,
 *  we know that the data will not be needed.
 *
 *  Note however that we can *not* throw away data belonging to the
 *  previous, committing transaction!
 *
 * Any disk blocks which *are* part of the previous, committing
 * transaction (and which therefore cannot be discarded immediately) are
 * not going to be reused in the new running transaction
 *
 *  The bitmap committed_data images guarantee this: any block which is
 *  allocated in one transaction and removed in the next will be marked
 *  as in-use in the committed_data bitmap, so cannot be reused until
 *  the next transaction to delete the block commits.  This means that
 *  leaving committing buffers dirty is quite safe: the disk blocks
 *  cannot be reallocated to a different file and so buffer aliasing is
 *  not possible.
 *
 *
 * The above applies mainly to ordered data mode.
In writeback mode we * don't make guarantees about the order in which data hits disk --- in * particular we don't guarantee that new dirty data is flushed before * transaction commit --- so it is always safe just to discard data * immediately in that mode. --sct */ /* * The journal_unmap_buffer helper function returns zero if the buffer * concerned remains pinned as an anonymous buffer belonging to an older * transaction. * * We're outside-transaction here. Either or both of j_running_transaction * and j_committing_transaction may be NULL. */ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) { transaction_t *transaction; struct journal_head *jh; int may_free = 1; int ret; BUFFER_TRACE(bh, "entry"); /* * It is safe to proceed here without the j_list_lock because the * buffers cannot be stolen by try_to_free_buffers as long as we are * holding the page lock. --sct */ if (!buffer_jbd(bh)) goto zap_buffer_unlocked; /* OK, we have data buffer in journaled mode */ write_lock(&journal->j_state_lock); jbd_lock_bh_state(bh); spin_lock(&journal->j_list_lock); jh = jbd2_journal_grab_journal_head(bh); if (!jh) goto zap_buffer_no_jh; /* * We cannot remove the buffer from checkpoint lists until the * transaction adding inode to orphan list (let's call it T) * is committed. Otherwise if the transaction changing the * buffer would be cleaned from the journal before T is * committed, a crash will cause that the correct contents of * the buffer will be lost. On the other hand we have to * clear the buffer dirty bit at latest at the moment when the * transaction marking the buffer as freed in the filesystem * structures is committed because from that moment on the * buffer can be reallocated and used by a different page. * Since the block hasn't been freed yet but the inode has * already been added to orphan list, it is safe for us to add * the buffer to BJ_Forget list of the newest transaction. 
*/ transaction = jh->b_transaction; if (transaction == NULL) { /* First case: not on any transaction. If it * has no checkpoint link, then we can zap it: * it's a writeback-mode buffer so we don't care * if it hits disk safely. */ if (!jh->b_cp_transaction) { JBUFFER_TRACE(jh, "not on any transaction: zap"); goto zap_buffer; } if (!buffer_dirty(bh)) { /* bdflush has written it. We can drop it now */ goto zap_buffer; } /* OK, it must be in the journal but still not * written fully to disk: it's metadata or * journaled data... */ if (journal->j_running_transaction) { /* ... and once the current transaction has * committed, the buffer won't be needed any * longer. */ JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget"); ret = __dispose_buffer(jh, journal->j_running_transaction); jbd2_journal_put_journal_head(jh); spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); write_unlock(&journal->j_state_lock); return ret; } else { /* There is no currently-running transaction. So the * orphan record which we wrote for this file must have * passed into commit. We must attach this buffer to * the committing transaction, if it exists. */ if (journal->j_committing_transaction) { JBUFFER_TRACE(jh, "give to committing trans"); ret = __dispose_buffer(jh, journal->j_committing_transaction); jbd2_journal_put_journal_head(jh); spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); write_unlock(&journal->j_state_lock); return ret; } else { /* The orphan record's transaction has * committed. We can cleanse this buffer */ clear_buffer_jbddirty(bh); goto zap_buffer; } } } else if (transaction == journal->j_committing_transaction) { JBUFFER_TRACE(jh, "on committing transaction"); /* * The buffer is committing, we simply cannot touch * it. So we just set j_next_transaction to the * running transaction (if there is one) and mark * buffer as freed so that commit code knows it should * clear dirty bits when it is done with the buffer. 
*/ set_buffer_freed(bh); if (journal->j_running_transaction && buffer_jbddirty(bh)) jh->b_next_transaction = journal->j_running_transaction; jbd2_journal_put_journal_head(jh); spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); write_unlock(&journal->j_state_lock); return 0; } else { /* Good, the buffer belongs to the running transaction. * We are writing our own transaction's data, not any * previous one's, so it is safe to throw it away * (remember that we expect the filesystem to have set * i_size already for this truncate so recovery will not * expose the disk blocks we are discarding here.) */ J_ASSERT_JH(jh, transaction == journal->j_running_transaction); JBUFFER_TRACE(jh, "on running transaction"); may_free = __dispose_buffer(jh, transaction); } zap_buffer: jbd2_journal_put_journal_head(jh); zap_buffer_no_jh: spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); write_unlock(&journal->j_state_lock); zap_buffer_unlocked: clear_buffer_dirty(bh); J_ASSERT_BH(bh, !buffer_jbddirty(bh)); clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); clear_buffer_delay(bh); clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; } /** * void jbd2_journal_invalidatepage() * @journal: journal to use for flush... * @page: page to flush * @offset: length of page to invalidate. * * Reap page buffers containing data after offset in page. * */ void jbd2_journal_invalidatepage(journal_t *journal, struct page *page, unsigned long offset) { struct buffer_head *head, *bh, *next; unsigned int curr_off = 0; int may_free = 1; if (!PageLocked(page)) BUG(); if (!page_has_buffers(page)) return; /* We will potentially be playing with lists other than just the * data lists (especially for journaled data mode), so be * cautious in our locking. 
*/ head = bh = page_buffers(page); do { unsigned int next_off = curr_off + bh->b_size; next = bh->b_this_page; if (offset <= curr_off) { /* This block is wholly outside the truncation point */ lock_buffer(bh); may_free &= journal_unmap_buffer(journal, bh); unlock_buffer(bh); } curr_off = next_off; bh = next; } while (bh != head); if (!offset) { if (may_free && try_to_free_buffers(page)) J_ASSERT(!page_has_buffers(page)); } } /* * File a buffer on the given transaction list. */ void __jbd2_journal_file_buffer(struct journal_head *jh, transaction_t *transaction, int jlist) { struct journal_head **list = NULL; int was_dirty = 0; struct buffer_head *bh = jh2bh(jh); J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); assert_spin_locked(&transaction->t_journal->j_list_lock); J_ASSERT_JH(jh, jh->b_jlist < BJ_Types); J_ASSERT_JH(jh, jh->b_transaction == transaction || jh->b_transaction == NULL); if (jh->b_transaction && jh->b_jlist == jlist) return; if (jlist == BJ_Metadata || jlist == BJ_Reserved || jlist == BJ_Shadow || jlist == BJ_Forget) { /* * For metadata buffers, we track dirty bit in buffer_jbddirty * instead of buffer_dirty. We should not see a dirty bit set * here because we clear it in do_get_write_access but e.g. * tune2fs can modify the sb and set the dirty bit at any time * so we try to gracefully handle that. 
*/ if (buffer_dirty(bh)) warn_dirty_buffer(bh); if (test_clear_buffer_dirty(bh) || test_clear_buffer_jbddirty(bh)) was_dirty = 1; } if (jh->b_transaction) __jbd2_journal_temp_unlink_buffer(jh); else jbd2_journal_grab_journal_head(bh); jh->b_transaction = transaction; switch (jlist) { case BJ_None: J_ASSERT_JH(jh, !jh->b_committed_data); J_ASSERT_JH(jh, !jh->b_frozen_data); return; case BJ_Metadata: transaction->t_nr_buffers++; list = &transaction->t_buffers; break; case BJ_Forget: list = &transaction->t_forget; break; case BJ_IO: list = &transaction->t_iobuf_list; break; case BJ_Shadow: list = &transaction->t_shadow_list; break; case BJ_LogCtl: list = &transaction->t_log_list; break; case BJ_Reserved: list = &transaction->t_reserved_list; break; } __blist_add_buffer(list, jh); jh->b_jlist = jlist; if (was_dirty) set_buffer_jbddirty(bh); } void jbd2_journal_file_buffer(struct journal_head *jh, transaction_t *transaction, int jlist) { jbd_lock_bh_state(jh2bh(jh)); spin_lock(&transaction->t_journal->j_list_lock); __jbd2_journal_file_buffer(jh, transaction, jlist); spin_unlock(&transaction->t_journal->j_list_lock); jbd_unlock_bh_state(jh2bh(jh)); } /* * Remove a buffer from its current buffer list in preparation for * dropping it from its current transaction entirely. If the buffer has * already started to be used by a subsequent transaction, refile the * buffer on that transaction's metadata list. * * Called under j_list_lock * Called under jbd_lock_bh_state(jh2bh(jh)) * * jh and bh may be already free when this function returns */ void __jbd2_journal_refile_buffer(struct journal_head *jh) { int was_dirty, jlist; struct buffer_head *bh = jh2bh(jh); J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); if (jh->b_transaction) assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock); /* If the buffer is now unused, just drop it. 
*/ if (jh->b_next_transaction == NULL) { __jbd2_journal_unfile_buffer(jh); return; } /* * It has been modified by a later transaction: add it to the new * transaction's metadata list. */ was_dirty = test_clear_buffer_jbddirty(bh); __jbd2_journal_temp_unlink_buffer(jh); /* * We set b_transaction here because b_next_transaction will inherit * our jh reference and thus __jbd2_journal_file_buffer() must not * take a new one. */ jh->b_transaction = jh->b_next_transaction; jh->b_next_transaction = NULL; if (buffer_freed(bh)) jlist = BJ_Forget; else if (jh->b_modified) jlist = BJ_Metadata; else jlist = BJ_Reserved; __jbd2_journal_file_buffer(jh, jh->b_transaction, jlist); J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING); if (was_dirty) set_buffer_jbddirty(bh); } /* * __jbd2_journal_refile_buffer() with necessary locking added. We take our * bh reference so that we can safely unlock bh. * * The jh and bh may be freed by this call. */ void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh) { struct buffer_head *bh = jh2bh(jh); /* Get reference so that buffer cannot be freed before we unlock it */ get_bh(bh); jbd_lock_bh_state(bh); spin_lock(&journal->j_list_lock); __jbd2_journal_refile_buffer(jh); jbd_unlock_bh_state(bh); spin_unlock(&journal->j_list_lock); __brelse(bh); } /* * File inode in the inode list of the handle's transaction */ int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode) { transaction_t *transaction = handle->h_transaction; journal_t *journal = transaction->t_journal; if (is_handle_aborted(handle)) return -EIO; jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino, transaction->t_tid); /* * First check whether inode isn't already on the transaction's * lists without taking the lock. Note that this check is safe * without the lock as we cannot race with somebody removing inode * from the transaction. 
The reason is that we remove inode from the * transaction only in journal_release_jbd_inode() and when we commit * the transaction. We are guarded from the first case by holding * a reference to the inode. We are safe against the second case * because if jinode->i_transaction == transaction, commit code * cannot touch the transaction because we hold reference to it, * and if jinode->i_next_transaction == transaction, commit code * will only file the inode where we want it. */ if (jinode->i_transaction == transaction || jinode->i_next_transaction == transaction) return 0; spin_lock(&journal->j_list_lock); if (jinode->i_transaction == transaction || jinode->i_next_transaction == transaction) goto done; /* * We only ever set this variable to 1 so the test is safe. Since * t_need_data_flush is likely to be set, we do the test to save some * cacheline bouncing */ if (!transaction->t_need_data_flush) transaction->t_need_data_flush = 1; /* On some different transaction's list - should be * the committing one */ if (jinode->i_transaction) { J_ASSERT(jinode->i_next_transaction == NULL); J_ASSERT(jinode->i_transaction == journal->j_committing_transaction); jinode->i_next_transaction = transaction; goto done; } /* Not on any transaction list... */ J_ASSERT(!jinode->i_next_transaction); jinode->i_transaction = transaction; list_add(&jinode->i_list, &transaction->t_inode_list); done: spin_unlock(&journal->j_list_lock); return 0; } /* * File truncate and transaction commit interact with each other in a * non-trivial way. If a transaction writing data block A is * committing, we cannot discard the data by truncate until we have * written them. Otherwise if we crashed after the transaction with * write has committed but before the transaction with truncate has * committed, we could see stale data in block A. This function is a * helper to solve this problem. It starts writeout of the truncated * part in case it is in the committing transaction. 
* * Filesystem code must call this function when inode is journaled in * ordered mode before truncation happens and after the inode has been * placed on orphan list with the new inode size. The second condition * avoids the race that someone writes new data and we start * committing the transaction after this function has been called but * before a transaction for truncate is started (and furthermore it * allows us to optimize the case where the addition to orphan list * happens in the same transaction as write --- we don't have to write * any data in such case). */ int jbd2_journal_begin_ordered_truncate(journal_t *journal, struct jbd2_inode *jinode, loff_t new_size) { transaction_t *inode_trans, *commit_trans; int ret = 0; /* This is a quick check to avoid locking if not necessary */ if (!jinode->i_transaction) goto out; /* Locks are here just to force reading of recent values, it is * enough that the transaction was not committing before we started * a transaction adding the inode to orphan list */ read_lock(&journal->j_state_lock); commit_trans = journal->j_committing_transaction; read_unlock(&journal->j_state_lock); spin_lock(&journal->j_list_lock); inode_trans = jinode->i_transaction; spin_unlock(&journal->j_list_lock); if (inode_trans == commit_trans) { ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping, new_size, LLONG_MAX); if (ret) jbd2_journal_abort(journal, ret); } out: return ret; }
gpl-2.0
UNGLinux/UNGKernel
drivers/cpufreq/freq_table.c
70
6522
/*
 * linux/drivers/cpufreq/freq_table.c
 *
 * Copyright (C) 2002 - 2003 Dominik Brodowski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>

/*********************************************************************
 *                     FREQUENCY TABLE HELPERS                       *
 *********************************************************************/

/*
 * Scan a driver-provided frequency table and derive the hardware limits:
 * sets policy->min/max and policy->cpuinfo.min_freq/max_freq to the
 * smallest/largest valid entry, skipping CPUFREQ_ENTRY_INVALID slots.
 *
 * Returns 0 on success, -EINVAL if the table holds no valid frequency
 * (note: policy->min/max have already been overwritten by then).
 */
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table)
{
	unsigned int min_freq = ~0;	/* start at UINT_MAX so any entry lowers it */
	unsigned int max_freq = 0;
	unsigned int i;

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID) {
			pr_debug("table entry %u is invalid, skipping\n", i);
			continue;
		}
		pr_debug("table entry %u: %u kHz, %u driver_data\n",
					i, freq, table[i].driver_data);
		if (freq < min_freq)
			min_freq = freq;
		if (freq > max_freq)
			max_freq = freq;
	}

	policy->min = policy->cpuinfo.min_freq = min_freq;
	policy->max = policy->cpuinfo.max_freq = max_freq;

	/* min_freq still ~0 means the loop never saw a valid entry */
	if (policy->min == ~0)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo);

/*
 * Verify that the requested policy range contains at least one usable
 * table frequency.  First clamps policy->min/max to the hardware limits;
 * if no table entry then falls inside [min, max], policy->max is raised
 * to the smallest table frequency above the requested max (next_larger)
 * and the range is clamped again.  Always returns 0.
 */
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
				   struct cpufreq_frequency_table *table)
{
	unsigned int next_larger = ~0;	/* smallest table freq above policy->max */
	unsigned int i;
	unsigned int count = 0;		/* valid entries inside the requested range */

	pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
					policy->min, policy->max, policy->cpu);

	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if ((freq >= policy->min) && (freq <= policy->max))
			count++;
		else if ((next_larger > freq) && (freq > policy->max))
			next_larger = freq;
	}

	if (!count)
		policy->max = next_larger;

	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	pr_debug("verification lead to (%u - %u kHz) for cpu %u\n",
				policy->min, policy->max, policy->cpu);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);

/*
 * Pick the table index best matching target_freq under the given
 * relation and store it in *index:
 *   CPUFREQ_RELATION_H - highest frequency <= target (optimal), else the
 *                        lowest frequency above target (suboptimal);
 *   CPUFREQ_RELATION_L - lowest frequency >= target (optimal), else the
 *                        highest frequency below target (suboptimal).
 * Only entries inside [policy->min, policy->max] are considered.
 *
 * driver_data starts at ~0 as a "not found" sentinel; after the loop it
 * is compared against i (the table length), which any real index is
 * smaller than.  Returns -EINVAL if no candidate was found at all.
 */
int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
				   struct cpufreq_frequency_table *table,
				   unsigned int target_freq,
				   unsigned int relation,
				   unsigned int *index)
{
	struct cpufreq_frequency_table optimal = {
		.driver_data = ~0,
		.frequency = 0,
	};
	struct cpufreq_frequency_table suboptimal = {
		.driver_data = ~0,
		.frequency = 0,
	};
	unsigned int i;

	pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
					target_freq, relation, policy->cpu);

	/* seed the "best so far" frequency so the first candidate always wins */
	switch (relation) {
	case CPUFREQ_RELATION_H:
		suboptimal.frequency = ~0;
		break;
	case CPUFREQ_RELATION_L:
		optimal.frequency = ~0;
		break;
	}

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if ((freq < policy->min) || (freq > policy->max))
			continue;
		switch (relation) {
		case CPUFREQ_RELATION_H:
			if (freq <= target_freq) {
				if (freq >= optimal.frequency) {
					optimal.frequency = freq;
					optimal.driver_data = i;
				}
			} else {
				if (freq <= suboptimal.frequency) {
					suboptimal.frequency = freq;
					suboptimal.driver_data = i;
				}
			}
			break;
		case CPUFREQ_RELATION_L:
			if (freq >= target_freq) {
				if (freq <= optimal.frequency) {
					optimal.frequency = freq;
					optimal.driver_data = i;
				}
			} else {
				if (freq >= suboptimal.frequency) {
					suboptimal.frequency = freq;
					suboptimal.driver_data = i;
				}
			}
			break;
		}
	}
	/* ~0 sentinel is necessarily > i (number of table entries) */
	if (optimal.driver_data > i) {
		if (suboptimal.driver_data > i)
			return -EINVAL;
		*index = suboptimal.driver_data;
	} else
		*index = optimal.driver_data;

	pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency,
		table[*index].driver_data);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);

/* Per-CPU pointer to the table exported through sysfs by the helpers below. */
static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
/**
 * show_available_freqs - show available frequencies for the specified CPU
 */
static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
{
	unsigned int i = 0;
	unsigned int cpu = policy->cpu;
	ssize_t count = 0;
	struct cpufreq_frequency_table *table;

	if (!per_cpu(cpufreq_show_table, cpu))
		return -ENODEV;

	table = per_cpu(cpufreq_show_table, cpu);

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;
		count += sprintf(&buf[count], "%d ", table[i].frequency);
	}
	count += sprintf(&buf[count], "\n");

	return count;
}

/* sysfs attribute backing "scaling_available_frequencies" (read-only). */
struct freq_attr cpufreq_freq_attr_scaling_available_freqs = {
	.attr = { .name = "scaling_available_frequencies",
		  .mode = 0444,
		},
	.show = show_available_freqs,
};
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);

/*
 * if you use these, you must assure that the frequency table is valid
 * all the time between get_attr and put_attr!
 */
void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
				      unsigned int cpu)
{
	pr_debug("setting show_table for cpu %u to %p\n", cpu, table);
	per_cpu(cpufreq_show_table, cpu) = table;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);

/* Drop the sysfs table pointer for @cpu (counterpart of get_attr). */
void cpufreq_frequency_table_put_attr(unsigned int cpu)
{
	pr_debug("clearing show_table for cpu %u\n", cpu);
	per_cpu(cpufreq_show_table, cpu) = NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);

/* Move the table pointer from policy->last_cpu to the new policy->cpu. */
void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
{
	pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
			policy->cpu, policy->last_cpu);
	per_cpu(cpufreq_show_table, policy->cpu) = per_cpu(cpufreq_show_table,
			policy->last_cpu);
	per_cpu(cpufreq_show_table, policy->last_cpu) = NULL;
}

/* Accessor used by other cpufreq code; may return NULL if never registered. */
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
	return per_cpu(cpufreq_show_table, cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);

MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("CPUfreq frequency table helpers");
MODULE_LICENSE("GPL");
gpl-2.0
lovers-fancy/SC-05D_Kernel
drivers/video/msm/mipi_truly.c
326
6464
/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* MIPI-DSI panel driver for a Truly LCD on MSM framebuffer platforms. */

#include "msm_fb.h"
#include "mipi_dsi.h"
#include "mipi_truly.h"

static struct msm_panel_common_pdata *mipi_truly_pdata;
static struct dsi_buf truly_tx_buf;
static struct dsi_buf truly_rx_buf;

#define TRULY_CMD_DELAY		0
#define TRULY_SLEEP_OFF_DELAY	150
#define TRULY_DISPLAY_ON_DELAY	150
#define GPIO_TRULY_LCD_RESET	129

/* Last backlight step applied; 17 == off (see mipi_truly_set_backlight). */
static int prev_bl = 17;

/*
 * Panel initialization command payloads.  First byte is the DCS/generic
 * register, the rest are vendor register values; exact semantics come
 * from the Truly panel datasheet and are not derivable here -- do not
 * reorder or edit without hardware documentation.
 */
static char extend_cmd_enable[4] = {0xB9, 0xFF, 0x83, 0x69};
static char display_setting[16] = {
	0xB2, 0x00, 0x23, 0x62, 0x62, 0x70, 0x00, 0xFF, 0x00, 0x00,
	0x00, 0x00, 0x03, 0x03, 0x00, 0x01,
};
static char wave_cycle_setting[6] = {0xB4, 0x00, 0x1D, 0x5F, 0x0E, 0x06};
static char gip_setting[27] = {
	0xD5, 0x00, 0x04, 0x03, 0x00, 0x01, 0x05, 0x1C, 0x70, 0x01,
	0x03, 0x00, 0x00, 0x40, 0x06, 0x51, 0x07, 0x00, 0x00, 0x41,
	0x06, 0x50, 0x07, 0x07, 0x0F, 0x04, 0x00,
};
static char power_setting[20] = {
	0xB1, 0x01, 0x00, 0x34, 0x06, 0x00, 0x0F, 0x0F, 0x2A, 0x32,
	0x3F, 0x3F, 0x07, 0x3A, 0x01, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6,
};
static char vcom_setting[3] = {0xB6, 0x56, 0x56};
static char pannel_setting[2] = {0xCC, 0x02};
static char gamma_setting[35] = {
	0xE0, 0x00, 0x1D, 0x22, 0x38, 0x3D, 0x3F, 0x2E, 0x4A, 0x06,
	0x0D, 0x0F, 0x13, 0x15, 0x13, 0x16, 0x10, 0x19, 0x00, 0x1D,
	0x22, 0x38, 0x3D, 0x3F, 0x2E, 0x4A, 0x06, 0x0D, 0x0F, 0x13,
	0x15, 0x13, 0x16, 0x10, 0x19,
};
static char mipi_setting[14] = {
	0xBA, 0x00, 0xA0, 0xC6, 0x00, 0x0A, 0x00, 0x10, 0x30, 0x6F,
	0x02, 0x11, 0x18, 0x40,
};
/* Standard DCS commands: sleep out/in, display on/off. */
static char exit_sleep[2] = {0x11, 0x00};
static char display_on[2] = {0x29, 0x00};
static char display_off[2] = {0x28, 0x00};
static char enter_sleep[2] = {0x10, 0x00};

/* Power-off sequence: display off, then enter sleep with settle delays. */
static struct dsi_cmd_desc truly_display_off_cmds[] = {
	{DTYPE_DCS_WRITE, 1, 0, 0, 10, sizeof(display_off), display_off},
	{DTYPE_DCS_WRITE, 1, 0, 0, 120, sizeof(enter_sleep), enter_sleep}
};

/* Power-on sequence: vendor init registers, then sleep-out and display-on. */
static struct dsi_cmd_desc truly_display_on_cmds[] = {
	{DTYPE_GEN_LWRITE, 1, 0, 0, TRULY_CMD_DELAY,
			sizeof(extend_cmd_enable), extend_cmd_enable},
	{DTYPE_GEN_LWRITE, 1, 0, 0, TRULY_CMD_DELAY,
			sizeof(display_setting), display_setting},
	{DTYPE_GEN_LWRITE, 1, 0, 0, TRULY_CMD_DELAY,
			sizeof(wave_cycle_setting), wave_cycle_setting},
	{DTYPE_GEN_LWRITE, 1, 0, 0, TRULY_CMD_DELAY,
			sizeof(gip_setting), gip_setting},
	{DTYPE_GEN_LWRITE, 1, 0, 0, TRULY_CMD_DELAY,
			sizeof(power_setting), power_setting},
	{DTYPE_GEN_LWRITE, 1, 0, 0, TRULY_CMD_DELAY,
			sizeof(vcom_setting), vcom_setting},
	{DTYPE_GEN_LWRITE, 1, 0, 0, TRULY_CMD_DELAY,
			sizeof(pannel_setting), pannel_setting},
	{DTYPE_GEN_LWRITE, 1, 0, 0, TRULY_CMD_DELAY,
			sizeof(gamma_setting), gamma_setting},
	{DTYPE_GEN_LWRITE, 1, 0, 0, TRULY_CMD_DELAY,
			sizeof(mipi_setting), mipi_setting},
	{DTYPE_DCS_WRITE, 1, 0, 0, TRULY_SLEEP_OFF_DELAY,
			sizeof(exit_sleep), exit_sleep},
	{DTYPE_DCS_WRITE, 1, 0, 0, TRULY_DISPLAY_ON_DELAY,
			sizeof(display_on), display_on},
};

/* Panel .on callback: send the full init + display-on command sequence. */
static int mipi_truly_lcd_on(struct platform_device *pdev)
{
	struct msm_fb_data_type *mfd;

	mfd = platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	msleep(20);
	mipi_dsi_cmds_tx(mfd, &truly_tx_buf, truly_display_on_cmds,
			ARRAY_SIZE(truly_display_on_cmds));

	return 0;
}

/* Panel .off callback: send display-off + enter-sleep sequence. */
static int mipi_truly_lcd_off(struct platform_device *pdev)
{
	struct msm_fb_data_type *mfd;

	mfd = platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	mipi_dsi_cmds_tx(mfd, &truly_tx_buf, truly_display_off_cmds,
			ARRAY_SIZE(truly_display_off_cmds));

	return 0;
}

#define BL_LEVEL	17

/*
 * Step the backlight brightness by pulsing the PMIC backlight line.
 * The interface appears to be single-wire pulse dimming: each off/on
 * pulse advances the driver IC one brightness step (wrapping after 16)
 * -- NOTE(review): confirm against the backlight PMIC datasheet.
 */
static void mipi_truly_set_backlight(struct msm_fb_data_type *mfd)
{
	int step = 0, i = 0;
	int bl_level = mfd->bl_level;

	/* real backlight level, 1 - max, 16 - min, 17 - off */
	bl_level = BL_LEVEL - bl_level;

	/* compute number of pulses needed to go from prev_bl to bl_level */
	if (bl_level > prev_bl) {
		step = bl_level - prev_bl;
		if (bl_level == BL_LEVEL)
			step--;
	} else if (bl_level < prev_bl) {
		step = bl_level + 16 - prev_bl;	/* wrap around the 16-step scale */
	} else {
		pr_debug("%s: no change\n", __func__);
		return;
	}

	if (bl_level == BL_LEVEL) {
		/* turn off backlight */
		mipi_truly_pdata->pmic_backlight(0);
	} else {
		if (prev_bl == BL_LEVEL) {
			/* turn on backlight */
			mipi_truly_pdata->pmic_backlight(1);
			udelay(30);
		}
		/* adjust backlight level */
		for (i = 0; i < step; i++) {
			mipi_truly_pdata->pmic_backlight(0);
			udelay(1);
			mipi_truly_pdata->pmic_backlight(1);
			udelay(1);
		}
	}
	msleep(20);
	prev_bl = bl_level;

	return;
}

/*
 * Probe: id 0 carries the board's common panel platform data only;
 * any other id is a real panel device registered with the framebuffer.
 */
static int __devinit mipi_truly_lcd_probe(struct platform_device *pdev)
{
	if (pdev->id == 0) {
		mipi_truly_pdata = pdev->dev.platform_data;
		return 0;
	}

	msm_fb_add_device(pdev);

	return 0;
}

static struct platform_driver this_driver = {
	.probe  = mipi_truly_lcd_probe,
	.driver = {
		.name   = "mipi_truly",
	},
};

static struct msm_fb_panel_data truly_panel_data = {
	.on		= mipi_truly_lcd_on,
	.off		= mipi_truly_lcd_off,
	.set_backlight	= mipi_truly_set_backlight,
};

/* One flag per DSI channel so a channel is only registered once. */
static int ch_used[3];

/*
 * Create and register a platform device for one panel instance.
 * @pinfo:   panel timing/geometry info copied into the shared panel data
 * @channel: DSI channel index (0-2), must not already be in use
 * @panel:   panel id, encoded into the platform device id (panel << 8 | channel)
 *
 * Returns 0 on success or a negative errno.
 */
int mipi_truly_device_register(struct msm_panel_info *pinfo,
					u32 channel, u32 panel)
{
	struct platform_device *pdev = NULL;
	int ret;

	if ((channel >= 3) || ch_used[channel])
		return -ENODEV;

	ch_used[channel] = TRUE;

	pdev = platform_device_alloc("mipi_truly", (panel << 8)|channel);
	if (!pdev)
		return -ENOMEM;

	truly_panel_data.panel_info = *pinfo;
	ret = platform_device_add_data(pdev, &truly_panel_data,
				sizeof(truly_panel_data));
	if (ret) {
		pr_err("%s: platform_device_add_data failed!\n", __func__);
		goto err_device_put;
	}

	ret = platform_device_add(pdev);
	if (ret) {
		pr_err("%s: platform_device_register failed!\n", __func__);
		goto err_device_put;
	}

	return 0;

err_device_put:
	platform_device_put(pdev);
	return ret;
}

/* Module init: allocate DSI tx/rx buffers, then register the driver. */
static int __init mipi_truly_lcd_init(void)
{
	mipi_dsi_buf_alloc(&truly_tx_buf, DSI_BUF_SIZE);
	mipi_dsi_buf_alloc(&truly_rx_buf, DSI_BUF_SIZE);

	return platform_driver_register(&this_driver);
}

module_init(mipi_truly_lcd_init);
gpl-2.0
aduggan/rpi-linux
drivers/mfd/intel_soc_pmic_bxtwc.c
326
11870
/* * MFD core driver for Intel Broxton Whiskey Cove PMIC * * Copyright (C) 2015 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include <linux/module.h> #include <linux/acpi.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/mfd/core.h> #include <linux/mfd/intel_bxtwc.h> #include <asm/intel_pmc_ipc.h> /* PMIC device registers */ #define REG_ADDR_MASK 0xFF00 #define REG_ADDR_SHIFT 8 #define REG_OFFSET_MASK 0xFF /* Interrupt Status Registers */ #define BXTWC_IRQLVL1 0x4E02 #define BXTWC_PWRBTNIRQ 0x4E03 #define BXTWC_THRM0IRQ 0x4E04 #define BXTWC_THRM1IRQ 0x4E05 #define BXTWC_THRM2IRQ 0x4E06 #define BXTWC_BCUIRQ 0x4E07 #define BXTWC_ADCIRQ 0x4E08 #define BXTWC_CHGR0IRQ 0x4E09 #define BXTWC_CHGR1IRQ 0x4E0A #define BXTWC_GPIOIRQ0 0x4E0B #define BXTWC_GPIOIRQ1 0x4E0C #define BXTWC_CRITIRQ 0x4E0D /* Interrupt MASK Registers */ #define BXTWC_MIRQLVL1 0x4E0E #define BXTWC_MPWRTNIRQ 0x4E0F #define BXTWC_MTHRM0IRQ 0x4E12 #define BXTWC_MTHRM1IRQ 0x4E13 #define BXTWC_MTHRM2IRQ 0x4E14 #define BXTWC_MBCUIRQ 0x4E15 #define BXTWC_MADCIRQ 0x4E16 #define BXTWC_MCHGR0IRQ 0x4E17 #define BXTWC_MCHGR1IRQ 0x4E18 #define BXTWC_MGPIO0IRQ 0x4E19 #define BXTWC_MGPIO1IRQ 0x4E1A #define BXTWC_MCRITIRQ 0x4E1B /* Whiskey Cove PMIC share same ACPI ID between different platforms */ #define BROXTON_PMIC_WC_HRV 4 /* Manage in two IRQ chips since mask registers are not consecutive */ enum bxtwc_irqs { /* Level 1 */ BXTWC_PWRBTN_LVL1_IRQ = 0, BXTWC_TMU_LVL1_IRQ, BXTWC_THRM_LVL1_IRQ, BXTWC_BCU_LVL1_IRQ, 
BXTWC_ADC_LVL1_IRQ, BXTWC_CHGR_LVL1_IRQ, BXTWC_GPIO_LVL1_IRQ, BXTWC_CRIT_LVL1_IRQ, /* Level 2 */ BXTWC_PWRBTN_IRQ, }; enum bxtwc_irqs_level2 { /* Level 2 */ BXTWC_THRM0_IRQ = 0, BXTWC_THRM1_IRQ, BXTWC_THRM2_IRQ, BXTWC_BCU_IRQ, BXTWC_ADC_IRQ, BXTWC_CHGR0_IRQ, BXTWC_CHGR1_IRQ, BXTWC_GPIO0_IRQ, BXTWC_GPIO1_IRQ, BXTWC_CRIT_IRQ, }; static const struct regmap_irq bxtwc_regmap_irqs[] = { REGMAP_IRQ_REG(BXTWC_PWRBTN_LVL1_IRQ, 0, BIT(0)), REGMAP_IRQ_REG(BXTWC_TMU_LVL1_IRQ, 0, BIT(1)), REGMAP_IRQ_REG(BXTWC_THRM_LVL1_IRQ, 0, BIT(2)), REGMAP_IRQ_REG(BXTWC_BCU_LVL1_IRQ, 0, BIT(3)), REGMAP_IRQ_REG(BXTWC_ADC_LVL1_IRQ, 0, BIT(4)), REGMAP_IRQ_REG(BXTWC_CHGR_LVL1_IRQ, 0, BIT(5)), REGMAP_IRQ_REG(BXTWC_GPIO_LVL1_IRQ, 0, BIT(6)), REGMAP_IRQ_REG(BXTWC_CRIT_LVL1_IRQ, 0, BIT(7)), REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 1, 0x03), }; static const struct regmap_irq bxtwc_regmap_irqs_level2[] = { REGMAP_IRQ_REG(BXTWC_THRM0_IRQ, 0, 0xff), REGMAP_IRQ_REG(BXTWC_THRM1_IRQ, 1, 0xbf), REGMAP_IRQ_REG(BXTWC_THRM2_IRQ, 2, 0xff), REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 3, 0x1f), REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 4, 0xff), REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x1f), REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 6, 0x1f), REGMAP_IRQ_REG(BXTWC_GPIO0_IRQ, 7, 0xff), REGMAP_IRQ_REG(BXTWC_GPIO1_IRQ, 8, 0x3f), REGMAP_IRQ_REG(BXTWC_CRIT_IRQ, 9, 0x03), }; static struct regmap_irq_chip bxtwc_regmap_irq_chip = { .name = "bxtwc_irq_chip", .status_base = BXTWC_IRQLVL1, .mask_base = BXTWC_MIRQLVL1, .irqs = bxtwc_regmap_irqs, .num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs), .num_regs = 2, }; static struct regmap_irq_chip bxtwc_regmap_irq_chip_level2 = { .name = "bxtwc_irq_chip_level2", .status_base = BXTWC_THRM0IRQ, .mask_base = BXTWC_MTHRM0IRQ, .irqs = bxtwc_regmap_irqs_level2, .num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs_level2), .num_regs = 10, }; static struct resource gpio_resources[] = { DEFINE_RES_IRQ_NAMED(BXTWC_GPIO0_IRQ, "GPIO0"), DEFINE_RES_IRQ_NAMED(BXTWC_GPIO1_IRQ, "GPIO1"), }; static struct resource adc_resources[] = { 
DEFINE_RES_IRQ_NAMED(BXTWC_ADC_IRQ, "ADC"), }; static struct resource charger_resources[] = { DEFINE_RES_IRQ_NAMED(BXTWC_CHGR0_IRQ, "CHARGER"), DEFINE_RES_IRQ_NAMED(BXTWC_CHGR1_IRQ, "CHARGER1"), }; static struct resource thermal_resources[] = { DEFINE_RES_IRQ(BXTWC_THRM0_IRQ), DEFINE_RES_IRQ(BXTWC_THRM1_IRQ), DEFINE_RES_IRQ(BXTWC_THRM2_IRQ), }; static struct resource bcu_resources[] = { DEFINE_RES_IRQ_NAMED(BXTWC_BCU_IRQ, "BCU"), }; static struct mfd_cell bxt_wc_dev[] = { { .name = "bxt_wcove_gpadc", .num_resources = ARRAY_SIZE(adc_resources), .resources = adc_resources, }, { .name = "bxt_wcove_thermal", .num_resources = ARRAY_SIZE(thermal_resources), .resources = thermal_resources, }, { .name = "bxt_wcove_ext_charger", .num_resources = ARRAY_SIZE(charger_resources), .resources = charger_resources, }, { .name = "bxt_wcove_bcu", .num_resources = ARRAY_SIZE(bcu_resources), .resources = bcu_resources, }, { .name = "bxt_wcove_gpio", .num_resources = ARRAY_SIZE(gpio_resources), .resources = gpio_resources, }, { .name = "bxt_wcove_region", }, }; static int regmap_ipc_byte_reg_read(void *context, unsigned int reg, unsigned int *val) { int ret; int i2c_addr; u8 ipc_in[2]; u8 ipc_out[4]; struct intel_soc_pmic *pmic = context; if (reg & REG_ADDR_MASK) i2c_addr = (reg & REG_ADDR_MASK) >> REG_ADDR_SHIFT; else { i2c_addr = BXTWC_DEVICE1_ADDR; if (!i2c_addr) { dev_err(pmic->dev, "I2C address not set\n"); return -EINVAL; } } reg &= REG_OFFSET_MASK; ipc_in[0] = reg; ipc_in[1] = i2c_addr; ret = intel_pmc_ipc_command(PMC_IPC_PMIC_ACCESS, PMC_IPC_PMIC_ACCESS_READ, ipc_in, sizeof(ipc_in), (u32 *)ipc_out, 1); if (ret) { dev_err(pmic->dev, "Failed to read from PMIC\n"); return ret; } *val = ipc_out[0]; return 0; } static int regmap_ipc_byte_reg_write(void *context, unsigned int reg, unsigned int val) { int ret; int i2c_addr; u8 ipc_in[3]; struct intel_soc_pmic *pmic = context; if (reg & REG_ADDR_MASK) i2c_addr = (reg & REG_ADDR_MASK) >> REG_ADDR_SHIFT; else { i2c_addr = 
BXTWC_DEVICE1_ADDR; if (!i2c_addr) { dev_err(pmic->dev, "I2C address not set\n"); return -EINVAL; } } reg &= REG_OFFSET_MASK; ipc_in[0] = reg; ipc_in[1] = i2c_addr; ipc_in[2] = val; ret = intel_pmc_ipc_command(PMC_IPC_PMIC_ACCESS, PMC_IPC_PMIC_ACCESS_WRITE, ipc_in, sizeof(ipc_in), NULL, 0); if (ret) { dev_err(pmic->dev, "Failed to write to PMIC\n"); return ret; } return 0; } /* sysfs interfaces to r/w PMIC registers, required by initial script */ static unsigned long bxtwc_reg_addr; static ssize_t bxtwc_reg_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "0x%lx\n", bxtwc_reg_addr); } static ssize_t bxtwc_reg_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { if (kstrtoul(buf, 0, &bxtwc_reg_addr)) { dev_err(dev, "Invalid register address\n"); return -EINVAL; } return (ssize_t)count; } static ssize_t bxtwc_val_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; unsigned int val; struct intel_soc_pmic *pmic = dev_get_drvdata(dev); ret = regmap_read(pmic->regmap, bxtwc_reg_addr, &val); if (ret < 0) { dev_err(dev, "Failed to read 0x%lx\n", bxtwc_reg_addr); return -EIO; } return sprintf(buf, "0x%02x\n", val); } static ssize_t bxtwc_val_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret; unsigned int val; struct intel_soc_pmic *pmic = dev_get_drvdata(dev); ret = kstrtouint(buf, 0, &val); if (ret) return ret; ret = regmap_write(pmic->regmap, bxtwc_reg_addr, val); if (ret) { dev_err(dev, "Failed to write value 0x%02x to address 0x%lx", val, bxtwc_reg_addr); return -EIO; } return count; } static DEVICE_ATTR(addr, S_IWUSR | S_IRUSR, bxtwc_reg_show, bxtwc_reg_store); static DEVICE_ATTR(val, S_IWUSR | S_IRUSR, bxtwc_val_show, bxtwc_val_store); static struct attribute *bxtwc_attrs[] = { &dev_attr_addr.attr, &dev_attr_val.attr, NULL }; static const struct attribute_group bxtwc_group = { .attrs = bxtwc_attrs, }; static const 
struct regmap_config bxtwc_regmap_config = { .reg_bits = 16, .val_bits = 8, .reg_write = regmap_ipc_byte_reg_write, .reg_read = regmap_ipc_byte_reg_read, }; static int bxtwc_probe(struct platform_device *pdev) { int ret; acpi_handle handle; acpi_status status; unsigned long long hrv; struct intel_soc_pmic *pmic; handle = ACPI_HANDLE(&pdev->dev); status = acpi_evaluate_integer(handle, "_HRV", NULL, &hrv); if (ACPI_FAILURE(status)) { dev_err(&pdev->dev, "Failed to get PMIC hardware revision\n"); return -ENODEV; } if (hrv != BROXTON_PMIC_WC_HRV) { dev_err(&pdev->dev, "Invalid PMIC hardware revision: %llu\n", hrv); return -ENODEV; } pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL); if (!pmic) return -ENOMEM; ret = platform_get_irq(pdev, 0); if (ret < 0) { dev_err(&pdev->dev, "Invalid IRQ\n"); return ret; } pmic->irq = ret; dev_set_drvdata(&pdev->dev, pmic); pmic->dev = &pdev->dev; pmic->regmap = devm_regmap_init(&pdev->dev, NULL, pmic, &bxtwc_regmap_config); if (IS_ERR(pmic->regmap)) { ret = PTR_ERR(pmic->regmap); dev_err(&pdev->dev, "Failed to initialise regmap: %d\n", ret); return ret; } ret = regmap_add_irq_chip(pmic->regmap, pmic->irq, IRQF_ONESHOT | IRQF_SHARED, 0, &bxtwc_regmap_irq_chip, &pmic->irq_chip_data); if (ret) { dev_err(&pdev->dev, "Failed to add IRQ chip\n"); return ret; } ret = regmap_add_irq_chip(pmic->regmap, pmic->irq, IRQF_ONESHOT | IRQF_SHARED, 0, &bxtwc_regmap_irq_chip_level2, &pmic->irq_chip_data_level2); if (ret) { dev_err(&pdev->dev, "Failed to add secondary IRQ chip\n"); goto err_irq_chip_level2; } ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, bxt_wc_dev, ARRAY_SIZE(bxt_wc_dev), NULL, 0, NULL); if (ret) { dev_err(&pdev->dev, "Failed to add devices\n"); goto err_mfd; } ret = sysfs_create_group(&pdev->dev.kobj, &bxtwc_group); if (ret) { dev_err(&pdev->dev, "Failed to create sysfs group %d\n", ret); goto err_sysfs; } return 0; err_sysfs: mfd_remove_devices(&pdev->dev); err_mfd: regmap_del_irq_chip(pmic->irq, 
pmic->irq_chip_data_level2); err_irq_chip_level2: regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data); return ret; } static int bxtwc_remove(struct platform_device *pdev) { struct intel_soc_pmic *pmic = dev_get_drvdata(&pdev->dev); sysfs_remove_group(&pdev->dev.kobj, &bxtwc_group); mfd_remove_devices(&pdev->dev); regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data); regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data_level2); return 0; } static void bxtwc_shutdown(struct platform_device *pdev) { struct intel_soc_pmic *pmic = dev_get_drvdata(&pdev->dev); disable_irq(pmic->irq); } #ifdef CONFIG_PM_SLEEP static int bxtwc_suspend(struct device *dev) { struct intel_soc_pmic *pmic = dev_get_drvdata(dev); disable_irq(pmic->irq); return 0; } static int bxtwc_resume(struct device *dev) { struct intel_soc_pmic *pmic = dev_get_drvdata(dev); enable_irq(pmic->irq); return 0; } #endif static SIMPLE_DEV_PM_OPS(bxtwc_pm_ops, bxtwc_suspend, bxtwc_resume); static const struct acpi_device_id bxtwc_acpi_ids[] = { { "INT34D3", }, { } }; MODULE_DEVICE_TABLE(acpi, pmic_acpi_ids); static struct platform_driver bxtwc_driver = { .probe = bxtwc_probe, .remove = bxtwc_remove, .shutdown = bxtwc_shutdown, .driver = { .name = "BXTWC PMIC", .pm = &bxtwc_pm_ops, .acpi_match_table = ACPI_PTR(bxtwc_acpi_ids), }, }; module_platform_driver(bxtwc_driver); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Qipeng Zha<qipeng.zha@intel.com>");
gpl-2.0
RWTH-OS/linux
drivers/iio/common/st_sensors/st_sensors_buffer.c
326
2466
/* * STMicroelectronics sensors buffer library driver * * Copyright 2012-2013 STMicroelectronics Inc. * * Denis Ciocca <denis.ciocca@st.com> * * Licensed under the GPL-2. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/iio/iio.h> #include <linux/iio/trigger.h> #include <linux/interrupt.h> #include <linux/iio/buffer.h> #include <linux/iio/trigger_consumer.h> #include <linux/iio/triggered_buffer.h> #include <linux/irqreturn.h> #include <linux/iio/common/st_sensors.h> static int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf) { int i; struct st_sensor_data *sdata = iio_priv(indio_dev); unsigned int num_data_channels = sdata->num_data_channels; for_each_set_bit(i, indio_dev->active_scan_mask, num_data_channels) { const struct iio_chan_spec *channel = &indio_dev->channels[i]; unsigned int bytes_to_read = DIV_ROUND_UP(channel->scan_type.realbits + channel->scan_type.shift, 8); unsigned int storage_bytes = channel->scan_type.storagebits >> 3; buf = PTR_ALIGN(buf, storage_bytes); if (sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev, channel->address, bytes_to_read, buf, sdata->multiread_bit) < bytes_to_read) return -EIO; /* Advance the buffer pointer */ buf += storage_bytes; } return 0; } irqreturn_t st_sensors_trigger_handler(int irq, void *p) { int len; struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct st_sensor_data *sdata = iio_priv(indio_dev); s64 timestamp; /* * If we do timetamping here, do it before reading the values, because * once we've read the values, new interrupts can occur (when using * the hardware trigger) and the hw_timestamp may get updated. * By storing it in a local variable first, we are safe. 
*/ if (iio_trigger_using_own(indio_dev)) timestamp = sdata->hw_timestamp; else timestamp = iio_get_time_ns(indio_dev); len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data); if (len < 0) goto st_sensors_get_buffer_element_error; iio_push_to_buffers_with_timestamp(indio_dev, sdata->buffer_data, timestamp); st_sensors_get_buffer_element_error: iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; } EXPORT_SYMBOL(st_sensors_trigger_handler); MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>"); MODULE_DESCRIPTION("STMicroelectronics ST-sensors buffer"); MODULE_LICENSE("GPL v2");
gpl-2.0
vadonka/lge-kernel-kang
net/sched/sch_tbf.c
582
10529
/* * net/sched/sch_tbf.c Token Bucket Filter queue. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs - * original idea by Martin Devera * */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <net/netlink.h> #include <net/pkt_sched.h> /* Simple Token Bucket Filter. ======================================= SOURCE. ------- None. Description. ------------ A data flow obeys TBF with rate R and depth B, if for any time interval t_i...t_f the number of transmitted bits does not exceed B + R*(t_f-t_i). Packetized version of this definition: The sequence of packets of sizes s_i served at moments t_i obeys TBF, if for any i<=k: s_i+....+s_k <= B + R*(t_k - t_i) Algorithm. ---------- Let N(t_i) be B/R initially and N(t) grow continuously with time as: N(t+delta) = min{B/R, N(t) + delta} If the first packet in queue has length S, it may be transmitted only at the time t_* when S/R <= N(t_*), and in this case N(t) jumps: N(t_* + 0) = N(t_* - 0) - S/R. Actually, QoS requires two TBF to be applied to a data stream. One of them controls steady state burst size, another one with rate P (peak rate) and depth M (equal to link MTU) limits bursts at a smaller time scale. It is easy to see that P>R, and B>M. If P is infinity, this double TBF is equivalent to a single one. When TBF works in reshaping mode, latency is estimated as: lat = max ((L-B)/R, (L-M)/P) NOTES. ------ If TBF throttles, it starts a watchdog timer, which will wake it up when it is ready to transmit. Note that the minimal timer resolution is 1/HZ. 
If no new packets arrive during this period, or if the device is not awaken by EOI for some previous packet, TBF can stop its activity for 1/HZ. This means, that with depth B, the maximal rate is R_crit = B*HZ F.e. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes. Note that the peak rate TBF is much more tough: with MTU 1500 P_crit = 150Kbytes/sec. So, if you need greater peak rates, use alpha with HZ=1000 :-) With classful TBF, limit is just kept for backwards compatibility. It is passed to the default bfifo qdisc - if the inner qdisc is changed the limit is not effective anymore. */ struct tbf_sched_data { /* Parameters */ u32 limit; /* Maximal length of backlog: bytes */ u32 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */ u32 mtu; u32 max_size; struct qdisc_rate_table *R_tab; struct qdisc_rate_table *P_tab; /* Variables */ long tokens; /* Current number of B tokens */ long ptokens; /* Current number of P tokens */ psched_time_t t_c; /* Time check-point */ struct Qdisc *qdisc; /* Inner qdisc, default - bfifo queue */ struct qdisc_watchdog watchdog; /* Watchdog timer */ }; #define L2T(q,L) qdisc_l2t((q)->R_tab,L) #define L2T_P(q,L) qdisc_l2t((q)->P_tab,L) static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) { struct tbf_sched_data *q = qdisc_priv(sch); int ret; if (qdisc_pkt_len(skb) > q->max_size) return qdisc_reshape_fail(skb, sch); ret = qdisc_enqueue(skb, q->qdisc); if (ret != 0) { if (net_xmit_drop_count(ret)) sch->qstats.drops++; return ret; } sch->q.qlen++; sch->bstats.bytes += qdisc_pkt_len(skb); sch->bstats.packets++; return 0; } static unsigned int tbf_drop(struct Qdisc* sch) { struct tbf_sched_data *q = qdisc_priv(sch); unsigned int len = 0; if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) { sch->q.qlen--; sch->qstats.drops++; } return len; } static struct sk_buff *tbf_dequeue(struct Qdisc* sch) { struct tbf_sched_data *q = qdisc_priv(sch); struct sk_buff *skb; skb = q->qdisc->ops->peek(q->qdisc); 
if (skb) { psched_time_t now; long toks; long ptoks = 0; unsigned int len = qdisc_pkt_len(skb); now = psched_get_time(); toks = psched_tdiff_bounded(now, q->t_c, q->buffer); if (q->P_tab) { ptoks = toks + q->ptokens; if (ptoks > (long)q->mtu) ptoks = q->mtu; ptoks -= L2T_P(q, len); } toks += q->tokens; if (toks > (long)q->buffer) toks = q->buffer; toks -= L2T(q, len); if ((toks|ptoks) >= 0) { skb = qdisc_dequeue_peeked(q->qdisc); if (unlikely(!skb)) return NULL; q->t_c = now; q->tokens = toks; q->ptokens = ptoks; sch->q.qlen--; sch->flags &= ~TCQ_F_THROTTLED; return skb; } qdisc_watchdog_schedule(&q->watchdog, now + max_t(long, -toks, -ptoks)); /* Maybe we have a shorter packet in the queue, which can be sent now. It sounds cool, but, however, this is wrong in principle. We MUST NOT reorder packets under these circumstances. Really, if we split the flow into independent subflows, it would be a very good solution. This is the main idea of all FQ algorithms (cf. CSZ, HPFQ, HFSC) */ sch->qstats.overlimits++; } return NULL; } static void tbf_reset(struct Qdisc* sch) { struct tbf_sched_data *q = qdisc_priv(sch); qdisc_reset(q->qdisc); sch->q.qlen = 0; q->t_c = psched_get_time(); q->tokens = q->buffer; q->ptokens = q->mtu; qdisc_watchdog_cancel(&q->watchdog); } static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = { [TCA_TBF_PARMS] = { .len = sizeof(struct tc_tbf_qopt) }, [TCA_TBF_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, [TCA_TBF_PTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, }; static int tbf_change(struct Qdisc* sch, struct nlattr *opt) { int err; struct tbf_sched_data *q = qdisc_priv(sch); struct nlattr *tb[TCA_TBF_PTAB + 1]; struct tc_tbf_qopt *qopt; struct qdisc_rate_table *rtab = NULL; struct qdisc_rate_table *ptab = NULL; struct Qdisc *child = NULL; int max_size,n; err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy); if (err < 0) return err; err = -EINVAL; if (tb[TCA_TBF_PARMS] == NULL) goto done; qopt = nla_data(tb[TCA_TBF_PARMS]); 
rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]); if (rtab == NULL) goto done; if (qopt->peakrate.rate) { if (qopt->peakrate.rate > qopt->rate.rate) ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]); if (ptab == NULL) goto done; } for (n = 0; n < 256; n++) if (rtab->data[n] > qopt->buffer) break; max_size = (n << qopt->rate.cell_log)-1; if (ptab) { int size; for (n = 0; n < 256; n++) if (ptab->data[n] > qopt->mtu) break; size = (n << qopt->peakrate.cell_log)-1; if (size < max_size) max_size = size; } if (max_size < 0) goto done; if (qopt->limit > 0) { child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit); if (IS_ERR(child)) { err = PTR_ERR(child); goto done; } } sch_tree_lock(sch); if (child) { qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen); qdisc_destroy(q->qdisc); q->qdisc = child; } q->limit = qopt->limit; q->mtu = qopt->mtu; q->max_size = max_size; q->buffer = qopt->buffer; q->tokens = q->buffer; q->ptokens = q->mtu; swap(q->R_tab, rtab); swap(q->P_tab, ptab); sch_tree_unlock(sch); err = 0; done: if (rtab) qdisc_put_rtab(rtab); if (ptab) qdisc_put_rtab(ptab); return err; } static int tbf_init(struct Qdisc* sch, struct nlattr *opt) { struct tbf_sched_data *q = qdisc_priv(sch); if (opt == NULL) return -EINVAL; q->t_c = psched_get_time(); qdisc_watchdog_init(&q->watchdog, sch); q->qdisc = &noop_qdisc; return tbf_change(sch, opt); } static void tbf_destroy(struct Qdisc *sch) { struct tbf_sched_data *q = qdisc_priv(sch); qdisc_watchdog_cancel(&q->watchdog); if (q->P_tab) qdisc_put_rtab(q->P_tab); if (q->R_tab) qdisc_put_rtab(q->R_tab); qdisc_destroy(q->qdisc); } static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb) { struct tbf_sched_data *q = qdisc_priv(sch); struct nlattr *nest; struct tc_tbf_qopt opt; nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; opt.limit = q->limit; opt.rate = q->R_tab->rate; if (q->P_tab) opt.peakrate = q->P_tab->rate; else memset(&opt.peakrate, 0, sizeof(opt.peakrate)); opt.mtu 
= q->mtu; opt.buffer = q->buffer; NLA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt); nla_nest_end(skb, nest); return skb->len; nla_put_failure: nla_nest_cancel(skb, nest); return -1; } static int tbf_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { struct tbf_sched_data *q = qdisc_priv(sch); tcm->tcm_handle |= TC_H_MIN(1); tcm->tcm_info = q->qdisc->handle; return 0; } static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old) { struct tbf_sched_data *q = qdisc_priv(sch); if (new == NULL) new = &noop_qdisc; sch_tree_lock(sch); *old = q->qdisc; q->qdisc = new; qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); qdisc_reset(*old); sch_tree_unlock(sch); return 0; } static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg) { struct tbf_sched_data *q = qdisc_priv(sch); return q->qdisc; } static unsigned long tbf_get(struct Qdisc *sch, u32 classid) { return 1; } static void tbf_put(struct Qdisc *sch, unsigned long arg) { } static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker) { if (!walker->stop) { if (walker->count >= walker->skip) if (walker->fn(sch, 1, walker) < 0) { walker->stop = 1; return; } walker->count++; } } static const struct Qdisc_class_ops tbf_class_ops = { .graft = tbf_graft, .leaf = tbf_leaf, .get = tbf_get, .put = tbf_put, .walk = tbf_walk, .dump = tbf_dump_class, }; static struct Qdisc_ops tbf_qdisc_ops __read_mostly = { .next = NULL, .cl_ops = &tbf_class_ops, .id = "tbf", .priv_size = sizeof(struct tbf_sched_data), .enqueue = tbf_enqueue, .dequeue = tbf_dequeue, .peek = qdisc_peek_dequeued, .drop = tbf_drop, .init = tbf_init, .reset = tbf_reset, .destroy = tbf_destroy, .change = tbf_change, .dump = tbf_dump, .owner = THIS_MODULE, }; static int __init tbf_module_init(void) { return register_qdisc(&tbf_qdisc_ops); } static void __exit tbf_module_exit(void) { unregister_qdisc(&tbf_qdisc_ops); } module_init(tbf_module_init) module_exit(tbf_module_exit) 
MODULE_LICENSE("GPL");
gpl-2.0
getitnowmarketing/mecha_2.6.32
arch/x86/kernel/early_printk.c
582
6068
#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/screen_info.h>
#include <linux/usb/ch9.h>
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/fcntl.h>
#include <asm/setup.h>
#include <xen/hvc-console.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <linux/usb/ehci_def.h>

/* Simple VGA output */
#define VGABASE		(__ISA_IO_base + 0xb8000)

static int max_ypos = 25, max_xpos = 80;
static int current_ypos = 25, current_xpos;

/*
 * Write @n characters of @str straight into VGA text memory, scrolling the
 * screen one line at a time when the cursor passes the bottom row.
 * '\n' moves to the next line, '\r' is ignored; other characters are
 * stored with attribute 0x07 (grey on black).
 */
static void early_vga_write(struct console *con, const char *str, unsigned n)
{
	char c;
	int i, k, j;

	while ((c = *str++) != '\0' && n-- > 0) {
		if (current_ypos >= max_ypos) {
			/* scroll 1 line up */
			for (k = 1, j = 0; k < max_ypos; k++, j++) {
				for (i = 0; i < max_xpos; i++) {
					writew(readw(VGABASE+2*(max_xpos*k+i)),
					       VGABASE + 2*(max_xpos*j + i));
				}
			}
			/* Blank the freed bottom row (0x720 = space, attr 7). */
			for (i = 0; i < max_xpos; i++)
				writew(0x720, VGABASE + 2*(max_xpos*j + i));
			current_ypos = max_ypos-1;
		}
		if (c == '\n') {
			current_xpos = 0;
			current_ypos++;
		} else if (c != '\r')  {
			writew(((0x7 << 8) | (unsigned short) c),
			       VGABASE + 2*(max_xpos*current_ypos +
						current_xpos++));
			if (current_xpos >= max_xpos) {
				current_xpos = 0;
				current_ypos++;
			}
		}
	}
}

static struct console early_vga_console = {
	.name =		"earlyvga",
	.write =	early_vga_write,
	.flags =	CON_PRINTBUFFER,
	.index =	-1,
};

/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */

static int early_serial_base = 0x3f8;  /* ttyS0 */

#define XMTRDY          0x20

#define DLAB		0x80

/* 8250 UART register offsets from early_serial_base. */
#define TXR             0       /*  Transmit register (WRITE) */
#define RXR             0       /*  Receive register  (READ)  */
#define IER             1       /*  Interrupt Enable          */
#define IIR             2       /*  Interrupt ID              */
#define FCR             2       /*  FIFO control              */
#define LCR             3       /*  Line control              */
#define MCR             4       /*  Modem control             */
#define LSR             5       /*  Line Status               */
#define MSR             6       /*  Modem Status              */
#define DLL             0       /*  Divisor Latch Low         */
#define DLH             1       /*  Divisor latch High        */

/*
 * Busy-wait until the transmitter is ready, then emit one byte.
 * Returns 0 on success, -1 if the transmitter never became ready.
 */
static int early_serial_putc(unsigned char ch)
{
	unsigned timeout = 0xffff;

	while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
		cpu_relax();
	outb(ch, early_serial_base + TXR);
	return timeout ? 0 : -1;
}

/* Console write hook: emit @n chars, expanding '\n' to "\r\n". */
static void early_serial_write(struct console *con, const char *s, unsigned n)
{
	while (*s && n-- > 0) {
		if (*s == '\n')
			early_serial_putc('\r');
		early_serial_putc(*s);
		s++;
	}
}

#define DEFAULT_BAUD 9600

/*
 * Parse an "earlyprintk=serial[,ttySn|0xPORT][,baud]" option string and
 * program the UART: 8n1, no interrupts, no FIFO, DTR+RTS, and the divisor
 * for the requested (or default 9600) baud rate.
 */
static __init void early_serial_init(char *s)
{
	unsigned char c;
	unsigned divisor;
	unsigned baud = DEFAULT_BAUD;
	char *e;

	if (*s == ',')
		++s;

	if (*s) {
		unsigned port;
		if (!strncmp(s, "0x", 2)) {
			/* Raw I/O port given in hex. */
			early_serial_base = simple_strtoul(s, &e, 16);
		} else {
			static const int __initconst bases[] = { 0x3f8, 0x2f8 };

			if (!strncmp(s, "ttyS", 4))
				s += 4;
			port = simple_strtoul(s, &e, 10);
			/* Only ttyS0/ttyS1 supported; fall back to ttyS0. */
			if (port > 1 || s == e)
				port = 0;
			early_serial_base = bases[port];
		}
		s += strcspn(s, ",");
		if (*s == ',')
			s++;
	}

	outb(0x3, early_serial_base + LCR);	/* 8n1 */
	outb(0, early_serial_base + IER);	/* no interrupt */
	outb(0, early_serial_base + FCR);	/* no fifo */
	outb(0x3, early_serial_base + MCR);	/* DTR + RTS */

	if (*s) {
		baud = simple_strtoul(s, &e, 0);
		if (baud == 0 || s == e)
			baud = DEFAULT_BAUD;
	}

	/* Program the baud divisor with DLAB set, then restore LCR. */
	divisor = 115200 / baud;
	c = inb(early_serial_base + LCR);
	outb(c | DLAB, early_serial_base + LCR);
	outb(divisor & 0xff, early_serial_base + DLL);
	outb((divisor >> 8) & 0xff, early_serial_base + DLH);
	outb(c & ~DLAB, early_serial_base + LCR);
}

static struct console early_serial_console = {
	.name =		"earlyser",
	.write =	early_serial_write,
	.flags =	CON_PRINTBUFFER,
	.index =	-1,
};

/* Direct interface for emergencies */
static struct console *early_console = &early_vga_console;
static int __initdata early_console_initialized;

/* printf-style output through whichever early console is registered. */
asmlinkage void early_printk(const char *fmt, ...)
{
	char buf[512];
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vscnprintf(buf, sizeof(buf), fmt, ap);
	early_console->write(early_console, buf, n);
	va_end(ap);
}

/*
 * Make @con the active early console.  @keep_early controls whether the
 * console survives (clears CON_BOOT) or is unregistered once real
 * consoles come up (sets CON_BOOT).  Only one early console is allowed;
 * a second request is reported and ignored.
 */
static inline void early_console_register(struct console *con, int keep_early)
{
	if (early_console->index != -1) {
		printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n",
		       con->name);
		return;
	}
	early_console = con;
	if (keep_early)
		early_console->flags &= ~CON_BOOT;
	else
		early_console->flags |= CON_BOOT;
	register_console(early_console);
}

/*
 * "earlyprintk=" parameter handler.  Scans the option string for the
 * serial/ttyS/vga/dbgp/xen keywords (advancing one char per iteration so
 * keywords may appear anywhere) and registers the matching console.
 * "keep" anywhere in the string preserves the console after boot.
 */
static int __init setup_early_printk(char *buf)
{
	int keep;

	if (!buf)
		return 0;

	if (early_console_initialized)
		return 0;
	early_console_initialized = 1;

	keep = (strstr(buf, "keep") != NULL);

	while (*buf != '\0') {
		if (!strncmp(buf, "serial", 6)) {
			buf += 6;
			early_serial_init(buf);
			early_console_register(&early_serial_console, keep);
			if (!strncmp(buf, ",ttyS", 5))
				buf += 5;
		}
		if (!strncmp(buf, "ttyS", 4)) {
			early_serial_init(buf + 4);
			early_console_register(&early_serial_console, keep);
		}
		if (!strncmp(buf, "vga", 3) &&
		    boot_params.screen_info.orig_video_isVGA == 1) {
			max_xpos = boot_params.screen_info.orig_video_cols;
			max_ypos = boot_params.screen_info.orig_video_lines;
			current_ypos = boot_params.screen_info.orig_y;
			early_console_register(&early_vga_console, keep);
		}
#ifdef CONFIG_EARLY_PRINTK_DBGP
		if (!strncmp(buf, "dbgp", 4) && !early_dbgp_init(buf + 4))
			early_console_register(&early_dbgp_console, keep);
#endif
#ifdef CONFIG_HVC_XEN
		if (!strncmp(buf, "xen", 3))
			early_console_register(&xenboot_console, keep);
#endif
		buf++;
	}
	return 0;
}

early_param("earlyprintk", setup_early_printk);
gpl-2.0
InfinitiveOS-Devices/kernel_xiaomi_armani
drivers/media/platform/msm/camera_v2/sensor/hi256.c
838
41929
/* Copyright (c) 2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include "msm_sensor.h" #include "msm_cci.h" #include "msm_camera_io_util.h" #define HI256_SENSOR_NAME "hi256" #define PLATFORM_DRIVER_NAME "msm_camera_hi256" #define CONFIG_MSMB_CAMERA_DEBUG #undef CDBG #ifdef CONFIG_MSMB_CAMERA_DEBUG #define CDBG(fmt, args...) pr_err(fmt, ##args) #else #define CDBG(fmt, args...) do { } while (0) #endif DEFINE_MSM_MUTEX(hi256_mut); static struct msm_sensor_ctrl_t hi256_s_ctrl; static struct msm_sensor_power_setting hi256_power_setting[] = { { .seq_type = SENSOR_GPIO, .seq_val = SENSOR_GPIO_STANDBY, .config_val = GPIO_OUT_LOW, .delay = 0, }, { .seq_type = SENSOR_GPIO, .seq_val = SENSOR_GPIO_STANDBY, .config_val = GPIO_OUT_HIGH, .delay = 0, }, { .seq_type = SENSOR_VREG, .seq_val = CAM_VIO, .config_val = 0, .delay = 0, }, { .seq_type = SENSOR_VREG, .seq_val = CAM_VANA, .config_val = 0, .delay = 0, }, { .seq_type = SENSOR_VREG, .seq_val = CAM_VDIG, .config_val = 0, .delay = 0, }, { .seq_type = SENSOR_CLK, .seq_val = SENSOR_CAM_MCLK, .config_val = 24000000, .delay = 5, }, { .seq_type = SENSOR_GPIO, .seq_val = SENSOR_GPIO_STANDBY, .config_val = GPIO_OUT_LOW, .delay = 0, }, { .seq_type = SENSOR_GPIO, .seq_val = SENSOR_GPIO_RESET, .config_val = GPIO_OUT_LOW, .delay = 10, }, { .seq_type = SENSOR_GPIO, .seq_val = SENSOR_GPIO_RESET, .config_val = GPIO_OUT_HIGH, .delay = 1, }, { .seq_type = SENSOR_I2C_MUX, .seq_val = 0, .config_val = 0, .delay = 0, }, }; static struct msm_camera_i2c_reg_conf hi256_uxga_settings[] = { {0x03, 0x00}, 
{0x01, 0xf1}, {0x03, 0x20}, {0x10, 0x1c}, {0x03, 0x22}, {0x10, 0x69}, {0x03, 0x00}, {0x12, 0x00}, {0x20, 0x00}, {0x21, 0x0a}, {0x22, 0x00}, {0x23, 0x0a}, {0x40, 0x01}, {0x41, 0x68}, {0x42, 0x00}, {0x43, 0x12}, {0x03, 0x10}, {0x3f, 0x00}, {0x03, 0x12}, {0x20, 0x0f}, {0x21, 0x0f}, {0x90, 0x5d}, {0x03, 0x13}, {0x80, 0xfd}, {0x03, 0x00}, {0x10, 0x00}, {0x03, 0x48}, {0x72, 0x81}, {0x30, 0x0c}, {0x31, 0x80}, {0x03, 0x20}, {0x10, 0x9c}, {0x03, 0x22}, {0x10, 0xe9}, }; static struct msm_camera_i2c_reg_conf hi256_start_settings[] = { {0x03, 0x00}, {0x01, 0xf0}, }; static struct msm_camera_i2c_reg_conf hi256_stop_settings[] = { {0x03, 0x00}, {0x01, 0xf1}, }; static struct msm_camera_i2c_reg_conf hi256_recommend_settings[] = { {0x01, 0xf1}, {0x01, 0xf3}, {0x01, 0xf1}, {0x08, 0x0f}, {0x0a, 0x00}, {0x03, 0x20}, {0x10, 0x1c}, {0x03, 0x22}, {0x10, 0x69}, {0x03, 0x00}, {0x10, 0x13}, {0x11, 0x90}, /* no H/V flip */ {0x12, 0x00}, {0x0b, 0xaa}, {0x0c, 0xaa}, {0x0d, 0xaa}, {0x20, 0x00}, {0x21, 0x06}, {0x22, 0x00}, {0x23, 0x05}, {0x24, 0x04}, {0x25, 0xb0}, {0x26, 0x06}, {0x27, 0x40}, {0x40, 0x01}, {0x41, 0x18}, {0x42, 0x00}, {0x43, 0x02}, {0x45, 0x04}, {0x46, 0x18}, {0x47, 0xd8}, {0x80, 0x2e}, {0x81, 0x7e}, {0x82, 0x90}, {0x83, 0x00}, {0x84, 0x0c}, {0x85, 0x00}, {0x90, 0x0a}, {0x91, 0x0a}, {0x92, 0x78}, {0x93, 0x70}, {0x94, 0xff}, {0x95, 0xff}, {0x96, 0xdc}, {0x97, 0xfe}, {0x98, 0x38}, {0xa0, 0x48}, {0xa2, 0x48}, {0xa4, 0x48}, {0xa6, 0x48}, {0xa8, 0x49}, {0xaa, 0x49}, {0xac, 0x49}, {0xae, 0x49}, {0x99, 0x43}, {0x9a, 0x43}, {0x9b, 0x43}, {0x9c, 0x43}, {0x03, 0x02}, {0x12, 0x03}, {0x13, 0x03}, {0x16, 0x00}, {0x17, 0x8C}, {0x18, 0x4c}, {0x19, 0x00}, {0x1a, 0x39}, {0x1c, 0x09}, {0x1d, 0x40}, {0x1e, 0x30}, {0x1f, 0x10}, {0x20, 0x77}, {0x21, 0xde}, {0x22, 0xa7}, {0x23, 0x30}, {0x27, 0x3c}, {0x2b, 0x80}, {0x2e, 0x11}, {0x2f, 0xa1}, {0x30, 0x05}, {0x50, 0x20}, {0x52, 0x01}, {0x53, 0xc1}, {0x55, 0x1c}, {0x56, 0x11}, {0x5d, 0xa2}, {0x5e, 0x5a}, {0x60, 0x87}, {0x61, 0x99}, {0x62, 0x88}, {0x63, 
0x97}, {0x64, 0x88}, {0x65, 0x97}, {0x67, 0x0c}, {0x68, 0x0c}, {0x69, 0x0c}, {0x72, 0x89}, {0x73, 0x96}, {0x74, 0x89}, {0x75, 0x96}, {0x76, 0x89}, {0x77, 0x96}, {0x7c, 0x85}, {0x7d, 0xaf}, {0x80, 0x01}, {0x81, 0x7f}, {0x82, 0x13}, {0x83, 0x24}, {0x84, 0x7d}, {0x85, 0x81}, {0x86, 0x7d}, {0x87, 0x81}, {0x92, 0x48}, {0x93, 0x54}, {0x94, 0x7d}, {0x95, 0x81}, {0x96, 0x7d}, {0x97, 0x81}, {0xa0, 0x02}, {0xa1, 0x7b}, {0xa2, 0x02}, {0xa3, 0x7b}, {0xa4, 0x7b}, {0xa5, 0x02}, {0xa6, 0x7b}, {0xa7, 0x02}, {0xa8, 0x85}, {0xa9, 0x8c}, {0xaa, 0x85}, {0xab, 0x8c}, {0xac, 0x10}, {0xad, 0x16}, {0xae, 0x10}, {0xaf, 0x16}, {0xb0, 0x99}, {0xb1, 0xa3}, {0xb2, 0xa4}, {0xb3, 0xae}, {0xb4, 0x9b}, {0xb5, 0xa2}, {0xb6, 0xa6}, {0xb7, 0xac}, {0xb8, 0x9b}, {0xb9, 0x9f}, {0xba, 0xa6}, {0xbb, 0xaa}, {0xbc, 0x9b}, {0xbd, 0x9f}, {0xbe, 0xa6}, {0xbf, 0xaa}, {0xc4, 0x2c}, {0xc5, 0x43}, {0xc6, 0x63}, {0xc7, 0x79}, {0xc8, 0x2d}, {0xc9, 0x42}, {0xca, 0x2d}, {0xcb, 0x42}, {0xcc, 0x64}, {0xcd, 0x78}, {0xce, 0x64}, {0xcf, 0x78}, {0xd0, 0x0a}, {0xd1, 0x09}, {0xd4, 0x0a}, {0xd5, 0x0a}, {0xd6, 0x78}, {0xd7, 0x70}, {0xe0, 0xc4}, {0xe1, 0xc4}, {0xe2, 0xc4}, {0xe3, 0xc4}, {0xe4, 0x00}, {0xe8, 0x80}, {0xe9, 0x40}, {0xea, 0x7f}, {0xf0, 0x01}, {0xf1, 0x01}, {0xf2, 0x01}, {0xf3, 0x01}, {0xf4, 0x01}, {0x03, 0x03}, {0x10, 0x10}, {0x03, 0x10}, {0x10, 0x03}, {0x12, 0x30}, {0x13, 0x0a}, {0x20, 0x00}, {0x30, 0x00}, {0x31, 0x00}, {0x32, 0x00}, {0x33, 0x00}, {0x34, 0x30}, {0x35, 0x00}, {0x36, 0x00}, {0x38, 0x00}, {0x3e, 0x58}, {0x3f, 0x00}, {0x40, 0x80}, {0x41, 0x00}, {0x48, 0x90}, {0x60, 0x67}, {0x61, 0x95}, {0x62, 0x95}, {0x63, 0x50}, {0x64, 0x41}, {0x66, 0x42}, {0x67, 0x20}, {0x6a, 0x80}, {0x6b, 0x80}, {0x6c, 0x80}, {0x6d, 0x80}, {0x03, 0x11}, {0x10, 0x7f}, {0x11, 0x40}, {0x12, 0x0a}, {0x13, 0xbb}, {0x26, 0x31}, {0x27, 0x34}, {0x28, 0x0f}, {0x29, 0x10}, {0x2b, 0x30}, {0x2c, 0x32}, {0x30, 0x70}, {0x31, 0x10}, {0x32, 0x58}, {0x33, 0x09}, {0x34, 0x06}, {0x35, 0x03}, {0x36, 0x70}, {0x37, 0x18}, {0x38, 0x58}, {0x39, 0x09}, 
{0x3a, 0x06}, {0x3b, 0x03}, {0x3c, 0x80}, {0x3d, 0x18}, {0x3e, 0x80}, {0x3f, 0x0D}, {0x40, 0x0A}, {0x41, 0x08}, {0x42, 0x80}, {0x43, 0x18}, {0x44, 0x80}, {0x45, 0x12}, {0x46, 0x10}, {0x47, 0x10}, {0x48, 0x90}, {0x49, 0x40}, {0x4a, 0x80}, {0x4b, 0x13}, {0x4c, 0x10}, {0x4d, 0x11}, {0x4e, 0x80}, {0x4f, 0x30}, {0x50, 0x80}, {0x51, 0x13}, {0x52, 0x10}, {0x53, 0x13}, {0x54, 0x11}, {0x55, 0x17}, {0x56, 0x20}, {0x57, 0x01}, {0x58, 0x00}, {0x59, 0x00}, {0x5a, 0x1f}, {0x5b, 0x3f}, {0x5c, 0x00}, {0x60, 0x3f}, {0x62, 0x10}, {0x70, 0x06}, {0x03, 0x12}, {0x20, 0x0f}, {0x21, 0x0f}, {0x25, 0x30}, {0x28, 0x00}, {0x29, 0x00}, {0x2a, 0x00}, {0x30, 0x30}, {0x31, 0x38}, {0x32, 0x42}, {0x33, 0x60}, {0x34, 0x70}, {0x35, 0x80}, {0x36, 0xa0}, {0x40, 0xa0}, {0x41, 0x40}, {0x42, 0xa0}, {0x43, 0x90}, {0x44, 0x90}, {0x45, 0x80}, {0x46, 0xb0}, {0x47, 0x55}, {0x48, 0xa0}, {0x49, 0x90}, {0x4a, 0x90}, {0x4b, 0x80}, {0x4c, 0xb0}, {0x4d, 0x40}, {0x4e, 0xb0}, {0x4f, 0xb0}, {0x50, 0xc0}, {0x51, 0x80}, {0x52, 0xb0}, {0x53, 0x60}, {0x54, 0xc0}, {0x55, 0xc0}, {0x56, 0xb0}, {0x57, 0x70}, {0x58, 0x90}, {0x59, 0x40}, {0x5a, 0xd0}, {0x5b, 0xd0}, {0x5c, 0xc0}, {0x5d, 0x70}, {0x5e, 0x88}, {0x5f, 0x40}, {0x60, 0xe0}, {0x61, 0xe0}, {0x62, 0xe0}, {0x63, 0x80}, {0x70, 0x15}, {0x71, 0x01}, {0x72, 0x18}, {0x73, 0x01}, {0x74, 0x25}, {0x75, 0x15}, {0x80, 0x20}, {0x81, 0x40}, {0x82, 0x65}, {0x85, 0x1a}, {0x88, 0x00}, {0x89, 0x00}, {0x90, 0x5d}, {0xD0, 0x0c}, {0xD1, 0x80}, {0xD2, 0x67}, {0xD3, 0x00}, {0xD4, 0x00}, {0xD5, 0x02}, {0xD6, 0xff}, {0xD7, 0x18}, {0x3b, 0x06}, {0x3c, 0x06}, {0xc5, 0x00}, {0xc6, 0x00}, {0x03, 0x13}, {0x10, 0xcb}, {0x11, 0x7b}, {0x12, 0x07}, {0x14, 0x00}, {0x20, 0x15}, {0x21, 0x13}, {0x22, 0x33}, {0x23, 0x05}, {0x24, 0x09}, {0x25, 0x0a}, {0x26, 0x18}, {0x27, 0x30}, {0x29, 0x12}, {0x2a, 0x50}, {0x2b, 0x02}, {0x2c, 0x02}, {0x25, 0x06}, {0x2d, 0x0c}, {0x2e, 0x12}, {0x2f, 0x12}, {0x50, 0x10}, {0x51, 0x14}, {0x52, 0x12}, {0x53, 0x0c}, {0x54, 0x0f}, {0x55, 0x0c}, {0x56, 0x10}, {0x57, 0x13}, {0x58, 
0x12}, {0x59, 0x0c}, {0x5a, 0x0f}, {0x5b, 0x0c}, {0x5c, 0x25}, {0x5d, 0x25}, {0x5e, 0x25}, {0x5f, 0x25}, {0x60, 0x25}, {0x61, 0x25}, {0x62, 0x25}, {0x63, 0x25}, {0x64, 0x25}, {0x65, 0x25}, {0x66, 0x25}, {0x67, 0x25}, {0x68, 0x07}, {0x69, 0x07}, {0x6a, 0x07}, {0x6b, 0x05}, {0x6c, 0x05}, {0x6d, 0x05}, {0x6e, 0x07}, {0x6f, 0x07}, {0x70, 0x07}, {0x71, 0x05}, {0x72, 0x05}, {0x73, 0x05}, {0x80, 0x01}, {0x81, 0x1f}, {0x82, 0x05}, {0x83, 0x31}, {0x90, 0x05}, {0x91, 0x05}, {0x92, 0x33}, {0x93, 0x30}, {0x94, 0x03}, {0x95, 0x14}, {0x97, 0x20}, {0x99, 0x20}, {0xa0, 0x01}, {0xa1, 0x02}, {0xa2, 0x01}, {0xa3, 0x02}, {0xa4, 0x05}, {0xa5, 0x05}, {0xa6, 0x07}, {0xa7, 0x08}, {0xa8, 0x07}, {0xa9, 0x08}, {0xaa, 0x07}, {0xab, 0x08}, {0xb0, 0x22}, {0xb1, 0x2a}, {0xb2, 0x28}, {0xb3, 0x22}, {0xb4, 0x2a}, {0xb5, 0x28}, {0xb6, 0x22}, {0xb7, 0x2a}, {0xb8, 0x28}, {0xb9, 0x22}, {0xba, 0x2a}, {0xbb, 0x28}, {0xbc, 0x25}, {0xbd, 0x2a}, {0xbe, 0x27}, {0xbf, 0x25}, {0xc0, 0x2a}, {0xc1, 0x27}, {0xc2, 0x1e}, {0xc3, 0x24}, {0xc4, 0x20}, {0xc5, 0x1e}, {0xc6, 0x24}, {0xc7, 0x20}, {0xc8, 0x18}, {0xc9, 0x20}, {0xca, 0x1e}, {0xcb, 0x18}, {0xcc, 0x20}, {0xcd, 0x1e}, {0xce, 0x18}, {0xcf, 0x20}, {0xd0, 0x1e}, {0xd1, 0x18}, {0xd2, 0x20}, {0xd3, 0x1e}, {0x03, 0x14}, {0x10, 0x11}, {0x14, 0x80}, {0x15, 0x80}, {0x16, 0x80}, {0x17, 0x80}, {0x18, 0x80}, {0x19, 0x80}, {0x20, 0x80}, {0x21, 0x95}, {0x22, 0xdc}, {0x23, 0xcb}, {0x24, 0xcf}, {0x30, 0xc8}, {0x31, 0x2b}, {0x32, 0x00}, {0x33, 0x00}, {0x34, 0x90}, {0x40, 0x54}, {0x50, 0x4b}, {0x60, 0x42}, {0x70, 0x4b}, {0x03, 0x15}, {0x10, 0x0f}, {0x14, 0x46}, {0x15, 0x36}, {0x16, 0x26}, {0x17, 0x2f}, {0x30, 0x8f}, {0x31, 0x59}, {0x32, 0x0a}, {0x33, 0x15}, {0x34, 0x5b}, {0x35, 0x06}, {0x36, 0x07}, {0x37, 0x40}, {0x38, 0x87}, {0x40, 0x94}, {0x41, 0x20}, {0x42, 0x89}, {0x43, 0x84}, {0x44, 0x03}, {0x45, 0x01}, {0x46, 0x88}, {0x47, 0x9c}, {0x48, 0x28}, {0x50, 0x02}, {0x51, 0x82}, {0x52, 0x00}, {0x53, 0x07}, {0x54, 0x11}, {0x55, 0x98}, {0x56, 0x00}, {0x57, 0x0b}, {0x58, 0x8b}, 
{0x80, 0x03}, {0x85, 0x40}, {0x87, 0x02}, {0x88, 0x00}, {0x89, 0x00}, {0x8a, 0x00}, {0x03, 0x16}, {0x10, 0x31}, {0x18, 0x5e}, {0x19, 0x5d}, {0x1a, 0x0e}, {0x1b, 0x01}, {0x1c, 0xdc}, {0x1d, 0xfe}, {0x30, 0x00}, {0x31, 0x0a}, {0x32, 0x1f}, {0x33, 0x33}, {0x34, 0x53}, {0x35, 0x6c}, {0x36, 0x81}, {0x37, 0x94}, {0x38, 0xa4}, {0x39, 0xb3}, {0x3a, 0xc0}, {0x3b, 0xcb}, {0x3c, 0xd5}, {0x3d, 0xde}, {0x3e, 0xe6}, {0x3f, 0xee}, {0x40, 0xf5}, {0x41, 0xfc}, {0x42, 0xff}, {0x50, 0x00}, {0x51, 0x08}, {0x52, 0x1e}, {0x53, 0x36}, {0x54, 0x5a}, {0x55, 0x75}, {0x56, 0x8d}, {0x57, 0xa1}, {0x58, 0xb2}, {0x59, 0xbe}, {0x5a, 0xc9}, {0x5b, 0xd2}, {0x5c, 0xdb}, {0x5d, 0xe3}, {0x5e, 0xeb}, {0x5f, 0xf0}, {0x60, 0xf5}, {0x61, 0xf7}, {0x62, 0xf8}, {0x70, 0x00}, {0x71, 0x08}, {0x72, 0x17}, {0x73, 0x2f}, {0x74, 0x53}, {0x75, 0x6c}, {0x76, 0x81}, {0x77, 0x94}, {0x78, 0xa4}, {0x79, 0xb3}, {0x7a, 0xc0}, {0x7b, 0xcb}, {0x7c, 0xd5}, {0x7d, 0xde}, {0x7e, 0xe6}, {0x7f, 0xee}, {0x80, 0xf4}, {0x81, 0xfa}, {0x82, 0xff}, {0x03, 0x17}, {0x10, 0xf7}, {0xC4, 0x66}, {0xC5, 0x55}, {0x03, 0x20}, {0x11, 0x1c}, {0x1a, 0x08}, {0x20, 0x05}, {0x21, 0x30}, {0x22, 0x10}, {0x23, 0x00}, {0x24, 0x00}, {0x28, 0xe7}, {0x29, 0x0d}, {0x2a, 0xf0}, {0x2b, 0x34}, {0x30, 0x78}, {0x2c, 0xc2}, {0x2d, 0xff}, {0x2e, 0x33}, {0x30, 0xf8}, {0x32, 0x03}, {0x33, 0x2e}, {0x34, 0x30}, {0x35, 0xd4}, {0x36, 0xfe}, {0x37, 0x32}, {0x38, 0x04}, {0x39, 0x22}, {0x3a, 0xde}, {0x3b, 0x22}, {0x3c, 0xde}, {0x50, 0x45}, {0x51, 0x88}, {0x56, 0x03}, {0x57, 0xf7}, {0x58, 0x14}, {0x59, 0x88}, {0x5a, 0x04}, {0x60, 0x55}, {0x61, 0x55}, {0x62, 0x6A}, {0x63, 0xA9}, {0x64, 0x6A}, {0x65, 0xA9}, {0x66, 0x6B}, {0x67, 0xE9}, {0x68, 0x6B}, {0x69, 0xE9}, {0x6a, 0x6A}, {0x6b, 0xA9}, {0x6c, 0x6A}, {0x6d, 0xA9}, {0x6e, 0x55}, {0x6f, 0x55}, {0x70, 0x76}, {0x71, 0x82}, {0x76, 0x43}, {0x77, 0x04}, {0x78, 0x23}, {0x79, 0x46}, {0x7a, 0x23}, {0x7b, 0x22}, {0x7d, 0x23}, {0x83, 0x01}, {0x84, 0x5f}, {0x85, 0x90}, {0x86, 0x01}, {0x87, 0xe0}, {0x88, 0x04}, {0x89, 0x93}, {0x8a, 
0xe0}, {0x8B, 0x75}, {0x8C, 0x30}, {0x8D, 0x61}, {0x8E, 0x80}, {0x9c, 0x16}, {0x9d, 0x80}, {0x9e, 0x01}, {0x9f, 0xe0}, {0xb0, 0x18}, {0xb1, 0x14}, {0xb2, 0x80}, {0xb3, 0x18}, {0xb4, 0x1a}, {0xb5, 0x44}, {0xb6, 0x2f}, {0xb7, 0x28}, {0xb8, 0x25}, {0xb9, 0x22}, {0xba, 0x21}, {0xbb, 0x20}, {0xbc, 0x32}, {0xbd, 0x30}, {0xc0, 0x10}, {0xc1, 0x2b}, {0xc2, 0x2b}, {0xc3, 0x2b}, {0xc4, 0x08}, {0xc8, 0x40}, {0xc9, 0x40}, {0x03, 0x22}, {0x10, 0xfd}, {0x11, 0x2e}, {0x19, 0x01}, {0x20, 0x10}, {0x21, 0x80}, {0x24, 0x01}, {0x30, 0x80}, {0x31, 0x80}, {0x38, 0x11}, {0x39, 0x34}, {0x40, 0xfa}, {0x41, 0x44}, {0x42, 0x33}, {0x43, 0xf6}, {0x44, 0x44}, {0x45, 0x33}, {0x46, 0x00}, {0x50, 0xb2}, {0x51, 0x81}, {0x52, 0x98}, {0x80, 0x38}, {0x81, 0x20}, {0x82, 0x38}, {0x83, 0x60}, {0x84, 0x0a}, {0x85, 0x60}, {0x86, 0x15}, {0x87, 0x49}, {0x88, 0x10}, {0x89, 0x50}, {0x8a, 0x20}, {0x8b, 0x41}, {0x8c, 0x39}, {0x8d, 0x34}, {0x8e, 0x28}, {0x8f, 0x52}, {0x90, 0x50}, {0x91, 0x4c}, {0x92, 0x49}, {0x93, 0x44}, {0x94, 0x3b}, {0x95, 0x37}, {0x96, 0x31}, {0x97, 0x28}, {0x98, 0x24}, {0x99, 0x20}, {0x9a, 0x20}, {0x9b, 0x88}, {0x9c, 0x88}, {0x9d, 0x48}, {0x9e, 0x38}, {0x9f, 0x30}, {0x8f, 0x53}, {0x90, 0x52}, {0x91, 0x51}, {0x92, 0x4e}, {0x93, 0x46}, {0x94, 0x3d}, {0x95, 0x34}, {0x96, 0x2e}, {0x97, 0x29}, {0x98, 0x22}, {0x99, 0x1c}, {0x9a, 0x18}, {0xa0, 0x60}, {0xa1, 0x34}, {0xa2, 0x6f}, {0xa3, 0xff}, {0xa4, 0x14}, {0xa5, 0x2c}, {0xa6, 0xcf}, {0xad, 0x40}, {0xae, 0x4a}, {0xaf, 0x28}, {0xb0, 0x26}, {0xb1, 0x00}, {0xb4, 0xea}, {0xb8, 0xa0}, {0xb9, 0x00}, {0x03, 0x48}, {0x70, 0x03}, {0x71, 0x30}, {0x72, 0x81}, {0x73, 0x10}, {0x70, 0x85}, {0x03, 0x48}, {0x03, 0x48}, {0x03, 0x48}, {0x03, 0x48}, {0x70, 0x95}, {0x10, 0x1c}, {0x11, 0x10}, {0x12, 0x00}, {0x14, 0x00}, {0x16, 0x04}, {0x18, 0x80}, {0x19, 0x00}, {0x1a, 0xa0}, {0x1b, 0x0d}, {0x1c, 0x01}, {0x1d, 0x0a}, {0x1e, 0x07}, {0x1f, 0x0b}, {0x23, 0x01}, {0x24, 0x1e}, {0x25, 0x00}, {0x26, 0x00}, {0x27, 0x08}, {0x28, 0x00}, {0x30, 0x06}, {0x31, 0x40}, {0x32, 0x13}, 
{0x33, 0x0c}, {0x34, 0x04}, {0x35, 0x06}, {0x36, 0x01}, {0x37, 0x06}, {0x39, 0x4f}, {0x03, 0x20}, {0x10, 0x9c}, {0x03, 0x22}, {0x10, 0xe9}, {0x03, 0x00}, {0x03, 0x00}, {0x03, 0x00}, {0x03, 0x00}, {0x03, 0x00}, {0x03, 0x00}, {0x03, 0x00}, {0x03, 0x00}, {0x03, 0x00}, {0x03, 0x00}, {0x03, 0x00}, {0x0e, 0x03}, {0x0e, 0x73}, {0x03, 0x20}, {0x88, 0x01}, {0x89, 0x5f}, {0x8a, 0x90}, {0x03, 0x20}, {0x10, 0x9c}, {0x03, 0x22}, {0x10, 0xe9}, {0x03, 0x00}, {0x01, 0xf0}, }; static struct v4l2_subdev_info hi256_subdev_info[] = { { .code = V4L2_MBUS_FMT_YUYV8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .fmt = 1, .order = 0, }, }; static struct msm_camera_i2c_reg_conf hi256_svga_settings[] = { {0x03, 0x20}, {0x10, 0x1c}, {0x03, 0x22}, {0x10, 0x69}, {0x03, 0x00}, {0x10, 0x13}, {0x12, 0x00}, {0x20, 0x00}, {0x21, 0x04}, {0x22, 0x00}, {0x23, 0x07}, {0x40, 0x01}, {0x41, 0x78}, {0x42, 0x00}, {0x43, 0x14}, {0x03, 0x10}, {0x3f, 0x02}, {0x03, 0x12}, {0x20, 0x0f}, {0x21, 0x0f}, {0x90, 0x5d}, {0x03, 0x13}, {0x80, 0x00}, {0x03, 0x48}, {0x72, 0x81}, {0x30, 0x06}, {0x31, 0x40}, {0x03, 0x20}, {0x10, 0x9c}, {0x03, 0x22}, {0x10, 0xe9}, }; static struct msm_camera_i2c_reg_conf hi256_sleep_settings[] = { {0x03, 0x00}, {0x01, 0xf1}, {0x03, 0x02}, {0x55, 0x10}, {0x01, 0xf1}, {0x01, 0xf3}, {0x01, 0xf1}, }; static struct msm_camera_i2c_reg_conf HI256_reg_saturation[11][3] = { { {0x03, 0x10}, {0x61, 0x1c}, {0x62, 0x1c}, }, { {0x03, 0x10}, {0x61, 0x30}, {0x62, 0x30}, }, { {0x03, 0x10}, {0x61, 0x44}, {0x62, 0x44}, }, { {0x03, 0x10}, {0x61, 0x58}, {0x62, 0x58}, }, { {0x03, 0x10}, {0x61, 0x6c}, {0x62, 0x6c}, }, { {0x03, 0x10}, {0x61, 0x95}, {0x62, 0x95}, }, { {0x03, 0x10}, {0x61, 0xa0}, {0x62, 0xa0}, }, { {0x03, 0x10}, {0x61, 0xa8}, {0x62, 0xa8}, }, { {0x03, 0x10}, {0x61, 0xbc}, {0x62, 0xbc}, }, { {0x03, 0x10}, {0x61, 0xd0}, {0x62, 0xd0}, }, { {0x03, 0x10}, {0x61, 0xe4}, {0x62, 0xe4}, }, }; static struct msm_camera_i2c_reg_conf HI256_reg_contrast[11][3] = { { {0x03, 0x10}, {0x13, 0x02}, {0x48, 0x1c}, }, { {0x03, 
0x10}, {0x13, 0x02}, {0x48, 0x30}, }, { {0x03, 0x10}, {0x13, 0x02}, {0x48, 0x44}, }, { {0x03, 0x10}, {0x13, 0x02}, {0x48, 0x58}, }, { {0x03, 0x10}, {0x13, 0x02}, {0x48, 0x6c}, }, { {0x03, 0x10}, {0x13, 0x02}, {0x48, 0x90}, }, { {0x03, 0x10}, {0x13, 0x02}, {0x48, 0x94}, }, { {0x03, 0x10}, {0x13, 0x02}, {0x48, 0xa8}, }, { {0x03, 0x10}, {0x13, 0x02}, {0x48, 0xbc}, }, { {0x03, 0x10}, {0x13, 0x02}, {0x48, 0xd0}, }, { {0x03, 0x10}, {0x13, 0x02}, {0x48, 0xe4}, }, }; static struct msm_camera_i2c_reg_conf HI256_reg_sharpness[7][9] = { { {0x03, 0x13}, {0x20, 0x00}, {0x21, 0x00}, {0x23, 0x04}, {0x24, 0x80}, {0x90, 0x00}, {0x91, 0x00}, {0x94, 0x24}, {0x95, 0x65}, }, /* SHARPNESS LEVEL 0*/ { {0x03, 0x13}, {0x20, 0x04}, {0x21, 0x03}, {0x23, 0x04}, {0x24, 0x80}, {0x90, 0x08}, {0x91, 0x08}, {0x94, 0x24}, {0x95, 0x65}, }, /* SHARPNESS LEVEL 1*/ { {0x03, 0x13}, {0x20, 0x08}, {0x21, 0x07}, {0x23, 0x04}, {0x24, 0x80}, {0x90, 0x32}, {0x91, 0x32}, {0x94, 0x04}, {0x95, 0x0a}, }, /* SHARPNESS LEVEL 2*/ { {0x03, 0x13}, {0x20, 0x15}, {0x21, 0x15}, {0x23, 0x09}, {0x24, 0x11}, {0x90, 0x05}, {0x91, 0x05}, {0x94, 0x10}, {0x95, 0x5a}, }, /* SHARPNESS LEVEL 3*/ { {0x03, 0x13}, {0x20, 0x15}, {0x21, 0x15}, {0x23, 0x04}, {0x24, 0x80}, {0x90, 0xaf}, {0x91, 0xaf}, {0x94, 0x24}, {0x95, 0x65}, }, /* SHARPNESS LEVEL 4*/ { {0x03, 0x13}, {0x20, 0x20}, {0x21, 0x20}, {0x23, 0x04}, {0x24, 0x80}, {0x90, 0xdf}, {0x91, 0xdf}, {0x94, 0x24}, {0x95, 0x65}, }, /* SHARPNESS LEVEL 5*/ { {0x03, 0x13}, {0x20, 0x25}, {0x21, 0x25}, {0x23, 0x04}, {0x24, 0x80}, {0x90, 0xff}, {0x91, 0xff}, {0x94, 0x24}, {0x95, 0x65}, }, /* SHARPNESS LEVEL 6*/ }; static struct msm_camera_i2c_reg_conf HI256_reg_iso[7][3] = { /* auto */ { {0x03, 0x20}, {0x10, 0x9c}, {0xb0, 0x18}, }, /* auto hjt */ { {0x03, 0x20}, {0x10, 0x9c}, {0xb0, 0x18}, }, /* iso 100 */ { {0x03, 0x20}, {0x10, 0x0c}, {0xb0, 0x1B}, }, /* iso 200 */ { {0x03, 0x20}, {0x10, 0x0c}, {0xb0, 0x35}, }, /* iso 400 */ { {0x03, 0x20}, {0x10, 0x0c}, {0xb0, 0x65}, }, /* iso 800 */ { 
{0x03, 0x20}, {0x10, 0x0c}, {0xb0, 0x95}, }, /* iso 1600 */ { {0x03, 0x20}, {0x10, 0x0c}, {0xb0, 0xd0}, }, }; static struct msm_camera_i2c_reg_conf HI256_reg_exposure_compensation[5][2] = { /* -2 */ { {0x03, 0x10}, {0x40, 0x94}, }, /* -1 */ { {0x03, 0x10}, {0x40, 0x80}, }, /* 0 */ { {0x03, 0x10}, {0x40, 0x14}, }, /* 1 */ { {0x03, 0x10}, {0x40, 0x24}, }, /* 2 */ { {0x03, 0x10}, {0x40, 0x34}, }, }; static struct msm_camera_i2c_reg_conf HI256_reg_antibanding[][2] = { /* OFF */ { {0x03, 0x20}, {0x10, 0xcc}, }, /* 50Hz */ { {0x03, 0x20}, {0x10, 0x9c}, }, /* 60Hz */ { {0x03, 0x20}, {0x10, 0x8c}, }, /* AUTO */ { {0x03, 0x20}, {0x10, 0xcc}, }, }; static struct msm_camera_i2c_reg_conf HI256_reg_effect_normal[] = { /* normal: */ {0x03, 0x20}, {0x28, 0xe7}, {0x03, 0x10}, {0x11, 0x03}, {0x12, 0X30}, {0x13, 0x0a}, {0x44, 0x80}, {0x45, 0x80}, }; static struct msm_camera_i2c_reg_conf HI256_reg_effect_black_white[] = { /* B&W: */ {0x03, 0x20}, {0x28, 0xe7}, {0x03, 0x10}, {0x11, 0x03}, {0x12, 0x33}, {0x13, 0x02}, {0x44, 0x80}, {0x45, 0x80}, }; static struct msm_camera_i2c_reg_conf HI256_reg_effect_negative[] = { /* Negative: */ {0x03, 0x20}, {0x28, 0xe7}, {0x03, 0x10}, {0x11, 0x03}, {0x12, 0x08}, {0x13, 0x0a}, {0x14, 0x00}, }; static struct msm_camera_i2c_reg_conf HI256_reg_effect_old_movie[] = { /* Sepia(antique): */ {0x03, 0x20}, {0x28, 0xe7}, {0x03, 0x10}, {0x11, 0x03}, {0x12, 0x33}, {0x13, 0x0a}, {0x44, 0x25}, {0x45, 0xa6}, }; static struct msm_camera_i2c_reg_conf HI256_reg_effect_solarize[] = { {0x03, 0x20}, {0x28, 0xe7}, {0x03, 0x10}, {0x11, 0x0b}, {0x12, 0x00}, {0x13, 0x00}, {0x14, 0x00}, }; static struct msm_camera_i2c_reg_conf HI256_reg_scene_auto[] = { /* <SCENE_auto> */ {0x03, 0x20}, {0x10, 0x1c}, {0x18, 0x38}, {0x88, 0x01}, {0x89, 0x5f}, {0x8a, 0x90}, {0x10, 0x9c}, {0x18, 0x30}, }; static struct msm_camera_i2c_reg_conf HI256_reg_scene_portrait[] = { /* <CAMTUNING_SCENE_PORTRAIT> */ {0x03, 0x20}, {0x10, 0x1c}, {0x18, 0x38}, {0x88, 0x05}, {0x89, 0x7e}, {0x8a, 0x40}, 
{0x10, 0x9c}, {0x18, 0x30}, }; static struct msm_camera_i2c_reg_conf HI256_reg_scene_landscape[] = { /* <CAMTUNING_SCENE_LANDSCAPE> */ {0x03, 0x20}, {0x10, 0x1c}, {0x18, 0x38}, {0x88, 0x05}, {0x89, 0x7e}, {0x8a, 0x40}, {0x10, 0x9c}, {0x18, 0x30}, }; static struct msm_camera_i2c_reg_conf HI256_reg_scene_night[] = { /* <SCENE_NIGHT> */ {0x03, 0x20}, {0x10, 0x1c}, {0x18, 0x38}, {0x88, 0x09}, {0x89, 0x27}, {0x8a, 0xc0}, {0x10, 0x9c}, {0x18, 0x30}, }; static struct msm_camera_i2c_reg_conf HI256_reg_wb_auto[] = { /* Auto: */ {0x03, 0x22}, {0x11, 0x2e}, {0x83, 0x60}, {0x84, 0x0a}, {0x85, 0x60}, {0x86, 0x15}, {0x10, 0xfd}, }; static struct msm_camera_i2c_reg_conf HI256_reg_wb_sunny[] = { /* Sunny: */ {0x03, 0x22}, {0x11, 0x28}, {0x80, 0x33}, {0x82, 0x3d}, {0x83, 0x2e}, {0x84, 0x24}, {0x85, 0x43}, {0x86, 0x3d}, }; static struct msm_camera_i2c_reg_conf HI256_reg_wb_cloudy[] = { /* Cloudy: */ {0x03, 0x22}, {0x11, 0x28}, {0x80, 0x49}, {0x82, 0x24}, {0x83, 0x50}, {0x84, 0x45}, {0x85, 0x24}, {0x86, 0x1E}, }; static struct msm_camera_i2c_reg_conf HI256_reg_wb_office[] = { /* Office: */ {0x03, 0x22}, {0x11, 0x28}, {0x80, 0x20}, {0x82, 0x58}, {0x83, 0x27}, {0x84, 0x22}, {0x85, 0x58}, {0x86, 0x52}, }; static struct msm_camera_i2c_reg_conf HI256_reg_wb_home[] = { /* Home: */ {0x03, 0x22}, {0x11, 0x28}, {0x80, 0x29}, {0x82, 0x54}, {0x83, 0x2e}, {0x84, 0x23}, {0x85, 0x58}, {0x86, 0x4f}, }; static const struct i2c_device_id hi256_i2c_id[] = { {HI256_SENSOR_NAME, (kernel_ulong_t)&hi256_s_ctrl}, { } }; static int32_t msm_hi256_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { return msm_sensor_i2c_probe(client, id, &hi256_s_ctrl); } static struct i2c_driver hi256_i2c_driver = { .id_table = hi256_i2c_id, .probe = msm_hi256_i2c_probe, .driver = { .name = HI256_SENSOR_NAME, }, }; static struct msm_camera_i2c_client hi256_sensor_i2c_client = { .addr_type = MSM_CAMERA_I2C_BYTE_ADDR, }; static const struct of_device_id hi256_dt_match[] = { {.compatible = "shinetech,hi256", 
.data = &hi256_s_ctrl}, {} }; MODULE_DEVICE_TABLE(of, hi256_dt_match); static struct platform_driver hi256_platform_driver = { .driver = { .name = "shinetech,hi256", .owner = THIS_MODULE, .of_match_table = hi256_dt_match, }, }; static void hi256_i2c_write_table(struct msm_sensor_ctrl_t *s_ctrl, struct msm_camera_i2c_reg_conf *table, int num) { int i = 0; int rc = 0; for (i = 0; i < num; ++i) { rc = s_ctrl->sensor_i2c_client->i2c_func_tbl-> i2c_write( s_ctrl->sensor_i2c_client, table->reg_addr, table->reg_data, MSM_CAMERA_I2C_BYTE_DATA); if (rc < 0) { msleep(100); rc = s_ctrl->sensor_i2c_client->i2c_func_tbl-> i2c_write( s_ctrl->sensor_i2c_client, table->reg_addr, table->reg_data, MSM_CAMERA_I2C_BYTE_DATA); } table++; } } static int32_t hi256_sensor_power_down(struct msm_sensor_ctrl_t *s_ctrl) { hi256_i2c_write_table(s_ctrl, &hi256_sleep_settings[0], ARRAY_SIZE(hi256_sleep_settings)); return msm_sensor_power_down(s_ctrl); } static int32_t hi256_platform_probe(struct platform_device *pdev) { int32_t rc; const struct of_device_id *match; match = of_match_device(hi256_dt_match, &pdev->dev); rc = msm_sensor_platform_probe(pdev, match->data); return rc; } static int __init hi256_init_module(void) { int32_t rc; pr_info("%s:%d\n", __func__, __LINE__); rc = platform_driver_probe(&hi256_platform_driver, hi256_platform_probe); if (!rc) return rc; return i2c_add_driver(&hi256_i2c_driver); } static void __exit hi256_exit_module(void) { pr_info("%s:%d\n", __func__, __LINE__); if (hi256_s_ctrl.pdev) { msm_sensor_free_sensor_data(&hi256_s_ctrl); platform_driver_unregister(&hi256_platform_driver); } else i2c_del_driver(&hi256_i2c_driver); return; } static int32_t hi256_sensor_match_id(struct msm_sensor_ctrl_t *s_ctrl) { int32_t rc = 0; uint16_t chipid = 0; rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->i2c_read( s_ctrl->sensor_i2c_client, s_ctrl->sensordata->slave_info->sensor_id_reg_addr, &chipid, MSM_CAMERA_I2C_BYTE_DATA); if (rc < 0) { pr_err("%s: %s: hi256 read id failed\n", 
__func__, s_ctrl->sensordata->sensor_name); return rc; } CDBG("%s: read id: %x expected id %x:\n", __func__, chipid, s_ctrl->sensordata->slave_info->sensor_id); if (chipid != s_ctrl->sensordata->slave_info->sensor_id) { pr_err("msm_sensor_match_id chip id doesnot match\n"); return -ENODEV; } return rc; } static void hi256_set_stauration(struct msm_sensor_ctrl_t *s_ctrl, int value) { pr_debug("%s %d", __func__, value); hi256_i2c_write_table(s_ctrl, &HI256_reg_saturation[value][0], ARRAY_SIZE(HI256_reg_saturation[value])); } static void hi256_set_contrast(struct msm_sensor_ctrl_t *s_ctrl, int value) { pr_debug("%s %d", __func__, value); hi256_i2c_write_table(s_ctrl, &HI256_reg_contrast[value][0], ARRAY_SIZE(HI256_reg_contrast[value])); } static void hi256_set_sharpness(struct msm_sensor_ctrl_t *s_ctrl, int value) { int val = value / 6; pr_debug("%s %d", __func__, value); hi256_i2c_write_table(s_ctrl, &HI256_reg_sharpness[val][0], ARRAY_SIZE(HI256_reg_sharpness[val])); } static void hi256_set_iso(struct msm_sensor_ctrl_t *s_ctrl, int value) { pr_debug("%s %d", __func__, value); hi256_i2c_write_table(s_ctrl, &HI256_reg_iso[value][0], ARRAY_SIZE(HI256_reg_iso[value])); } static void hi256_set_exposure_compensation(struct msm_sensor_ctrl_t *s_ctrl, int value) { int val = (value + 12) / 6; pr_debug("%s %d", __func__, val); hi256_i2c_write_table(s_ctrl, &HI256_reg_exposure_compensation[val][0], ARRAY_SIZE(HI256_reg_exposure_compensation[val])); } static void hi256_set_effect(struct msm_sensor_ctrl_t *s_ctrl, int value) { pr_debug("%s %d", __func__, value); switch (value) { case MSM_CAMERA_EFFECT_MODE_OFF: { hi256_i2c_write_table(s_ctrl, &HI256_reg_effect_normal[0], ARRAY_SIZE(HI256_reg_effect_normal)); break; } case MSM_CAMERA_EFFECT_MODE_MONO: { hi256_i2c_write_table(s_ctrl, &HI256_reg_effect_black_white[0], ARRAY_SIZE(HI256_reg_effect_black_white)); break; } case MSM_CAMERA_EFFECT_MODE_NEGATIVE: { hi256_i2c_write_table(s_ctrl, &HI256_reg_effect_negative[0], 
ARRAY_SIZE(HI256_reg_effect_negative)); break; } case MSM_CAMERA_EFFECT_MODE_SEPIA: { hi256_i2c_write_table(s_ctrl, &HI256_reg_effect_old_movie[0], ARRAY_SIZE(HI256_reg_effect_old_movie)); break; } case MSM_CAMERA_EFFECT_MODE_SOLARIZE: { hi256_i2c_write_table(s_ctrl, &HI256_reg_effect_solarize[0], ARRAY_SIZE(HI256_reg_effect_solarize)); break; } default: hi256_i2c_write_table(s_ctrl, &HI256_reg_effect_normal[0], ARRAY_SIZE(HI256_reg_effect_normal)); } } static void hi256_set_antibanding(struct msm_sensor_ctrl_t *s_ctrl, int value) { pr_debug("%s %d", __func__, value); hi256_i2c_write_table(s_ctrl, &HI256_reg_antibanding[value][0], ARRAY_SIZE(HI256_reg_antibanding[value])); } static void hi256_set_scene_mode(struct msm_sensor_ctrl_t *s_ctrl, int value) { pr_debug("%s %d", __func__, value); switch (value) { case MSM_CAMERA_SCENE_MODE_OFF: { hi256_i2c_write_table(s_ctrl, &HI256_reg_scene_auto[0], ARRAY_SIZE(HI256_reg_scene_auto)); break; } case MSM_CAMERA_SCENE_MODE_NIGHT: { hi256_i2c_write_table(s_ctrl, &HI256_reg_scene_night[0], ARRAY_SIZE(HI256_reg_scene_night)); break; } case MSM_CAMERA_SCENE_MODE_LANDSCAPE: { hi256_i2c_write_table(s_ctrl, &HI256_reg_scene_landscape[0], ARRAY_SIZE(HI256_reg_scene_landscape)); break; } case MSM_CAMERA_SCENE_MODE_PORTRAIT: { hi256_i2c_write_table(s_ctrl, &HI256_reg_scene_portrait[0], ARRAY_SIZE(HI256_reg_scene_portrait)); break; } default: hi256_i2c_write_table(s_ctrl, &HI256_reg_scene_auto[0], ARRAY_SIZE(HI256_reg_scene_auto)); } } static void hi256_set_white_balance_mode(struct msm_sensor_ctrl_t *s_ctrl, int value) { pr_debug("%s %d", __func__, value); switch (value) { case MSM_CAMERA_WB_MODE_AUTO: { hi256_i2c_write_table(s_ctrl, &HI256_reg_wb_auto[0], ARRAY_SIZE(HI256_reg_wb_auto)); break; } case MSM_CAMERA_WB_MODE_INCANDESCENT: { hi256_i2c_write_table(s_ctrl, &HI256_reg_wb_home[0], ARRAY_SIZE(HI256_reg_wb_home)); break; } case MSM_CAMERA_WB_MODE_DAYLIGHT: { hi256_i2c_write_table(s_ctrl, &HI256_reg_wb_sunny[0], 
ARRAY_SIZE(HI256_reg_wb_sunny)); break; } case MSM_CAMERA_WB_MODE_FLUORESCENT: { hi256_i2c_write_table(s_ctrl, &HI256_reg_wb_office[0], ARRAY_SIZE(HI256_reg_wb_office)); break; } case MSM_CAMERA_WB_MODE_CLOUDY_DAYLIGHT: { hi256_i2c_write_table(s_ctrl, &HI256_reg_wb_cloudy[0], ARRAY_SIZE(HI256_reg_wb_cloudy)); break; } default: hi256_i2c_write_table(s_ctrl, &HI256_reg_wb_auto[0], ARRAY_SIZE(HI256_reg_wb_auto)); } } int32_t hi256_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp) { struct sensorb_cfg_data *cdata = (struct sensorb_cfg_data *)argp; long rc = 0; int32_t i = 0; mutex_lock(s_ctrl->msm_sensor_mutex); CDBG("%s:%d %s cfgtype = %d\n", __func__, __LINE__, s_ctrl->sensordata->sensor_name, cdata->cfgtype); switch (cdata->cfgtype) { case CFG_GET_SENSOR_INFO: memcpy(cdata->cfg.sensor_info.sensor_name, s_ctrl->sensordata->sensor_name, sizeof(cdata->cfg.sensor_info.sensor_name)); cdata->cfg.sensor_info.session_id = s_ctrl->sensordata->sensor_info->session_id; for (i = 0; i < SUB_MODULE_MAX; i++) cdata->cfg.sensor_info.subdev_id[i] = s_ctrl->sensordata->sensor_info->subdev_id[i]; CDBG("%s:%d sensor name %s\n", __func__, __LINE__, cdata->cfg.sensor_info.sensor_name); CDBG("%s:%d session id %d\n", __func__, __LINE__, cdata->cfg.sensor_info.session_id); for (i = 0; i < SUB_MODULE_MAX; i++) CDBG("%s:%d subdev_id[%d] %d\n", __func__, __LINE__, i, cdata->cfg.sensor_info.subdev_id[i]); break; case CFG_SET_INIT_SETTING: CDBG("init setting"); hi256_i2c_write_table(s_ctrl, &hi256_recommend_settings[0], ARRAY_SIZE(hi256_recommend_settings)); CDBG("init setting X"); break; case CFG_SET_RESOLUTION: { int val = 0; if (copy_from_user(&val, (void *)cdata->cfg.setting, sizeof(int))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; break; } if (val == 0) hi256_i2c_write_table(s_ctrl, &hi256_uxga_settings[0], ARRAY_SIZE(hi256_uxga_settings)); else if (val == 1) hi256_i2c_write_table(s_ctrl, &hi256_svga_settings[0], ARRAY_SIZE(hi256_svga_settings)); break; } 
case CFG_SET_STOP_STREAM: hi256_i2c_write_table(s_ctrl, &hi256_stop_settings[0], ARRAY_SIZE(hi256_stop_settings)); break; case CFG_SET_START_STREAM: hi256_i2c_write_table(s_ctrl, &hi256_start_settings[0], ARRAY_SIZE(hi256_start_settings)); break; case CFG_GET_SENSOR_INIT_PARAMS: cdata->cfg.sensor_init_params = *s_ctrl->sensordata->sensor_init_params; CDBG("%s:%d init params mode %d pos %d mount %d\n", __func__, __LINE__, cdata->cfg.sensor_init_params.modes_supported, cdata->cfg.sensor_init_params.position, cdata->cfg.sensor_init_params.sensor_mount_angle); break; case CFG_SET_SLAVE_INFO: { struct msm_camera_sensor_slave_info sensor_slave_info; struct msm_sensor_power_setting_array *power_setting_array; int slave_index = 0; if (copy_from_user(&sensor_slave_info, (void *)cdata->cfg.setting, sizeof(struct msm_camera_sensor_slave_info))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; break; } /* Update sensor slave address */ if (sensor_slave_info.slave_addr) { s_ctrl->sensor_i2c_client->cci_client->sid = sensor_slave_info.slave_addr >> 1; } /* Update sensor address type */ s_ctrl->sensor_i2c_client->addr_type = sensor_slave_info.addr_type; /* Update power up / down sequence */ s_ctrl->power_setting_array = sensor_slave_info.power_setting_array; power_setting_array = &s_ctrl->power_setting_array; power_setting_array->power_setting = kzalloc( power_setting_array->size * sizeof(struct msm_sensor_power_setting), GFP_KERNEL); if (!power_setting_array->power_setting) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -ENOMEM; break; } if (copy_from_user(power_setting_array->power_setting, (void *) sensor_slave_info.power_setting_array.power_setting, power_setting_array->size * sizeof(struct msm_sensor_power_setting))) { kfree(power_setting_array->power_setting); pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; break; } s_ctrl->free_power_setting = true; CDBG("%s sensor id %x\n", __func__, sensor_slave_info.slave_addr); CDBG("%s sensor addr type 
%d\n", __func__, sensor_slave_info.addr_type); CDBG("%s sensor reg %x\n", __func__, sensor_slave_info.sensor_id_info.sensor_id_reg_addr); CDBG("%s sensor id %x\n", __func__, sensor_slave_info.sensor_id_info.sensor_id); for (slave_index = 0; slave_index < power_setting_array->size; slave_index++) { CDBG("%s i %d power setting %d %d %ld %d\n", __func__, slave_index, power_setting_array->power_setting[slave_index]. seq_type, power_setting_array->power_setting[slave_index]. seq_val, power_setting_array->power_setting[slave_index]. config_val, power_setting_array->power_setting[slave_index]. delay); } kfree(power_setting_array->power_setting); break; } case CFG_WRITE_I2C_ARRAY: { struct msm_camera_i2c_reg_setting conf_array; struct msm_camera_i2c_reg_array *reg_setting = NULL; if (copy_from_user(&conf_array, (void *)cdata->cfg.setting, sizeof(struct msm_camera_i2c_reg_setting))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; break; } reg_setting = kzalloc(conf_array.size * (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL); if (!reg_setting) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -ENOMEM; break; } if (copy_from_user(reg_setting, (void *)conf_array.reg_setting, conf_array.size * sizeof(struct msm_camera_i2c_reg_array))) { pr_err("%s:%d failed\n", __func__, __LINE__); kfree(reg_setting); rc = -EFAULT; break; } conf_array.reg_setting = reg_setting; rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->i2c_write_table( s_ctrl->sensor_i2c_client, &conf_array); kfree(reg_setting); break; } case CFG_WRITE_I2C_SEQ_ARRAY: { struct msm_camera_i2c_seq_reg_setting conf_array; struct msm_camera_i2c_seq_reg_array *reg_setting = NULL; if (copy_from_user(&conf_array, (void *)cdata->cfg.setting, sizeof(struct msm_camera_i2c_seq_reg_setting))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; break; } reg_setting = kzalloc(conf_array.size * (sizeof(struct msm_camera_i2c_seq_reg_array)), GFP_KERNEL); if (!reg_setting) { pr_err("%s:%d failed\n", 
__func__, __LINE__); rc = -ENOMEM; break; } if (copy_from_user(reg_setting, (void *)conf_array.reg_setting, conf_array.size * sizeof(struct msm_camera_i2c_seq_reg_array))) { pr_err("%s:%d failed\n", __func__, __LINE__); kfree(reg_setting); rc = -EFAULT; break; } conf_array.reg_setting = reg_setting; rc = s_ctrl->sensor_i2c_client->i2c_func_tbl-> i2c_write_seq_table(s_ctrl->sensor_i2c_client, &conf_array); kfree(reg_setting); break; } case CFG_POWER_UP: if (s_ctrl->func_tbl->sensor_power_up) rc = s_ctrl->func_tbl->sensor_power_up(s_ctrl); else rc = -EFAULT; break; case CFG_POWER_DOWN: if (s_ctrl->func_tbl->sensor_power_down) rc = s_ctrl->func_tbl->sensor_power_down(s_ctrl); else rc = -EFAULT; break; case CFG_SET_STOP_STREAM_SETTING: { struct msm_camera_i2c_reg_setting *stop_setting = &s_ctrl->stop_setting; struct msm_camera_i2c_reg_array *reg_setting = NULL; if (copy_from_user(stop_setting, (void *)cdata->cfg.setting, sizeof(struct msm_camera_i2c_reg_setting))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; break; } reg_setting = stop_setting->reg_setting; stop_setting->reg_setting = kzalloc(stop_setting->size * (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL); if (!stop_setting->reg_setting) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -ENOMEM; break; } if (copy_from_user(stop_setting->reg_setting, (void *)reg_setting, stop_setting->size * sizeof(struct msm_camera_i2c_reg_array))) { pr_err("%s:%d failed\n", __func__, __LINE__); kfree(stop_setting->reg_setting); stop_setting->reg_setting = NULL; stop_setting->size = 0; rc = -EFAULT; break; } break; } case CFG_SET_SATURATION: { int32_t sat_lev; if (copy_from_user(&sat_lev, (void *)cdata->cfg.setting, sizeof(int32_t))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; break; } pr_debug("%s: Saturation Value is %d", __func__, sat_lev); hi256_set_stauration(s_ctrl, sat_lev); break; } case CFG_SET_CONTRAST: { int32_t con_lev; if (copy_from_user(&con_lev, (void 
*)cdata->cfg.setting, sizeof(int32_t))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; break; } pr_debug("%s: Contrast Value is %d", __func__, con_lev); hi256_set_contrast(s_ctrl, con_lev); break; } case CFG_SET_SHARPNESS: { int32_t shp_lev; if (copy_from_user(&shp_lev, (void *)cdata->cfg.setting, sizeof(int32_t))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; break; } pr_debug("%s: Sharpness Value is %d", __func__, shp_lev); hi256_set_sharpness(s_ctrl, shp_lev); break; } case CFG_SET_ISO: { int32_t iso_lev; if (copy_from_user(&iso_lev, (void *)cdata->cfg.setting, sizeof(int32_t))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; break; } pr_debug("%s: ISO Value is %d", __func__, iso_lev); hi256_set_iso(s_ctrl, iso_lev); break; } case CFG_SET_EXPOSURE_COMPENSATION: { int32_t ec_lev; if (copy_from_user(&ec_lev, (void *)cdata->cfg.setting, sizeof(int32_t))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; break; } pr_debug("%s: Exposure compensation Value is %d", __func__, ec_lev); hi256_set_exposure_compensation(s_ctrl, ec_lev); break; } case CFG_SET_EFFECT: { int32_t effect_mode; if (copy_from_user(&effect_mode, (void *)cdata->cfg.setting, sizeof(int32_t))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; break; } pr_debug("%s: Effect mode is %d", __func__, effect_mode); hi256_set_effect(s_ctrl, effect_mode); break; } case CFG_SET_ANTIBANDING: { int32_t antibanding_mode; if (copy_from_user(&antibanding_mode, (void *)cdata->cfg.setting, sizeof(int32_t))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; break; } pr_debug("%s: anti-banding mode is %d", __func__, antibanding_mode); hi256_set_antibanding(s_ctrl, antibanding_mode); break; } case CFG_SET_BESTSHOT_MODE: { int32_t bs_mode; if (copy_from_user(&bs_mode, (void *)cdata->cfg.setting, sizeof(int32_t))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; break; } pr_debug("%s: best shot mode is %d", __func__, bs_mode); 
hi256_set_scene_mode(s_ctrl, bs_mode); break; } case CFG_SET_WHITE_BALANCE: { int32_t wb_mode; if (copy_from_user(&wb_mode, (void *)cdata->cfg.setting, sizeof(int32_t))) { pr_err("%s:%d failed\n", __func__, __LINE__); rc = -EFAULT; break; } pr_debug("%s: white balance is %d", __func__, wb_mode); hi256_set_white_balance_mode(s_ctrl, wb_mode); break; } default: rc = -EFAULT; break; } mutex_unlock(s_ctrl->msm_sensor_mutex); return rc; } static struct msm_sensor_fn_t hi256_sensor_func_tbl = { .sensor_config = hi256_sensor_config, .sensor_power_up = msm_sensor_power_up, .sensor_power_down = hi256_sensor_power_down, .sensor_match_id = hi256_sensor_match_id, }; static struct msm_sensor_ctrl_t hi256_s_ctrl = { .sensor_i2c_client = &hi256_sensor_i2c_client, .power_setting_array.power_setting = hi256_power_setting, .power_setting_array.size = ARRAY_SIZE(hi256_power_setting), .msm_sensor_mutex = &hi256_mut, .sensor_v4l2_subdev_info = hi256_subdev_info, .sensor_v4l2_subdev_info_size = ARRAY_SIZE(hi256_subdev_info), .func_tbl = &hi256_sensor_func_tbl, }; module_init(hi256_init_module); module_exit(hi256_exit_module); MODULE_DESCRIPTION("Hi256 2MP YUV sensor driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
somcom3x/kernel_u8800pro
drivers/usb/misc/diag_bridge_test.c
1350
4395
/*
 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Exerciser for the USB host diag bridge: exposes a debugfs node
 * ("diag_test/send_ping") that sends one HDLC-encoded diag ping command
 * over the bridge and hex-dumps whatever comes back.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/crc-ccitt.h>
#include <mach/diag_bridge.h>

#define DRIVER_DESC	"USB host diag bridge driver test"
#define DRIVER_VERSION	"1.0"

#define RD_BUF_SIZE	2048
#define DIAG_TEST_CONNECTED	0

struct diag_test_dev {
	char			*read_buf;	/* RD_BUF_SIZE bytes handed to diag_bridge_read() */
	struct work_struct	read_w;		/* defers the read out of completion context */
	unsigned long		flags;		/* bit DIAG_TEST_CONNECTED (never set here) */
	struct diag_bridge_ops	ops;		/* callbacks registered with the bridge */
};
static struct diag_test_dev *__dev;
static struct dentry *dent;			/* debugfs "diag_test" directory */

/*
 * Read-completion callback from the bridge.
 *
 * Fix: "actual" is size_t, so the original "actual < 0" test was always
 * false and error completions were silently hex-dumped.  Cast to ssize_t
 * so a negative status (smuggled through the unsigned type) is caught.
 */
static void
diag_test_read_complete_cb(void *d, char *buf, size_t size,
		size_t actual)
{
	if ((ssize_t)actual < 0) {
		pr_err("%s: read complete err\n", __func__);
		return;
	}

	print_hex_dump(KERN_INFO, "to_host:", 0, 1, 1, buf, actual, false);
}

/* Work item: clear the buffer and queue a read on the bridge. */
static void diag_test_read_work(struct work_struct *w)
{
	struct diag_test_dev *dev =
		container_of(w, struct diag_test_dev, read_w);

	memset(dev->read_buf, 0, RD_BUF_SIZE);
	diag_bridge_read(dev->read_buf, RD_BUF_SIZE);
}

/*
 * Write-completion callback: release the command buffer allocated in
 * send_ping_cmd() (the original code never freed it, leaking 4 bytes per
 * ping), then start reading the response if anything was sent.
 *
 * NOTE(review): assumes the bridge passes back the same buffer that was
 * given to diag_bridge_write() -- confirm against diag_bridge.c.
 */
static void
diag_test_write_complete_cb(void *d, char *buf, size_t size,
		size_t actual)
{
	struct diag_test_dev *dev = d;

	kfree(buf);

	if (actual > 0)
		schedule_work(&dev->read_w);
}

#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE	1024

/*
 * debugfs write handler: any write to "send_ping" fires one pre-encoded
 * HDLC diag ping command (payload, CRC-CCITT, 0x7E terminator) at the
 * bridge.  The user data itself is ignored.
 */
static ssize_t send_ping_cmd(struct file *file, const char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct diag_test_dev	*dev = __dev;
	unsigned char		*buf;
	int			temp = sizeof(unsigned char) * 4;
	int			ret;

	if (!dev)
		return -ENODEV;

	buf = kmalloc(temp, GFP_KERNEL);
	if (!buf) {
		pr_err("%s: unable to allocate mem for ping cmd\n",
				__func__);
		return -ENOMEM;
	}

	/* hdlc encoded ping command */
	buf[0] = 0x0C;
	buf[1] = 0x14;
	buf[2] = 0x3A;
	buf[3] = 0x7E;

	ret = diag_bridge_write(buf, temp);
	if (ret < 0) {
		/* no completion callback on a synchronous failure: free here */
		pr_err("%s: diag_bridge_write failed: %d\n", __func__, ret);
		kfree(buf);
		return ret;
	}

	return count;
}

const struct file_operations diag_test_ping_ops = {
	.write = send_ping_cmd,
};

/*
 * Create the debugfs directory and the "send_ping" trigger node.
 * On any failure the partial state is torn down and dent reset so
 * diag_test_remove() does not operate on a stale dentry.
 */
static void diag_test_debug_init(void)
{
	struct dentry *dfile;

	dent = debugfs_create_dir("diag_test", NULL);
	if (!dent || IS_ERR(dent)) {
		dent = NULL;
		return;
	}

	/*
	 * The node only implements ->write; the original mode 0444
	 * advertised a file that could not be read and (for non-root)
	 * not written either.  Publish it write-only instead.
	 */
	dfile = debugfs_create_file("send_ping", 0200, dent, NULL,
			&diag_test_ping_ops);
	if (!dfile || IS_ERR(dfile)) {
		debugfs_remove(dent);
		dent = NULL;
	}
}
#else
static void diag_test_debug_init(void) { }
#endif

/* Platform remove: close the bridge and tear down debugfs. */
static int diag_test_remove(struct platform_device *pdev)
{
	diag_bridge_close();

	if (dent) {
		debugfs_remove_recursive(dent);
		dent = NULL;
	}

	return 0;
}

/* Platform probe: open the bridge; only expose debugfs if that worked. */
static int diag_test_probe(struct platform_device *pdev)
{
	struct diag_test_dev *dev = __dev;
	int ret;

	pr_info("%s:\n", __func__);

	ret = diag_bridge_open(&dev->ops);
	if (ret) {
		pr_err("diag open failed: %d", ret);
		return ret;
	}

	diag_test_debug_init();

	return 0;
}

static struct platform_driver diag_test = {
	.remove = diag_test_remove,
	.probe = diag_test_probe,
	.driver = {
		.name = "diag_bridge",
		.owner = THIS_MODULE,
	},
};

/*
 * Module init: allocate the device state, wire up the bridge callbacks
 * and register the platform driver.  Unlike the original, all allocations
 * are released if platform_driver_register() fails.
 */
static int __init diag_test_init(void)
{
	struct diag_test_dev *dev;
	int ret;

	pr_info("%s\n", __func__);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	__dev = dev;

	dev->ops.read_complete_cb = diag_test_read_complete_cb;
	dev->ops.write_complete_cb = diag_test_write_complete_cb;
	dev->read_buf = kmalloc(RD_BUF_SIZE, GFP_KERNEL);
	if (!dev->read_buf) {
		pr_err("%s: unable to allocate read buffer\n", __func__);
		kfree(dev);
		__dev = NULL;
		return -ENOMEM;
	}
	dev->ops.ctxt = dev;
	INIT_WORK(&dev->read_w, diag_test_read_work);

	ret = platform_driver_register(&diag_test);
	if (ret) {
		pr_err("%s: platform driver %s register failed %d\n",
				__func__, diag_test.driver.name, ret);
		kfree(dev->read_buf);
		kfree(dev);
		__dev = NULL;
	}

	return ret;
}

static void __exit diag_test_exit(void)
{
	struct diag_test_dev *dev = __dev;

	pr_info("%s:\n", __func__);

	/*
	 * Fix: the original never unregistered the platform driver on
	 * unload, leaving a registered driver pointing at freed code/data.
	 * Unregistering also runs diag_test_remove() (bridge close +
	 * debugfs teardown) if the device is bound.
	 */
	platform_driver_unregister(&diag_test);

	/* make sure no read work is still about to touch read_buf */
	cancel_work_sync(&dev->read_w);

	/*
	 * NOTE(review): nothing in this file ever sets DIAG_TEST_CONNECTED,
	 * so this close is effectively dead code; the bridge is closed via
	 * diag_test_remove() above.
	 */
	if (test_bit(DIAG_TEST_CONNECTED, &dev->flags))
		diag_bridge_close();

	kfree(dev->read_buf);
	kfree(dev);
}

module_init(diag_test_init);
module_exit(diag_test_exit);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
gpl-2.0
showp1984/bricked-pyramid-3.0
arch/arm/mach-ux500/platsmp.c
2118
4602
/*
 * Copyright (C) 2002 ARM Ltd.
 * Copyright (C) 2008 STMicroelctronics.
 * Copyright (C) 2009 ST-Ericsson.
 * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
 *
 * This file is based on arm realview platform
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * ux500 (U5500/U8500) SMP bring-up: wakes the secondary core via a
 * jump-address + magic-number handshake in backup RAM, then releases it
 * from the "holding pen" through the pen_release variable.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/hardware/gic.h>
#include <asm/smp_scu.h>
#include <mach/hardware.h>
#include <mach/setup.h>

/* This is called from headsmp.S to wakeup the secondary core */
extern void u8500_secondary_startup(void);

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 *
 * volatile because the secondary spins on it (from headsmp.S) before the
 * normal coherency machinery is up; see write_pen_release() below for the
 * explicit cache maintenance that publishes updates.
 */
volatile int pen_release = -1;

/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not.  This is necessary for the hotplug code to work reliably.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
	/* flush L1 then clean L2 so a non-coherent observer sees the store */
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
}

/*
 * Return the (virtual) SCU base for the detected SoC; the NULL return is
 * unreachable in practice because ux500_unknown_soc() does not return,
 * it only silences the compiler.
 */
static void __iomem *scu_base_addr(void)
{
	if (cpu_is_u5500())
		return __io_address(U5500_SCU_BASE);
	else if (cpu_is_u8500())
		return __io_address(U8500_SCU_BASE);
	else
		ux500_unknown_soc();

	return NULL;
}

/* serialises primary/secondary during the boot handshake */
static DEFINE_SPINLOCK(boot_lock);

/* Runs on the secondary core once it leaves the holding pen. */
void __cpuinit platform_secondary_init(unsigned int cpu)
{
	/*
	 * if any interrupts are already enabled for the primary
	 * core (e.g. timer irq), then they will not have been enabled
	 * for us: do so
	 */
	gic_secondary_init(0);

	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	write_pen_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

/* Runs on the boot CPU: release "cpu" from the pen and wait for its ack. */
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 */
	write_pen_release(cpu);

	gic_raise_softirq(cpumask_of(cpu), 1);

	/*
	 * Busy-wait up to 1s for the secondary to clear pen_release.
	 * NOTE(review): the loop body has no cpu_relax()/smp_rmb() as later
	 * mainline versions of this pattern do -- worth confirming against
	 * upstream arch/arm platsmp implementations.
	 */
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		if (pen_release == -1)
			break;
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	/* still pending in the pen means the secondary never woke: -ENOSYS */
	return pen_release != -1 ? -ENOSYS : 0;
}

/* Post the startup address + magic to backup RAM and drain write buffers. */
static void __init wakeup_secondary(void)
{
	void __iomem *backupram;

	if (cpu_is_u5500())
		backupram = __io_address(U5500_BACKUPRAM0_BASE);
	else if (cpu_is_u8500())
		backupram = __io_address(U8500_BACKUPRAM0_BASE);
	else
		ux500_unknown_soc();

	/*
	 * write the address of secondary startup into the backup ram register
	 * at offset 0x1FF4, then write the magic number 0xA1FEED01 to the
	 * backup ram register at offset 0x1FF0, which is what boot rom code
	 * is waiting for. This would wake up the secondary core from WFE
	 */
#define UX500_CPU1_JUMPADDR_OFFSET 0x1FF4
	__raw_writel(virt_to_phys(u8500_secondary_startup),
		backupram + UX500_CPU1_JUMPADDR_OFFSET);

#define UX500_CPU1_WAKEMAGIC_OFFSET 0x1FF0
	__raw_writel(0xA1FEED01,
		backupram + UX500_CPU1_WAKEMAGIC_OFFSET);

	/* make sure write buffer is drained */
	mb();
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int i, ncores;

	/* fall back to one core if the SCU could not be located */
	ncores = scu_base ? scu_get_core_count(scu_base) : 1;

	/* sanity check */
	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "U8500: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n",
		       ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}

/* Mark up to max_cpus present, enable the SCU, and kick the secondary. */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	scu_enable(scu_base_addr());
	wakeup_secondary();
}
gpl-2.0
lirokoa/htc_pico_kernel
drivers/staging/speakup/synth.c
2374
10781
#include <linux/types.h> #include <linux/ctype.h> /* for isdigit() and friends */ #include <linux/fs.h> #include <linux/mm.h> /* for verify_area */ #include <linux/errno.h> /* for -EBUSY */ #include <linux/ioport.h> /* for check_region, request_region */ #include <linux/interrupt.h> #include <linux/delay.h> /* for loops_per_sec */ #include <linux/kmod.h> #include <linux/jiffies.h> #include <linux/uaccess.h> /* for copy_from_user */ #include <linux/sched.h> #include <linux/timer.h> #include <linux/kthread.h> #include "spk_priv.h" #include "speakup.h" #include "serialio.h" #define MAXSYNTHS 16 /* Max number of synths in array. */ static struct spk_synth *synths[MAXSYNTHS]; struct spk_synth *synth; char pitch_buff[32] = ""; static int module_status; int quiet_boot; struct speakup_info_t speakup_info = { .spinlock = __SPIN_LOCK_UNLOCKED(speakup_info.spinlock), .flushing = 0, }; EXPORT_SYMBOL_GPL(speakup_info); static int do_synth_init(struct spk_synth *in_synth); int serial_synth_probe(struct spk_synth *synth) { struct serial_state *ser; int failed = 0; if ((synth->ser >= SPK_LO_TTY) && (synth->ser <= SPK_HI_TTY)) { ser = spk_serial_init(synth->ser); if (ser == NULL) { failed = -1; } else { outb_p(0, ser->port); mdelay(1); outb_p('\r', ser->port); } } else { failed = -1; pr_warn("ttyS%i is an invalid port\n", synth->ser); } if (failed) { pr_info("%s: not found\n", synth->long_name); return -ENODEV; } pr_info("%s: ttyS%i, Driver Version %s\n", synth->long_name, synth->ser, synth->version); synth->alive = 1; return 0; } EXPORT_SYMBOL_GPL(serial_synth_probe); /* Main loop of the progression thread: keep eating from the buffer * and push to the serial port, waiting as needed * * For devices that have a "full" notification mecanism, the driver can * adapt the loop the way they prefer. 
*/ void spk_do_catch_up(struct spk_synth *synth) { u_char ch; unsigned long flags; unsigned long jiff_max; struct var_t *delay_time; struct var_t *full_time; struct var_t *jiffy_delta; int jiffy_delta_val; int delay_time_val; int full_time_val; jiffy_delta = get_var(JIFFY); full_time = get_var(FULL); delay_time = get_var(DELAY); spk_lock(flags); jiffy_delta_val = jiffy_delta->u.n.value; spk_unlock(flags); jiff_max = jiffies + jiffy_delta_val; while (!kthread_should_stop()) { spk_lock(flags); if (speakup_info.flushing) { speakup_info.flushing = 0; spk_unlock(flags); synth->flush(synth); continue; } if (synth_buffer_empty()) { spk_unlock(flags); break; } ch = synth_buffer_peek(); set_current_state(TASK_INTERRUPTIBLE); full_time_val = full_time->u.n.value; spk_unlock(flags); if (ch == '\n') ch = synth->procspeech; if (!spk_serial_out(ch)) { schedule_timeout(msecs_to_jiffies(full_time_val)); continue; } if ((jiffies >= jiff_max) && (ch == SPACE)) { spk_lock(flags); jiffy_delta_val = jiffy_delta->u.n.value; delay_time_val = delay_time->u.n.value; full_time_val = full_time->u.n.value; spk_unlock(flags); if (spk_serial_out(synth->procspeech)) schedule_timeout( msecs_to_jiffies(delay_time_val)); else schedule_timeout( msecs_to_jiffies(full_time_val)); jiff_max = jiffies + jiffy_delta_val; } set_current_state(TASK_RUNNING); spk_lock(flags); synth_buffer_getc(); spk_unlock(flags); } spk_serial_out(synth->procspeech); } EXPORT_SYMBOL_GPL(spk_do_catch_up); const char *spk_synth_immediate(struct spk_synth *synth, const char *buff) { u_char ch; while ((ch = *buff)) { if (ch == '\n') ch = synth->procspeech; if (wait_for_xmitr()) outb(ch, speakup_info.port_tts); else return buff; buff++; } return 0; } EXPORT_SYMBOL_GPL(spk_synth_immediate); void spk_synth_flush(struct spk_synth *synth) { spk_serial_out(synth->clear); } EXPORT_SYMBOL_GPL(spk_synth_flush); int spk_synth_is_alive_nop(struct spk_synth *synth) { synth->alive = 1; return 1; } EXPORT_SYMBOL_GPL(spk_synth_is_alive_nop); 
int spk_synth_is_alive_restart(struct spk_synth *synth) { if (synth->alive) return 1; if (!synth->alive && wait_for_xmitr() > 0) { /* restart */ synth->alive = 1; synth_printf("%s", synth->init); return 2; /* reenabled */ } pr_warn("%s: can't restart synth\n", synth->long_name); return 0; } EXPORT_SYMBOL_GPL(spk_synth_is_alive_restart); static void thread_wake_up(u_long data) { wake_up_interruptible_all(&speakup_event); } static DEFINE_TIMER(thread_timer, thread_wake_up, 0, 0); void synth_start(void) { struct var_t *trigger_time; if (!synth->alive) { synth_buffer_clear(); return; } trigger_time = get_var(TRIGGER); if (!timer_pending(&thread_timer)) mod_timer(&thread_timer, jiffies + msecs_to_jiffies(trigger_time->u.n.value)); } void do_flush(void) { speakup_info.flushing = 1; synth_buffer_clear(); if (synth->alive) { if (pitch_shift) { synth_printf("%s", pitch_buff); pitch_shift = 0; } } wake_up_interruptible_all(&speakup_event); wake_up_process(speakup_task); } void synth_write(const char *buf, size_t count) { while (count--) synth_buffer_add(*buf++); synth_start(); } void synth_printf(const char *fmt, ...) 
{ va_list args; unsigned char buf[160], *p; int r; va_start(args, fmt); r = vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); if (r > sizeof(buf) - 1) r = sizeof(buf) - 1; p = buf; while (r--) synth_buffer_add(*p++); synth_start(); } EXPORT_SYMBOL_GPL(synth_printf); static int index_count; static int sentence_count; void reset_index_count(int sc) { static int first = 1; if (first) first = 0; else synth->get_index(); index_count = 0; sentence_count = sc; } int synth_supports_indexing(void) { if (synth->get_index != NULL) return 1; return 0; } void synth_insert_next_index(int sent_num) { int out; if (synth->alive) { if (sent_num == 0) { synth->indexing.currindex++; index_count++; if (synth->indexing.currindex > synth->indexing.highindex) synth->indexing.currindex = synth->indexing.lowindex; } out = synth->indexing.currindex * 10 + sent_num; synth_printf(synth->indexing.command, out, out); } } void get_index_count(int *linecount, int *sentcount) { int ind = synth->get_index(); if (ind) { sentence_count = ind % 10; if ((ind / 10) <= synth->indexing.currindex) index_count = synth->indexing.currindex-(ind/10); else index_count = synth->indexing.currindex -synth->indexing.lowindex + synth->indexing.highindex-(ind/10)+1; } *sentcount = sentence_count; *linecount = index_count; } static struct resource synth_res; int synth_request_region(unsigned long start, unsigned long n) { struct resource *parent = &ioport_resource; memset(&synth_res, 0, sizeof(synth_res)); synth_res.name = synth->name; synth_res.start = start; synth_res.end = start + n - 1; synth_res.flags = IORESOURCE_BUSY; return request_resource(parent, &synth_res); } EXPORT_SYMBOL_GPL(synth_request_region); int synth_release_region(unsigned long start, unsigned long n) { return release_resource(&synth_res); } EXPORT_SYMBOL_GPL(synth_release_region); struct var_t synth_time_vars[] = { { DELAY, .u.n = {NULL, 100, 100, 2000, 0, 0, NULL } }, { TRIGGER, .u.n = {NULL, 20, 10, 2000, 0, 0, NULL } }, { JIFFY, .u.n = 
{NULL, 50, 20, 200, 0, 0, NULL } }, { FULL, .u.n = {NULL, 400, 200, 60000, 0, 0, NULL } }, V_LAST_VAR }; /* called by: speakup_init() */ int synth_init(char *synth_name) { int i; int ret = 0; struct spk_synth *synth = NULL; if (synth_name == NULL) return 0; if (strcmp(synth_name, "none") == 0) { mutex_lock(&spk_mutex); synth_release(); mutex_unlock(&spk_mutex); return 0; } mutex_lock(&spk_mutex); /* First, check if we already have it loaded. */ for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++) if (strcmp(synths[i]->name, synth_name) == 0) synth = synths[i]; /* If we got one, initialize it now. */ if (synth) ret = do_synth_init(synth); else ret = -ENODEV; mutex_unlock(&spk_mutex); return ret; } /* called by: synth_add() */ static int do_synth_init(struct spk_synth *in_synth) { struct var_t *var; synth_release(); if (in_synth->checkval != SYNTH_CHECK) return -EINVAL; synth = in_synth; synth->alive = 0; pr_warn("synth probe\n"); if (synth->probe(synth) < 0) { pr_warn("%s: device probe failed\n", in_synth->name); synth = NULL; return -ENODEV; } synth_time_vars[0].u.n.value = synth_time_vars[0].u.n.default_val = synth->delay; synth_time_vars[1].u.n.value = synth_time_vars[1].u.n.default_val = synth->trigger; synth_time_vars[2].u.n.value = synth_time_vars[2].u.n.default_val = synth->jiffies; synth_time_vars[3].u.n.value = synth_time_vars[3].u.n.default_val = synth->full; synth_printf("%s", synth->init); for (var = synth->vars; (var->var_id >= 0) && (var->var_id < MAXVARS); var++) speakup_register_var(var); if (!quiet_boot) synth_printf("%s found\n", synth->long_name); if (synth->attributes.name && sysfs_create_group(speakup_kobj, &(synth->attributes)) < 0) return -ENOMEM; synth_flags = synth->flags; wake_up_interruptible_all(&speakup_event); if (speakup_task) wake_up_process(speakup_task); return 0; } void synth_release(void) { struct var_t *var; unsigned long flags; if (synth == NULL) return; spk_lock(flags); pr_info("releasing synth %s\n", synth->name); synth->alive 
= 0; del_timer(&thread_timer); spk_unlock(flags); if (synth->attributes.name) sysfs_remove_group(speakup_kobj, &(synth->attributes)); for (var = synth->vars; var->var_id != MAXVARS; var++) speakup_unregister_var(var->var_id); stop_serial_interrupt(); synth->release(); synth = NULL; } /* called by: all_driver_init() */ int synth_add(struct spk_synth *in_synth) { int i; int status = 0; mutex_lock(&spk_mutex); for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++) /* synth_remove() is responsible for rotating the array down */ if (in_synth == synths[i]) { mutex_unlock(&spk_mutex); return 0; } if (i == MAXSYNTHS) { pr_warn("Error: attempting to add a synth past end of array\n"); mutex_unlock(&spk_mutex); return -1; } synths[i++] = in_synth; synths[i] = NULL; if (in_synth->startup) status = do_synth_init(in_synth); mutex_unlock(&spk_mutex); return status; } EXPORT_SYMBOL_GPL(synth_add); void synth_remove(struct spk_synth *in_synth) { int i; mutex_lock(&spk_mutex); if (synth == in_synth) synth_release(); for (i = 0; synths[i] != NULL; i++) { if (in_synth == synths[i]) break; } for ( ; synths[i] != NULL; i++) /* compress table */ synths[i] = synths[i+1]; module_status = 0; mutex_unlock(&spk_mutex); } EXPORT_SYMBOL_GPL(synth_remove); short punc_masks[] = { 0, SOME, MOST, PUNC, PUNC|B_SYM };
gpl-2.0
houst0nn/android_kernel_lge_g3
drivers/net/wireless/ath/ath9k/eeprom.c
4934
14207
/* * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "hw.h" static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz) { if (fbin == AR5416_BCHAN_UNUSED) return fbin; return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin)); } void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val) { REG_WRITE(ah, reg, val); if (ah->config.analog_shiftreg) udelay(100); } void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask, u32 shift, u32 val) { u32 regVal; regVal = REG_READ(ah, reg) & ~mask; regVal |= (val << shift) & mask; REG_WRITE(ah, reg, regVal); if (ah->config.analog_shiftreg) udelay(100); } int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight, int16_t targetLeft, int16_t targetRight) { int16_t rv; if (srcRight == srcLeft) { rv = targetLeft; } else { rv = (int16_t) (((target - srcLeft) * targetRight + (srcRight - target) * targetLeft) / (srcRight - srcLeft)); } return rv; } bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize, u16 *indexL, u16 *indexR) { u16 i; if (target <= pList[0]) { *indexL = *indexR = 0; return true; } if (target >= pList[listSize - 1]) { *indexL = *indexR = (u16) (listSize - 1); return true; } for (i = 0; i < listSize - 1; i++) { if (pList[i] == target) { *indexL = 
*indexR = i; return true; } if (target < pList[i + 1]) { *indexL = i; *indexR = (u16) (i + 1); return false; } } return false; } void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data, int eep_start_loc, int size) { int i = 0, j, addr; u32 addrdata[8]; u32 data[8]; for (addr = 0; addr < size; addr++) { addrdata[i] = AR5416_EEPROM_OFFSET + ((addr + eep_start_loc) << AR5416_EEPROM_S); i++; if (i == 8) { REG_READ_MULTI(ah, addrdata, data, i); for (j = 0; j < i; j++) { *eep_data = data[j]; eep_data++; } i = 0; } } if (i != 0) { REG_READ_MULTI(ah, addrdata, data, i); for (j = 0; j < i; j++) { *eep_data = data[j]; eep_data++; } } } bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data) { return common->bus_ops->eeprom_read(common, off, data); } void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList, u8 *pVpdList, u16 numIntercepts, u8 *pRetVpdList) { u16 i, k; u8 currPwr = pwrMin; u16 idxL = 0, idxR = 0; for (i = 0; i <= (pwrMax - pwrMin) / 2; i++) { ath9k_hw_get_lower_upper_index(currPwr, pPwrList, numIntercepts, &(idxL), &(idxR)); if (idxR < 1) idxR = 1; if (idxL == numIntercepts - 1) idxL = (u16) (numIntercepts - 2); if (pPwrList[idxL] == pPwrList[idxR]) k = pVpdList[idxL]; else k = (u16)(((currPwr - pPwrList[idxL]) * pVpdList[idxR] + (pPwrList[idxR] - currPwr) * pVpdList[idxL]) / (pPwrList[idxR] - pPwrList[idxL])); pRetVpdList[i] = (u8) k; currPwr += 2; } } void ath9k_hw_get_legacy_target_powers(struct ath_hw *ah, struct ath9k_channel *chan, struct cal_target_power_leg *powInfo, u16 numChannels, struct cal_target_power_leg *pNewPower, u16 numRates, bool isExtTarget) { struct chan_centers centers; u16 clo, chi; int i; int matchIndex = -1, lowIndex = -1; u16 freq; ath9k_hw_get_channel_centers(ah, chan, &centers); freq = (isExtTarget) ? 
centers.ext_center : centers.ctl_center; if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel, IS_CHAN_2GHZ(chan))) { matchIndex = 0; } else { for (i = 0; (i < numChannels) && (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) { if (freq == ath9k_hw_fbin2freq(powInfo[i].bChannel, IS_CHAN_2GHZ(chan))) { matchIndex = i; break; } else if (freq < ath9k_hw_fbin2freq(powInfo[i].bChannel, IS_CHAN_2GHZ(chan)) && i > 0 && freq > ath9k_hw_fbin2freq(powInfo[i - 1].bChannel, IS_CHAN_2GHZ(chan))) { lowIndex = i - 1; break; } } if ((matchIndex == -1) && (lowIndex == -1)) matchIndex = i - 1; } if (matchIndex != -1) { *pNewPower = powInfo[matchIndex]; } else { clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel, IS_CHAN_2GHZ(chan)); chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel, IS_CHAN_2GHZ(chan)); for (i = 0; i < numRates; i++) { pNewPower->tPow2x[i] = (u8)ath9k_hw_interpolate(freq, clo, chi, powInfo[lowIndex].tPow2x[i], powInfo[lowIndex + 1].tPow2x[i]); } } } void ath9k_hw_get_target_powers(struct ath_hw *ah, struct ath9k_channel *chan, struct cal_target_power_ht *powInfo, u16 numChannels, struct cal_target_power_ht *pNewPower, u16 numRates, bool isHt40Target) { struct chan_centers centers; u16 clo, chi; int i; int matchIndex = -1, lowIndex = -1; u16 freq; ath9k_hw_get_channel_centers(ah, chan, &centers); freq = isHt40Target ? 
centers.synth_center : centers.ctl_center; if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel, IS_CHAN_2GHZ(chan))) { matchIndex = 0; } else { for (i = 0; (i < numChannels) && (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) { if (freq == ath9k_hw_fbin2freq(powInfo[i].bChannel, IS_CHAN_2GHZ(chan))) { matchIndex = i; break; } else if (freq < ath9k_hw_fbin2freq(powInfo[i].bChannel, IS_CHAN_2GHZ(chan)) && i > 0 && freq > ath9k_hw_fbin2freq(powInfo[i - 1].bChannel, IS_CHAN_2GHZ(chan))) { lowIndex = i - 1; break; } } if ((matchIndex == -1) && (lowIndex == -1)) matchIndex = i - 1; } if (matchIndex != -1) { *pNewPower = powInfo[matchIndex]; } else { clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel, IS_CHAN_2GHZ(chan)); chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel, IS_CHAN_2GHZ(chan)); for (i = 0; i < numRates; i++) { pNewPower->tPow2x[i] = (u8)ath9k_hw_interpolate(freq, clo, chi, powInfo[lowIndex].tPow2x[i], powInfo[lowIndex + 1].tPow2x[i]); } } } u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower, bool is2GHz, int num_band_edges) { u16 twiceMaxEdgePower = MAX_RATE_POWER; int i; for (i = 0; (i < num_band_edges) && (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) { if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz)) { twiceMaxEdgePower = CTL_EDGE_TPOWER(pRdEdgesPower[i].ctl); break; } else if ((i > 0) && (freq < ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz))) { if (ath9k_hw_fbin2freq(pRdEdgesPower[i - 1].bChannel, is2GHz) < freq && CTL_EDGE_FLAGS(pRdEdgesPower[i - 1].ctl)) { twiceMaxEdgePower = CTL_EDGE_TPOWER(pRdEdgesPower[i - 1].ctl); } break; } } return twiceMaxEdgePower; } void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); switch (ar5416_get_ntxchains(ah->txchainmask)) { case 1: break; case 2: regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN; break; case 3: 
regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN; break; default: ath_dbg(common, EEPROM, "Invalid chainmask configuration\n"); break; } } void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah, struct ath9k_channel *chan, void *pRawDataSet, u8 *bChans, u16 availPiers, u16 tPdGainOverlap, u16 *pPdGainBoundaries, u8 *pPDADCValues, u16 numXpdGains) { int i, j, k; int16_t ss; u16 idxL = 0, idxR = 0, numPiers; static u8 vpdTableL[AR5416_NUM_PD_GAINS] [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; static u8 vpdTableR[AR5416_NUM_PD_GAINS] [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; static u8 vpdTableI[AR5416_NUM_PD_GAINS] [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR; u8 minPwrT4[AR5416_NUM_PD_GAINS]; u8 maxPwrT4[AR5416_NUM_PD_GAINS]; int16_t vpdStep; int16_t tmpVal; u16 sizeCurrVpdTable, maxIndex, tgtIndex; bool match; int16_t minDelta = 0; struct chan_centers centers; int pdgain_boundary_default; struct cal_data_per_freq *data_def = pRawDataSet; struct cal_data_per_freq_4k *data_4k = pRawDataSet; struct cal_data_per_freq_ar9287 *data_9287 = pRawDataSet; bool eeprom_4k = AR_SREV_9285(ah) || AR_SREV_9271(ah); int intercepts; if (AR_SREV_9287(ah)) intercepts = AR9287_PD_GAIN_ICEPTS; else intercepts = AR5416_PD_GAIN_ICEPTS; memset(&minPwrT4, 0, AR5416_NUM_PD_GAINS); ath9k_hw_get_channel_centers(ah, chan, &centers); for (numPiers = 0; numPiers < availPiers; numPiers++) { if (bChans[numPiers] == AR5416_BCHAN_UNUSED) break; } match = ath9k_hw_get_lower_upper_index((u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)), bChans, numPiers, &idxL, &idxR); if (match) { if (AR_SREV_9287(ah)) { /* FIXME: array overrun? 
*/ for (i = 0; i < numXpdGains; i++) { minPwrT4[i] = data_9287[idxL].pwrPdg[i][0]; maxPwrT4[i] = data_9287[idxL].pwrPdg[i][4]; ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], data_9287[idxL].pwrPdg[i], data_9287[idxL].vpdPdg[i], intercepts, vpdTableI[i]); } } else if (eeprom_4k) { for (i = 0; i < numXpdGains; i++) { minPwrT4[i] = data_4k[idxL].pwrPdg[i][0]; maxPwrT4[i] = data_4k[idxL].pwrPdg[i][4]; ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], data_4k[idxL].pwrPdg[i], data_4k[idxL].vpdPdg[i], intercepts, vpdTableI[i]); } } else { for (i = 0; i < numXpdGains; i++) { minPwrT4[i] = data_def[idxL].pwrPdg[i][0]; maxPwrT4[i] = data_def[idxL].pwrPdg[i][4]; ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], data_def[idxL].pwrPdg[i], data_def[idxL].vpdPdg[i], intercepts, vpdTableI[i]); } } } else { for (i = 0; i < numXpdGains; i++) { if (AR_SREV_9287(ah)) { pVpdL = data_9287[idxL].vpdPdg[i]; pPwrL = data_9287[idxL].pwrPdg[i]; pVpdR = data_9287[idxR].vpdPdg[i]; pPwrR = data_9287[idxR].pwrPdg[i]; } else if (eeprom_4k) { pVpdL = data_4k[idxL].vpdPdg[i]; pPwrL = data_4k[idxL].pwrPdg[i]; pVpdR = data_4k[idxR].vpdPdg[i]; pPwrR = data_4k[idxR].pwrPdg[i]; } else { pVpdL = data_def[idxL].vpdPdg[i]; pPwrL = data_def[idxL].pwrPdg[i]; pVpdR = data_def[idxR].vpdPdg[i]; pPwrR = data_def[idxR].pwrPdg[i]; } minPwrT4[i] = max(pPwrL[0], pPwrR[0]); maxPwrT4[i] = min(pPwrL[intercepts - 1], pPwrR[intercepts - 1]); ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], pPwrL, pVpdL, intercepts, vpdTableL[i]); ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], pPwrR, pVpdR, intercepts, vpdTableR[i]); for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) { vpdTableI[i][j] = (u8)(ath9k_hw_interpolate((u16) FREQ2FBIN(centers. 
synth_center, IS_CHAN_2GHZ (chan)), bChans[idxL], bChans[idxR], vpdTableL[i][j], vpdTableR[i][j])); } } } k = 0; for (i = 0; i < numXpdGains; i++) { if (i == (numXpdGains - 1)) pPdGainBoundaries[i] = (u16)(maxPwrT4[i] / 2); else pPdGainBoundaries[i] = (u16)((maxPwrT4[i] + minPwrT4[i + 1]) / 4); pPdGainBoundaries[i] = min((u16)MAX_RATE_POWER, pPdGainBoundaries[i]); minDelta = 0; if (i == 0) { if (AR_SREV_9280_20_OR_LATER(ah)) ss = (int16_t)(0 - (minPwrT4[i] / 2)); else ss = 0; } else { ss = (int16_t)((pPdGainBoundaries[i - 1] - (minPwrT4[i] / 2)) - tPdGainOverlap + 1 + minDelta); } vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]); vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep); while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) { tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep); pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal); ss++; } sizeCurrVpdTable = (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1); tgtIndex = (u8)(pPdGainBoundaries[i] + tPdGainOverlap - (minPwrT4[i] / 2)); maxIndex = (tgtIndex < sizeCurrVpdTable) ? tgtIndex : sizeCurrVpdTable; while ((ss < maxIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1))) { pPDADCValues[k++] = vpdTableI[i][ss++]; } vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] - vpdTableI[i][sizeCurrVpdTable - 2]); vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep); if (tgtIndex >= maxIndex) { while ((ss <= tgtIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1))) { tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] + (ss - maxIndex + 1) * vpdStep)); pPDADCValues[k++] = (u8)((tmpVal > 255) ? 
255 : tmpVal); ss++; } } } if (eeprom_4k) pdgain_boundary_default = 58; else pdgain_boundary_default = pPdGainBoundaries[i - 1]; while (i < AR5416_PD_GAINS_IN_MASK) { pPdGainBoundaries[i] = pdgain_boundary_default; i++; } while (k < AR5416_NUM_PDADC_VALUES) { pPDADCValues[k] = pPDADCValues[k - 1]; k++; } } int ath9k_hw_eeprom_init(struct ath_hw *ah) { int status; if (AR_SREV_9300_20_OR_LATER(ah)) ah->eep_ops = &eep_ar9300_ops; else if (AR_SREV_9287(ah)) { ah->eep_ops = &eep_ar9287_ops; } else if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) { ah->eep_ops = &eep_4k_ops; } else { ah->eep_ops = &eep_def_ops; } if (!ah->eep_ops->fill_eeprom(ah)) return -EIO; status = ah->eep_ops->check_eeprom(ah); return status; }
gpl-2.0
Eliminater74/android_kernel_lge_g3-2
drivers/net/wireless/ath/ath9k/eeprom.c
4934
14207
/* * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "hw.h" static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz) { if (fbin == AR5416_BCHAN_UNUSED) return fbin; return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin)); } void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val) { REG_WRITE(ah, reg, val); if (ah->config.analog_shiftreg) udelay(100); } void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask, u32 shift, u32 val) { u32 regVal; regVal = REG_READ(ah, reg) & ~mask; regVal |= (val << shift) & mask; REG_WRITE(ah, reg, regVal); if (ah->config.analog_shiftreg) udelay(100); } int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight, int16_t targetLeft, int16_t targetRight) { int16_t rv; if (srcRight == srcLeft) { rv = targetLeft; } else { rv = (int16_t) (((target - srcLeft) * targetRight + (srcRight - target) * targetLeft) / (srcRight - srcLeft)); } return rv; } bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize, u16 *indexL, u16 *indexR) { u16 i; if (target <= pList[0]) { *indexL = *indexR = 0; return true; } if (target >= pList[listSize - 1]) { *indexL = *indexR = (u16) (listSize - 1); return true; } for (i = 0; i < listSize - 1; i++) { if (pList[i] == target) { *indexL = 
*indexR = i; return true; } if (target < pList[i + 1]) { *indexL = i; *indexR = (u16) (i + 1); return false; } } return false; } void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data, int eep_start_loc, int size) { int i = 0, j, addr; u32 addrdata[8]; u32 data[8]; for (addr = 0; addr < size; addr++) { addrdata[i] = AR5416_EEPROM_OFFSET + ((addr + eep_start_loc) << AR5416_EEPROM_S); i++; if (i == 8) { REG_READ_MULTI(ah, addrdata, data, i); for (j = 0; j < i; j++) { *eep_data = data[j]; eep_data++; } i = 0; } } if (i != 0) { REG_READ_MULTI(ah, addrdata, data, i); for (j = 0; j < i; j++) { *eep_data = data[j]; eep_data++; } } } bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data) { return common->bus_ops->eeprom_read(common, off, data); } void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList, u8 *pVpdList, u16 numIntercepts, u8 *pRetVpdList) { u16 i, k; u8 currPwr = pwrMin; u16 idxL = 0, idxR = 0; for (i = 0; i <= (pwrMax - pwrMin) / 2; i++) { ath9k_hw_get_lower_upper_index(currPwr, pPwrList, numIntercepts, &(idxL), &(idxR)); if (idxR < 1) idxR = 1; if (idxL == numIntercepts - 1) idxL = (u16) (numIntercepts - 2); if (pPwrList[idxL] == pPwrList[idxR]) k = pVpdList[idxL]; else k = (u16)(((currPwr - pPwrList[idxL]) * pVpdList[idxR] + (pPwrList[idxR] - currPwr) * pVpdList[idxL]) / (pPwrList[idxR] - pPwrList[idxL])); pRetVpdList[i] = (u8) k; currPwr += 2; } } void ath9k_hw_get_legacy_target_powers(struct ath_hw *ah, struct ath9k_channel *chan, struct cal_target_power_leg *powInfo, u16 numChannels, struct cal_target_power_leg *pNewPower, u16 numRates, bool isExtTarget) { struct chan_centers centers; u16 clo, chi; int i; int matchIndex = -1, lowIndex = -1; u16 freq; ath9k_hw_get_channel_centers(ah, chan, &centers); freq = (isExtTarget) ? 
centers.ext_center : centers.ctl_center; if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel, IS_CHAN_2GHZ(chan))) { matchIndex = 0; } else { for (i = 0; (i < numChannels) && (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) { if (freq == ath9k_hw_fbin2freq(powInfo[i].bChannel, IS_CHAN_2GHZ(chan))) { matchIndex = i; break; } else if (freq < ath9k_hw_fbin2freq(powInfo[i].bChannel, IS_CHAN_2GHZ(chan)) && i > 0 && freq > ath9k_hw_fbin2freq(powInfo[i - 1].bChannel, IS_CHAN_2GHZ(chan))) { lowIndex = i - 1; break; } } if ((matchIndex == -1) && (lowIndex == -1)) matchIndex = i - 1; } if (matchIndex != -1) { *pNewPower = powInfo[matchIndex]; } else { clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel, IS_CHAN_2GHZ(chan)); chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel, IS_CHAN_2GHZ(chan)); for (i = 0; i < numRates; i++) { pNewPower->tPow2x[i] = (u8)ath9k_hw_interpolate(freq, clo, chi, powInfo[lowIndex].tPow2x[i], powInfo[lowIndex + 1].tPow2x[i]); } } } void ath9k_hw_get_target_powers(struct ath_hw *ah, struct ath9k_channel *chan, struct cal_target_power_ht *powInfo, u16 numChannels, struct cal_target_power_ht *pNewPower, u16 numRates, bool isHt40Target) { struct chan_centers centers; u16 clo, chi; int i; int matchIndex = -1, lowIndex = -1; u16 freq; ath9k_hw_get_channel_centers(ah, chan, &centers); freq = isHt40Target ? 
centers.synth_center : centers.ctl_center; if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel, IS_CHAN_2GHZ(chan))) { matchIndex = 0; } else { for (i = 0; (i < numChannels) && (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) { if (freq == ath9k_hw_fbin2freq(powInfo[i].bChannel, IS_CHAN_2GHZ(chan))) { matchIndex = i; break; } else if (freq < ath9k_hw_fbin2freq(powInfo[i].bChannel, IS_CHAN_2GHZ(chan)) && i > 0 && freq > ath9k_hw_fbin2freq(powInfo[i - 1].bChannel, IS_CHAN_2GHZ(chan))) { lowIndex = i - 1; break; } } if ((matchIndex == -1) && (lowIndex == -1)) matchIndex = i - 1; } if (matchIndex != -1) { *pNewPower = powInfo[matchIndex]; } else { clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel, IS_CHAN_2GHZ(chan)); chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel, IS_CHAN_2GHZ(chan)); for (i = 0; i < numRates; i++) { pNewPower->tPow2x[i] = (u8)ath9k_hw_interpolate(freq, clo, chi, powInfo[lowIndex].tPow2x[i], powInfo[lowIndex + 1].tPow2x[i]); } } } u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower, bool is2GHz, int num_band_edges) { u16 twiceMaxEdgePower = MAX_RATE_POWER; int i; for (i = 0; (i < num_band_edges) && (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) { if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz)) { twiceMaxEdgePower = CTL_EDGE_TPOWER(pRdEdgesPower[i].ctl); break; } else if ((i > 0) && (freq < ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz))) { if (ath9k_hw_fbin2freq(pRdEdgesPower[i - 1].bChannel, is2GHz) < freq && CTL_EDGE_FLAGS(pRdEdgesPower[i - 1].ctl)) { twiceMaxEdgePower = CTL_EDGE_TPOWER(pRdEdgesPower[i - 1].ctl); } break; } } return twiceMaxEdgePower; } void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); switch (ar5416_get_ntxchains(ah->txchainmask)) { case 1: break; case 2: regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN; break; case 3: 
regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN; break; default: ath_dbg(common, EEPROM, "Invalid chainmask configuration\n"); break; } } void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah, struct ath9k_channel *chan, void *pRawDataSet, u8 *bChans, u16 availPiers, u16 tPdGainOverlap, u16 *pPdGainBoundaries, u8 *pPDADCValues, u16 numXpdGains) { int i, j, k; int16_t ss; u16 idxL = 0, idxR = 0, numPiers; static u8 vpdTableL[AR5416_NUM_PD_GAINS] [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; static u8 vpdTableR[AR5416_NUM_PD_GAINS] [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; static u8 vpdTableI[AR5416_NUM_PD_GAINS] [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR; u8 minPwrT4[AR5416_NUM_PD_GAINS]; u8 maxPwrT4[AR5416_NUM_PD_GAINS]; int16_t vpdStep; int16_t tmpVal; u16 sizeCurrVpdTable, maxIndex, tgtIndex; bool match; int16_t minDelta = 0; struct chan_centers centers; int pdgain_boundary_default; struct cal_data_per_freq *data_def = pRawDataSet; struct cal_data_per_freq_4k *data_4k = pRawDataSet; struct cal_data_per_freq_ar9287 *data_9287 = pRawDataSet; bool eeprom_4k = AR_SREV_9285(ah) || AR_SREV_9271(ah); int intercepts; if (AR_SREV_9287(ah)) intercepts = AR9287_PD_GAIN_ICEPTS; else intercepts = AR5416_PD_GAIN_ICEPTS; memset(&minPwrT4, 0, AR5416_NUM_PD_GAINS); ath9k_hw_get_channel_centers(ah, chan, &centers); for (numPiers = 0; numPiers < availPiers; numPiers++) { if (bChans[numPiers] == AR5416_BCHAN_UNUSED) break; } match = ath9k_hw_get_lower_upper_index((u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)), bChans, numPiers, &idxL, &idxR); if (match) { if (AR_SREV_9287(ah)) { /* FIXME: array overrun? 
*/ for (i = 0; i < numXpdGains; i++) { minPwrT4[i] = data_9287[idxL].pwrPdg[i][0]; maxPwrT4[i] = data_9287[idxL].pwrPdg[i][4]; ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], data_9287[idxL].pwrPdg[i], data_9287[idxL].vpdPdg[i], intercepts, vpdTableI[i]); } } else if (eeprom_4k) { for (i = 0; i < numXpdGains; i++) { minPwrT4[i] = data_4k[idxL].pwrPdg[i][0]; maxPwrT4[i] = data_4k[idxL].pwrPdg[i][4]; ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], data_4k[idxL].pwrPdg[i], data_4k[idxL].vpdPdg[i], intercepts, vpdTableI[i]); } } else { for (i = 0; i < numXpdGains; i++) { minPwrT4[i] = data_def[idxL].pwrPdg[i][0]; maxPwrT4[i] = data_def[idxL].pwrPdg[i][4]; ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], data_def[idxL].pwrPdg[i], data_def[idxL].vpdPdg[i], intercepts, vpdTableI[i]); } } } else { for (i = 0; i < numXpdGains; i++) { if (AR_SREV_9287(ah)) { pVpdL = data_9287[idxL].vpdPdg[i]; pPwrL = data_9287[idxL].pwrPdg[i]; pVpdR = data_9287[idxR].vpdPdg[i]; pPwrR = data_9287[idxR].pwrPdg[i]; } else if (eeprom_4k) { pVpdL = data_4k[idxL].vpdPdg[i]; pPwrL = data_4k[idxL].pwrPdg[i]; pVpdR = data_4k[idxR].vpdPdg[i]; pPwrR = data_4k[idxR].pwrPdg[i]; } else { pVpdL = data_def[idxL].vpdPdg[i]; pPwrL = data_def[idxL].pwrPdg[i]; pVpdR = data_def[idxR].vpdPdg[i]; pPwrR = data_def[idxR].pwrPdg[i]; } minPwrT4[i] = max(pPwrL[0], pPwrR[0]); maxPwrT4[i] = min(pPwrL[intercepts - 1], pPwrR[intercepts - 1]); ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], pPwrL, pVpdL, intercepts, vpdTableL[i]); ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], pPwrR, pVpdR, intercepts, vpdTableR[i]); for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) { vpdTableI[i][j] = (u8)(ath9k_hw_interpolate((u16) FREQ2FBIN(centers. 
synth_center, IS_CHAN_2GHZ (chan)), bChans[idxL], bChans[idxR], vpdTableL[i][j], vpdTableR[i][j])); } } } k = 0; for (i = 0; i < numXpdGains; i++) { if (i == (numXpdGains - 1)) pPdGainBoundaries[i] = (u16)(maxPwrT4[i] / 2); else pPdGainBoundaries[i] = (u16)((maxPwrT4[i] + minPwrT4[i + 1]) / 4); pPdGainBoundaries[i] = min((u16)MAX_RATE_POWER, pPdGainBoundaries[i]); minDelta = 0; if (i == 0) { if (AR_SREV_9280_20_OR_LATER(ah)) ss = (int16_t)(0 - (minPwrT4[i] / 2)); else ss = 0; } else { ss = (int16_t)((pPdGainBoundaries[i - 1] - (minPwrT4[i] / 2)) - tPdGainOverlap + 1 + minDelta); } vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]); vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep); while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) { tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep); pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal); ss++; } sizeCurrVpdTable = (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1); tgtIndex = (u8)(pPdGainBoundaries[i] + tPdGainOverlap - (minPwrT4[i] / 2)); maxIndex = (tgtIndex < sizeCurrVpdTable) ? tgtIndex : sizeCurrVpdTable; while ((ss < maxIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1))) { pPDADCValues[k++] = vpdTableI[i][ss++]; } vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] - vpdTableI[i][sizeCurrVpdTable - 2]); vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep); if (tgtIndex >= maxIndex) { while ((ss <= tgtIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1))) { tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] + (ss - maxIndex + 1) * vpdStep)); pPDADCValues[k++] = (u8)((tmpVal > 255) ? 
255 : tmpVal); ss++; } } } if (eeprom_4k) pdgain_boundary_default = 58; else pdgain_boundary_default = pPdGainBoundaries[i - 1]; while (i < AR5416_PD_GAINS_IN_MASK) { pPdGainBoundaries[i] = pdgain_boundary_default; i++; } while (k < AR5416_NUM_PDADC_VALUES) { pPDADCValues[k] = pPDADCValues[k - 1]; k++; } } int ath9k_hw_eeprom_init(struct ath_hw *ah) { int status; if (AR_SREV_9300_20_OR_LATER(ah)) ah->eep_ops = &eep_ar9300_ops; else if (AR_SREV_9287(ah)) { ah->eep_ops = &eep_ar9287_ops; } else if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) { ah->eep_ops = &eep_4k_ops; } else { ah->eep_ops = &eep_def_ops; } if (!ah->eep_ops->fill_eeprom(ah)) return -EIO; status = ah->eep_ops->check_eeprom(ah); return status; }
gpl-2.0
ADVANSEE/0066_linux
arch/mips/mti-malta/malta-pci.c
8518
8493
/* * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc. * All rights reserved. * Authors: Carsten Langgaard <carstenl@mips.com> * Maciej W. Rozycki <macro@mips.com> * * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * MIPS boards specific PCI support. */ #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/gt64120.h> #include <asm/gcmpregs.h> #include <asm/mips-boards/generic.h> #include <asm/mips-boards/bonito64.h> #include <asm/mips-boards/msc01_pci.h> static struct resource bonito64_mem_resource = { .name = "Bonito PCI MEM", .flags = IORESOURCE_MEM, }; static struct resource bonito64_io_resource = { .name = "Bonito PCI I/O", .start = 0x00000000UL, .end = 0x000fffffUL, .flags = IORESOURCE_IO, }; static struct resource gt64120_mem_resource = { .name = "GT-64120 PCI MEM", .flags = IORESOURCE_MEM, }; static struct resource gt64120_io_resource = { .name = "GT-64120 PCI I/O", .flags = IORESOURCE_IO, }; static struct resource msc_mem_resource = { .name = "MSC PCI MEM", .flags = IORESOURCE_MEM, }; static struct resource msc_io_resource = { .name = "MSC PCI I/O", .flags = IORESOURCE_IO, }; extern struct pci_ops bonito64_pci_ops; extern struct pci_ops gt64xxx_pci0_ops; extern struct pci_ops msc_pci_ops; static struct pci_controller bonito64_controller = { .pci_ops = 
&bonito64_pci_ops, .io_resource = &bonito64_io_resource, .mem_resource = &bonito64_mem_resource, .io_offset = 0x00000000UL, }; static struct pci_controller gt64120_controller = { .pci_ops = &gt64xxx_pci0_ops, .io_resource = &gt64120_io_resource, .mem_resource = &gt64120_mem_resource, }; static struct pci_controller msc_controller = { .pci_ops = &msc_pci_ops, .io_resource = &msc_io_resource, .mem_resource = &msc_mem_resource, }; void __init mips_pcibios_init(void) { struct pci_controller *controller; resource_size_t start, end, map, start1, end1, map1, map2, map3, mask; switch (mips_revision_sconid) { case MIPS_REVISION_SCON_GT64120: /* * Due to a bug in the Galileo system controller, we need * to setup the PCI BAR for the Galileo internal registers. * This should be done in the bios/bootprom and will be * fixed in a later revision of YAMON (the MIPS boards * boot prom). */ GT_WRITE(GT_PCI0_CFGADDR_OFS, (0 << GT_PCI0_CFGADDR_BUSNUM_SHF) | /* Local bus */ (0 << GT_PCI0_CFGADDR_DEVNUM_SHF) | /* GT64120 dev */ (0 << GT_PCI0_CFGADDR_FUNCTNUM_SHF) | /* Function 0*/ ((0x20/4) << GT_PCI0_CFGADDR_REGNUM_SHF) | /* BAR 4*/ GT_PCI0_CFGADDR_CONFIGEN_BIT); /* Perform the write */ GT_WRITE(GT_PCI0_CFGDATA_OFS, CPHYSADDR(MIPS_GT_BASE)); /* Set up resource ranges from the controller's registers. */ start = GT_READ(GT_PCI0M0LD_OFS); end = GT_READ(GT_PCI0M0HD_OFS); map = GT_READ(GT_PCI0M0REMAP_OFS); end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK); start1 = GT_READ(GT_PCI0M1LD_OFS); end1 = GT_READ(GT_PCI0M1HD_OFS); map1 = GT_READ(GT_PCI0M1REMAP_OFS); end1 = (end1 & GT_PCI_HD_MSK) | (start1 & ~GT_PCI_HD_MSK); /* Cannot support multiple windows, use the wider. */ if (end1 - start1 > end - start) { start = start1; end = end1; map = map1; } mask = ~(start ^ end); /* We don't support remapping with a discontiguous mask. 
*/ BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) && mask != ~((mask & -mask) - 1)); gt64120_mem_resource.start = start; gt64120_mem_resource.end = end; gt64120_controller.mem_offset = (start & mask) - (map & mask); /* Addresses are 36-bit, so do shifts in the destinations. */ gt64120_mem_resource.start <<= GT_PCI_DCRM_SHF; gt64120_mem_resource.end <<= GT_PCI_DCRM_SHF; gt64120_mem_resource.end |= (1 << GT_PCI_DCRM_SHF) - 1; gt64120_controller.mem_offset <<= GT_PCI_DCRM_SHF; start = GT_READ(GT_PCI0IOLD_OFS); end = GT_READ(GT_PCI0IOHD_OFS); map = GT_READ(GT_PCI0IOREMAP_OFS); end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK); mask = ~(start ^ end); /* We don't support remapping with a discontiguous mask. */ BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) && mask != ~((mask & -mask) - 1)); gt64120_io_resource.start = map & mask; gt64120_io_resource.end = (map & mask) | ~mask; gt64120_controller.io_offset = 0; /* Addresses are 36-bit, so do shifts in the destinations. */ gt64120_io_resource.start <<= GT_PCI_DCRM_SHF; gt64120_io_resource.end <<= GT_PCI_DCRM_SHF; gt64120_io_resource.end |= (1 << GT_PCI_DCRM_SHF) - 1; controller = &gt64120_controller; break; case MIPS_REVISION_SCON_BONITO: /* Set up resource ranges from the controller's registers. */ map = BONITO_PCIMAP; map1 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO0) >> BONITO_PCIMAP_PCIMAP_LO0_SHIFT; map2 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO1) >> BONITO_PCIMAP_PCIMAP_LO1_SHIFT; map3 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO2) >> BONITO_PCIMAP_PCIMAP_LO2_SHIFT; /* Combine as many adjacent windows as possible. 
*/ map = map1; start = BONITO_PCILO0_BASE; end = 1; if (map3 == map2 + 1) { map = map2; start = BONITO_PCILO1_BASE; end++; } if (map2 == map1 + 1) { map = map1; start = BONITO_PCILO0_BASE; end++; } bonito64_mem_resource.start = start; bonito64_mem_resource.end = start + BONITO_PCIMAP_WINBASE(end) - 1; bonito64_controller.mem_offset = start - BONITO_PCIMAP_WINBASE(map); controller = &bonito64_controller; break; case MIPS_REVISION_SCON_SOCIT: case MIPS_REVISION_SCON_ROCIT: case MIPS_REVISION_SCON_SOCITSC: case MIPS_REVISION_SCON_SOCITSCP: /* Set up resource ranges from the controller's registers. */ MSC_READ(MSC01_PCI_SC2PMBASL, start); MSC_READ(MSC01_PCI_SC2PMMSKL, mask); MSC_READ(MSC01_PCI_SC2PMMAPL, map); msc_mem_resource.start = start & mask; msc_mem_resource.end = (start & mask) | ~mask; msc_controller.mem_offset = (start & mask) - (map & mask); #ifdef CONFIG_MIPS_CMP if (gcmp_niocu()) gcmp_setregion(0, start, mask, GCMP_GCB_GCMPB_CMDEFTGT_IOCU1); #endif MSC_READ(MSC01_PCI_SC2PIOBASL, start); MSC_READ(MSC01_PCI_SC2PIOMSKL, mask); MSC_READ(MSC01_PCI_SC2PIOMAPL, map); msc_io_resource.start = map & mask; msc_io_resource.end = (map & mask) | ~mask; msc_controller.io_offset = 0; ioport_resource.end = ~mask; #ifdef CONFIG_MIPS_CMP if (gcmp_niocu()) gcmp_setregion(1, start, mask, GCMP_GCB_GCMPB_CMDEFTGT_IOCU1); #endif /* If ranges overlap I/O takes precedence. */ start = start & mask; end = start | ~mask; if ((start >= msc_mem_resource.start && start <= msc_mem_resource.end) || (end >= msc_mem_resource.start && end <= msc_mem_resource.end)) { /* Use the larger space. 
*/ start = max(start, msc_mem_resource.start); end = min(end, msc_mem_resource.end); if (start - msc_mem_resource.start >= msc_mem_resource.end - end) msc_mem_resource.end = start - 1; else msc_mem_resource.start = end + 1; } controller = &msc_controller; break; default: return; } if (controller->io_resource->start < 0x00001000UL) /* FIXME */ controller->io_resource->start = 0x00001000UL; iomem_resource.end &= 0xfffffffffULL; /* 64 GB */ ioport_resource.end = controller->io_resource->end; controller->io_map_base = mips_io_port_base; register_pci_controller(controller); } /* Enable PCI 2.1 compatibility in PIIX4 */ static void __init quirk_dlcsetup(struct pci_dev *dev) { u8 odlc, ndlc; (void) pci_read_config_byte(dev, 0x82, &odlc); /* Enable passive releases and delayed transaction */ ndlc = odlc | 7; (void) pci_write_config_byte(dev, 0x82, ndlc); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0, quirk_dlcsetup);
gpl-2.0
wangsai008/NewWorld-F160-JB-Kernel
arch/mips/kernel/stacktrace.c
8774
2076
/* * Stack trace management functions * * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp> */ #include <linux/sched.h> #include <linux/stacktrace.h> #include <linux/export.h> #include <asm/stacktrace.h> /* * Save stack-backtrace addresses into a stack_trace buffer: */ static void save_raw_context_stack(struct stack_trace *trace, unsigned long reg29) { unsigned long *sp = (unsigned long *)reg29; unsigned long addr; while (!kstack_end(sp)) { addr = *sp++; if (__kernel_text_address(addr)) { if (trace->skip > 0) trace->skip--; else trace->entries[trace->nr_entries++] = addr; if (trace->nr_entries >= trace->max_entries) break; } } } static void save_context_stack(struct stack_trace *trace, struct task_struct *tsk, struct pt_regs *regs) { unsigned long sp = regs->regs[29]; #ifdef CONFIG_KALLSYMS unsigned long ra = regs->regs[31]; unsigned long pc = regs->cp0_epc; if (raw_show_trace || !__kernel_text_address(pc)) { unsigned long stack_page = (unsigned long)task_stack_page(tsk); if (stack_page && sp >= stack_page && sp <= stack_page + THREAD_SIZE - 32) save_raw_context_stack(trace, sp); return; } do { if (trace->skip > 0) trace->skip--; else trace->entries[trace->nr_entries++] = pc; if (trace->nr_entries >= trace->max_entries) break; pc = unwind_stack(tsk, &sp, pc, &ra); } while (pc); #else save_raw_context_stack(trace, sp); #endif } /* * Save stack-backtrace addresses into a stack_trace buffer. */ void save_stack_trace(struct stack_trace *trace) { save_stack_trace_tsk(current, trace); } EXPORT_SYMBOL_GPL(save_stack_trace); void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { struct pt_regs dummyregs; struct pt_regs *regs = &dummyregs; WARN_ON(trace->nr_entries || !trace->max_entries); if (tsk != current) { regs->regs[29] = tsk->thread.reg29; regs->regs[31] = 0; regs->cp0_epc = tsk->thread.reg31; } else prepare_frametrace(regs); save_context_stack(trace, tsk, regs); } EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
gpl-2.0
galaxyishere/samsung-kernel-latona
drivers/media/dvb/frontends/lnbp21.c
9286
5105
/* * lnbp21.c - driver for lnb supply and control ic lnbp21 * * Copyright (C) 2006, 2009 Oliver Endriss <o.endriss@gmx.de> * Copyright (C) 2009 Igor M. Liplianin <liplianin@netup.ru> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * * the project's page is at http://www.linuxtv.org */ #include <linux/delay.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "lnbp21.h" #include "lnbh24.h" struct lnbp21 { u8 config; u8 override_or; u8 override_and; struct i2c_adapter *i2c; u8 i2c_addr; }; static int lnbp21_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) { struct lnbp21 *lnbp21 = (struct lnbp21 *) fe->sec_priv; struct i2c_msg msg = { .addr = lnbp21->i2c_addr, .flags = 0, .buf = &lnbp21->config, .len = sizeof(lnbp21->config) }; lnbp21->config &= ~(LNBP21_VSEL | LNBP21_EN); switch(voltage) { case SEC_VOLTAGE_OFF: break; case SEC_VOLTAGE_13: lnbp21->config |= LNBP21_EN; break; case SEC_VOLTAGE_18: lnbp21->config |= (LNBP21_EN | LNBP21_VSEL); break; default: return -EINVAL; }; lnbp21->config |= lnbp21->override_or; lnbp21->config &= lnbp21->override_and; return (i2c_transfer(lnbp21->i2c, &msg, 1) == 1) ? 
0 : -EIO; } static int lnbp21_enable_high_lnb_voltage(struct dvb_frontend *fe, long arg) { struct lnbp21 *lnbp21 = (struct lnbp21 *) fe->sec_priv; struct i2c_msg msg = { .addr = lnbp21->i2c_addr, .flags = 0, .buf = &lnbp21->config, .len = sizeof(lnbp21->config) }; if (arg) lnbp21->config |= LNBP21_LLC; else lnbp21->config &= ~LNBP21_LLC; lnbp21->config |= lnbp21->override_or; lnbp21->config &= lnbp21->override_and; return (i2c_transfer(lnbp21->i2c, &msg, 1) == 1) ? 0 : -EIO; } static int lnbp21_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) { struct lnbp21 *lnbp21 = (struct lnbp21 *) fe->sec_priv; struct i2c_msg msg = { .addr = lnbp21->i2c_addr, .flags = 0, .buf = &lnbp21->config, .len = sizeof(lnbp21->config) }; switch (tone) { case SEC_TONE_OFF: lnbp21->config &= ~LNBP21_TEN; break; case SEC_TONE_ON: lnbp21->config |= LNBP21_TEN; break; default: return -EINVAL; }; lnbp21->config |= lnbp21->override_or; lnbp21->config &= lnbp21->override_and; return (i2c_transfer(lnbp21->i2c, &msg, 1) == 1) ? 
0 : -EIO; } static void lnbp21_release(struct dvb_frontend *fe) { /* LNBP power off */ lnbp21_set_voltage(fe, SEC_VOLTAGE_OFF); /* free data */ kfree(fe->sec_priv); fe->sec_priv = NULL; } static struct dvb_frontend *lnbx2x_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, u8 override_set, u8 override_clear, u8 i2c_addr, u8 config) { struct lnbp21 *lnbp21 = kmalloc(sizeof(struct lnbp21), GFP_KERNEL); if (!lnbp21) return NULL; /* default configuration */ lnbp21->config = config; lnbp21->i2c = i2c; lnbp21->i2c_addr = i2c_addr; fe->sec_priv = lnbp21; /* bits which should be forced to '1' */ lnbp21->override_or = override_set; /* bits which should be forced to '0' */ lnbp21->override_and = ~override_clear; /* detect if it is present or not */ if (lnbp21_set_voltage(fe, SEC_VOLTAGE_OFF)) { kfree(lnbp21); return NULL; } /* install release callback */ fe->ops.release_sec = lnbp21_release; /* override frontend ops */ fe->ops.set_voltage = lnbp21_set_voltage; fe->ops.enable_high_lnb_voltage = lnbp21_enable_high_lnb_voltage; if (!(override_clear & LNBH24_TEN)) /*22kHz logic controlled by demod*/ fe->ops.set_tone = lnbp21_set_tone; printk(KERN_INFO "LNBx2x attached on addr=%x\n", lnbp21->i2c_addr); return fe; } struct dvb_frontend *lnbh24_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, u8 override_set, u8 override_clear, u8 i2c_addr) { return lnbx2x_attach(fe, i2c, override_set, override_clear, i2c_addr, LNBH24_TTX); } EXPORT_SYMBOL(lnbh24_attach); struct dvb_frontend *lnbp21_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, u8 override_set, u8 override_clear) { return lnbx2x_attach(fe, i2c, override_set, override_clear, 0x08, LNBP21_ISEL); } EXPORT_SYMBOL(lnbp21_attach); MODULE_DESCRIPTION("Driver for lnb supply and control ic lnbp21, lnbh24"); MODULE_AUTHOR("Oliver Endriss, Igor M. Liplianin"); MODULE_LICENSE("GPL");
gpl-2.0
garwynn/D710VMUB_GB28_Kernel
arch/x86/kernel/cpu/topology.c
10310
2575
/* * Check for extended topology enumeration cpuid leaf 0xb and if it * exists, use it for populating initial_apicid and cpu topology * detection. */ #include <linux/cpu.h> #include <asm/apic.h> #include <asm/pat.h> #include <asm/processor.h> /* leaf 0xb SMT level */ #define SMT_LEVEL 0 /* leaf 0xb sub-leaf types */ #define INVALID_TYPE 0 #define SMT_TYPE 1 #define CORE_TYPE 2 #define LEAFB_SUBTYPE(ecx) (((ecx) >> 8) & 0xff) #define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f) #define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff) /* * Check for extended topology enumeration cpuid leaf 0xb and if it * exists, use it for populating initial_apicid and cpu topology * detection. */ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP unsigned int eax, ebx, ecx, edx, sub_index; unsigned int ht_mask_width, core_plus_mask_width; unsigned int core_select_mask, core_level_siblings; static bool printed; if (c->cpuid_level < 0xb) return; cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx); /* * check if the cpuid leaf 0xb is actually implemented. */ if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE)) return; set_cpu_cap(c, X86_FEATURE_XTOPOLOGY); /* * initial apic id, which also represents 32-bit extended x2apic id. */ c->initial_apicid = edx; /* * Populate HT related information from sub-leaf level 0. */ core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx); core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); sub_index = 1; do { cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx); /* * Check for the Core type in the implemented sub leaves. 
*/ if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) { core_level_siblings = LEVEL_MAX_SIBLINGS(ebx); core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); break; } sub_index++; } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE); core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width; c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width) & core_select_mask; c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width); /* * Reinit the apicid, now that we have extended initial_apicid. */ c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); c->x86_max_cores = (core_level_siblings / smp_num_siblings); if (!printed) { printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id); if (c->x86_max_cores > 1) printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id); printed = 1; } return; #endif }
gpl-2.0
nothingisdead/tostab03_kernel_cm10
arch/powerpc/platforms/pseries/power.c
12102
2187
/* * Interface for power-management for ppc64 compliant platform * * Manish Ahuja <mahuja@us.ibm.com> * * Feb 2007 * * Copyright (C) 2007 IBM Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kobject.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> unsigned long rtas_poweron_auto; /* default and normal state is 0 */ static ssize_t auto_poweron_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", rtas_poweron_auto); } static ssize_t auto_poweron_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { int ret; unsigned long ups_restart; ret = sscanf(buf, "%lu", &ups_restart); if ((ret == 1) && ((ups_restart == 1) || (ups_restart == 0))){ rtas_poweron_auto = ups_restart; return n; } return -EINVAL; } static struct kobj_attribute auto_poweron_attr = __ATTR(auto_poweron, 0644, auto_poweron_show, auto_poweron_store); #ifndef CONFIG_PM struct kobject *power_kobj; static struct attribute *g[] = { &auto_poweron_attr.attr, NULL, }; static struct attribute_group attr_group = { .attrs = g, }; static int __init pm_init(void) { power_kobj = kobject_create_and_add("power", NULL); if (!power_kobj) return -ENOMEM; return sysfs_create_group(power_kobj, &attr_group); } core_initcall(pm_init); #else static int __init apo_pm_init(void) { return 
(sysfs_create_file(power_kobj, &auto_poweron_attr.attr)); } __initcall(apo_pm_init); #endif
gpl-2.0
davtse/i9505
drivers/net/fddi/skfp/smtdef.c
13126
9862
/****************************************************************************** * * (C)Copyright 1998,1999 SysKonnect, * a business unit of Schneider & Koch & Co. Datensysteme GmbH. * * See the file "skfddi.c" for further information. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * The information in this file is provided "AS IS" without warranty. * ******************************************************************************/ /* SMT/CMT defaults */ #include "h/types.h" #include "h/fddi.h" #include "h/smc.h" #ifndef OEM_USER_DATA #define OEM_USER_DATA "SK-NET FDDI V2.0 Userdata" #endif #ifndef lint static const char ID_sccs[] = "@(#)smtdef.c 2.53 99/08/11 (C) SK " ; #endif /* * defaults */ #define TTMS(x) ((u_long)(x)*1000L) #define TTS(x) ((u_long)(x)*1000000L) #define TTUS(x) ((u_long)(x)) #define DEFAULT_TB_MIN TTMS(5) #define DEFAULT_TB_MAX TTMS(50) #define DEFAULT_C_MIN TTUS(1600) #define DEFAULT_T_OUT TTMS(100+5) #define DEFAULT_TL_MIN TTUS(30) #define DEFAULT_LC_SHORT TTMS(50+5) #define DEFAULT_LC_MEDIUM TTMS(500+20) #define DEFAULT_LC_LONG TTS(5)+TTMS(50) #define DEFAULT_LC_EXTENDED TTS(50)+TTMS(50) #define DEFAULT_T_NEXT_9 TTMS(200+10) #define DEFAULT_NS_MAX TTUS(1310) #define DEFAULT_I_MAX TTMS(25) #define DEFAULT_IN_MAX TTMS(40) #define DEFAULT_TD_MIN TTMS(5) #define DEFAULT_T_NON_OP TTS(1) #define DEFAULT_T_STUCK TTS(8) #define DEFAULT_T_DIRECT TTMS(370) #define DEFAULT_T_JAM TTMS(370) #define DEFAULT_T_ANNOUNCE TTMS(2500) #define DEFAULT_D_MAX TTUS(1617) #define DEFAULT_LEM_ALARM (8) #define DEFAULT_LEM_CUTOFF (7) #define DEFAULT_TEST_DONE TTS(1) #define DEFAULT_CHECK_POLL TTS(1) #define DEFAULT_POLL TTMS(50) /* * LCT errors threshold */ #define DEFAULT_LCT_SHORT 1 #define DEFAULT_LCT_MEDIUM 3 #define DEFAULT_LCT_LONG 5 #define DEFAULT_LCT_EXTEND 50 
/* Forward declarations */ void smt_reset_defaults(struct s_smc *smc, int level); static void smt_init_mib(struct s_smc *smc, int level); static int set_min_max(int maxflag, u_long mib, u_long limit, u_long *oper); #define MS2BCLK(x) ((x)*12500L) #define US2BCLK(x) ((x)*1250L) void smt_reset_defaults(struct s_smc *smc, int level) { struct smt_config *smt ; int i ; u_long smt_boot_time; smt_init_mib(smc,level) ; smc->os.smc_version = SMC_VERSION ; smt_boot_time = smt_get_time(); for( i = 0; i < NUMMACS; i++ ) smc->sm.last_tok_time[i] = smt_boot_time ; smt = &smc->s ; smt->attach_s = 0 ; smt->build_ring_map = 1 ; smt->sas = SMT_DAS ; smt->numphys = NUMPHYS ; smt->pcm_tb_min = DEFAULT_TB_MIN ; smt->pcm_tb_max = DEFAULT_TB_MAX ; smt->pcm_c_min = DEFAULT_C_MIN ; smt->pcm_t_out = DEFAULT_T_OUT ; smt->pcm_tl_min = DEFAULT_TL_MIN ; smt->pcm_lc_short = DEFAULT_LC_SHORT ; smt->pcm_lc_medium = DEFAULT_LC_MEDIUM ; smt->pcm_lc_long = DEFAULT_LC_LONG ; smt->pcm_lc_extended = DEFAULT_LC_EXTENDED ; smt->pcm_t_next_9 = DEFAULT_T_NEXT_9 ; smt->pcm_ns_max = DEFAULT_NS_MAX ; smt->ecm_i_max = DEFAULT_I_MAX ; smt->ecm_in_max = DEFAULT_IN_MAX ; smt->ecm_td_min = DEFAULT_TD_MIN ; smt->ecm_test_done = DEFAULT_TEST_DONE ; smt->ecm_check_poll = DEFAULT_CHECK_POLL ; smt->rmt_t_non_op = DEFAULT_T_NON_OP ; smt->rmt_t_stuck = DEFAULT_T_STUCK ; smt->rmt_t_direct = DEFAULT_T_DIRECT ; smt->rmt_t_jam = DEFAULT_T_JAM ; smt->rmt_t_announce = DEFAULT_T_ANNOUNCE ; smt->rmt_t_poll = DEFAULT_POLL ; smt->rmt_dup_mac_behavior = FALSE ; /* See Struct smt_config */ smt->mac_d_max = DEFAULT_D_MAX ; smt->lct_short = DEFAULT_LCT_SHORT ; smt->lct_medium = DEFAULT_LCT_MEDIUM ; smt->lct_long = DEFAULT_LCT_LONG ; smt->lct_extended = DEFAULT_LCT_EXTEND ; #ifndef SLIM_SMT #ifdef ESS if (level == 0) { smc->ess.sync_bw_available = FALSE ; smc->mib.fddiESSPayload = 0 ; smc->mib.fddiESSOverhead = 0 ; smc->mib.fddiESSMaxTNeg = (u_long)(- MS2BCLK(25)) ; smc->mib.fddiESSMinSegmentSize = 1 ; smc->mib.fddiESSCategory = 
SB_STATIC ; smc->mib.fddiESSSynchTxMode = FALSE ; smc->ess.raf_act_timer_poll = FALSE ; smc->ess.timer_count = 7 ; /* first RAF alc req after 3s */ } smc->ess.local_sba_active = FALSE ; smc->ess.sba_reply_pend = NULL ; #endif #ifdef SBA smt_init_sba(smc,level) ; #endif #endif /* no SLIM_SMT */ #ifdef TAG_MODE if (level == 0) { smc->hw.pci_fix_value = 0 ; } #endif } /* * manufacturer data */ static const char man_data[32] = /* 01234567890123456789012345678901 */ "xxxSK-NET FDDI SMT 7.3 - V2.8.8" ; static void smt_init_mib(struct s_smc *smc, int level) { struct fddi_mib *mib ; struct fddi_mib_p *pm ; int port ; int path ; mib = &smc->mib ; if (level == 0) { /* * set EVERYTHING to ZERO * EXCEPT hw and os */ memset(((char *)smc)+ sizeof(struct s_smt_os)+sizeof(struct s_smt_hw), 0, sizeof(struct s_smc) - sizeof(struct s_smt_os) - sizeof(struct s_smt_hw)) ; } else { mib->fddiSMTRemoteDisconnectFlag = 0 ; mib->fddiSMTPeerWrapFlag = 0 ; } mib->fddiSMTOpVersionId = 2 ; mib->fddiSMTHiVersionId = 2 ; mib->fddiSMTLoVersionId = 2 ; memcpy((char *) mib->fddiSMTManufacturerData,man_data,32) ; if (level == 0) { strcpy(mib->fddiSMTUserData,OEM_USER_DATA) ; } mib->fddiSMTMIBVersionId = 1 ; mib->fddiSMTMac_Ct = NUMMACS ; mib->fddiSMTConnectionPolicy = POLICY_MM | POLICY_AA | POLICY_BB ; /* * fddiSMTNonMaster_Ct and fddiSMTMaster_Ct are set in smt_fixup_mib * s.sas is not set yet (is set in init driver) */ mib->fddiSMTAvailablePaths = MIB_PATH_P | MIB_PATH_S ; mib->fddiSMTConfigCapabilities = 0 ; /* no hold,no wrap_ab*/ mib->fddiSMTTT_Notify = 10 ; mib->fddiSMTStatRptPolicy = TRUE ; mib->fddiSMTTrace_MaxExpiration = SEC2MIB(7) ; mib->fddiSMTMACIndexes = INDEX_MAC ; mib->fddiSMTStationStatus = MIB_SMT_STASTA_SEPA ; /* separated */ mib->m[MAC0].fddiMACIndex = INDEX_MAC ; mib->m[MAC0].fddiMACFrameStatusFunctions = FSC_TYPE0 ; mib->m[MAC0].fddiMACRequestedPaths = MIB_P_PATH_LOCAL | MIB_P_PATH_SEC_ALTER | MIB_P_PATH_PRIM_ALTER ; mib->m[MAC0].fddiMACAvailablePaths = MIB_PATH_P ; 
mib->m[MAC0].fddiMACCurrentPath = MIB_PATH_PRIMARY ; mib->m[MAC0].fddiMACT_MaxCapabilitiy = (u_long)(- MS2BCLK(165)) ; mib->m[MAC0].fddiMACTVXCapabilitiy = (u_long)(- US2BCLK(52)) ; if (level == 0) { mib->m[MAC0].fddiMACTvxValue = (u_long)(- US2BCLK(27)) ; mib->m[MAC0].fddiMACTvxValueMIB = (u_long)(- US2BCLK(27)) ; mib->m[MAC0].fddiMACT_Req = (u_long)(- MS2BCLK(165)) ; mib->m[MAC0].fddiMACT_ReqMIB = (u_long)(- MS2BCLK(165)) ; mib->m[MAC0].fddiMACT_Max = (u_long)(- MS2BCLK(165)) ; mib->m[MAC0].fddiMACT_MaxMIB = (u_long)(- MS2BCLK(165)) ; mib->m[MAC0].fddiMACT_Min = (u_long)(- MS2BCLK(4)) ; } mib->m[MAC0].fddiMACHardwarePresent = TRUE ; mib->m[MAC0].fddiMACMA_UnitdataEnable = TRUE ; mib->m[MAC0].fddiMACFrameErrorThreshold = 1 ; mib->m[MAC0].fddiMACNotCopiedThreshold = 1 ; /* * Path attributes */ for (path = 0 ; path < NUMPATHS ; path++) { mib->a[path].fddiPATHIndex = INDEX_PATH + path ; if (level == 0) { mib->a[path].fddiPATHTVXLowerBound = (u_long)(- US2BCLK(27)) ; mib->a[path].fddiPATHT_MaxLowerBound = (u_long)(- MS2BCLK(165)) ; mib->a[path].fddiPATHMaxT_Req = (u_long)(- MS2BCLK(165)) ; } } /* * Port attributes */ pm = mib->p ; for (port = 0 ; port < NUMPHYS ; port++) { /* * set MIB pointer in phy */ /* Attention: don't initialize mib pointer here! */ /* It must be initialized during phase 2 */ smc->y[port].mib = NULL; mib->fddiSMTPORTIndexes[port] = port+INDEX_PORT ; pm->fddiPORTIndex = port+INDEX_PORT ; pm->fddiPORTHardwarePresent = TRUE ; if (level == 0) { pm->fddiPORTLer_Alarm = DEFAULT_LEM_ALARM ; pm->fddiPORTLer_Cutoff = DEFAULT_LEM_CUTOFF ; } /* * fddiPORTRequestedPaths are set in pcmplc.c * we don't know the port type yet ! 
*/ pm->fddiPORTRequestedPaths[1] = 0 ; pm->fddiPORTRequestedPaths[2] = 0 ; pm->fddiPORTRequestedPaths[3] = 0 ; pm->fddiPORTAvailablePaths = MIB_PATH_P ; pm->fddiPORTPMDClass = MIB_PMDCLASS_MULTI ; pm++ ; } (void) smt_set_mac_opvalues(smc) ; } int smt_set_mac_opvalues(struct s_smc *smc) { int st ; int st2 ; st = set_min_max(1,smc->mib.m[MAC0].fddiMACTvxValueMIB, smc->mib.a[PATH0].fddiPATHTVXLowerBound, &smc->mib.m[MAC0].fddiMACTvxValue) ; st |= set_min_max(0,smc->mib.m[MAC0].fddiMACT_MaxMIB, smc->mib.a[PATH0].fddiPATHT_MaxLowerBound, &smc->mib.m[MAC0].fddiMACT_Max) ; st |= (st2 = set_min_max(0,smc->mib.m[MAC0].fddiMACT_ReqMIB, smc->mib.a[PATH0].fddiPATHMaxT_Req, &smc->mib.m[MAC0].fddiMACT_Req)) ; if (st2) { /* Treq attribute changed remotely. So send an AIX_EVENT to the * user */ AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long) FDDI_SMT_EVENT, (u_long) FDDI_REMOTE_T_REQ, smt_get_event_word(smc)); } return st; } void smt_fixup_mib(struct s_smc *smc) { #ifdef CONCENTRATOR switch (smc->s.sas) { case SMT_SAS : smc->mib.fddiSMTNonMaster_Ct = 1 ; break ; case SMT_DAS : smc->mib.fddiSMTNonMaster_Ct = 2 ; break ; case SMT_NAC : smc->mib.fddiSMTNonMaster_Ct = 0 ; break ; } smc->mib.fddiSMTMaster_Ct = NUMPHYS - smc->mib.fddiSMTNonMaster_Ct ; #else switch (smc->s.sas) { case SMT_SAS : smc->mib.fddiSMTNonMaster_Ct = 1 ; break ; case SMT_DAS : smc->mib.fddiSMTNonMaster_Ct = 2 ; break ; } smc->mib.fddiSMTMaster_Ct = 0 ; #endif } /* * determine new setting for operational value * if limit is lower than mib * use limit * else * use mib * NOTE : numbers are negative, negate comparison ! */ static int set_min_max(int maxflag, u_long mib, u_long limit, u_long *oper) { u_long old ; old = *oper ; if ((limit > mib) ^ maxflag) *oper = limit ; else *oper = mib ; return old != *oper; }
gpl-2.0
zparallax/amplitude_aosp_12
drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
71
18884
/* Copyright (c) 2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "MSM-SENSOR-DRIVER %s:%d " fmt "\n", __func__, __LINE__ /* Header file declaration */ #include "msm_sensor.h" #include "msm_sd.h" #include "camera.h" #include "msm_cci.h" #include "msm_camera_dt_util.h" //#define MSM_SENSOR_DRIVER_DEBUG /* Logging macro */ #undef CDBG #ifdef MSM_SENSOR_DRIVER_DEBUG #define CDBG(fmt, args ...) pr_err(fmt, ## args) #else #define CDBG(fmt, args ...) pr_debug(fmt, ## args) #endif /* Static declaration */ static struct msm_sensor_ctrl_t *g_sctrl[MAX_CAMERAS]; static const struct of_device_id msm_sensor_driver_dt_match[] = { { .compatible = "qcom,camera" }, {} }; MODULE_DEVICE_TABLE(of, msm_sensor_driver_dt_match); static struct platform_driver msm_sensor_platform_driver = { .driver = { .name = "qcom,camera", .owner = THIS_MODULE, .of_match_table = msm_sensor_driver_dt_match, }, }; static struct v4l2_subdev_info msm_sensor_driver_subdev_info[] = { { .code = V4L2_MBUS_FMT_SBGGR10_1X10, .colorspace = V4L2_COLORSPACE_JPEG, .fmt = 1, .order = 0, }, }; /* static function definition */ int32_t msm_sensor_driver_probe(void *setting) { int32_t rc = 0; int32_t is_power_off = 0; uint16_t i = 0, size = 0, off_size = 0; uint32_t session_id = 0; struct msm_sensor_ctrl_t *s_ctrl = NULL; struct msm_camera_cci_client *cci_client = NULL; struct msm_camera_sensor_slave_info *slave_info = NULL; struct msm_sensor_power_setting *power_setting = NULL; struct msm_sensor_power_setting *power_off_setting = NULL; struct 
msm_camera_slave_info *camera_info = NULL; struct msm_camera_power_ctrl_t *power_info = NULL; /* Validate input parameters */ if (!setting) { pr_err("failed: slave_info %p", setting); return -EINVAL; } /* Allocate memory for slave info */ slave_info = kzalloc(sizeof(*slave_info), GFP_KERNEL); if (!slave_info) { pr_err("failed: no memory slave_info %p", slave_info); return -ENOMEM; } if (copy_from_user(slave_info, (void*)setting, sizeof(*slave_info))) { pr_err("failed: copy_from_user"); rc = -EFAULT; goto FREE_SLAVE_INFO; } /* Print slave info */ CDBG("camera id %d", slave_info->camera_id); CDBG("slave_addr %x", slave_info->slave_addr); CDBG("addr_type %d", slave_info->addr_type); CDBG("sensor_id_reg_addr %x", slave_info->sensor_id_info.sensor_id_reg_addr); CDBG("sensor_id %x", slave_info->sensor_id_info.sensor_id); CDBG("size %x", slave_info->power_setting_array.size); /* Validate camera id */ if (slave_info->camera_id >= MAX_CAMERAS) { pr_err("failed: invalid camera id %d max %d", slave_info->camera_id, MAX_CAMERAS); rc = -EINVAL; goto FREE_SLAVE_INFO; } /* Extract s_ctrl from camera id */ s_ctrl = g_sctrl[slave_info->camera_id]; if (!s_ctrl) { pr_err("failed: s_ctrl %p for camera_id %d", s_ctrl, slave_info->camera_id); rc = -EINVAL; goto FREE_SLAVE_INFO; } CDBG("s_ctrl[%d] %p", slave_info->camera_id, s_ctrl); if (s_ctrl->is_probe_succeed == 1) { /* * Different sensor on this camera slot has been connected * and probe already succeeded for that sensor. 
Ignore this * probe */ pr_err("slot %d has some other sensor", slave_info->camera_id); kfree(slave_info); return 0; } size = slave_info->power_setting_array.size; /* Allocate memory for power setting */ power_setting = kzalloc(sizeof(*power_setting) * size, GFP_KERNEL); if (!power_setting) { pr_err("failed: no memory power_setting %p", power_setting); rc = -ENOMEM; goto FREE_SLAVE_INFO; } if (copy_from_user(power_setting, (void*)slave_info->power_setting_array.power_setting, sizeof(*power_setting) * size)) { pr_err("failed: copy_from_user"); rc = -EFAULT; goto FREE_POWER_SETTING; } /* Print power setting */ for (i = 0; i < size; i++) { CDBG("seq_type %d seq_val %d config_val %ld delay %d", power_setting[i].seq_type, power_setting[i].seq_val, power_setting[i].config_val, power_setting[i].delay); } off_size = slave_info->power_setting_array.off_size; if (off_size > 0) { /* Allocate memory for power setting */ power_off_setting = kzalloc(sizeof(*power_off_setting) * off_size, GFP_KERNEL); if (!power_off_setting) { pr_err("failed: no memory power_setting %p", power_off_setting); rc = -ENOMEM; goto FREE_POWER_SETTING; } if (copy_from_user(power_off_setting, (void*)slave_info->power_setting_array.power_off_setting, sizeof(*power_off_setting) * off_size)) { pr_err("failed: copy_from_user"); rc = -EFAULT; goto FREE_POWER_OFF_SETTING; } /* Print power setting */ for (i = 0; i < off_size; i++) { CDBG("seq_type %d seq_val %d config_val %ld delay %d", power_off_setting[i].seq_type, power_off_setting[i].seq_val, power_off_setting[i].config_val, power_off_setting[i].delay); } is_power_off = 1; } camera_info = kzalloc(sizeof(struct msm_camera_slave_info), GFP_KERNEL); if (!camera_info) { pr_err("failed: no memory slave_info %p", camera_info); if (is_power_off) goto FREE_POWER_OFF_SETTING; else goto FREE_POWER_SETTING; } /* Fill power up setting and power up setting size */ power_info = &s_ctrl->sensordata->power_info; power_info->power_setting = power_setting; 
power_info->power_setting_size = size; power_info->power_off_setting = power_off_setting; power_info->power_off_setting_size = off_size; s_ctrl->sensordata->slave_info = camera_info; s_ctrl->sensor_device_type = MSM_CAMERA_PLATFORM_DEVICE; /* Fill sensor slave info */ camera_info->sensor_slave_addr = slave_info->slave_addr; camera_info->sensor_id_reg_addr = slave_info->sensor_id_info.sensor_id_reg_addr; camera_info->sensor_id = slave_info->sensor_id_info.sensor_id; /* Fill CCI master, slave address and CCI default params */ if (!s_ctrl->sensor_i2c_client) { pr_err("failed: sensor_i2c_client %p", s_ctrl->sensor_i2c_client); rc = -EINVAL; if (is_power_off) goto FREE_POWER_OFF_SETTING; else goto FREE_POWER_SETTING; } /* Fill sensor address type */ s_ctrl->sensor_i2c_client->addr_type = slave_info->addr_type; cci_client = s_ctrl->sensor_i2c_client->cci_client; if (!cci_client) { pr_err("failed: cci_client %p", cci_client); if (is_power_off) goto FREE_POWER_OFF_SETTING; else goto FREE_POWER_SETTING; } cci_client->cci_i2c_master = s_ctrl->cci_i2c_master; cci_client->sid = slave_info->slave_addr >> 1; cci_client->retries = 3; cci_client->id_map = 0; /* Parse and fill vreg params */ rc = msm_camera_fill_vreg_params( power_info->cam_vreg, power_info->num_vreg, power_info->power_setting, power_info->power_setting_size); if (rc < 0) { pr_err("failed: msm_camera_get_dt_power_setting_data rc %d", rc); if (is_power_off) goto FREE_POWER_OFF_SETTING; else goto FREE_POWER_SETTING; } if (power_info->power_off_setting && (power_info->power_off_setting_size > 0)) { /* Parse and fill vreg params */ rc = msm_camera_fill_vreg_params( power_info->cam_vreg, power_info->num_vreg, power_info->power_off_setting, power_info->power_off_setting_size); if (rc < 0) { pr_err("failed: msm_camera_get_dt_power_setting_data rc %d", rc); if (is_power_off) goto FREE_POWER_OFF_SETTING; else goto FREE_POWER_SETTING; } } /* remove this code for DFMS test */ #if 0 /* Power up and probe sensor */ rc = 
s_ctrl->func_tbl->sensor_power_up(s_ctrl, &s_ctrl->sensordata->power_info, s_ctrl->sensor_i2c_client, s_ctrl->sensordata->slave_info, slave_info->sensor_name); if (rc < 0) { pr_err("%s power up failed", slave_info->sensor_name); if (is_power_off) goto FREE_POWER_OFF_SETTING; else goto FREE_POWER_SETTING; } #endif /* Update sensor name in sensor control structure */ s_ctrl->sensordata->sensor_name = slave_info->sensor_name; /* Set probe succeeded flag to 1 so that no other camera shall * probed on this slot */ s_ctrl->is_probe_succeed = 1; /* * Create /dev/videoX node, comment for now until dummy /dev/videoX * node is created and used by HAL */ rc = camera_init_v4l2(&s_ctrl->pdev->dev, &session_id); if (rc < 0) { pr_err("failed: camera_init_v4l2 rc %d", rc); if (is_power_off) goto FREE_POWER_OFF_SETTING; else goto FREE_POWER_SETTING; } s_ctrl->sensordata->sensor_info->session_id = session_id; /* Create /dev/v4l-subdevX device */ v4l2_subdev_init(&s_ctrl->msm_sd.sd, s_ctrl->sensor_v4l2_subdev_ops); snprintf(s_ctrl->msm_sd.sd.name, sizeof(s_ctrl->msm_sd.sd.name), "%s", s_ctrl->sensordata->sensor_name); v4l2_set_subdevdata(&s_ctrl->msm_sd.sd, s_ctrl->pdev); s_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; media_entity_init(&s_ctrl->msm_sd.sd.entity, 0, NULL, 0); s_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV; s_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_SENSOR; s_ctrl->msm_sd.sd.entity.name = s_ctrl->msm_sd.sd.name; s_ctrl->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x3; msm_sd_register(&s_ctrl->msm_sd); memcpy(slave_info->subdev_name, s_ctrl->msm_sd.sd.entity.name, sizeof(slave_info->subdev_name)); slave_info->is_probe_succeed = 1; slave_info->sensor_info.session_id = s_ctrl->sensordata->sensor_info->session_id; for (i = 0; i < SUB_MODULE_MAX; i++) { slave_info->sensor_info.subdev_id[i] = s_ctrl->sensordata->sensor_info->subdev_id[i]; slave_info->sensor_info.subdev_intf[i] = s_ctrl->sensordata->sensor_info->subdev_intf[i]; } 
slave_info->sensor_info.is_mount_angle_valid = s_ctrl->sensordata->sensor_info->is_mount_angle_valid; slave_info->sensor_info.sensor_mount_angle = s_ctrl->sensordata->sensor_info->sensor_mount_angle; CDBG("%s:%d sensor name %s\n", __func__, __LINE__, slave_info->sensor_info.sensor_name); CDBG("%s:%d session id %d\n", __func__, __LINE__, slave_info->sensor_info.session_id); for (i = 0; i < SUB_MODULE_MAX; i++) { /* pr_err("%s:%d subdev_id[%d] %d\n", __func__, __LINE__, i, slave_info->sensor_info.subdev_id[i]); pr_err("%s:%d additional subdev_intf[%d] %d\n", __func__, __LINE__, i, slave_info->sensor_info.subdev_intf[i]); */ } CDBG("%s:%d mount angle valid %d value %d\n", __func__, __LINE__, slave_info->sensor_info.is_mount_angle_valid, slave_info->sensor_info.sensor_mount_angle); if (copy_to_user((void __user*)setting, (void*)slave_info, sizeof(*slave_info))) { pr_err("%s:%d copy failed\n", __func__, __LINE__); rc = -EFAULT; } pr_warn("rc %d session_id %d", rc, session_id); pr_warn("%s probe succeeded", slave_info->sensor_name); /* remove this code for DFMS test */ #if 0 /* Power down */ s_ctrl->func_tbl->sensor_power_down( s_ctrl, &s_ctrl->sensordata->power_info, s_ctrl->sensor_device_type, s_ctrl->sensor_i2c_client); #endif /*COMP_EN init-set low*/ gpio_set_value_cansleep( power_info->gpio_conf->gpio_num_info->gpio_num [SENSOR_GPIO_COMP], GPIOF_OUT_INIT_LOW); return rc; FREE_POWER_OFF_SETTING: kfree(power_off_setting); FREE_POWER_SETTING: kfree(power_setting); FREE_SLAVE_INFO: kfree(slave_info); return rc; } static int32_t msm_sensor_driver_get_gpio_data( struct msm_camera_sensor_board_info *sensordata, struct device_node *of_node) { int32_t rc = 0, i = 0; struct msm_camera_gpio_conf *gconf = NULL; uint16_t *gpio_array = NULL; uint16_t gpio_array_size = 0; /* Validate input paramters */ if (!sensordata || !of_node) { pr_err("failed: invalid params sensordata %p of_node %p", sensordata, of_node); return -EINVAL; } sensordata->power_info.gpio_conf = kzalloc( 
sizeof(struct msm_camera_gpio_conf), GFP_KERNEL); if (!sensordata->power_info.gpio_conf) { pr_err("failed"); return -ENOMEM; } gconf = sensordata->power_info.gpio_conf; gpio_array_size = of_gpio_count(of_node); CDBG("gpio count %d", gpio_array_size); if (!gpio_array_size) return 0; gpio_array = kzalloc(sizeof(uint16_t) * gpio_array_size, GFP_KERNEL); if (!gpio_array) { pr_err("failed"); goto FREE_GPIO_CONF; } for (i = 0; i < gpio_array_size; i++) { gpio_array[i] = of_get_gpio(of_node, i); CDBG("gpio_array[%d] = %d", i, gpio_array[i]); } rc = msm_camera_get_dt_gpio_req_tbl(of_node, gconf, gpio_array, gpio_array_size); if (rc < 0) { pr_err("failed"); goto FREE_GPIO_CONF; } rc = msm_camera_init_gpio_pin_tbl(of_node, gconf, gpio_array, gpio_array_size); if (rc < 0) { pr_err("failed"); goto FREE_GPIO_REQ_TBL; } kfree(gpio_array); return rc; FREE_GPIO_REQ_TBL: kfree(sensordata->power_info.gpio_conf->cam_gpio_req_tbl); FREE_GPIO_CONF: kfree(sensordata->power_info.gpio_conf); kfree(gpio_array); return rc; } static int32_t msm_sensor_driver_get_dt_data(struct msm_sensor_ctrl_t *s_ctrl, struct platform_device *pdev) { int32_t rc = 0; struct msm_camera_sensor_board_info *sensordata = NULL; struct device_node *of_node = pdev->dev.of_node; s_ctrl->sensordata = kzalloc(sizeof(*sensordata), GFP_KERNEL); if (!s_ctrl->sensordata) { pr_err("failed: no memory"); return -ENOMEM; } sensordata = s_ctrl->sensordata; /* * Read cell index - this cell index will be the camera slot where * this camera will be mounted */ rc = of_property_read_u32(of_node, "cell-index", &pdev->id); if (rc < 0) { pr_err("failed: cell-index rc %d", rc); goto FREE_SENSOR_DATA; } /* Validate pdev->id */ if (pdev->id >= MAX_CAMERAS) { pr_err("failed: invalid pdev->id %d", pdev->id); rc = -EINVAL; goto FREE_SENSOR_DATA; } /* Check whether g_sctrl is already filled for this pdev id */ if (g_sctrl[pdev->id]) { pr_err("failed: sctrl already filled for id %d", pdev->id); rc = -EINVAL; goto FREE_SENSOR_DATA; } /* Read 
subdev info */ rc = msm_sensor_get_sub_module_index(of_node, &sensordata->sensor_info); if (rc < 0) { pr_err("failed"); goto FREE_SENSOR_DATA; } /* Read vreg information */ rc = msm_camera_get_dt_vreg_data(of_node, &sensordata->power_info.cam_vreg, &sensordata->power_info.num_vreg); if (rc < 0) { pr_err("failed: msm_camera_get_dt_vreg_data rc %d", rc); goto FREE_SUB_MODULE_DATA; } /* Read gpio information */ rc = msm_sensor_driver_get_gpio_data(sensordata, of_node); if (rc < 0) { pr_err("failed: msm_sensor_driver_get_gpio_data rc %d", rc); goto FREE_VREG_DATA; } /* Get CCI master */ rc = of_property_read_u32(of_node, "qcom,cci-master", &s_ctrl->cci_i2c_master); CDBG("qcom,cci-master %d, rc %d", s_ctrl->cci_i2c_master, rc); if (rc < 0) { /* Set default master 0 */ s_ctrl->cci_i2c_master = MASTER_0; rc = 0; } /* Get mount angle */ rc = of_property_read_u32(of_node, "qcom,mount-angle", &sensordata->sensor_info->sensor_mount_angle); CDBG("%s qcom,mount-angle %d, rc %d\n", __func__, sensordata->sensor_info->sensor_mount_angle, rc); if (rc < 0) { sensordata->sensor_info->is_mount_angle_valid = 0; sensordata->sensor_info->sensor_mount_angle = 0; rc = 0; } else { sensordata->sensor_info->is_mount_angle_valid = 1; } /* Get vdd-cx regulator */ /*Optional property, don't return error if absent */ of_property_read_string(of_node, "qcom,vdd-cx-name", &sensordata->misc_regulator); CDBG("qcom,misc_regulator %s", sensordata->misc_regulator); return rc; FREE_VREG_DATA: kfree(sensordata->power_info.cam_vreg); FREE_SUB_MODULE_DATA: kfree(sensordata->sensor_info); FREE_SENSOR_DATA: kfree(sensordata); return rc; } static int32_t msm_sensor_driver_parse(struct platform_device *pdev) { int32_t rc = 0; struct msm_sensor_ctrl_t *s_ctrl = NULL; CDBG("Enter\n"); /* Validate input parameters */ if (!pdev || !pdev->dev.of_node) { pr_err("failed: invalid params"); return -EINVAL; } /* Create sensor control structure */ s_ctrl = kzalloc(sizeof(*s_ctrl), GFP_KERNEL); if (!s_ctrl) { 
pr_err("failed: no memory s_ctrl %p", s_ctrl); return -ENOMEM; } /* Fill platform device */ s_ctrl->pdev = pdev; /* Allocate memory for sensor_i2c_client */ s_ctrl->sensor_i2c_client = kzalloc(sizeof(*s_ctrl->sensor_i2c_client), GFP_KERNEL); if (!s_ctrl->sensor_i2c_client) { pr_err("failed: no memory sensor_i2c_client %p", s_ctrl->sensor_i2c_client); goto FREE_SCTRL; } /* Allocate memory for mutex */ s_ctrl->msm_sensor_mutex = kzalloc(sizeof(*s_ctrl->msm_sensor_mutex), GFP_KERNEL); if (!s_ctrl->msm_sensor_mutex) { pr_err("failed: no memory msm_sensor_mutex %p", s_ctrl->msm_sensor_mutex); goto FREE_SENSOR_I2C_CLIENT; } /* Parse dt information and store in sensor control structure */ rc = msm_sensor_driver_get_dt_data(s_ctrl, pdev); if (rc < 0) { pr_err("failed: rc %d", rc); goto FREE_MUTEX; } /* Fill device in power info */ s_ctrl->sensordata->power_info.dev = &pdev->dev; /* Initialize mutex */ mutex_init(s_ctrl->msm_sensor_mutex); /* Initilize v4l2 subdev info */ s_ctrl->sensor_v4l2_subdev_info = msm_sensor_driver_subdev_info; s_ctrl->sensor_v4l2_subdev_info_size = ARRAY_SIZE(msm_sensor_driver_subdev_info); /* Initialize default parameters */ rc = msm_sensor_init_default_params(s_ctrl); if (rc < 0) { pr_err("failed: msm_sensor_init_default_params rc %d", rc); goto FREE_DT_DATA; } /* Store sensor control structure in static database */ g_sctrl[pdev->id] = s_ctrl; pr_warn("g_sctrl[%d] %p\n", pdev->id, g_sctrl[pdev->id]); return rc; FREE_DT_DATA: kfree(s_ctrl->sensordata->power_info.gpio_conf->gpio_num_info); kfree(s_ctrl->sensordata->power_info.gpio_conf->cam_gpio_req_tbl); kfree(s_ctrl->sensordata->power_info.gpio_conf); kfree(s_ctrl->sensordata->power_info.cam_vreg); kfree(s_ctrl->sensordata); FREE_MUTEX: kfree(s_ctrl->msm_sensor_mutex); FREE_SENSOR_I2C_CLIENT: kfree(s_ctrl->sensor_i2c_client); FREE_SCTRL: kfree(s_ctrl); return rc; } static int __init msm_sensor_driver_init(void) { int32_t rc = 0; pr_warn("%s : Enter", __func__); rc = 
platform_driver_probe(&msm_sensor_platform_driver, msm_sensor_driver_parse); if (!rc) pr_warn("probe success"); return rc; } static void __exit msm_sensor_driver_exit(void) { CDBG("Enter"); return; } module_init(msm_sensor_driver_init); module_exit(msm_sensor_driver_exit); MODULE_DESCRIPTION("msm_sensor_driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
loverszhaokai/gcc
gcc/testsuite/gcc.c-torture/execute/pr40386.c
71
2317
/* { dg-options "-fno-ira-share-spill-slots -Wno-shift-overflow" } */
/* Execution test for PR 40386: verify that rotate-right (ROR) and
   rotate-left (ROL) expressions computed at run time on every integer
   width agree with the same expressions folded at compile time.  */

extern void abort (void);
extern void exit (int);

#define CHAR_BIT 8

/* Open-coded rotations built from two shifts and an OR; the compiler is
   expected to recognize these as rotate instructions where available.  */
#define ROR(a,b) (((a) >> (b)) | ((a) << ((sizeof (a) * CHAR_BIT) - (b))))
#define ROL(a,b) (((a) << (b)) | ((a) >> ((sizeof (a) * CHAR_BIT) - (b))))

/* One test value per integer width.  */
#define CHAR_VALUE ((char)0xf234)
#define SHORT_VALUE ((short)0xf234)
#define INT_VALUE ((int)0xf234)
#define LONG_VALUE ((long)0xf2345678L)
#define LL_VALUE ((long long)0xf2345678abcdef0LL)

/* SHIFT2 is chosen so SHIFT1 + SHIFT2 equals the long long bit width.  */
#define SHIFT1 4
#define SHIFT2 ((sizeof (long long) * CHAR_BIT) - SHIFT1)

/* Globals keep the run-time operands out of reach of constant folding,
   so the compiled rotate code paths are actually exercised.  */
char c = CHAR_VALUE;
short s = SHORT_VALUE;
int i = INT_VALUE;
long l = LONG_VALUE;
long long ll = LL_VALUE;
int shift1 = SHIFT1;
int shift2 = SHIFT2;

/* Abort the test on the first failing comparison.  */
#define CHECK(cond) do { if (!(cond)) abort (); } while (0)

int
main ()
{
  /* Rotate right: variable shift count, then constant shift count,
     for each operand width.  */
  CHECK (ROR (c, shift1) == ROR (CHAR_VALUE, SHIFT1));
  CHECK (ROR (c, SHIFT1) == ROR (CHAR_VALUE, SHIFT1));
  CHECK (ROR (s, shift1) == ROR (SHORT_VALUE, SHIFT1));
  CHECK (ROR (s, SHIFT1) == ROR (SHORT_VALUE, SHIFT1));
  CHECK (ROR (i, shift1) == ROR (INT_VALUE, SHIFT1));
  CHECK (ROR (i, SHIFT1) == ROR (INT_VALUE, SHIFT1));
  CHECK (ROR (l, shift1) == ROR (LONG_VALUE, SHIFT1));
  CHECK (ROR (l, SHIFT1) == ROR (LONG_VALUE, SHIFT1));
  CHECK (ROR (ll, shift1) == ROR (LL_VALUE, SHIFT1));
  CHECK (ROR (ll, SHIFT1) == ROR (LL_VALUE, SHIFT1));
  CHECK (ROR (ll, shift2) == ROR (LL_VALUE, SHIFT2));
  CHECK (ROR (ll, SHIFT2) == ROR (LL_VALUE, SHIFT2));

  /* Rotate left: same pattern, plus the near-full-width count for
     long long.  */
  CHECK (ROL (c, shift1) == ROL (CHAR_VALUE, SHIFT1));
  CHECK (ROL (c, SHIFT1) == ROL (CHAR_VALUE, SHIFT1));
  CHECK (ROL (s, shift1) == ROL (SHORT_VALUE, SHIFT1));
  CHECK (ROL (s, SHIFT1) == ROL (SHORT_VALUE, SHIFT1));
  CHECK (ROL (i, shift1) == ROL (INT_VALUE, SHIFT1));
  CHECK (ROL (i, SHIFT1) == ROL (INT_VALUE, SHIFT1));
  CHECK (ROL (l, shift1) == ROL (LONG_VALUE, SHIFT1));
  CHECK (ROL (l, SHIFT1) == ROL (LONG_VALUE, SHIFT1));
  CHECK (ROL (ll, shift1) == ROL (LL_VALUE, SHIFT1));
  CHECK (ROL (ll, SHIFT1) == ROL (LL_VALUE, SHIFT1));
  CHECK (ROL (ll, shift2) == ROL (LL_VALUE, SHIFT2));
  CHECK (ROL (ll, SHIFT2) == ROL (LL_VALUE, SHIFT2));

  exit (0);
}
gpl-2.0
chidelmun/server
pcre/pcre_xclass.c
71
8231
/*************************************************
*      Perl-Compatible Regular Expressions       *
*************************************************/

/* PCRE is a library of functions to support regular expressions whose syntax
and semantics are as close as possible to those of the Perl 5 language.

                       Written by Philip Hazel
           Copyright (c) 1997-2013 University of Cambridge

-----------------------------------------------------------------------------
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

    * Neither the name of the University of Cambridge nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
-----------------------------------------------------------------------------
*/

/* This module contains an internal function that is used to match an extended
class. It is used by both pcre_exec() and pcre_def_exec(). */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "pcre_internal.h"


/*************************************************
*       Match character against an XCLASS        *
*************************************************/

/* This function is called to match a character against an extended class that
might contain values > 255 and/or Unicode properties.

An XCLASS data block starts with a flag byte (XCL_NOT / XCL_MAP /
XCL_HASPROP), optionally followed by a 32-byte bitmap for characters < 256,
followed by a list of XCL_SINGLE / XCL_RANGE / XCL_PROP / XCL_NOTPROP items
terminated by XCL_END.

Arguments:
  c           the character (a code point, may be > 255 in UTF/16/32 modes)
  data        points to the flag byte of the XCLASS data
  utf         TRUE when the subject is in UTF mode (controls how list
              entries after the bitmap are decoded)

Returns:      TRUE if character matches, else FALSE
*/

BOOL
PRIV(xclass)(pcre_uint32 c, const pcre_uchar *data, BOOL utf)
{
pcre_uchar t;
/* The whole class may be negated; every "matched" result below is therefore
reported through !negated, and falling off the end reports negated. */
BOOL negated = (*data & XCL_NOT) != 0;

(void)utf;
#ifdef COMPILE_PCRE8
/* In 8 bit mode, this must always be TRUE. Help the compiler to know that. */
utf = TRUE;
#endif

/* Character values < 256 are matched against a bitmap, if one is present. If
not, we still carry on, because there may be ranges that start below 256 in the
additional data. */

if (c < 256)
  {
  if ((*data & XCL_HASPROP) == 0)
    {
    /* No property tests in this class: for a low character the bitmap (or
    its absence) is the final answer. */
    if ((*data & XCL_MAP) == 0) return negated;
    return (((pcre_uint8 *)(data + 1))[c/8] & (1 << (c&7))) != 0;
    }
  if ((*data & XCL_MAP) != 0 &&
    (((pcre_uint8 *)(data + 1))[c/8] & (1 << (c&7))) != 0)
    return !negated; /* char found */
  }

/* First skip the bit map if present. Then match against the list of Unicode
properties or large chars or ranges that end with a large char. We won't ever
encounter XCL_PROP or XCL_NOTPROP when UCP support is not compiled. */

if ((*data++ & XCL_MAP) != 0) data += 32 / sizeof(pcre_uchar);

while ((t = *data++) != XCL_END)
  {
  pcre_uint32 x, y;
  if (t == XCL_SINGLE)
    {
#ifdef SUPPORT_UTF
    if (utf)
      {
      GETCHARINC(x, data); /* macro generates multiple statements */
      }
    else
#endif
      x = *data++;
    if (c == x) return !negated;
    }
  else if (t == XCL_RANGE)
    {
#ifdef SUPPORT_UTF
    if (utf)
      {
      GETCHARINC(x, data); /* macro generates multiple statements */
      GETCHARINC(y, data); /* macro generates multiple statements */
      }
    else
#endif
      {
      x = *data++;
      y = *data++;
      }
    if (c >= x && c <= y) return !negated;
    }

#ifdef SUPPORT_UCP
  else   /* XCL_PROP & XCL_NOTPROP */
    {
    const ucd_record *prop = GET_UCD(c);
    /* isprop flags whether this item asserts the property (XCL_PROP) or its
    negation (XCL_NOTPROP); each test below compares its boolean result
    against isprop so one code path serves both. */
    BOOL isprop = t == XCL_PROP;

    /* *data is the property type code; data[1] (where used) is the property
    value. Both are skipped by the "data += 2" at the bottom. */
    switch(*data)
      {
      case PT_ANY:
      if (isprop) return !negated;
      break;

      /* Lu, Ll, or Lt (letters with case information). */
      case PT_LAMP:
      if ((prop->chartype == ucp_Lu || prop->chartype == ucp_Ll ||
           prop->chartype == ucp_Lt) == isprop) return !negated;
      break;

      /* General category, e.g. \p{L}. */
      case PT_GC:
      if ((data[1] == PRIV(ucp_gentype)[prop->chartype]) == isprop)
        return !negated;
      break;

      /* Particular category, e.g. \p{Lu}. */
      case PT_PC:
      if ((data[1] == prop->chartype) == isprop) return !negated;
      break;

      /* Script, e.g. \p{Greek}. */
      case PT_SC:
      if ((data[1] == prop->script) == isprop) return !negated;
      break;

      /* Alphanumeric: letter or number. */
      case PT_ALNUM:
      if ((PRIV(ucp_gentype)[prop->chartype] == ucp_L ||
           PRIV(ucp_gentype)[prop->chartype] == ucp_N) == isprop)
        return !negated;
      break;

      /* Perl space used to exclude VT, but from Perl 5.18 it is included,
      which means that Perl space and POSIX space are now identical. PCRE
      was changed at release 8.34. */

      case PT_SPACE:    /* Perl space */
      case PT_PXSPACE:  /* POSIX space */
      switch(c)
        {
        HSPACE_CASES:
        VSPACE_CASES:
        if (isprop) return !negated;
        break;

        default:
        if ((PRIV(ucp_gentype)[prop->chartype] == ucp_Z) == isprop)
          return !negated;
        break;
        }
      break;

      /* Word character: letter, number, or underscore. */
      case PT_WORD:
      if ((PRIV(ucp_gentype)[prop->chartype] == ucp_L ||
           PRIV(ucp_gentype)[prop->chartype] == ucp_N || c == CHAR_UNDERSCORE)
             == isprop)
        return !negated;
      break;

      /* Characters allowed in \N{U+...} universal character names: $, @,
      and grave accent below 0xa0, and everything except surrogates above. */
      case PT_UCNC:
      if (c < 0xa0)
        {
        if ((c == CHAR_DOLLAR_SIGN || c == CHAR_COMMERCIAL_AT ||
             c == CHAR_GRAVE_ACCENT) == isprop)
          return !negated;
        }
      else
        {
        if ((c < 0xd800 || c > 0xdfff) == isprop)
          return !negated;
        }
      break;

      /* The following three properties can occur only in an XCLASS, as there
      is no \p or \P coding for them. */

      /* Graphic character. Implement this as not Z (space or separator) and
      not C (other), except for Cf (format) with a few exceptions. This seems
      to be what Perl does. The exceptional characters are:

      U+061C           Arabic Letter Mark
      U+180E           Mongolian Vowel Separator
      U+2066 - U+2069  Various "isolate"s
      */

      case PT_PXGRAPH:
      if ((PRIV(ucp_gentype)[prop->chartype] != ucp_Z &&
            (PRIV(ucp_gentype)[prop->chartype] != ucp_C ||
              (prop->chartype == ucp_Cf &&
                c != 0x061c && c != 0x180e && (c < 0x2066 || c > 0x2069))
         )) == isprop)
        return !negated;
      break;

      /* Printable character: same as graphic, with the addition of Zs, i.e.
      not Zl and not Zp, and U+180E. */

      case PT_PXPRINT:
      if ((prop->chartype != ucp_Zl &&
           prop->chartype != ucp_Zp &&
            (PRIV(ucp_gentype)[prop->chartype] != ucp_C ||
              (prop->chartype == ucp_Cf &&
                c != 0x061c && (c < 0x2066 || c > 0x2069))
         )) == isprop)
        return !negated;
      break;

      /* Punctuation: all Unicode punctuation, plus ASCII characters that
      Unicode treats as symbols rather than punctuation, for Perl
      compatibility (these are $+<=>^`|~). */

      case PT_PXPUNCT:
      if ((PRIV(ucp_gentype)[prop->chartype] == ucp_P ||
            (c < 128 && PRIV(ucp_gentype)[prop->chartype] == ucp_S)) == isprop)
        return !negated;
      break;

      /* This should never occur, but compilers may mutter if there is no
      default. */

      default:
      return FALSE;
      }

    data += 2;
    }
#endif  /* SUPPORT_UCP */
  }

return negated;   /* char did not match */
}

/* End of pcre_xclass.c */
gpl-2.0
wuby986/Sixty-4Stroke-kernel
drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
2119
15155
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: Ben Skeggs
 */

#include <core/client.h>
#include <core/engctx.h>
#include <core/ramht.h>
#include <core/class.h>
#include <core/math.h>

#include <subdev/timer.h>
#include <subdev/bar.h>

#include <engine/dmaobj.h>
#include <engine/fifo.h>

#include "nv50.h"

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

/* Rebuild the active-channel playlist into the inactive buffer of the
 * double-buffered pair, then point the hardware at it.  A channel is
 * considered active when bit 31 of its 0x002600 slot is set.  Caller must
 * hold the subdev mutex (see nv50_fifo_playlist_update()).
 */
static void
nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv)
{
	struct nouveau_bar *bar = nouveau_bar(priv);
	struct nouveau_gpuobj *cur;
	int i, p;

	/* flip to the other playlist buffer before filling it */
	cur = priv->playlist[priv->cur_playlist];
	priv->cur_playlist = !priv->cur_playlist;

	for (i = priv->base.min, p = 0; i < priv->base.max; i++) {
		if (nv_rd32(priv, 0x002600 + (i * 4)) & 0x80000000)
			nv_wo32(cur, p++ * 4, i);
	}

	/* make instmem writes visible before telling PFIFO about them */
	bar->flush(bar);

	nv_wr32(priv, 0x0032f4, cur->addr >> 12);
	nv_wr32(priv, 0x0032ec, p);
	nv_wr32(priv, 0x002500, 0x00000101);
}

/* Serialised wrapper around nv50_fifo_playlist_update_locked(). */
void
nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
{
	mutex_lock(&nv_subdev(priv)->mutex);
	nv50_fifo_playlist_update_locked(priv);
	mutex_unlock(&nv_subdev(priv)->mutex);
}

/* Write an engine context entry (base/limit of the engine's ctx object)
 * into the channel's "eng" table.  Only GR and MPEG have slots here; SW
 * needs no hardware state.  Returns -EINVAL for any other engine.
 */
static int
nv50_fifo_context_attach(struct nouveau_object *parent,
			 struct nouveau_object *object)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nv50_fifo_base *base = (void *)parent->parent;
	struct nouveau_gpuobj *ectx = (void *)object;
	u64 limit = ectx->addr + ectx->size - 1;
	u64 start = ectx->addr;
	u32 addr;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW  : return 0;
	case NVDEV_ENGINE_GR  : addr = 0x0000; break;
	case NVDEV_ENGINE_MPEG: addr = 0x0060; break;
	default:
		return -EINVAL;
	}

	nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	nv_wo32(base->eng, addr + 0x00, 0x00190000);
	nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
	nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
	nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
					upper_32_bits(start));
	nv_wo32(base->eng, addr + 0x10, 0x00000000);
	nv_wo32(base->eng, addr + 0x14, 0x00000000);
	bar->flush(bar);
	return 0;
}

/* Kick the channel's engine context off the hardware and, on success,
 * clear its slot in the "eng" table.  A timeout is only fatal (-EBUSY)
 * on suspend; otherwise we carry on regardless.
 */
static int
nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
			 struct nouveau_object *object)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nv50_fifo_priv *priv = (void *)parent->engine;
	struct nv50_fifo_base *base = (void *)parent->parent;
	struct nv50_fifo_chan *chan = (void *)parent;
	u32 addr, me;
	int ret = 0;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW  : return 0;
	case NVDEV_ENGINE_GR  : addr = 0x0000; break;
	case NVDEV_ENGINE_MPEG: addr = 0x0060; break;
	default:
		return -EINVAL;
	}

	/* HW bug workaround:
	 *
	 * PFIFO will hang forever if the connected engines don't report
	 * that they've processed the context switch request.
	 *
	 * In order for the kickoff to work, we need to ensure all the
	 * connected engines are in a state where they can answer.
	 *
	 * Newer chipsets don't seem to suffer from this issue, and well,
	 * there's also a "ignore these engines" bitmask reg we can use
	 * if we hit the issue there..
	 */
	me = nv_mask(priv, 0x00b860, 0x00000001, 0x00000001);

	/* do the kickoff... */
	nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
	if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
		nv_error(priv, "channel %d [%s] unload timeout\n",
			 chan->base.chid, nouveau_client_name(chan));
		if (suspend)
			ret = -EBUSY;
	}

	/* restore whatever 0x00b860 held before the workaround */
	nv_wr32(priv, 0x00b860, me);

	if (ret == 0) {
		nv_wo32(base->eng, addr + 0x00, 0x00000000);
		nv_wo32(base->eng, addr + 0x04, 0x00000000);
		nv_wo32(base->eng, addr + 0x08, 0x00000000);
		nv_wo32(base->eng, addr + 0x0c, 0x00000000);
		nv_wo32(base->eng, addr + 0x10, 0x00000000);
		nv_wo32(base->eng, addr + 0x14, 0x00000000);
		bar->flush(bar);
	}

	return ret;
}

/* Insert an object handle into the channel's RAMHT, tagging the context
 * value with an engine identifier in the high bits.
 */
static int
nv50_fifo_object_attach(struct nouveau_object *parent,
			struct nouveau_object *object, u32 handle)
{
	struct nv50_fifo_chan *chan = (void *)parent;
	u32 context;

	if (nv_iclass(object, NV_GPUOBJ_CLASS))
		context = nv_gpuobj(object)->node->offset >> 4;
	else
		context = 0x00000004; /* just non-zero */

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_DMAOBJ:
	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
	case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
	case NVDEV_ENGINE_MPEG  : context |= 0x00200000; break;
	default:
		return -EINVAL;
	}

	return nouveau_ramht_insert(chan->ramht, 0, handle, context);
}

/* Drop a previously attached object from the channel's RAMHT. */
void
nv50_fifo_object_detach(struct nouveau_object *parent, int cookie)
{
	struct nv50_fifo_chan *chan = (void *)parent;
	nouveau_ramht_remove(chan->ramht, cookie);
}

/* Constructor for DMA-mode channels: create the channel, hook up the
 * attach/detach callbacks, allocate its RAMHT, and fill in the RAMFC
 * (per-channel fifo context) fields for DMA-style command submission.
 */
static int
nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass, void *data, u32 size,
			struct nouveau_object **pobject)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nv50_fifo_base *base = (void *)parent;
	struct nv50_fifo_chan *chan;
	struct nv03_channel_dma_class *args = data;
	int ret;

	if (size < sizeof(*args))
		return -EINVAL;

	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
					  0x2000, args->pushbuf,
					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
					  (1ULL << NVDEV_ENGINE_SW) |
					  (1ULL << NVDEV_ENGINE_GR) |
					  (1ULL << NVDEV_ENGINE_MPEG), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
	nv_parent(chan)->object_detach = nv50_fifo_object_detach;

	ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
				&chan->ramht);
	if (ret)
		return ret;

	/* RAMFC setup; register meanings follow the hardware layout,
	 * magic constants are as inherited from the original driver.
	 */
	nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset));
	nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset));
	nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset));
	nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset));
	nv_wo32(base->ramfc, 0x3c, 0x003f6078);
	nv_wo32(base->ramfc, 0x44, 0x01003fff);
	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
	nv_wo32(base->ramfc, 0x4c, 0xffffffff);
	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
	nv_wo32(base->ramfc, 0x78, 0x00000000);
	nv_wo32(base->ramfc, 0x7c, 0x30000001);
	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
				   (4 << 24) /* SEARCH_FULL */ |
				   (chan->ramht->base.node->offset >> 4));
	bar->flush(bar);
	return 0;
}

/* Constructor for indirect (GPFIFO) channels: same skeleton as the DMA
 * variant, but RAMFC is programmed with the ring-buffer offset/length
 * (ilength stored as log2 of the entry count) instead of a DMA offset.
 */
static int
nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass, void *data, u32 size,
			struct nouveau_object **pobject)
{
	struct nv50_channel_ind_class *args = data;
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nv50_fifo_base *base = (void *)parent;
	struct nv50_fifo_chan *chan;
	u64 ioffset, ilength;
	int ret;

	if (size < sizeof(*args))
		return -EINVAL;

	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
					  0x2000, args->pushbuf,
					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
					  (1ULL << NVDEV_ENGINE_SW) |
					  (1ULL << NVDEV_ENGINE_GR) |
					  (1ULL << NVDEV_ENGINE_MPEG), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
	nv_parent(chan)->object_detach = nv50_fifo_object_detach;

	ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
				&chan->ramht);
	if (ret)
		return ret;

	ioffset = args->ioffset;
	ilength = log2i(args->ilength / 8);

	nv_wo32(base->ramfc, 0x3c, 0x403f6078);
	nv_wo32(base->ramfc, 0x44, 0x01003fff);
	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
	nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
	nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
	nv_wo32(base->ramfc, 0x78, 0x00000000);
	nv_wo32(base->ramfc, 0x7c, 0x30000001);
	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
				   (4 << 24) /* SEARCH_FULL */ |
				   (chan->ramht->base.node->offset >> 4));
	bar->flush(bar);
	return 0;
}

/* Destructor shared by both channel types: drop the RAMHT reference
 * then destroy the base channel.
 */
void
nv50_fifo_chan_dtor(struct nouveau_object *object)
{
	struct nv50_fifo_chan *chan = (void *)object;
	nouveau_ramht_ref(NULL, &chan->ramht);
	nouveau_fifo_channel_destroy(&chan->base);
}

/* Enable the channel: publish its RAMFC address (with the enable bit)
 * in the per-channel table and regenerate the playlist.
 */
static int
nv50_fifo_chan_init(struct nouveau_object *object)
{
	struct nv50_fifo_priv *priv = (void *)object->engine;
	struct nv50_fifo_base *base = (void *)object->parent;
	struct nv50_fifo_chan *chan = (void *)object;
	struct nouveau_gpuobj *ramfc = base->ramfc;
	u32 chid = chan->base.chid;
	int ret;

	ret = nouveau_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
	nv50_fifo_playlist_update(priv);
	return 0;
}

/* Disable the channel and let the hardware unload its context. */
int
nv50_fifo_chan_fini(struct nouveau_object *object, bool suspend)
{
	struct nv50_fifo_priv *priv = (void *)object->engine;
	struct nv50_fifo_chan *chan = (void *)object;
	u32 chid = chan->base.chid;

	/* remove channel from playlist, fifo will unload context */
	nv_mask(priv, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
	nv50_fifo_playlist_update(priv);
	nv_wr32(priv, 0x002600 + (chid * 4), 0x00000000);

	return nouveau_fifo_channel_fini(&chan->base, suspend);
}

static struct nouveau_ofuncs
nv50_fifo_ofuncs_dma = {
	.ctor = nv50_fifo_chan_ctor_dma,
	.dtor = nv50_fifo_chan_dtor,
	.init = nv50_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
	.rd32 = _nouveau_fifo_channel_rd32,
	.wr32 = _nouveau_fifo_channel_wr32,
};

static struct nouveau_ofuncs
nv50_fifo_ofuncs_ind = {
	.ctor = nv50_fifo_chan_ctor_ind,
	.dtor = nv50_fifo_chan_dtor,
	.init = nv50_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
	.rd32 = _nouveau_fifo_channel_rd32,
	.wr32 = _nouveau_fifo_channel_wr32,
};

/* Channel classes exposed to userspace: DMA and indirect variants. */
static struct nouveau_oclass
nv50_fifo_sclass[] = {
	{ NV50_CHANNEL_DMA_CLASS, &nv50_fifo_ofuncs_dma },
	{ NV50_CHANNEL_IND_CLASS, &nv50_fifo_ofuncs_ind },
	{}
};

/*******************************************************************************
 * FIFO context - basically just the instmem reserved for the channel
 ******************************************************************************/

/* Allocate the per-channel instance memory: the context container, the
 * RAMFC and engine-context gpuobjs, the page directory, and a VM
 * reference.  Intermediate failures are cleaned up by the dtor via the
 * object framework.
 */
static int
nv50_fifo_context_ctor(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, void *data, u32 size,
		       struct nouveau_object **pobject)
{
	struct nv50_fifo_base *base;
	int ret;

	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
					  0x1000, NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0200,
				 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x1200, 0,
				 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0,
				 0, &base->pgd);
	if (ret)
		return ret;

	ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}

/* Release everything nv50_fifo_context_ctor() allocated (reverse order). */
void
nv50_fifo_context_dtor(struct nouveau_object *object)
{
	struct nv50_fifo_base *base = (void *)object;
	nouveau_vm_ref(NULL, &base->vm, base->pgd);
	nouveau_gpuobj_ref(NULL, &base->pgd);
	nouveau_gpuobj_ref(NULL, &base->eng);
	nouveau_gpuobj_ref(NULL, &base->ramfc);
	nouveau_gpuobj_ref(NULL, &base->cache);
	nouveau_fifo_context_destroy(&base->base);
}

static struct nouveau_oclass
nv50_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_fifo_context_ctor,
		.dtor = nv50_fifo_context_dtor,
		.init = _nouveau_fifo_context_init,
		.fini = _nouveau_fifo_context_fini,
		.rd32 = _nouveau_fifo_context_rd32,
		.wr32 = _nouveau_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

/* Engine constructor: channels 1..127, two playlist buffers (128 slots
 * each), and the nv04 interrupt handler.
 */
static int
nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv50_fifo_priv *priv;
	int ret;

	ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
				 &priv->playlist[0]);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
				 &priv->playlist[1]);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00000100;
	nv_subdev(priv)->intr = nv04_fifo_intr;
	nv_engine(priv)->cclass = &nv50_fifo_cclass;
	nv_engine(priv)->sclass = nv50_fifo_sclass;
	return 0;
}

/* Engine destructor: drop both playlist buffers, then the base engine. */
void
nv50_fifo_dtor(struct nouveau_object *object)
{
	struct nv50_fifo_priv *priv = (void *)object;

	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);

	nouveau_fifo_destroy(&priv->base);
}

/* Bring PFIFO up: reset the unit, program initial state, clear and ack
 * interrupts, zero every channel slot, build an (empty) playlist, and
 * finally enable operation.
 */
int
nv50_fifo_init(struct nouveau_object *object)
{
	struct nv50_fifo_priv *priv = (void *)object;
	int ret, i;

	ret = nouveau_fifo_init(&priv->base);
	if (ret)
		return ret;

	/* pulse the PFIFO enable bit in the master control register */
	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
	nv_wr32(priv, 0x00250c, 0x6f3cfc34);
	nv_wr32(priv, 0x002044, 0x01003fff);

	/* ack pending interrupts, then set the interrupt enable mask */
	nv_wr32(priv, 0x002100, 0xffffffff);
	nv_wr32(priv, 0x002140, 0xbfffffff);

	for (i = 0; i < 128; i++)
		nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
	nv50_fifo_playlist_update_locked(priv);

	nv_wr32(priv, 0x003200, 0x00000001);
	nv_wr32(priv, 0x003250, 0x00000001);
	nv_wr32(priv, 0x002500, 0x00000001);
	return 0;
}

struct nouveau_oclass
nv50_fifo_oclass = {
	.handle = NV_ENGINE(FIFO, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_fifo_ctor,
		.dtor = nv50_fifo_dtor,
		.init = nv50_fifo_init,
		.fini = _nouveau_fifo_fini,
	},
};
gpl-2.0
wang701/nexus_9_flounder_kernel_src
drivers/gpu/drm/nouveau/core/subdev/timer/base.c
2375
2489
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include "subdev/timer.h" bool nouveau_timer_wait_eq(void *obj, u64 nsec, u32 addr, u32 mask, u32 data) { struct nouveau_timer *ptimer = nouveau_timer(obj); u64 time0; time0 = ptimer->read(ptimer); do { if (nv_iclass(obj, NV_SUBDEV_CLASS)) { if ((nv_rd32(obj, addr) & mask) == data) return true; } else { if ((nv_ro32(obj, addr) & mask) == data) return true; } } while (ptimer->read(ptimer) - time0 < nsec); return false; } bool nouveau_timer_wait_ne(void *obj, u64 nsec, u32 addr, u32 mask, u32 data) { struct nouveau_timer *ptimer = nouveau_timer(obj); u64 time0; time0 = ptimer->read(ptimer); do { if (nv_iclass(obj, NV_SUBDEV_CLASS)) { if ((nv_rd32(obj, addr) & mask) != data) return true; } else { if ((nv_ro32(obj, addr) & mask) != data) return true; } } while (ptimer->read(ptimer) - time0 < nsec); return false; } bool nouveau_timer_wait_cb(void *obj, u64 nsec, bool (*func)(void *), void *data) { struct nouveau_timer *ptimer = nouveau_timer(obj); u64 time0; time0 = ptimer->read(ptimer); do { if (func(data) == true) return true; } while (ptimer->read(ptimer) - time0 < nsec); return false; } void nouveau_timer_alarm(void *obj, u32 nsec, struct nouveau_alarm *alarm) { struct nouveau_timer *ptimer = nouveau_timer(obj); ptimer->alarm(ptimer, nsec, alarm); }
gpl-2.0
Clust3r/P8000-Kernel
tools/perf/builtin-list.c
2375
1508
/* * builtin-list.c * * Builtin list command: list all event types * * Copyright (C) 2009, Thomas Gleixner <tglx@linutronix.de> * Copyright (C) 2008-2009, Red Hat Inc, Ingo Molnar <mingo@redhat.com> * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> */ #include "builtin.h" #include "perf.h" #include "util/parse-events.h" #include "util/cache.h" int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused) { setup_pager(); if (argc == 1) print_events(NULL, false); else { int i; for (i = 1; i < argc; ++i) { if (i > 2) putchar('\n'); if (strncmp(argv[i], "tracepoint", 10) == 0) print_tracepoint_events(NULL, NULL, false); else if (strcmp(argv[i], "hw") == 0 || strcmp(argv[i], "hardware") == 0) print_events_type(PERF_TYPE_HARDWARE); else if (strcmp(argv[i], "sw") == 0 || strcmp(argv[i], "software") == 0) print_events_type(PERF_TYPE_SOFTWARE); else if (strcmp(argv[i], "cache") == 0 || strcmp(argv[i], "hwcache") == 0) print_hwcache_events(NULL, false); else if (strcmp(argv[i], "--raw-dump") == 0) print_events(NULL, true); else { char *sep = strchr(argv[i], ':'), *s; int sep_idx; if (sep == NULL) { print_events(argv[i], false); continue; } sep_idx = sep - argv[i]; s = strdup(argv[i]); if (s == NULL) return -1; s[sep_idx] = '\0'; print_tracepoint_events(s, s + sep_idx + 1, false); free(s); } } } return 0; }
gpl-2.0
ivanmeler/android_kernel_samsung_n7100
tools/perf/util/header.c
2375
28913
#define _FILE_OFFSET_BITS 64 #include <sys/types.h> #include <byteswap.h> #include <unistd.h> #include <stdio.h> #include <stdlib.h> #include <linux/list.h> #include <linux/kernel.h> #include "evlist.h" #include "evsel.h" #include "util.h" #include "header.h" #include "../perf.h" #include "trace-event.h" #include "session.h" #include "symbol.h" #include "debug.h" static bool no_buildid_cache = false; static int event_count; static struct perf_trace_event_type *events; int perf_header__push_event(u64 id, const char *name) { if (strlen(name) > MAX_EVENT_NAME) pr_warning("Event %s will be truncated\n", name); if (!events) { events = malloc(sizeof(struct perf_trace_event_type)); if (events == NULL) return -ENOMEM; } else { struct perf_trace_event_type *nevents; nevents = realloc(events, (event_count + 1) * sizeof(*events)); if (nevents == NULL) return -ENOMEM; events = nevents; } memset(&events[event_count], 0, sizeof(struct perf_trace_event_type)); events[event_count].event_id = id; strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1); event_count++; return 0; } char *perf_header__find_event(u64 id) { int i; for (i = 0 ; i < event_count; i++) { if (events[i].event_id == id) return events[i].name; } return NULL; } static const char *__perf_magic = "PERFFILE"; #define PERF_MAGIC (*(u64 *)__perf_magic) struct perf_file_attr { struct perf_event_attr attr; struct perf_file_section ids; }; void perf_header__set_feat(struct perf_header *header, int feat) { set_bit(feat, header->adds_features); } void perf_header__clear_feat(struct perf_header *header, int feat) { clear_bit(feat, header->adds_features); } bool perf_header__has_feat(const struct perf_header *header, int feat) { return test_bit(feat, header->adds_features); } static int do_write(int fd, const void *buf, size_t size) { while (size) { int ret = write(fd, buf, size); if (ret < 0) return -errno; size -= ret; buf += ret; } return 0; } #define NAME_ALIGN 64 static int write_padded(int fd, const void *bf, 
size_t count, size_t count_aligned) { static const char zero_buf[NAME_ALIGN]; int err = do_write(fd, bf, count); if (!err) err = do_write(fd, zero_buf, count_aligned - count); return err; } #define dsos__for_each_with_build_id(pos, head) \ list_for_each_entry(pos, head, node) \ if (!pos->has_build_id) \ continue; \ else static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, u16 misc, int fd) { struct dso *pos; dsos__for_each_with_build_id(pos, head) { int err; struct build_id_event b; size_t len; if (!pos->hit) continue; len = pos->long_name_len + 1; len = ALIGN(len, NAME_ALIGN); memset(&b, 0, sizeof(b)); memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id)); b.pid = pid; b.header.misc = misc; b.header.size = sizeof(b) + len; err = do_write(fd, &b, sizeof(b)); if (err < 0) return err; err = write_padded(fd, pos->long_name, pos->long_name_len + 1, len); if (err < 0) return err; } return 0; } static int machine__write_buildid_table(struct machine *machine, int fd) { int err; u16 kmisc = PERF_RECORD_MISC_KERNEL, umisc = PERF_RECORD_MISC_USER; if (!machine__is_host(machine)) { kmisc = PERF_RECORD_MISC_GUEST_KERNEL; umisc = PERF_RECORD_MISC_GUEST_USER; } err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid, kmisc, fd); if (err == 0) err = __dsos__write_buildid_table(&machine->user_dsos, machine->pid, umisc, fd); return err; } static int dsos__write_buildid_table(struct perf_header *header, int fd) { struct perf_session *session = container_of(header, struct perf_session, header); struct rb_node *nd; int err = machine__write_buildid_table(&session->host_machine, fd); if (err) return err; for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); err = machine__write_buildid_table(pos, fd); if (err) break; } return err; } int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, const char *name, bool is_kallsyms) { const size_t size = PATH_MAX; char 
*realname, *filename = malloc(size), *linkname = malloc(size), *targetname; int len, err = -1; if (is_kallsyms) { if (symbol_conf.kptr_restrict) { pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n"); return 0; } realname = (char *)name; } else realname = realpath(name, NULL); if (realname == NULL || filename == NULL || linkname == NULL) goto out_free; len = snprintf(filename, size, "%s%s%s", debugdir, is_kallsyms ? "/" : "", realname); if (mkdir_p(filename, 0755)) goto out_free; snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id); if (access(filename, F_OK)) { if (is_kallsyms) { if (copyfile("/proc/kallsyms", filename)) goto out_free; } else if (link(realname, filename) && copyfile(name, filename)) goto out_free; } len = snprintf(linkname, size, "%s/.build-id/%.2s", debugdir, sbuild_id); if (access(linkname, X_OK) && mkdir_p(linkname, 0755)) goto out_free; snprintf(linkname + len, size - len, "/%s", sbuild_id + 2); targetname = filename + strlen(debugdir) - 5; memcpy(targetname, "../..", 5); if (symlink(targetname, linkname) == 0) err = 0; out_free: if (!is_kallsyms) free(realname); free(filename); free(linkname); return err; } static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size, const char *name, const char *debugdir, bool is_kallsyms) { char sbuild_id[BUILD_ID_SIZE * 2 + 1]; build_id__sprintf(build_id, build_id_size, sbuild_id); return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms); } int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir) { const size_t size = PATH_MAX; char *filename = malloc(size), *linkname = malloc(size); int err = -1; if (filename == NULL || linkname == NULL) goto out_free; snprintf(linkname, size, "%s/.build-id/%.2s/%s", debugdir, sbuild_id, sbuild_id + 2); if (access(linkname, F_OK)) goto out_free; if (readlink(linkname, filename, size) < 0) goto out_free; if (unlink(linkname)) goto out_free; /* * Since the link is relative, we must make it absolute: */ 
snprintf(linkname, size, "%s/.build-id/%.2s/%s", debugdir, sbuild_id, filename); if (unlink(linkname)) goto out_free; err = 0; out_free: free(filename); free(linkname); return err; } static int dso__cache_build_id(struct dso *dso, const char *debugdir) { bool is_kallsyms = dso->kernel && dso->long_name[0] != '/'; return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), dso->long_name, debugdir, is_kallsyms); } static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) { struct dso *pos; int err = 0; dsos__for_each_with_build_id(pos, head) if (dso__cache_build_id(pos, debugdir)) err = -1; return err; } static int machine__cache_build_ids(struct machine *machine, const char *debugdir) { int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir); ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir); return ret; } static int perf_session__cache_build_ids(struct perf_session *session) { struct rb_node *nd; int ret; char debugdir[PATH_MAX]; snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir); if (mkdir(debugdir, 0755) != 0 && errno != EEXIST) return -1; ret = machine__cache_build_ids(&session->host_machine, debugdir); for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); ret |= machine__cache_build_ids(pos, debugdir); } return ret ? 
-1 : 0; } static bool machine__read_build_ids(struct machine *machine, bool with_hits) { bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits); ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits); return ret; } static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits) { struct rb_node *nd; bool ret = machine__read_build_ids(&session->host_machine, with_hits); for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); ret |= machine__read_build_ids(pos, with_hits); } return ret; } static int perf_header__adds_write(struct perf_header *header, struct perf_evlist *evlist, int fd) { int nr_sections; struct perf_session *session; struct perf_file_section *feat_sec; int sec_size; u64 sec_start; int idx = 0, err; session = container_of(header, struct perf_session, header); if (perf_header__has_feat(header, HEADER_BUILD_ID && !perf_session__read_build_ids(session, true))) perf_header__clear_feat(header, HEADER_BUILD_ID); nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); if (!nr_sections) return 0; feat_sec = calloc(sizeof(*feat_sec), nr_sections); if (feat_sec == NULL) return -ENOMEM; sec_size = sizeof(*feat_sec) * nr_sections; sec_start = header->data_offset + header->data_size; lseek(fd, sec_start + sec_size, SEEK_SET); if (perf_header__has_feat(header, HEADER_TRACE_INFO)) { struct perf_file_section *trace_sec; trace_sec = &feat_sec[idx++]; /* Write trace info */ trace_sec->offset = lseek(fd, 0, SEEK_CUR); read_tracing_data(fd, &evlist->entries); trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset; } if (perf_header__has_feat(header, HEADER_BUILD_ID)) { struct perf_file_section *buildid_sec; buildid_sec = &feat_sec[idx++]; /* Write build-ids */ buildid_sec->offset = lseek(fd, 0, SEEK_CUR); err = dsos__write_buildid_table(header, fd); if (err < 0) { pr_debug("failed to write buildid table\n"); goto out_free; } 
buildid_sec->size = lseek(fd, 0, SEEK_CUR) - buildid_sec->offset; if (!no_buildid_cache) perf_session__cache_build_ids(session); } lseek(fd, sec_start, SEEK_SET); err = do_write(fd, feat_sec, sec_size); if (err < 0) pr_debug("failed to write feature section\n"); out_free: free(feat_sec); return err; } int perf_header__write_pipe(int fd) { struct perf_pipe_file_header f_header; int err; f_header = (struct perf_pipe_file_header){ .magic = PERF_MAGIC, .size = sizeof(f_header), }; err = do_write(fd, &f_header, sizeof(f_header)); if (err < 0) { pr_debug("failed to write perf pipe header\n"); return err; } return 0; } int perf_session__write_header(struct perf_session *session, struct perf_evlist *evlist, int fd, bool at_exit) { struct perf_file_header f_header; struct perf_file_attr f_attr; struct perf_header *header = &session->header; struct perf_evsel *attr, *pair = NULL; int err; lseek(fd, sizeof(f_header), SEEK_SET); if (session->evlist != evlist) pair = list_entry(session->evlist->entries.next, struct perf_evsel, node); list_for_each_entry(attr, &evlist->entries, node) { attr->id_offset = lseek(fd, 0, SEEK_CUR); err = do_write(fd, attr->id, attr->ids * sizeof(u64)); if (err < 0) { out_err_write: pr_debug("failed to write perf header\n"); return err; } if (session->evlist != evlist) { err = do_write(fd, pair->id, pair->ids * sizeof(u64)); if (err < 0) goto out_err_write; attr->ids += pair->ids; pair = list_entry(pair->node.next, struct perf_evsel, node); } } header->attr_offset = lseek(fd, 0, SEEK_CUR); list_for_each_entry(attr, &evlist->entries, node) { f_attr = (struct perf_file_attr){ .attr = attr->attr, .ids = { .offset = attr->id_offset, .size = attr->ids * sizeof(u64), } }; err = do_write(fd, &f_attr, sizeof(f_attr)); if (err < 0) { pr_debug("failed to write perf header attribute\n"); return err; } } header->event_offset = lseek(fd, 0, SEEK_CUR); header->event_size = event_count * sizeof(struct perf_trace_event_type); if (events) { err = do_write(fd, events, 
header->event_size); if (err < 0) { pr_debug("failed to write perf header events\n"); return err; } } header->data_offset = lseek(fd, 0, SEEK_CUR); if (at_exit) { err = perf_header__adds_write(header, evlist, fd); if (err < 0) return err; } f_header = (struct perf_file_header){ .magic = PERF_MAGIC, .size = sizeof(f_header), .attr_size = sizeof(f_attr), .attrs = { .offset = header->attr_offset, .size = evlist->nr_entries * sizeof(f_attr), }, .data = { .offset = header->data_offset, .size = header->data_size, }, .event_types = { .offset = header->event_offset, .size = header->event_size, }, }; memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features)); lseek(fd, 0, SEEK_SET); err = do_write(fd, &f_header, sizeof(f_header)); if (err < 0) { pr_debug("failed to write perf header\n"); return err; } lseek(fd, header->data_offset + header->data_size, SEEK_SET); header->frozen = 1; return 0; } static int perf_header__getbuffer64(struct perf_header *header, int fd, void *buf, size_t size) { if (readn(fd, buf, size) <= 0) return -1; if (header->needs_swap) mem_bswap_64(buf, size); return 0; } int perf_header__process_sections(struct perf_header *header, int fd, int (*process)(struct perf_file_section *section, struct perf_header *ph, int feat, int fd)) { struct perf_file_section *feat_sec; int nr_sections; int sec_size; int idx = 0; int err = -1, feat = 1; nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); if (!nr_sections) return 0; feat_sec = calloc(sizeof(*feat_sec), nr_sections); if (!feat_sec) return -1; sec_size = sizeof(*feat_sec) * nr_sections; lseek(fd, header->data_offset + header->data_size, SEEK_SET); if (perf_header__getbuffer64(header, fd, feat_sec, sec_size)) goto out_free; err = 0; while (idx < nr_sections && feat < HEADER_LAST_FEATURE) { if (perf_header__has_feat(header, feat)) { struct perf_file_section *sec = &feat_sec[idx++]; err = process(sec, header, feat, fd); if (err < 0) break; } ++feat; } out_free: 
free(feat_sec); return err; } int perf_file_header__read(struct perf_file_header *header, struct perf_header *ph, int fd) { lseek(fd, 0, SEEK_SET); if (readn(fd, header, sizeof(*header)) <= 0 || memcmp(&header->magic, __perf_magic, sizeof(header->magic))) return -1; if (header->attr_size != sizeof(struct perf_file_attr)) { u64 attr_size = bswap_64(header->attr_size); if (attr_size != sizeof(struct perf_file_attr)) return -1; mem_bswap_64(header, offsetof(struct perf_file_header, adds_features)); ph->needs_swap = true; } if (header->size != sizeof(*header)) { /* Support the previous format */ if (header->size == offsetof(typeof(*header), adds_features)) bitmap_zero(header->adds_features, HEADER_FEAT_BITS); else return -1; } memcpy(&ph->adds_features, &header->adds_features, sizeof(ph->adds_features)); /* * FIXME: hack that assumes that if we need swap the perf.data file * may be coming from an arch with a different word-size, ergo different * DEFINE_BITMAP format, investigate more later, but for now its mostly * safe to assume that we have a build-id section. Trace files probably * have several other issues in this realm anyway... 
*/ if (ph->needs_swap) { memset(&ph->adds_features, 0, sizeof(ph->adds_features)); perf_header__set_feat(ph, HEADER_BUILD_ID); } ph->event_offset = header->event_types.offset; ph->event_size = header->event_types.size; ph->data_offset = header->data.offset; ph->data_size = header->data.size; return 0; } static int __event_process_build_id(struct build_id_event *bev, char *filename, struct perf_session *session) { int err = -1; struct list_head *head; struct machine *machine; u16 misc; struct dso *dso; enum dso_kernel_type dso_type; machine = perf_session__findnew_machine(session, bev->pid); if (!machine) goto out; misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; switch (misc) { case PERF_RECORD_MISC_KERNEL: dso_type = DSO_TYPE_KERNEL; head = &machine->kernel_dsos; break; case PERF_RECORD_MISC_GUEST_KERNEL: dso_type = DSO_TYPE_GUEST_KERNEL; head = &machine->kernel_dsos; break; case PERF_RECORD_MISC_USER: case PERF_RECORD_MISC_GUEST_USER: dso_type = DSO_TYPE_USER; head = &machine->user_dsos; break; default: goto out; } dso = __dsos__findnew(head, filename); if (dso != NULL) { char sbuild_id[BUILD_ID_SIZE * 2 + 1]; dso__set_build_id(dso, &bev->build_id); if (filename[0] == '[') dso->kernel = dso_type; build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); pr_debug("build id event received for %s: %s\n", dso->long_name, sbuild_id); } err = 0; out: return err; } static int perf_header__read_build_ids_abi_quirk(struct perf_header *header, int input, u64 offset, u64 size) { struct perf_session *session = container_of(header, struct perf_session, header); struct { struct perf_event_header header; u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))]; char filename[0]; } old_bev; struct build_id_event bev; char filename[PATH_MAX]; u64 limit = offset + size; while (offset < limit) { ssize_t len; if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) return -1; if (header->needs_swap) perf_event_header__bswap(&old_bev.header); len = old_bev.header.size - 
sizeof(old_bev); if (read(input, filename, len) != len) return -1; bev.header = old_bev.header; bev.pid = 0; memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id)); __event_process_build_id(&bev, filename, session); offset += bev.header.size; } return 0; } static int perf_header__read_build_ids(struct perf_header *header, int input, u64 offset, u64 size) { struct perf_session *session = container_of(header, struct perf_session, header); struct build_id_event bev; char filename[PATH_MAX]; u64 limit = offset + size, orig_offset = offset; int err = -1; while (offset < limit) { ssize_t len; if (read(input, &bev, sizeof(bev)) != sizeof(bev)) goto out; if (header->needs_swap) perf_event_header__bswap(&bev.header); len = bev.header.size - sizeof(bev); if (read(input, filename, len) != len) goto out; /* * The a1645ce1 changeset: * * "perf: 'perf kvm' tool for monitoring guest performance from host" * * Added a field to struct build_id_event that broke the file * format. * * Since the kernel build-id is the first entry, process the * table using the old format if the well known * '[kernel.kallsyms]' string for the kernel build-id has the * first 4 characters chopped off (where the pid_t sits). 
*/ if (memcmp(filename, "nel.kallsyms]", 13) == 0) { if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1) return -1; return perf_header__read_build_ids_abi_quirk(header, input, offset, size); } __event_process_build_id(&bev, filename, session); offset += bev.header.size; } err = 0; out: return err; } static int perf_file_section__process(struct perf_file_section *section, struct perf_header *ph, int feat, int fd) { if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { pr_debug("Failed to lseek to %" PRIu64 " offset for feature " "%d, continuing...\n", section->offset, feat); return 0; } switch (feat) { case HEADER_TRACE_INFO: trace_report(fd, false); break; case HEADER_BUILD_ID: if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) pr_debug("Failed to read buildids, continuing...\n"); break; default: pr_debug("unknown feature %d, continuing...\n", feat); } return 0; } static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, struct perf_header *ph, int fd, bool repipe) { if (readn(fd, header, sizeof(*header)) <= 0 || memcmp(&header->magic, __perf_magic, sizeof(header->magic))) return -1; if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0) return -1; if (header->size != sizeof(*header)) { u64 size = bswap_64(header->size); if (size != sizeof(*header)) return -1; ph->needs_swap = true; } return 0; } static int perf_header__read_pipe(struct perf_session *session, int fd) { struct perf_header *header = &session->header; struct perf_pipe_file_header f_header; if (perf_file_header__read_pipe(&f_header, header, fd, session->repipe) < 0) { pr_debug("incompatible file format\n"); return -EINVAL; } session->fd = fd; return 0; } int perf_session__read_header(struct perf_session *session, int fd) { struct perf_header *header = &session->header; struct perf_file_header f_header; struct perf_file_attr f_attr; u64 f_id; int nr_attrs, nr_ids, i, j; session->evlist = perf_evlist__new(NULL, NULL); if (session->evlist == NULL) 
return -ENOMEM; if (session->fd_pipe) return perf_header__read_pipe(session, fd); if (perf_file_header__read(&f_header, header, fd) < 0) { pr_debug("incompatible file format\n"); return -EINVAL; } nr_attrs = f_header.attrs.size / sizeof(f_attr); lseek(fd, f_header.attrs.offset, SEEK_SET); for (i = 0; i < nr_attrs; i++) { struct perf_evsel *evsel; off_t tmp; if (readn(fd, &f_attr, sizeof(f_attr)) <= 0) goto out_errno; if (header->needs_swap) perf_event__attr_swap(&f_attr.attr); tmp = lseek(fd, 0, SEEK_CUR); evsel = perf_evsel__new(&f_attr.attr, i); if (evsel == NULL) goto out_delete_evlist; /* * Do it before so that if perf_evsel__alloc_id fails, this * entry gets purged too at perf_evlist__delete(). */ perf_evlist__add(session->evlist, evsel); nr_ids = f_attr.ids.size / sizeof(u64); /* * We don't have the cpu and thread maps on the header, so * for allocating the perf_sample_id table we fake 1 cpu and * hattr->ids threads. */ if (perf_evsel__alloc_id(evsel, 1, nr_ids)) goto out_delete_evlist; lseek(fd, f_attr.ids.offset, SEEK_SET); for (j = 0; j < nr_ids; j++) { if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id))) goto out_errno; perf_evlist__id_add(session->evlist, evsel, 0, j, f_id); } lseek(fd, tmp, SEEK_SET); } if (f_header.event_types.size) { lseek(fd, f_header.event_types.offset, SEEK_SET); events = malloc(f_header.event_types.size); if (events == NULL) return -ENOMEM; if (perf_header__getbuffer64(header, fd, events, f_header.event_types.size)) goto out_errno; event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); } perf_header__process_sections(header, fd, perf_file_section__process); lseek(fd, header->data_offset, SEEK_SET); header->frozen = 1; return 0; out_errno: return -errno; out_delete_evlist: perf_evlist__delete(session->evlist); session->evlist = NULL; return -ENOMEM; } int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id, perf_event__handler_t process, struct perf_session *session) { union 
perf_event *ev; size_t size; int err; size = sizeof(struct perf_event_attr); size = ALIGN(size, sizeof(u64)); size += sizeof(struct perf_event_header); size += ids * sizeof(u64); ev = malloc(size); if (ev == NULL) return -ENOMEM; ev->attr.attr = *attr; memcpy(ev->attr.id, id, ids * sizeof(u64)); ev->attr.header.type = PERF_RECORD_HEADER_ATTR; ev->attr.header.size = size; err = process(ev, NULL, session); free(ev); return err; } int perf_session__synthesize_attrs(struct perf_session *session, perf_event__handler_t process) { struct perf_evsel *attr; int err = 0; list_for_each_entry(attr, &session->evlist->entries, node) { err = perf_event__synthesize_attr(&attr->attr, attr->ids, attr->id, process, session); if (err) { pr_debug("failed to create perf header attribute\n"); return err; } } return err; } int perf_event__process_attr(union perf_event *event, struct perf_session *session) { unsigned int i, ids, n_ids; struct perf_evsel *evsel; if (session->evlist == NULL) { session->evlist = perf_evlist__new(NULL, NULL); if (session->evlist == NULL) return -ENOMEM; } evsel = perf_evsel__new(&event->attr.attr, session->evlist->nr_entries); if (evsel == NULL) return -ENOMEM; perf_evlist__add(session->evlist, evsel); ids = event->header.size; ids -= (void *)&event->attr.id - (void *)event; n_ids = ids / sizeof(u64); /* * We don't have the cpu and thread maps on the header, so * for allocating the perf_sample_id table we fake 1 cpu and * hattr->ids threads. 
*/ if (perf_evsel__alloc_id(evsel, 1, n_ids)) return -ENOMEM; for (i = 0; i < n_ids; i++) { perf_evlist__id_add(session->evlist, evsel, 0, i, event->attr.id[i]); } perf_session__update_sample_type(session); return 0; } int perf_event__synthesize_event_type(u64 event_id, char *name, perf_event__handler_t process, struct perf_session *session) { union perf_event ev; size_t size = 0; int err = 0; memset(&ev, 0, sizeof(ev)); ev.event_type.event_type.event_id = event_id; memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME); strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1); ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE; size = strlen(name); size = ALIGN(size, sizeof(u64)); ev.event_type.header.size = sizeof(ev.event_type) - (sizeof(ev.event_type.event_type.name) - size); err = process(&ev, NULL, session); return err; } int perf_event__synthesize_event_types(perf_event__handler_t process, struct perf_session *session) { struct perf_trace_event_type *type; int i, err = 0; for (i = 0; i < event_count; i++) { type = &events[i]; err = perf_event__synthesize_event_type(type->event_id, type->name, process, session); if (err) { pr_debug("failed to create perf header event type\n"); return err; } } return err; } int perf_event__process_event_type(union perf_event *event, struct perf_session *session __unused) { if (perf_header__push_event(event->event_type.event_type.event_id, event->event_type.event_type.name) < 0) return -ENOMEM; return 0; } int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist, perf_event__handler_t process, struct perf_session *session __unused) { union perf_event ev; ssize_t size = 0, aligned_size = 0, padding; int err __used = 0; memset(&ev, 0, sizeof(ev)); ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; size = read_tracing_data_size(fd, &evlist->entries); if (size <= 0) return size; aligned_size = ALIGN(size, sizeof(u64)); padding = aligned_size - size; ev.tracing_data.header.size = 
sizeof(ev.tracing_data); ev.tracing_data.size = aligned_size; process(&ev, NULL, session); err = read_tracing_data(fd, &evlist->entries); write_padded(fd, NULL, 0, padding); return aligned_size; } int perf_event__process_tracing_data(union perf_event *event, struct perf_session *session) { ssize_t size_read, padding, size = event->tracing_data.size; off_t offset = lseek(session->fd, 0, SEEK_CUR); char buf[BUFSIZ]; /* setup for reading amidst mmap */ lseek(session->fd, offset + sizeof(struct tracing_data_event), SEEK_SET); size_read = trace_report(session->fd, session->repipe); padding = ALIGN(size_read, sizeof(u64)) - size_read; if (read(session->fd, buf, padding) < 0) die("reading input file"); if (session->repipe) { int retw = write(STDOUT_FILENO, buf, padding); if (retw <= 0 || retw != padding) die("repiping tracing data padding"); } if (size_read + padding != size) die("tracing data size mismatch"); return size_read + padding; } int perf_event__synthesize_build_id(struct dso *pos, u16 misc, perf_event__handler_t process, struct machine *machine, struct perf_session *session) { union perf_event ev; size_t len; int err = 0; if (!pos->hit) return err; memset(&ev, 0, sizeof(ev)); len = pos->long_name_len + 1; len = ALIGN(len, NAME_ALIGN); memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id)); ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; ev.build_id.header.misc = misc; ev.build_id.pid = machine->pid; ev.build_id.header.size = sizeof(ev.build_id) + len; memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); err = process(&ev, NULL, session); return err; } int perf_event__process_build_id(union perf_event *event, struct perf_session *session) { __event_process_build_id(&event->build_id, event->build_id.filename, session); return 0; } void disable_buildid_cache(void) { no_buildid_cache = true; }
gpl-2.0
Pafcholini/Nadia-kernel-LL-N910F-EUR-LL-OpenSource
tools/perf/builtin-list.c
2375
1508
/* * builtin-list.c * * Builtin list command: list all event types * * Copyright (C) 2009, Thomas Gleixner <tglx@linutronix.de> * Copyright (C) 2008-2009, Red Hat Inc, Ingo Molnar <mingo@redhat.com> * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> */ #include "builtin.h" #include "perf.h" #include "util/parse-events.h" #include "util/cache.h" int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused) { setup_pager(); if (argc == 1) print_events(NULL, false); else { int i; for (i = 1; i < argc; ++i) { if (i > 2) putchar('\n'); if (strncmp(argv[i], "tracepoint", 10) == 0) print_tracepoint_events(NULL, NULL, false); else if (strcmp(argv[i], "hw") == 0 || strcmp(argv[i], "hardware") == 0) print_events_type(PERF_TYPE_HARDWARE); else if (strcmp(argv[i], "sw") == 0 || strcmp(argv[i], "software") == 0) print_events_type(PERF_TYPE_SOFTWARE); else if (strcmp(argv[i], "cache") == 0 || strcmp(argv[i], "hwcache") == 0) print_hwcache_events(NULL, false); else if (strcmp(argv[i], "--raw-dump") == 0) print_events(NULL, true); else { char *sep = strchr(argv[i], ':'), *s; int sep_idx; if (sep == NULL) { print_events(argv[i], false); continue; } sep_idx = sep - argv[i]; s = strdup(argv[i]); if (s == NULL) return -1; s[sep_idx] = '\0'; print_tracepoint_events(s, s + sep_idx + 1, false); free(s); } } } return 0; }
gpl-2.0