repo_name
string
path
string
copies
string
size
string
content
string
license
string
TeamWin/android_kernel_samsung_galaxys2plus-common
drivers/s390/crypto/zcrypt_api.c
2627
32780
/* * linux/drivers/s390/crypto/zcrypt_api.c * * zcrypt 2.1.0 * * Copyright (C) 2001, 2006 IBM Corporation * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * Cornelia Huck <cornelia.huck@de.ibm.com> * * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> * Ralph Wuerthner <rwuerthn@de.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/miscdevice.h> #include <linux/fs.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/compat.h> #include <linux/slab.h> #include <asm/atomic.h> #include <asm/uaccess.h> #include <linux/hw_random.h> #include "zcrypt_api.h" /* * Module description. */ MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " "Copyright 2001, 2006 IBM Corporation"); MODULE_LICENSE("GPL"); static DEFINE_SPINLOCK(zcrypt_device_lock); static LIST_HEAD(zcrypt_device_list); static int zcrypt_device_count = 0; static atomic_t zcrypt_open_count = ATOMIC_INIT(0); static int zcrypt_rng_device_add(void); static void zcrypt_rng_device_remove(void); /* * Device attributes common for all crypto devices. 
*/ static ssize_t zcrypt_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct zcrypt_device *zdev = to_ap_dev(dev)->private; return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string); } static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL); static ssize_t zcrypt_online_show(struct device *dev, struct device_attribute *attr, char *buf) { struct zcrypt_device *zdev = to_ap_dev(dev)->private; return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online); } static ssize_t zcrypt_online_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct zcrypt_device *zdev = to_ap_dev(dev)->private; int online; if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) return -EINVAL; zdev->online = online; if (!online) ap_flush_queue(zdev->ap_dev); return count; } static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store); static struct attribute * zcrypt_device_attrs[] = { &dev_attr_type.attr, &dev_attr_online.attr, NULL, }; static struct attribute_group zcrypt_device_attr_group = { .attrs = zcrypt_device_attrs, }; /** * __zcrypt_increase_preference(): Increase preference of a crypto device. * @zdev: Pointer the crypto device * * Move the device towards the head of the device list. * Need to be called while holding the zcrypt device list lock. * Note: cards with speed_rating of 0 are kept at the end of the list. */ static void __zcrypt_increase_preference(struct zcrypt_device *zdev) { struct zcrypt_device *tmp; struct list_head *l; if (zdev->speed_rating == 0) return; for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) { tmp = list_entry(l, struct zcrypt_device, list); if ((tmp->request_count + 1) * tmp->speed_rating <= (zdev->request_count + 1) * zdev->speed_rating && tmp->speed_rating != 0) break; } if (l == zdev->list.prev) return; /* Move zdev behind l */ list_move(&zdev->list, l); } /** * __zcrypt_decrease_preference(): Decrease preference of a crypto device. 
* @zdev: Pointer to a crypto device. * * Move the device towards the tail of the device list. * Need to be called while holding the zcrypt device list lock. * Note: cards with speed_rating of 0 are kept at the end of the list. */ static void __zcrypt_decrease_preference(struct zcrypt_device *zdev) { struct zcrypt_device *tmp; struct list_head *l; if (zdev->speed_rating == 0) return; for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) { tmp = list_entry(l, struct zcrypt_device, list); if ((tmp->request_count + 1) * tmp->speed_rating > (zdev->request_count + 1) * zdev->speed_rating || tmp->speed_rating == 0) break; } if (l == zdev->list.next) return; /* Move zdev before l */ list_move_tail(&zdev->list, l); } static void zcrypt_device_release(struct kref *kref) { struct zcrypt_device *zdev = container_of(kref, struct zcrypt_device, refcount); zcrypt_device_free(zdev); } void zcrypt_device_get(struct zcrypt_device *zdev) { kref_get(&zdev->refcount); } EXPORT_SYMBOL(zcrypt_device_get); int zcrypt_device_put(struct zcrypt_device *zdev) { return kref_put(&zdev->refcount, zcrypt_device_release); } EXPORT_SYMBOL(zcrypt_device_put); struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size) { struct zcrypt_device *zdev; zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL); if (!zdev) return NULL; zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL); if (!zdev->reply.message) goto out_free; zdev->reply.length = max_response_size; spin_lock_init(&zdev->lock); INIT_LIST_HEAD(&zdev->list); return zdev; out_free: kfree(zdev); return NULL; } EXPORT_SYMBOL(zcrypt_device_alloc); void zcrypt_device_free(struct zcrypt_device *zdev) { kfree(zdev->reply.message); kfree(zdev); } EXPORT_SYMBOL(zcrypt_device_free); /** * zcrypt_device_register() - Register a crypto device. * @zdev: Pointer to a crypto device * * Register a crypto device. Returns 0 if successful. 
*/ int zcrypt_device_register(struct zcrypt_device *zdev) { int rc; rc = sysfs_create_group(&zdev->ap_dev->device.kobj, &zcrypt_device_attr_group); if (rc) goto out; get_device(&zdev->ap_dev->device); kref_init(&zdev->refcount); spin_lock_bh(&zcrypt_device_lock); zdev->online = 1; /* New devices are online by default. */ list_add_tail(&zdev->list, &zcrypt_device_list); __zcrypt_increase_preference(zdev); zcrypt_device_count++; spin_unlock_bh(&zcrypt_device_lock); if (zdev->ops->rng) { rc = zcrypt_rng_device_add(); if (rc) goto out_unregister; } return 0; out_unregister: spin_lock_bh(&zcrypt_device_lock); zcrypt_device_count--; list_del_init(&zdev->list); spin_unlock_bh(&zcrypt_device_lock); sysfs_remove_group(&zdev->ap_dev->device.kobj, &zcrypt_device_attr_group); put_device(&zdev->ap_dev->device); zcrypt_device_put(zdev); out: return rc; } EXPORT_SYMBOL(zcrypt_device_register); /** * zcrypt_device_unregister(): Unregister a crypto device. * @zdev: Pointer to crypto device * * Unregister a crypto device. */ void zcrypt_device_unregister(struct zcrypt_device *zdev) { if (zdev->ops->rng) zcrypt_rng_device_remove(); spin_lock_bh(&zcrypt_device_lock); zcrypt_device_count--; list_del_init(&zdev->list); spin_unlock_bh(&zcrypt_device_lock); sysfs_remove_group(&zdev->ap_dev->device.kobj, &zcrypt_device_attr_group); put_device(&zdev->ap_dev->device); zcrypt_device_put(zdev); } EXPORT_SYMBOL(zcrypt_device_unregister); /** * zcrypt_read (): Not supported beyond zcrypt 1.3.1. * * This function is not supported beyond zcrypt 1.3.1. */ static ssize_t zcrypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) { return -EPERM; } /** * zcrypt_write(): Not allowed. * * Write is is not allowed */ static ssize_t zcrypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos) { return -EPERM; } /** * zcrypt_open(): Count number of users. * * Device open function to count number of users. 
*/ static int zcrypt_open(struct inode *inode, struct file *filp) { atomic_inc(&zcrypt_open_count); return nonseekable_open(inode, filp); } /** * zcrypt_release(): Count number of users. * * Device close function to count number of users. */ static int zcrypt_release(struct inode *inode, struct file *filp) { atomic_dec(&zcrypt_open_count); return 0; } /* * zcrypt ioctls. */ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) { struct zcrypt_device *zdev; int rc; if (mex->outputdatalength < mex->inputdatalength) return -EINVAL; /* * As long as outputdatalength is big enough, we can set the * outputdatalength equal to the inputdatalength, since that is the * number of bytes we will copy in any case */ mex->outputdatalength = mex->inputdatalength; spin_lock_bh(&zcrypt_device_lock); list_for_each_entry(zdev, &zcrypt_device_list, list) { if (!zdev->online || !zdev->ops->rsa_modexpo || zdev->min_mod_size > mex->inputdatalength || zdev->max_mod_size < mex->inputdatalength) continue; zcrypt_device_get(zdev); get_device(&zdev->ap_dev->device); zdev->request_count++; __zcrypt_decrease_preference(zdev); if (try_module_get(zdev->ap_dev->drv->driver.owner)) { spin_unlock_bh(&zcrypt_device_lock); rc = zdev->ops->rsa_modexpo(zdev, mex); spin_lock_bh(&zcrypt_device_lock); module_put(zdev->ap_dev->drv->driver.owner); } else rc = -EAGAIN; zdev->request_count--; __zcrypt_increase_preference(zdev); put_device(&zdev->ap_dev->device); zcrypt_device_put(zdev); spin_unlock_bh(&zcrypt_device_lock); return rc; } spin_unlock_bh(&zcrypt_device_lock); return -ENODEV; } static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) { struct zcrypt_device *zdev; unsigned long long z1, z2, z3; int rc, copied; if (crt->outputdatalength < crt->inputdatalength || (crt->inputdatalength & 1)) return -EINVAL; /* * As long as outputdatalength is big enough, we can set the * outputdatalength equal to the inputdatalength, since that is the * number of bytes we will copy in any case */ 
crt->outputdatalength = crt->inputdatalength; copied = 0; restart: spin_lock_bh(&zcrypt_device_lock); list_for_each_entry(zdev, &zcrypt_device_list, list) { if (!zdev->online || !zdev->ops->rsa_modexpo_crt || zdev->min_mod_size > crt->inputdatalength || zdev->max_mod_size < crt->inputdatalength) continue; if (zdev->short_crt && crt->inputdatalength > 240) { /* * Check inputdata for leading zeros for cards * that can't handle np_prime, bp_key, or * u_mult_inv > 128 bytes. */ if (copied == 0) { unsigned int len; spin_unlock_bh(&zcrypt_device_lock); /* len is max 256 / 2 - 120 = 8 * For bigger device just assume len of leading * 0s is 8 as stated in the requirements for * ica_rsa_modexpo_crt struct in zcrypt.h. */ if (crt->inputdatalength <= 256) len = crt->inputdatalength / 2 - 120; else len = 8; if (len > sizeof(z1)) return -EFAULT; z1 = z2 = z3 = 0; if (copy_from_user(&z1, crt->np_prime, len) || copy_from_user(&z2, crt->bp_key, len) || copy_from_user(&z3, crt->u_mult_inv, len)) return -EFAULT; z1 = z2 = z3 = 0; copied = 1; /* * We have to restart device lookup - * the device list may have changed by now. */ goto restart; } if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL) /* The device can't handle this request. 
*/ continue; } zcrypt_device_get(zdev); get_device(&zdev->ap_dev->device); zdev->request_count++; __zcrypt_decrease_preference(zdev); if (try_module_get(zdev->ap_dev->drv->driver.owner)) { spin_unlock_bh(&zcrypt_device_lock); rc = zdev->ops->rsa_modexpo_crt(zdev, crt); spin_lock_bh(&zcrypt_device_lock); module_put(zdev->ap_dev->drv->driver.owner); } else rc = -EAGAIN; zdev->request_count--; __zcrypt_increase_preference(zdev); put_device(&zdev->ap_dev->device); zcrypt_device_put(zdev); spin_unlock_bh(&zcrypt_device_lock); return rc; } spin_unlock_bh(&zcrypt_device_lock); return -ENODEV; } static long zcrypt_send_cprb(struct ica_xcRB *xcRB) { struct zcrypt_device *zdev; int rc; spin_lock_bh(&zcrypt_device_lock); list_for_each_entry(zdev, &zcrypt_device_list, list) { if (!zdev->online || !zdev->ops->send_cprb || (xcRB->user_defined != AUTOSELECT && AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined) ) continue; zcrypt_device_get(zdev); get_device(&zdev->ap_dev->device); zdev->request_count++; __zcrypt_decrease_preference(zdev); if (try_module_get(zdev->ap_dev->drv->driver.owner)) { spin_unlock_bh(&zcrypt_device_lock); rc = zdev->ops->send_cprb(zdev, xcRB); spin_lock_bh(&zcrypt_device_lock); module_put(zdev->ap_dev->drv->driver.owner); } else rc = -EAGAIN; zdev->request_count--; __zcrypt_increase_preference(zdev); put_device(&zdev->ap_dev->device); zcrypt_device_put(zdev); spin_unlock_bh(&zcrypt_device_lock); return rc; } spin_unlock_bh(&zcrypt_device_lock); return -ENODEV; } static long zcrypt_rng(char *buffer) { struct zcrypt_device *zdev; int rc; spin_lock_bh(&zcrypt_device_lock); list_for_each_entry(zdev, &zcrypt_device_list, list) { if (!zdev->online || !zdev->ops->rng) continue; zcrypt_device_get(zdev); get_device(&zdev->ap_dev->device); zdev->request_count++; __zcrypt_decrease_preference(zdev); if (try_module_get(zdev->ap_dev->drv->driver.owner)) { spin_unlock_bh(&zcrypt_device_lock); rc = zdev->ops->rng(zdev, buffer); spin_lock_bh(&zcrypt_device_lock); 
module_put(zdev->ap_dev->drv->driver.owner); } else rc = -EAGAIN; zdev->request_count--; __zcrypt_increase_preference(zdev); put_device(&zdev->ap_dev->device); zcrypt_device_put(zdev); spin_unlock_bh(&zcrypt_device_lock); return rc; } spin_unlock_bh(&zcrypt_device_lock); return -ENODEV; } static void zcrypt_status_mask(char status[AP_DEVICES]) { struct zcrypt_device *zdev; memset(status, 0, sizeof(char) * AP_DEVICES); spin_lock_bh(&zcrypt_device_lock); list_for_each_entry(zdev, &zcrypt_device_list, list) status[AP_QID_DEVICE(zdev->ap_dev->qid)] = zdev->online ? zdev->user_space_type : 0x0d; spin_unlock_bh(&zcrypt_device_lock); } static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES]) { struct zcrypt_device *zdev; memset(qdepth, 0, sizeof(char) * AP_DEVICES); spin_lock_bh(&zcrypt_device_lock); list_for_each_entry(zdev, &zcrypt_device_list, list) { spin_lock(&zdev->ap_dev->lock); qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] = zdev->ap_dev->pendingq_count + zdev->ap_dev->requestq_count; spin_unlock(&zdev->ap_dev->lock); } spin_unlock_bh(&zcrypt_device_lock); } static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES]) { struct zcrypt_device *zdev; memset(reqcnt, 0, sizeof(int) * AP_DEVICES); spin_lock_bh(&zcrypt_device_lock); list_for_each_entry(zdev, &zcrypt_device_list, list) { spin_lock(&zdev->ap_dev->lock); reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] = zdev->ap_dev->total_request_count; spin_unlock(&zdev->ap_dev->lock); } spin_unlock_bh(&zcrypt_device_lock); } static int zcrypt_pendingq_count(void) { struct zcrypt_device *zdev; int pendingq_count = 0; spin_lock_bh(&zcrypt_device_lock); list_for_each_entry(zdev, &zcrypt_device_list, list) { spin_lock(&zdev->ap_dev->lock); pendingq_count += zdev->ap_dev->pendingq_count; spin_unlock(&zdev->ap_dev->lock); } spin_unlock_bh(&zcrypt_device_lock); return pendingq_count; } static int zcrypt_requestq_count(void) { struct zcrypt_device *zdev; int requestq_count = 0; spin_lock_bh(&zcrypt_device_lock); list_for_each_entry(zdev, 
&zcrypt_device_list, list) { spin_lock(&zdev->ap_dev->lock); requestq_count += zdev->ap_dev->requestq_count; spin_unlock(&zdev->ap_dev->lock); } spin_unlock_bh(&zcrypt_device_lock); return requestq_count; } static int zcrypt_count_type(int type) { struct zcrypt_device *zdev; int device_count = 0; spin_lock_bh(&zcrypt_device_lock); list_for_each_entry(zdev, &zcrypt_device_list, list) if (zdev->user_space_type == type) device_count++; spin_unlock_bh(&zcrypt_device_lock); return device_count; } /** * zcrypt_ica_status(): Old, depracted combi status call. * * Old, deprecated combi status call. */ static long zcrypt_ica_status(struct file *filp, unsigned long arg) { struct ica_z90_status *pstat; int ret; pstat = kzalloc(sizeof(*pstat), GFP_KERNEL); if (!pstat) return -ENOMEM; pstat->totalcount = zcrypt_device_count; pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA); pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC); pstat->requestqWaitCount = zcrypt_requestq_count(); pstat->pendingqWaitCount = zcrypt_pendingq_count(); pstat->totalOpenCount = atomic_read(&zcrypt_open_count); pstat->cryptoDomain = ap_domain_index; zcrypt_status_mask(pstat->status); zcrypt_qdepth_mask(pstat->qdepth); ret = 0; if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat))) ret = -EFAULT; kfree(pstat); return ret; } static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int rc; switch (cmd) { case ICARSAMODEXPO: { struct ica_rsa_modexpo __user *umex = (void __user *) arg; struct ica_rsa_modexpo mex; if (copy_from_user(&mex, umex, sizeof(mex))) return -EFAULT; do { rc = zcrypt_rsa_modexpo(&mex); } while (rc == -EAGAIN); if (rc) return rc; return put_user(mex.outputdatalength, &umex->outputdatalength); } case ICARSACRT: { struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg; struct ica_rsa_modexpo_crt crt; if (copy_from_user(&crt, ucrt, sizeof(crt))) return -EFAULT; do { rc = zcrypt_rsa_crt(&crt); } while (rc == -EAGAIN); if (rc) return rc; 
return put_user(crt.outputdatalength, &ucrt->outputdatalength); } case ZSECSENDCPRB: { struct ica_xcRB __user *uxcRB = (void __user *) arg; struct ica_xcRB xcRB; if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB))) return -EFAULT; do { rc = zcrypt_send_cprb(&xcRB); } while (rc == -EAGAIN); if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) return -EFAULT; return rc; } case Z90STAT_STATUS_MASK: { char status[AP_DEVICES]; zcrypt_status_mask(status); if (copy_to_user((char __user *) arg, status, sizeof(char) * AP_DEVICES)) return -EFAULT; return 0; } case Z90STAT_QDEPTH_MASK: { char qdepth[AP_DEVICES]; zcrypt_qdepth_mask(qdepth); if (copy_to_user((char __user *) arg, qdepth, sizeof(char) * AP_DEVICES)) return -EFAULT; return 0; } case Z90STAT_PERDEV_REQCNT: { int reqcnt[AP_DEVICES]; zcrypt_perdev_reqcnt(reqcnt); if (copy_to_user((int __user *) arg, reqcnt, sizeof(int) * AP_DEVICES)) return -EFAULT; return 0; } case Z90STAT_REQUESTQ_COUNT: return put_user(zcrypt_requestq_count(), (int __user *) arg); case Z90STAT_PENDINGQ_COUNT: return put_user(zcrypt_pendingq_count(), (int __user *) arg); case Z90STAT_TOTALOPEN_COUNT: return put_user(atomic_read(&zcrypt_open_count), (int __user *) arg); case Z90STAT_DOMAIN_INDEX: return put_user(ap_domain_index, (int __user *) arg); /* * Deprecated ioctls. Don't add another device count ioctl, * you can count them yourself in the user space with the * output of the Z90STAT_STATUS_MASK ioctl. 
*/ case ICAZ90STATUS: return zcrypt_ica_status(filp, arg); case Z90STAT_TOTALCOUNT: return put_user(zcrypt_device_count, (int __user *) arg); case Z90STAT_PCICACOUNT: return put_user(zcrypt_count_type(ZCRYPT_PCICA), (int __user *) arg); case Z90STAT_PCICCCOUNT: return put_user(zcrypt_count_type(ZCRYPT_PCICC), (int __user *) arg); case Z90STAT_PCIXCCMCL2COUNT: return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2), (int __user *) arg); case Z90STAT_PCIXCCMCL3COUNT: return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), (int __user *) arg); case Z90STAT_PCIXCCCOUNT: return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) + zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), (int __user *) arg); case Z90STAT_CEX2CCOUNT: return put_user(zcrypt_count_type(ZCRYPT_CEX2C), (int __user *) arg); case Z90STAT_CEX2ACOUNT: return put_user(zcrypt_count_type(ZCRYPT_CEX2A), (int __user *) arg); default: /* unknown ioctl number */ return -ENOIOCTLCMD; } } #ifdef CONFIG_COMPAT /* * ioctl32 conversion routines */ struct compat_ica_rsa_modexpo { compat_uptr_t inputdata; unsigned int inputdatalength; compat_uptr_t outputdata; unsigned int outputdatalength; compat_uptr_t b_key; compat_uptr_t n_modulus; }; static long trans_modexpo32(struct file *filp, unsigned int cmd, unsigned long arg) { struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg); struct compat_ica_rsa_modexpo mex32; struct ica_rsa_modexpo mex64; long rc; if (copy_from_user(&mex32, umex32, sizeof(mex32))) return -EFAULT; mex64.inputdata = compat_ptr(mex32.inputdata); mex64.inputdatalength = mex32.inputdatalength; mex64.outputdata = compat_ptr(mex32.outputdata); mex64.outputdatalength = mex32.outputdatalength; mex64.b_key = compat_ptr(mex32.b_key); mex64.n_modulus = compat_ptr(mex32.n_modulus); do { rc = zcrypt_rsa_modexpo(&mex64); } while (rc == -EAGAIN); if (!rc) rc = put_user(mex64.outputdatalength, &umex32->outputdatalength); return rc; } struct compat_ica_rsa_modexpo_crt { compat_uptr_t inputdata; unsigned int inputdatalength; 
compat_uptr_t outputdata; unsigned int outputdatalength; compat_uptr_t bp_key; compat_uptr_t bq_key; compat_uptr_t np_prime; compat_uptr_t nq_prime; compat_uptr_t u_mult_inv; }; static long trans_modexpo_crt32(struct file *filp, unsigned int cmd, unsigned long arg) { struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg); struct compat_ica_rsa_modexpo_crt crt32; struct ica_rsa_modexpo_crt crt64; long rc; if (copy_from_user(&crt32, ucrt32, sizeof(crt32))) return -EFAULT; crt64.inputdata = compat_ptr(crt32.inputdata); crt64.inputdatalength = crt32.inputdatalength; crt64.outputdata= compat_ptr(crt32.outputdata); crt64.outputdatalength = crt32.outputdatalength; crt64.bp_key = compat_ptr(crt32.bp_key); crt64.bq_key = compat_ptr(crt32.bq_key); crt64.np_prime = compat_ptr(crt32.np_prime); crt64.nq_prime = compat_ptr(crt32.nq_prime); crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv); do { rc = zcrypt_rsa_crt(&crt64); } while (rc == -EAGAIN); if (!rc) rc = put_user(crt64.outputdatalength, &ucrt32->outputdatalength); return rc; } struct compat_ica_xcRB { unsigned short agent_ID; unsigned int user_defined; unsigned short request_ID; unsigned int request_control_blk_length; unsigned char padding1[16 - sizeof (compat_uptr_t)]; compat_uptr_t request_control_blk_addr; unsigned int request_data_length; char padding2[16 - sizeof (compat_uptr_t)]; compat_uptr_t request_data_address; unsigned int reply_control_blk_length; char padding3[16 - sizeof (compat_uptr_t)]; compat_uptr_t reply_control_blk_addr; unsigned int reply_data_length; char padding4[16 - sizeof (compat_uptr_t)]; compat_uptr_t reply_data_addr; unsigned short priority_window; unsigned int status; } __attribute__((packed)); static long trans_xcRB32(struct file *filp, unsigned int cmd, unsigned long arg) { struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg); struct compat_ica_xcRB xcRB32; struct ica_xcRB xcRB64; long rc; if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32))) return -EFAULT; xcRB64.agent_ID = 
xcRB32.agent_ID; xcRB64.user_defined = xcRB32.user_defined; xcRB64.request_ID = xcRB32.request_ID; xcRB64.request_control_blk_length = xcRB32.request_control_blk_length; xcRB64.request_control_blk_addr = compat_ptr(xcRB32.request_control_blk_addr); xcRB64.request_data_length = xcRB32.request_data_length; xcRB64.request_data_address = compat_ptr(xcRB32.request_data_address); xcRB64.reply_control_blk_length = xcRB32.reply_control_blk_length; xcRB64.reply_control_blk_addr = compat_ptr(xcRB32.reply_control_blk_addr); xcRB64.reply_data_length = xcRB32.reply_data_length; xcRB64.reply_data_addr = compat_ptr(xcRB32.reply_data_addr); xcRB64.priority_window = xcRB32.priority_window; xcRB64.status = xcRB32.status; do { rc = zcrypt_send_cprb(&xcRB64); } while (rc == -EAGAIN); xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length; xcRB32.reply_data_length = xcRB64.reply_data_length; xcRB32.status = xcRB64.status; if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32))) return -EFAULT; return rc; } static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { if (cmd == ICARSAMODEXPO) return trans_modexpo32(filp, cmd, arg); if (cmd == ICARSACRT) return trans_modexpo_crt32(filp, cmd, arg); if (cmd == ZSECSENDCPRB) return trans_xcRB32(filp, cmd, arg); return zcrypt_unlocked_ioctl(filp, cmd, arg); } #endif /* * Misc device file operations. */ static const struct file_operations zcrypt_fops = { .owner = THIS_MODULE, .read = zcrypt_read, .write = zcrypt_write, .unlocked_ioctl = zcrypt_unlocked_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = zcrypt_compat_ioctl, #endif .open = zcrypt_open, .release = zcrypt_release, .llseek = no_llseek, }; /* * Misc device. */ static struct miscdevice zcrypt_misc_device = { .minor = MISC_DYNAMIC_MINOR, .name = "z90crypt", .fops = &zcrypt_fops, }; /* * Deprecated /proc entry support. 
*/ static struct proc_dir_entry *zcrypt_entry; static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len) { int i; for (i = 0; i < len; i++) seq_printf(m, "%01x", (unsigned int) addr[i]); seq_putc(m, ' '); } static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len) { int inl, c, cx; seq_printf(m, " "); inl = 0; for (c = 0; c < (len / 16); c++) { sprintcl(m, addr+inl, 16); inl += 16; } cx = len%16; if (cx) { sprintcl(m, addr+inl, cx); inl += cx; } seq_putc(m, '\n'); } static void sprinthx(unsigned char *title, struct seq_file *m, unsigned char *addr, unsigned int len) { int inl, r, rx; seq_printf(m, "\n%s\n", title); inl = 0; for (r = 0; r < (len / 64); r++) { sprintrw(m, addr+inl, 64); inl += 64; } rx = len % 64; if (rx) { sprintrw(m, addr+inl, rx); inl += rx; } seq_putc(m, '\n'); } static void sprinthx4(unsigned char *title, struct seq_file *m, unsigned int *array, unsigned int len) { int r; seq_printf(m, "\n%s\n", title); for (r = 0; r < len; r++) { if ((r % 8) == 0) seq_printf(m, " "); seq_printf(m, "%08X ", array[r]); if ((r % 8) == 7) seq_putc(m, '\n'); } seq_putc(m, '\n'); } static int zcrypt_proc_show(struct seq_file *m, void *v) { char workarea[sizeof(int) * AP_DEVICES]; seq_printf(m, "\nzcrypt version: %d.%d.%d\n", ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT); seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index); seq_printf(m, "Total device count: %d\n", zcrypt_device_count); seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA)); seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC)); seq_printf(m, "PCIXCC MCL2 count: %d\n", zcrypt_count_type(ZCRYPT_PCIXCC_MCL2)); seq_printf(m, "PCIXCC MCL3 count: %d\n", zcrypt_count_type(ZCRYPT_PCIXCC_MCL3)); seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C)); seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A)); seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C)); seq_printf(m, "CEX3A count: 
%d\n", zcrypt_count_type(ZCRYPT_CEX3A)); seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count()); seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count()); seq_printf(m, "Total open handles: %d\n\n", atomic_read(&zcrypt_open_count)); zcrypt_status_mask(workarea); sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A", m, workarea, AP_DEVICES); zcrypt_qdepth_mask(workarea); sprinthx("Waiting work element counts", m, workarea, AP_DEVICES); zcrypt_perdev_reqcnt((int *) workarea); sprinthx4("Per-device successfully completed request counts", m, (unsigned int *) workarea, AP_DEVICES); return 0; } static int zcrypt_proc_open(struct inode *inode, struct file *file) { return single_open(file, zcrypt_proc_show, NULL); } static void zcrypt_disable_card(int index) { struct zcrypt_device *zdev; spin_lock_bh(&zcrypt_device_lock); list_for_each_entry(zdev, &zcrypt_device_list, list) if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { zdev->online = 0; ap_flush_queue(zdev->ap_dev); break; } spin_unlock_bh(&zcrypt_device_lock); } static void zcrypt_enable_card(int index) { struct zcrypt_device *zdev; spin_lock_bh(&zcrypt_device_lock); list_for_each_entry(zdev, &zcrypt_device_list, list) if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { zdev->online = 1; break; } spin_unlock_bh(&zcrypt_device_lock); } static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { unsigned char *lbuf, *ptr; size_t local_count; int j; if (count <= 0) return 0; #define LBUFSIZE 1200UL lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); if (!lbuf) return 0; local_count = min(LBUFSIZE - 1, count); if (copy_from_user(lbuf, buffer, local_count) != 0) { kfree(lbuf); return -EFAULT; } lbuf[local_count] = '\0'; ptr = strstr(lbuf, "Online devices"); if (!ptr) goto out; ptr = strstr(ptr, "\n"); if (!ptr) goto out; ptr++; if (strstr(ptr, "Waiting work element counts") == NULL) goto out; for (j = 0; j < 64 && *ptr; 
ptr++) { /* * '0' for no device, '1' for PCICA, '2' for PCICC, * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3, * '5' for CEX2C and '6' for CEX2A' * '7' for CEX3C and '8' for CEX3A */ if (*ptr >= '0' && *ptr <= '8') j++; else if (*ptr == 'd' || *ptr == 'D') zcrypt_disable_card(j++); else if (*ptr == 'e' || *ptr == 'E') zcrypt_enable_card(j++); else if (*ptr != ' ' && *ptr != '\t') break; } out: kfree(lbuf); return count; } static const struct file_operations zcrypt_proc_fops = { .owner = THIS_MODULE, .open = zcrypt_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = zcrypt_proc_write, }; static int zcrypt_rng_device_count; static u32 *zcrypt_rng_buffer; static int zcrypt_rng_buffer_index; static DEFINE_MUTEX(zcrypt_rng_mutex); static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data) { int rc; /* * We don't need locking here because the RNG API guarantees serialized * read method calls. */ if (zcrypt_rng_buffer_index == 0) { rc = zcrypt_rng((char *) zcrypt_rng_buffer); if (rc < 0) return -EIO; zcrypt_rng_buffer_index = rc / sizeof *data; } *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index]; return sizeof *data; } static struct hwrng zcrypt_rng_dev = { .name = "zcrypt", .data_read = zcrypt_rng_data_read, }; static int zcrypt_rng_device_add(void) { int rc = 0; mutex_lock(&zcrypt_rng_mutex); if (zcrypt_rng_device_count == 0) { zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL); if (!zcrypt_rng_buffer) { rc = -ENOMEM; goto out; } zcrypt_rng_buffer_index = 0; rc = hwrng_register(&zcrypt_rng_dev); if (rc) goto out_free; zcrypt_rng_device_count = 1; } else zcrypt_rng_device_count++; mutex_unlock(&zcrypt_rng_mutex); return 0; out_free: free_page((unsigned long) zcrypt_rng_buffer); out: mutex_unlock(&zcrypt_rng_mutex); return rc; } static void zcrypt_rng_device_remove(void) { mutex_lock(&zcrypt_rng_mutex); zcrypt_rng_device_count--; if (zcrypt_rng_device_count == 0) { hwrng_unregister(&zcrypt_rng_dev); free_page((unsigned long) 
zcrypt_rng_buffer); } mutex_unlock(&zcrypt_rng_mutex); } /** * zcrypt_api_init(): Module initialization. * * The module initialization code. */ int __init zcrypt_api_init(void) { int rc; /* Register the request sprayer. */ rc = misc_register(&zcrypt_misc_device); if (rc < 0) goto out; /* Set up the proc file system */ zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, &zcrypt_proc_fops); if (!zcrypt_entry) { rc = -ENOMEM; goto out_misc; } return 0; out_misc: misc_deregister(&zcrypt_misc_device); out: return rc; } /** * zcrypt_api_exit(): Module termination. * * The module termination code. */ void zcrypt_api_exit(void) { remove_proc_entry("driver/z90crypt", NULL); misc_deregister(&zcrypt_misc_device); } #ifndef CONFIG_ZCRYPT_MONOLITHIC module_init(zcrypt_api_init); module_exit(zcrypt_api_exit); #endif
gpl-2.0
nopy/android_kernel_huawei_u8815
arch/x86/kernel/topology.c
2883
2198
/* * Populate sysfs with topology information * * Written by: Matthew Dobson, IBM Corporation * Original Code: Paul Dorwin, IBM Corporation, Patrick Mochel, OSDL * * Copyright (C) 2002, IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <colpatch@us.ibm.com> */ #include <linux/nodemask.h> #include <linux/mmzone.h> #include <linux/init.h> #include <linux/smp.h> #include <asm/cpu.h> static DEFINE_PER_CPU(struct x86_cpu, cpu_devices); #ifdef CONFIG_HOTPLUG_CPU int __ref arch_register_cpu(int num) { /* * CPU0 cannot be offlined due to several * restrictions and assumptions in kernel. This basically * doesn't add a control file, one cannot attempt to offline * BSP. * * Also certain PCI quirks require not to enable hotplug control * for all CPU's. 
*/ if (num) per_cpu(cpu_devices, num).cpu.hotpluggable = 1; return register_cpu(&per_cpu(cpu_devices, num).cpu, num); } EXPORT_SYMBOL(arch_register_cpu); void arch_unregister_cpu(int num) { unregister_cpu(&per_cpu(cpu_devices, num).cpu); } EXPORT_SYMBOL(arch_unregister_cpu); #else /* CONFIG_HOTPLUG_CPU */ static int __init arch_register_cpu(int num) { return register_cpu(&per_cpu(cpu_devices, num).cpu, num); } #endif /* CONFIG_HOTPLUG_CPU */ static int __init topology_init(void) { int i; #ifdef CONFIG_NUMA for_each_online_node(i) register_one_node(i); #endif for_each_present_cpu(i) arch_register_cpu(i); return 0; } subsys_initcall(topology_init);
gpl-2.0
ResurrectionRemix-Devices/android_kernel_lge_hammerhead
arch/blackfin/mach-bf527/boards/ezbrd.c
4419
21072
/* * Copyright 2004-2009 Analog Devices Inc. * 2005 National ICT Australia (NICTA) * Aidan Williams <aidan@nicta.com.au> * * Licensed under the GPL-2 or later. */ #include <linux/device.h> #include <linux/export.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/i2c.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/usb/musb.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> #include <asm/reboot.h> #include <asm/nand.h> #include <asm/portmux.h> #include <asm/dpmc.h> #include <linux/spi/ad7877.h> /* * Name the Board for the /proc/cpuinfo */ const char bfin_board_name[] = "ADI BF526-EZBRD"; /* * Driver needs to know address, irq and flag pin. */ #if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) static struct resource musb_resources[] = { [0] = { .start = 0xffc03800, .end = 0xffc03cff, .flags = IORESOURCE_MEM, }, [1] = { /* general IRQ */ .start = IRQ_USB_INT0, .end = IRQ_USB_INT0, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, .name = "mc" }, [2] = { /* DMA IRQ */ .start = IRQ_USB_DMA, .end = IRQ_USB_DMA, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, .name = "dma" }, }; static struct musb_hdrc_config musb_config = { .multipoint = 0, .dyn_fifo = 0, .soft_con = 1, .dma = 1, .num_eps = 8, .dma_channels = 8, .gpio_vrsel = GPIO_PG13, /* Some custom boards need to be active low, just set it to "0" * if it is the case. 
*/ .gpio_vrsel_active = 1, .clkin = 24, /* musb CLKIN in MHZ */ }; static struct musb_hdrc_platform_data musb_plat = { #if defined(CONFIG_USB_MUSB_OTG) .mode = MUSB_OTG, #elif defined(CONFIG_USB_MUSB_HDRC_HCD) .mode = MUSB_HOST, #elif defined(CONFIG_USB_GADGET_MUSB_HDRC) .mode = MUSB_PERIPHERAL, #endif .config = &musb_config, }; static u64 musb_dmamask = ~(u32)0; static struct platform_device musb_device = { .name = "musb-blackfin", .id = 0, .dev = { .dma_mask = &musb_dmamask, .coherent_dma_mask = 0xffffffff, .platform_data = &musb_plat, }, .num_resources = ARRAY_SIZE(musb_resources), .resource = musb_resources, }; #endif #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) static struct mtd_partition ezbrd_partitions[] = { { .name = "bootloader(nor)", .size = 0x40000, .offset = 0, }, { .name = "linux kernel(nor)", .size = 0x1C0000, .offset = MTDPART_OFS_APPEND, }, { .name = "file system(nor)", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; static struct physmap_flash_data ezbrd_flash_data = { .width = 2, .parts = ezbrd_partitions, .nr_parts = ARRAY_SIZE(ezbrd_partitions), }; static struct resource ezbrd_flash_resource = { .start = 0x20000000, .end = 0x203fffff, .flags = IORESOURCE_MEM, }; static struct platform_device ezbrd_flash_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &ezbrd_flash_data, }, .num_resources = 1, .resource = &ezbrd_flash_resource, }; #endif #if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) static struct mtd_partition partition_info[] = { { .name = "bootloader(nand)", .offset = 0, .size = 0x40000, }, { .name = "linux kernel(nand)", .offset = MTDPART_OFS_APPEND, .size = 4 * 1024 * 1024, }, { .name = "file system(nand)", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static struct bf5xx_nand_platform bf5xx_nand_platform = { .data_width = NFC_NWIDTH_8, .partitions = partition_info, .nr_partitions = ARRAY_SIZE(partition_info), .rd_dly = 3, .wr_dly = 3, 
}; static struct resource bf5xx_nand_resources[] = { { .start = NFC_CTL, .end = NFC_DATA_RD + 2, .flags = IORESOURCE_MEM, }, { .start = CH_NFC, .end = CH_NFC, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bf5xx_nand_device = { .name = "bf5xx-nand", .id = 0, .num_resources = ARRAY_SIZE(bf5xx_nand_resources), .resource = bf5xx_nand_resources, .dev = { .platform_data = &bf5xx_nand_platform, }, }; #endif #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) static struct platform_device rtc_device = { .name = "rtc-bfin", .id = -1, }; #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) #include <linux/bfin_mac.h> static const unsigned short bfin_mac_peripherals[] = P_RMII0; static struct bfin_phydev_platform_data bfin_phydev_data[] = { { .addr = 1, .irq = IRQ_MAC_PHYINT, }, }; static struct bfin_mii_bus_platform_data bfin_mii_bus_data = { .phydev_number = 1, .phydev_data = bfin_phydev_data, .phy_mode = PHY_INTERFACE_MODE_RMII, .mac_peripherals = bfin_mac_peripherals, }; static struct platform_device bfin_mii_bus = { .name = "bfin_mii_bus", .dev = { .platform_data = &bfin_mii_bus_data, } }; static struct platform_device bfin_mac_device = { .name = "bfin_mac", .dev = { .platform_data = &bfin_mii_bus, } }; #endif #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) static struct mtd_partition bfin_spi_flash_partitions[] = { { .name = "bootloader(spi)", .size = 0x00040000, .offset = 0, .mask_flags = MTD_CAP_ROM }, { .name = "linux kernel(spi)", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; static struct flash_platform_data bfin_spi_flash_data = { .name = "m25p80", .parts = bfin_spi_flash_partitions, .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), .type = "sst25wf040", }; /* SPI flash chip (sst25wf040) */ static struct bfin5xx_spi_chip spi_flash_chip_info = { .enable_dma = 0, /* use dma transfer with this chip*/ }; #endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) static 
struct bfin5xx_spi_chip mmc_spi_chip_info = { .enable_dma = 0, }; #endif #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) static const struct ad7877_platform_data bfin_ad7877_ts_info = { .model = 7877, .vref_delay_usecs = 50, /* internal, no capacitor */ .x_plate_ohms = 419, .y_plate_ohms = 486, .pressure_max = 1000, .pressure_min = 0, .stopacq_polarity = 1, .first_conversion_delay = 3, .acquisition_time = 1, .averaging = 1, .pen_down_acc_interval = 1, }; #endif #if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE) #include <linux/spi/ad7879.h> static const struct ad7879_platform_data bfin_ad7879_ts_info = { .model = 7879, /* Model = AD7879 */ .x_plate_ohms = 620, /* 620 Ohm from the touch datasheet */ .pressure_max = 10000, .pressure_min = 0, .first_conversion_delay = 3, /* wait 512us before do a first conversion */ .acquisition_time = 1, /* 4us acquisition time per sample */ .median = 2, /* do 8 measurements */ .averaging = 1, /* take the average of 4 middle samples */ .pen_down_acc_interval = 255, /* 9.4 ms */ .gpio_export = 1, /* Export GPIO to gpiolib */ .gpio_base = -1, /* Dynamic allocation */ }; #endif static struct spi_board_info bfin_spi_board_info[] __initdata = { #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) { /* the modalias must be the same as spi device driver name */ .modalias = "m25p80", /* Name of spi_driver for this device */ .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, /* Framework bus number */ .chip_select = 1, /* Framework chip select. 
On STAMP537 it is SPISSEL1*/ .platform_data = &bfin_spi_flash_data, .controller_data = &spi_flash_chip_info, .mode = SPI_MODE_3, }, #endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) { .modalias = "mmc_spi", .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 5, .controller_data = &mmc_spi_chip_info, .mode = SPI_MODE_3, }, #endif #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) { .modalias = "ad7877", .platform_data = &bfin_ad7877_ts_info, .irq = IRQ_PF8, .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 2, }, #endif #if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) { .modalias = "ad7879", .platform_data = &bfin_ad7879_ts_info, .irq = IRQ_PG0, .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 5, .mode = SPI_CPHA | SPI_CPOL, }, #endif #if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \ && defined(CONFIG_SND_SOC_WM8731_SPI) { .modalias = "wm8731", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 5, .mode = SPI_MODE_0, }, #endif #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) { .modalias = "spidev", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 1, }, #endif #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) { .modalias = "bfin-lq035q1-spi", .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 1, .mode = SPI_CPHA | SPI_CPOL, }, #endif }; #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) /* SPI controller data */ static struct bfin5xx_spi_master bfin_spi0_info = { .num_chipselect = 8, .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0}, }; /* SPI (0) */ static struct 
resource bfin_spi0_resource[] = { [0] = { .start = SPI0_REGBASE, .end = SPI0_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = CH_SPI, .end = CH_SPI, .flags = IORESOURCE_DMA, }, [2] = { .start = IRQ_SPI, .end = IRQ_SPI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_spi0_device = { .name = "bfin-spi", .id = 0, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi0_resource), .resource = bfin_spi0_resource, .dev = { .platform_data = &bfin_spi0_info, /* Passed to driver */ }, }; #endif /* spi master and devices */ #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 static struct resource bfin_uart0_resources[] = { { .start = UART0_THR, .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_TX, .end = IRQ_UART0_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_ERROR, .end = IRQ_UART0_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_TX, .end = CH_UART0_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART0_RX, .end = CH_UART0_RX, .flags = IORESOURCE_DMA, }, }; static unsigned short bfin_uart0_peripherals[] = { P_UART0_TX, P_UART0_RX, 0 }; static struct platform_device bfin_uart0_device = { .name = "bfin-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_uart0_resources), .resource = bfin_uart0_resources, .dev = { .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 static struct resource bfin_uart1_resources[] = { { .start = UART1_THR, .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_TX, .end = IRQ_UART1_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_ERROR, .end = IRQ_UART1_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_TX, .end = CH_UART1_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART1_RX, .end = CH_UART1_RX, .flags = 
IORESOURCE_DMA, }, #ifdef CONFIG_BFIN_UART1_CTSRTS { /* CTS pin */ .start = GPIO_PG0, .end = GPIO_PG0, .flags = IORESOURCE_IO, }, { /* RTS pin */ .start = GPIO_PF10, .end = GPIO_PF10, .flags = IORESOURCE_IO, }, #endif }; static unsigned short bfin_uart1_peripherals[] = { P_UART1_TX, P_UART1_RX, 0 }; static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_uart1_resources), .resource = bfin_uart1_resources, .dev = { .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ }, }; #endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 static struct resource bfin_sir0_resources[] = { { .start = 0xFFC00400, .end = 0xFFC004FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_RX, .end = CH_UART0_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir0_device = { .name = "bfin_sir", .id = 0, .num_resources = ARRAY_SIZE(bfin_sir0_resources), .resource = bfin_sir0_resources, }; #endif #ifdef CONFIG_BFIN_SIR1 static struct resource bfin_sir1_resources[] = { { .start = 0xFFC02000, .end = 0xFFC020FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_RX, .end = CH_UART1_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir1_device = { .name = "bfin_sir", .id = 1, .num_resources = ARRAY_SIZE(bfin_sir1_resources), .resource = bfin_sir1_resources, }; #endif #endif #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) static struct resource bfin_twi0_resource[] = { [0] = { .start = TWI0_REGBASE, .end = TWI0_REGBASE, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_TWI, .end = IRQ_TWI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device i2c_bfin_twi_device = { .name = "i2c-bfin-twi", .id = 0, .num_resources = ARRAY_SIZE(bfin_twi0_resource), .resource = 
bfin_twi0_resource, }; #endif static struct i2c_board_info __initdata bfin_i2c_board_info[] = { #if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) { I2C_BOARD_INFO("pcf8574_lcd", 0x22), }, #endif #if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE) { I2C_BOARD_INFO("pcf8574_keypad", 0x27), .irq = IRQ_PF8, }, #endif }; #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART static struct resource bfin_sport0_uart_resources[] = { { .start = SPORT0_TCR1, .end = SPORT0_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT0_RX, .end = IRQ_SPORT0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT0_ERROR, .end = IRQ_SPORT0_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport0_peripherals[] = { P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0 }; static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), .resource = bfin_sport0_uart_resources, .dev = { .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART static struct resource bfin_sport1_uart_resources[] = { { .start = SPORT1_TCR1, .end = SPORT1_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT1_RX, .end = IRQ_SPORT1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT1_ERROR, .end = IRQ_SPORT1_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport1_peripherals[] = { P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), .resource = bfin_sport1_uart_resources, .dev = { .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ }, }; #endif #endif #if 
defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) #include <linux/input.h> #include <linux/gpio_keys.h> static struct gpio_keys_button bfin_gpio_keys_table[] = { {BTN_0, GPIO_PG0, 1, "gpio-keys: BTN0"}, {BTN_1, GPIO_PG13, 1, "gpio-keys: BTN1"}, }; static struct gpio_keys_platform_data bfin_gpio_keys_data = { .buttons = bfin_gpio_keys_table, .nbuttons = ARRAY_SIZE(bfin_gpio_keys_table), }; static struct platform_device bfin_device_gpiokeys = { .name = "gpio-keys", .dev = { .platform_data = &bfin_gpio_keys_data, }, }; #endif static const unsigned int cclk_vlev_datasheet[] = { VRPAIR(VLEV_100, 400000000), VRPAIR(VLEV_105, 426000000), VRPAIR(VLEV_110, 500000000), VRPAIR(VLEV_115, 533000000), VRPAIR(VLEV_120, 600000000), }; static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { .tuple_tab = cclk_vlev_datasheet, .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), .vr_settling_time = 25 /* us */, }; static struct platform_device bfin_dpmc = { .name = "bfin dpmc", .dev = { .platform_data = &bfin_dmpc_vreg_data, }, }; #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) #include <asm/bfin-lq035q1.h> static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = { .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB, .ppi_mode = USE_RGB565_16_BIT_PPI, .use_bl = 1, .gpio_bl = GPIO_PG12, }; static struct resource bfin_lq035q1_resources[] = { { .start = IRQ_PPI_ERROR, .end = IRQ_PPI_ERROR, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_lq035q1_device = { .name = "bfin-lq035q1", .id = -1, .num_resources = ARRAY_SIZE(bfin_lq035q1_resources), .resource = bfin_lq035q1_resources, .dev = { .platform_data = &bfin_lq035q1_data, }, }; #endif static struct platform_device *stamp_devices[] __initdata = { &bfin_dpmc, #if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) &bf5xx_nand_device, #endif #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) &rtc_device, #endif #if 
defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) &musb_device, #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) &bfin_mii_bus, &bfin_mac_device, #endif #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) &bfin_spi0_device, #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) &bfin_lq035q1_device, #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 &bfin_sir0_device, #endif #ifdef CONFIG_BFIN_SIR1 &bfin_sir1_device, #endif #endif #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) &i2c_bfin_twi_device, #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) &bfin_device_gpiokeys, #endif #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) &ezbrd_flash_device, #endif }; static int __init ezbrd_init(void) { printk(KERN_INFO "%s(): registering device resources\n", __func__); i2c_register_board_info(0, bfin_i2c_board_info, ARRAY_SIZE(bfin_i2c_board_info)); platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices)); spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); return 0; } arch_initcall(ezbrd_init); static struct platform_device *ezbrd_early_devices[] __initdata = { #if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) #ifdef 
CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif }; void __init native_machine_early_platform_add_devices(void) { printk(KERN_INFO "register early platform devices\n"); early_platform_add_devices(ezbrd_early_devices, ARRAY_SIZE(ezbrd_early_devices)); } void native_machine_restart(char *cmd) { /* workaround reboot hang when booting from SPI */ if ((bfin_read_SYSCR() & 0x7) == 0x3) bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); } int bfin_get_ether_addr(char *addr) { /* the MAC is stored in OTP memory page 0xDF */ u32 ret; u64 otp_mac; u32 (*otp_read)(u32 page, u32 flags, u64 *page_content) = (void *)0xEF00001A; ret = otp_read(0xDF, 0x00, &otp_mac); if (!(ret & 0x1)) { char *otp_mac_p = (char *)&otp_mac; for (ret = 0; ret < 6; ++ret) addr[ret] = otp_mac_p[5 - ret]; } return 0; } EXPORT_SYMBOL(bfin_get_ether_addr);
gpl-2.0
raden/kencana-kernel
arch/blackfin/mach-bf527/boards/ad7160eval.c
4419
20371
/* * Copyright 2004-20010 Analog Devices Inc. * 2005 National ICT Australia (NICTA) * Aidan Williams <aidan@nicta.com.au> * * Licensed under the GPL-2 or later. */ #include <linux/device.h> #include <linux/export.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/i2c.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/usb/musb.h> #include <linux/leds.h> #include <linux/input.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> #include <asm/reboot.h> #include <asm/nand.h> #include <asm/portmux.h> #include <asm/dpmc.h> /* * Name the Board for the /proc/cpuinfo */ const char bfin_board_name[] = "ADI BF527-AD7160EVAL"; /* * Driver needs to know address, irq and flag pin. */ #if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) static struct resource musb_resources[] = { [0] = { .start = 0xffc03800, .end = 0xffc03cff, .flags = IORESOURCE_MEM, }, [1] = { /* general IRQ */ .start = IRQ_USB_INT0, .end = IRQ_USB_INT0, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, [2] = { /* DMA IRQ */ .start = IRQ_USB_DMA, .end = IRQ_USB_DMA, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct musb_hdrc_config musb_config = { .multipoint = 0, .dyn_fifo = 0, .soft_con = 1, .dma = 1, .num_eps = 8, .dma_channels = 8, .gpio_vrsel = GPIO_PG13, /* Some custom boards need to be active low, just set it to "0" * if it is the case. 
*/ .gpio_vrsel_active = 1, .clkin = 24, /* musb CLKIN in MHZ */ }; static struct musb_hdrc_platform_data musb_plat = { #if defined(CONFIG_USB_MUSB_OTG) .mode = MUSB_OTG, #elif defined(CONFIG_USB_MUSB_HDRC_HCD) .mode = MUSB_HOST, #elif defined(CONFIG_USB_GADGET_MUSB_HDRC) .mode = MUSB_PERIPHERAL, #endif .config = &musb_config, }; static u64 musb_dmamask = ~(u32)0; static struct platform_device musb_device = { .name = "musb-blackfin", .id = 0, .dev = { .dma_mask = &musb_dmamask, .coherent_dma_mask = 0xffffffff, .platform_data = &musb_plat, }, .num_resources = ARRAY_SIZE(musb_resources), .resource = musb_resources, }; #endif #if defined(CONFIG_FB_BFIN_RA158Z) || defined(CONFIG_FB_BFIN_RA158Z_MODULE) static struct resource bf52x_ra158z_resources[] = { { .start = IRQ_PPI_ERROR, .end = IRQ_PPI_ERROR, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bf52x_ra158z_device = { .name = "bfin-ra158z", .id = -1, .num_resources = ARRAY_SIZE(bf52x_ra158z_resources), .resource = bf52x_ra158z_resources, }; #endif #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) static struct mtd_partition ad7160eval_partitions[] = { { .name = "bootloader(nor)", .size = 0x40000, .offset = 0, }, { .name = "linux kernel(nor)", .size = 0x1C0000, .offset = MTDPART_OFS_APPEND, }, { .name = "file system(nor)", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; static struct physmap_flash_data ad7160eval_flash_data = { .width = 2, .parts = ad7160eval_partitions, .nr_parts = ARRAY_SIZE(ad7160eval_partitions), }; static struct resource ad7160eval_flash_resource = { .start = 0x20000000, .end = 0x203fffff, .flags = IORESOURCE_MEM, }; static struct platform_device ad7160eval_flash_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &ad7160eval_flash_data, }, .num_resources = 1, .resource = &ad7160eval_flash_resource, }; #endif #if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) static struct mtd_partition partition_info[] = { { 
.name = "linux kernel(nand)", .offset = 0, .size = 4 * 1024 * 1024, }, { .name = "file system(nand)", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static struct bf5xx_nand_platform bf5xx_nand_platform = { .data_width = NFC_NWIDTH_8, .partitions = partition_info, .nr_partitions = ARRAY_SIZE(partition_info), .rd_dly = 3, .wr_dly = 3, }; static struct resource bf5xx_nand_resources[] = { { .start = NFC_CTL, .end = NFC_DATA_RD + 2, .flags = IORESOURCE_MEM, }, { .start = CH_NFC, .end = CH_NFC, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bf5xx_nand_device = { .name = "bf5xx-nand", .id = 0, .num_resources = ARRAY_SIZE(bf5xx_nand_resources), .resource = bf5xx_nand_resources, .dev = { .platform_data = &bf5xx_nand_platform, }, }; #endif #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) static struct platform_device rtc_device = { .name = "rtc-bfin", .id = -1, }; #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) #include <linux/bfin_mac.h> static const unsigned short bfin_mac_peripherals[] = P_RMII0; static struct bfin_phydev_platform_data bfin_phydev_data[] = { { .addr = 1, .irq = IRQ_MAC_PHYINT, }, }; static struct bfin_mii_bus_platform_data bfin_mii_bus_data = { .phydev_number = 1, .phydev_data = bfin_phydev_data, .phy_mode = PHY_INTERFACE_MODE_RMII, .mac_peripherals = bfin_mac_peripherals, }; static struct platform_device bfin_mii_bus = { .name = "bfin_mii_bus", .dev = { .platform_data = &bfin_mii_bus_data, } }; static struct platform_device bfin_mac_device = { .name = "bfin_mac", .dev = { .platform_data = &bfin_mii_bus, } }; #endif #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) static struct mtd_partition bfin_spi_flash_partitions[] = { { .name = "bootloader(spi)", .size = 0x00040000, .offset = 0, .mask_flags = MTD_CAP_ROM }, { .name = "linux kernel(spi)", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; static struct flash_platform_data bfin_spi_flash_data = { 
.name = "m25p80", .parts = bfin_spi_flash_partitions, .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), .type = "m25p16", }; /* SPI flash chip (m25p64) */ static struct bfin5xx_spi_chip spi_flash_chip_info = { .enable_dma = 0, /* use dma transfer with this chip*/ }; #endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) static struct bfin5xx_spi_chip mmc_spi_chip_info = { .enable_dma = 0, }; #endif #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) static struct platform_device bfin_i2s = { .name = "bfin-i2s", .id = CONFIG_SND_BF5XX_SPORT_NUM, /* TODO: add platform data here */ }; #endif #if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) static struct platform_device bfin_tdm = { .name = "bfin-tdm", .id = CONFIG_SND_BF5XX_SPORT_NUM, /* TODO: add platform data here */ }; #endif static struct spi_board_info bfin_spi_board_info[] __initdata = { #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) { /* the modalias must be the same as spi device driver name */ .modalias = "m25p80", /* Name of spi_driver for this device */ .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, /* Framework bus number */ .chip_select = 1, /* Framework chip select. 
On STAMP537 it is SPISSEL1*/ .platform_data = &bfin_spi_flash_data, .controller_data = &spi_flash_chip_info, .mode = SPI_MODE_3, }, #endif #if defined(CONFIG_SND_BF5XX_SOC_AD183X) \ || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) { .modalias = "ad183x", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 4, }, #endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) { .modalias = "mmc_spi", .max_speed_hz = 30000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = GPIO_PH3 + MAX_CTRL_CS, .controller_data = &mmc_spi_chip_info, .mode = SPI_MODE_3, }, #endif #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) { .modalias = "spidev", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 1, }, #endif }; #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) /* SPI controller data */ static struct bfin5xx_spi_master bfin_spi0_info = { .num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS, .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0}, }; /* SPI (0) */ static struct resource bfin_spi0_resource[] = { [0] = { .start = SPI0_REGBASE, .end = SPI0_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = CH_SPI, .end = CH_SPI, .flags = IORESOURCE_DMA, }, [2] = { .start = IRQ_SPI, .end = IRQ_SPI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_spi0_device = { .name = "bfin-spi", .id = 0, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi0_resource), .resource = bfin_spi0_resource, .dev = { .platform_data = &bfin_spi0_info, /* Passed to driver */ }, }; #endif /* spi master and devices */ #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 static struct resource bfin_uart0_resources[] = { { .start = UART0_THR, .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_TX, .end = IRQ_UART0_TX, 
.flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_ERROR, .end = IRQ_UART0_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_TX, .end = CH_UART0_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART0_RX, .end = CH_UART0_RX, .flags = IORESOURCE_DMA, }, }; static unsigned short bfin_uart0_peripherals[] = { P_UART0_TX, P_UART0_RX, 0 }; static struct platform_device bfin_uart0_device = { .name = "bfin-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_uart0_resources), .resource = bfin_uart0_resources, .dev = { .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 static struct resource bfin_uart1_resources[] = { { .start = UART1_THR, .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_TX, .end = IRQ_UART1_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_ERROR, .end = IRQ_UART1_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_TX, .end = CH_UART1_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART1_RX, .end = CH_UART1_RX, .flags = IORESOURCE_DMA, }, #ifdef CONFIG_BFIN_UART1_CTSRTS { /* CTS pin */ .start = GPIO_PF9, .end = GPIO_PF9, .flags = IORESOURCE_IO, }, { /* RTS pin */ .start = GPIO_PF10, .end = GPIO_PF10, .flags = IORESOURCE_IO, }, #endif }; static unsigned short bfin_uart1_peripherals[] = { P_UART1_TX, P_UART1_RX, 0 }; static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_uart1_resources), .resource = bfin_uart1_resources, .dev = { .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ }, }; #endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 static struct resource bfin_sir0_resources[] = { { .start = 0xFFC00400, .end = 0xFFC004FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX+1, .flags = 
IORESOURCE_IRQ, }, { .start = CH_UART0_RX, .end = CH_UART0_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir0_device = { .name = "bfin_sir", .id = 0, .num_resources = ARRAY_SIZE(bfin_sir0_resources), .resource = bfin_sir0_resources, }; #endif #ifdef CONFIG_BFIN_SIR1 static struct resource bfin_sir1_resources[] = { { .start = 0xFFC02000, .end = 0xFFC020FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_RX, .end = CH_UART1_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir1_device = { .name = "bfin_sir", .id = 1, .num_resources = ARRAY_SIZE(bfin_sir1_resources), .resource = bfin_sir1_resources, }; #endif #endif #if defined(CONFIG_TOUCHSCREEN_AD7160) || defined(CONFIG_TOUCHSCREEN_AD7160_MODULE) #include <linux/input/ad7160.h> static const struct ad7160_platform_data bfin_ad7160_ts_info = { .sensor_x_res = 854, .sensor_y_res = 480, .pressure = 100, .filter_coef = 3, .coord_pref = AD7160_ORIG_TOP_LEFT, .first_touch_window = 5, .move_window = 3, .event_cabs = AD7160_EMIT_ABS_MT_TRACKING_ID | AD7160_EMIT_ABS_MT_PRESSURE | AD7160_TRACKING_ID_ASCENDING, .finger_act_ctrl = 0x64, .haptic_effect1_ctrl = AD7160_HAPTIC_SLOT_A(60) | AD7160_HAPTIC_SLOT_A_LVL_HIGH | AD7160_HAPTIC_SLOT_B(60) | AD7160_HAPTIC_SLOT_B_LVL_LOW, .haptic_effect2_ctrl = AD7160_HAPTIC_SLOT_A(20) | AD7160_HAPTIC_SLOT_A_LVL_HIGH | AD7160_HAPTIC_SLOT_B(80) | AD7160_HAPTIC_SLOT_B_LVL_LOW | AD7160_HAPTIC_SLOT_C(120) | AD7160_HAPTIC_SLOT_C_LVL_HIGH | AD7160_HAPTIC_SLOT_D(30) | AD7160_HAPTIC_SLOT_D_LVL_LOW, }; #endif #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) static struct resource bfin_twi0_resource[] = { [0] = { .start = TWI0_REGBASE, .end = TWI0_REGBASE, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_TWI, .end = IRQ_TWI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device i2c_bfin_twi_device = { .name = "i2c-bfin-twi", .id = 0, 
.num_resources = ARRAY_SIZE(bfin_twi0_resource), .resource = bfin_twi0_resource, }; #endif static struct i2c_board_info __initdata bfin_i2c_board_info[] = { #if defined(CONFIG_TOUCHSCREEN_AD7160) || defined(CONFIG_TOUCHSCREEN_AD7160_MODULE) { I2C_BOARD_INFO("ad7160", 0x33), .irq = IRQ_PH1, .platform_data = (void *)&bfin_ad7160_ts_info, }, #endif }; #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART static struct resource bfin_sport0_uart_resources[] = { { .start = SPORT0_TCR1, .end = SPORT0_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT0_RX, .end = IRQ_SPORT0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT0_ERROR, .end = IRQ_SPORT0_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport0_peripherals[] = { P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0 }; static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), .resource = bfin_sport0_uart_resources, .dev = { .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART static struct resource bfin_sport1_uart_resources[] = { { .start = SPORT1_TCR1, .end = SPORT1_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT1_RX, .end = IRQ_SPORT1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT1_ERROR, .end = IRQ_SPORT1_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport1_peripherals[] = { P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), .resource = bfin_sport1_uart_resources, .dev = { .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ }, }; #endif #endif #if defined(CONFIG_INPUT_BFIN_ROTARY) 
|| defined(CONFIG_INPUT_BFIN_ROTARY_MODULE) #include <asm/bfin_rotary.h> static struct bfin_rotary_platform_data bfin_rotary_data = { /*.rotary_up_key = KEY_UP,*/ /*.rotary_down_key = KEY_DOWN,*/ .rotary_rel_code = REL_WHEEL, .rotary_button_key = KEY_ENTER, .debounce = 10, /* 0..17 */ .mode = ROT_QUAD_ENC | ROT_DEBE, }; static struct resource bfin_rotary_resources[] = { { .start = IRQ_CNT, .end = IRQ_CNT, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_rotary_device = { .name = "bfin-rotary", .id = -1, .num_resources = ARRAY_SIZE(bfin_rotary_resources), .resource = bfin_rotary_resources, .dev = { .platform_data = &bfin_rotary_data, }, }; #endif static const unsigned int cclk_vlev_datasheet[] = { VRPAIR(VLEV_100, 400000000), VRPAIR(VLEV_105, 426000000), VRPAIR(VLEV_110, 500000000), VRPAIR(VLEV_115, 533000000), VRPAIR(VLEV_120, 600000000), }; static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { .tuple_tab = cclk_vlev_datasheet, .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), .vr_settling_time = 25 /* us */, }; static struct platform_device bfin_dpmc = { .name = "bfin dpmc", .dev = { .platform_data = &bfin_dmpc_vreg_data, }, }; static struct platform_device *stamp_devices[] __initdata = { &bfin_dpmc, #if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) &bf5xx_nand_device, #endif #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) &rtc_device, #endif #if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) &musb_device, #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) &bfin_mii_bus, &bfin_mac_device, #endif #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) &bfin_spi0_device, #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if defined(CONFIG_FB_BFIN_RA158Z) || 
defined(CONFIG_FB_BFIN_RA158Z_MODULE) &bf52x_ra158z_device, #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 &bfin_sir0_device, #endif #ifdef CONFIG_BFIN_SIR1 &bfin_sir1_device, #endif #endif #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) &i2c_bfin_twi_device, #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif #if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE) &bfin_rotary_device, #endif #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) &ad7160eval_flash_device, #endif #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) &bfin_i2s, #endif #if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) &bfin_tdm, #endif }; static int __init ad7160eval_init(void) { printk(KERN_INFO "%s(): registering device resources\n", __func__); i2c_register_board_info(0, bfin_i2c_board_info, ARRAY_SIZE(bfin_i2c_board_info)); platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices)); spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); return 0; } arch_initcall(ad7160eval_init); static struct platform_device *ad7160eval_early_devices[] __initdata = { #if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif }; void __init native_machine_early_platform_add_devices(void) { printk(KERN_INFO "register early platform devices\n"); early_platform_add_devices(ad7160eval_early_devices, 
ARRAY_SIZE(ad7160eval_early_devices)); } void native_machine_restart(char *cmd) { /* workaround reboot hang when booting from SPI */ if ((bfin_read_SYSCR() & 0x7) == 0x3) bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); } int bfin_get_ether_addr(char *addr) { /* the MAC is stored in OTP memory page 0xDF */ u32 ret; u64 otp_mac; u32 (*otp_read)(u32 page, u32 flags, u64 *page_content) = (void *)0xEF00001A; ret = otp_read(0xDF, 0x00, &otp_mac); if (!(ret & 0x1)) { char *otp_mac_p = (char *)&otp_mac; for (ret = 0; ret < 6; ++ret) addr[ret] = otp_mac_p[5 - ret]; } return 0; } EXPORT_SYMBOL(bfin_get_ether_addr);
gpl-2.0
DooMLoRD/android_kernel_sony_msm8960t_aosp
arch/powerpc/kvm/book3s_pr_papr.c
4675
4066
/* * Copyright (C) 2011. Freescale Inc. All rights reserved. * * Authors: * Alexander Graf <agraf@suse.de> * Paul Mackerras <paulus@samba.org> * * Description: * * Hypercall handling for running PAPR guests in PR KVM on Book 3S * processors. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. */ #include <asm/uaccess.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); unsigned long pteg_addr; pte_index <<= 4; pte_index &= ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1) << 7 | 0x70; pteg_addr = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL; pteg_addr |= pte_index; return pteg_addr; } static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) { long flags = kvmppc_get_gpr(vcpu, 4); long pte_index = kvmppc_get_gpr(vcpu, 5); unsigned long pteg[2 * 8]; unsigned long pteg_addr, i, *hpte; pte_index &= ~7UL; pteg_addr = get_pteg_addr(vcpu, pte_index); copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)); hpte = pteg; if (likely((flags & H_EXACT) == 0)) { pte_index &= ~7UL; for (i = 0; ; ++i) { if (i == 8) return H_PTEG_FULL; if ((*hpte & HPTE_V_VALID) == 0) break; hpte += 2; } } else { i = kvmppc_get_gpr(vcpu, 5) & 7UL; hpte += i * 2; } hpte[0] = kvmppc_get_gpr(vcpu, 6); hpte[1] = kvmppc_get_gpr(vcpu, 7); copy_to_user((void __user *)pteg_addr, pteg, sizeof(pteg)); kvmppc_set_gpr(vcpu, 3, H_SUCCESS); kvmppc_set_gpr(vcpu, 4, pte_index | i); return EMULATE_DONE; } static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu) { unsigned long flags= kvmppc_get_gpr(vcpu, 4); unsigned long pte_index = kvmppc_get_gpr(vcpu, 5); unsigned long avpn = kvmppc_get_gpr(vcpu, 6); unsigned long v = 0, pteg, rb; unsigned long pte[2]; pteg = get_pteg_addr(vcpu, pte_index); copy_from_user(pte, (void __user *)pteg, sizeof(pte)); if 
((pte[0] & HPTE_V_VALID) == 0 || ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) || ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) { kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND); return EMULATE_DONE; } copy_to_user((void __user *)pteg, &v, sizeof(v)); rb = compute_tlbie_rb(pte[0], pte[1], pte_index); vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); kvmppc_set_gpr(vcpu, 3, H_SUCCESS); kvmppc_set_gpr(vcpu, 4, pte[0]); kvmppc_set_gpr(vcpu, 5, pte[1]); return EMULATE_DONE; } static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) { unsigned long flags = kvmppc_get_gpr(vcpu, 4); unsigned long pte_index = kvmppc_get_gpr(vcpu, 5); unsigned long avpn = kvmppc_get_gpr(vcpu, 6); unsigned long rb, pteg, r, v; unsigned long pte[2]; pteg = get_pteg_addr(vcpu, pte_index); copy_from_user(pte, (void __user *)pteg, sizeof(pte)); if ((pte[0] & HPTE_V_VALID) == 0 || ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) { kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND); return EMULATE_DONE; } v = pte[0]; r = pte[1]; r &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI | HPTE_R_KEY_LO); r |= (flags << 55) & HPTE_R_PP0; r |= (flags << 48) & HPTE_R_KEY_HI; r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO); pte[1] = r; rb = compute_tlbie_rb(v, r, pte_index); vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); copy_to_user((void __user *)pteg, pte, sizeof(pte)); kvmppc_set_gpr(vcpu, 3, H_SUCCESS); return EMULATE_DONE; } int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) { switch (cmd) { case H_ENTER: return kvmppc_h_pr_enter(vcpu); case H_REMOVE: return kvmppc_h_pr_remove(vcpu); case H_PROTECT: return kvmppc_h_pr_protect(vcpu); case H_BULK_REMOVE: /* We just flush all PTEs, so user space can handle the HPT modifications */ kvmppc_mmu_pte_flush(vcpu, 0, 0); break; case H_CEDE: kvm_vcpu_block(vcpu); vcpu->stat.halt_wakeup++; return EMULATE_DONE; } return EMULATE_FAIL; }
gpl-2.0
lostemp/lsk-3.4-android-12.09
sound/pci/hda/hda_generic.c
5187
28446
/* * Universal Interface for Intel High Definition Audio Codec * * Generic widget tree parser * * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de> * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/slab.h> #include <linux/export.h> #include <sound/core.h> #include "hda_codec.h" #include "hda_local.h" /* widget node for parsing */ struct hda_gnode { hda_nid_t nid; /* NID of this widget */ unsigned short nconns; /* number of input connections */ hda_nid_t *conn_list; hda_nid_t slist[2]; /* temporay list */ unsigned int wid_caps; /* widget capabilities */ unsigned char type; /* widget type */ unsigned char pin_ctl; /* pin controls */ unsigned char checked; /* the flag indicates that the node is already parsed */ unsigned int pin_caps; /* pin widget capabilities */ unsigned int def_cfg; /* default configuration */ unsigned int amp_out_caps; /* AMP out capabilities */ unsigned int amp_in_caps; /* AMP in capabilities */ struct list_head list; }; /* patch-specific record */ #define MAX_PCM_VOLS 2 struct pcm_vol { struct hda_gnode *node; /* Node for PCM volume */ unsigned int index; /* connection of PCM volume */ }; struct hda_gspec { struct hda_gnode *dac_node[2]; /* DAC node */ struct hda_gnode *out_pin_node[2]; /* Output pin (Line-Out) node */ struct pcm_vol pcm_vol[MAX_PCM_VOLS]; /* PCM volumes 
*/ unsigned int pcm_vol_nodes; /* number of PCM volumes */ struct hda_gnode *adc_node; /* ADC node */ struct hda_gnode *cap_vol_node; /* Node for capture volume */ unsigned int cur_cap_src; /* current capture source */ struct hda_input_mux input_mux; unsigned int def_amp_in_caps; unsigned int def_amp_out_caps; struct hda_pcm pcm_rec; /* PCM information */ struct list_head nid_list; /* list of widgets */ #ifdef CONFIG_SND_HDA_POWER_SAVE #define MAX_LOOPBACK_AMPS 7 struct hda_loopback_check loopback; int num_loopbacks; struct hda_amp_list loopback_list[MAX_LOOPBACK_AMPS + 1]; #endif }; /* * retrieve the default device type from the default config value */ #define defcfg_type(node) (((node)->def_cfg & AC_DEFCFG_DEVICE) >> \ AC_DEFCFG_DEVICE_SHIFT) #define defcfg_location(node) (((node)->def_cfg & AC_DEFCFG_LOCATION) >> \ AC_DEFCFG_LOCATION_SHIFT) #define defcfg_port_conn(node) (((node)->def_cfg & AC_DEFCFG_PORT_CONN) >> \ AC_DEFCFG_PORT_CONN_SHIFT) /* * destructor */ static void snd_hda_generic_free(struct hda_codec *codec) { struct hda_gspec *spec = codec->spec; struct hda_gnode *node, *n; if (! 
spec) return; /* free all widgets */ list_for_each_entry_safe(node, n, &spec->nid_list, list) { if (node->conn_list != node->slist) kfree(node->conn_list); kfree(node); } kfree(spec); } /* * add a new widget node and read its attributes */ static int add_new_node(struct hda_codec *codec, struct hda_gspec *spec, hda_nid_t nid) { struct hda_gnode *node; int nconns; hda_nid_t conn_list[HDA_MAX_CONNECTIONS]; node = kzalloc(sizeof(*node), GFP_KERNEL); if (node == NULL) return -ENOMEM; node->nid = nid; node->wid_caps = get_wcaps(codec, nid); node->type = get_wcaps_type(node->wid_caps); if (node->wid_caps & AC_WCAP_CONN_LIST) { nconns = snd_hda_get_connections(codec, nid, conn_list, HDA_MAX_CONNECTIONS); if (nconns < 0) { kfree(node); return nconns; } } else { nconns = 0; } if (nconns <= ARRAY_SIZE(node->slist)) node->conn_list = node->slist; else { node->conn_list = kmalloc(sizeof(hda_nid_t) * nconns, GFP_KERNEL); if (! node->conn_list) { snd_printk(KERN_ERR "hda-generic: cannot malloc\n"); kfree(node); return -ENOMEM; } } memcpy(node->conn_list, conn_list, nconns * sizeof(hda_nid_t)); node->nconns = nconns; if (node->type == AC_WID_PIN) { node->pin_caps = snd_hda_query_pin_caps(codec, node->nid); node->pin_ctl = snd_hda_codec_read(codec, node->nid, 0, AC_VERB_GET_PIN_WIDGET_CONTROL, 0); node->def_cfg = snd_hda_codec_get_pincfg(codec, node->nid); } if (node->wid_caps & AC_WCAP_OUT_AMP) { if (node->wid_caps & AC_WCAP_AMP_OVRD) node->amp_out_caps = snd_hda_param_read(codec, node->nid, AC_PAR_AMP_OUT_CAP); if (! node->amp_out_caps) node->amp_out_caps = spec->def_amp_out_caps; } if (node->wid_caps & AC_WCAP_IN_AMP) { if (node->wid_caps & AC_WCAP_AMP_OVRD) node->amp_in_caps = snd_hda_param_read(codec, node->nid, AC_PAR_AMP_IN_CAP); if (! 
node->amp_in_caps) node->amp_in_caps = spec->def_amp_in_caps; } list_add_tail(&node->list, &spec->nid_list); return 0; } /* * build the AFG subtree */ static int build_afg_tree(struct hda_codec *codec) { struct hda_gspec *spec = codec->spec; int i, nodes, err; hda_nid_t nid; if (snd_BUG_ON(!spec)) return -EINVAL; spec->def_amp_out_caps = snd_hda_param_read(codec, codec->afg, AC_PAR_AMP_OUT_CAP); spec->def_amp_in_caps = snd_hda_param_read(codec, codec->afg, AC_PAR_AMP_IN_CAP); nodes = snd_hda_get_sub_nodes(codec, codec->afg, &nid); if (! nid || nodes < 0) { printk(KERN_ERR "Invalid AFG subtree\n"); return -EINVAL; } /* parse all nodes belonging to the AFG */ for (i = 0; i < nodes; i++, nid++) { if ((err = add_new_node(codec, spec, nid)) < 0) return err; } return 0; } /* * look for the node record for the given NID */ /* FIXME: should avoid the braindead linear search */ static struct hda_gnode *hda_get_node(struct hda_gspec *spec, hda_nid_t nid) { struct hda_gnode *node; list_for_each_entry(node, &spec->nid_list, list) { if (node->nid == nid) return node; } return NULL; } /* * unmute (and set max vol) the output amplifier */ static int unmute_output(struct hda_codec *codec, struct hda_gnode *node) { unsigned int val, ofs; snd_printdd("UNMUTE OUT: NID=0x%x\n", node->nid); val = (node->amp_out_caps & AC_AMPCAP_NUM_STEPS) >> AC_AMPCAP_NUM_STEPS_SHIFT; ofs = (node->amp_out_caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT; if (val >= ofs) val -= ofs; snd_hda_codec_amp_stereo(codec, node->nid, HDA_OUTPUT, 0, 0xff, val); return 0; } /* * unmute (and set max vol) the input amplifier */ static int unmute_input(struct hda_codec *codec, struct hda_gnode *node, unsigned int index) { unsigned int val, ofs; snd_printdd("UNMUTE IN: NID=0x%x IDX=0x%x\n", node->nid, index); val = (node->amp_in_caps & AC_AMPCAP_NUM_STEPS) >> AC_AMPCAP_NUM_STEPS_SHIFT; ofs = (node->amp_in_caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT; if (val >= ofs) val -= ofs; snd_hda_codec_amp_stereo(codec, 
node->nid, HDA_INPUT, index, 0xff, val); return 0; } /* * select the input connection of the given node. */ static int select_input_connection(struct hda_codec *codec, struct hda_gnode *node, unsigned int index) { snd_printdd("CONNECT: NID=0x%x IDX=0x%x\n", node->nid, index); return snd_hda_codec_write_cache(codec, node->nid, 0, AC_VERB_SET_CONNECT_SEL, index); } /* * clear checked flag of each node in the node list */ static void clear_check_flags(struct hda_gspec *spec) { struct hda_gnode *node; list_for_each_entry(node, &spec->nid_list, list) { node->checked = 0; } } /* * parse the output path recursively until reach to an audio output widget * * returns 0 if not found, 1 if found, or a negative error code. */ static int parse_output_path(struct hda_codec *codec, struct hda_gspec *spec, struct hda_gnode *node, int dac_idx) { int i, err; struct hda_gnode *child; if (node->checked) return 0; node->checked = 1; if (node->type == AC_WID_AUD_OUT) { if (node->wid_caps & AC_WCAP_DIGITAL) { snd_printdd("Skip Digital OUT node %x\n", node->nid); return 0; } snd_printdd("AUD_OUT found %x\n", node->nid); if (spec->dac_node[dac_idx]) { /* already DAC node is assigned, just unmute & connect */ return node == spec->dac_node[dac_idx]; } spec->dac_node[dac_idx] = node; if ((node->wid_caps & AC_WCAP_OUT_AMP) && spec->pcm_vol_nodes < MAX_PCM_VOLS) { spec->pcm_vol[spec->pcm_vol_nodes].node = node; spec->pcm_vol[spec->pcm_vol_nodes].index = 0; spec->pcm_vol_nodes++; } return 1; /* found */ } for (i = 0; i < node->nconns; i++) { child = hda_get_node(spec, node->conn_list[i]); if (! 
child) continue; err = parse_output_path(codec, spec, child, dac_idx); if (err < 0) return err; else if (err > 0) { /* found one, * select the path, unmute both input and output */ if (node->nconns > 1) select_input_connection(codec, node, i); unmute_input(codec, node, i); unmute_output(codec, node); if (spec->dac_node[dac_idx] && spec->pcm_vol_nodes < MAX_PCM_VOLS && !(spec->dac_node[dac_idx]->wid_caps & AC_WCAP_OUT_AMP)) { if ((node->wid_caps & AC_WCAP_IN_AMP) || (node->wid_caps & AC_WCAP_OUT_AMP)) { int n = spec->pcm_vol_nodes; spec->pcm_vol[n].node = node; spec->pcm_vol[n].index = i; spec->pcm_vol_nodes++; } } return 1; } } return 0; } /* * Look for the output PIN widget with the given jack type * and parse the output path to that PIN. * * Returns the PIN node when the path to DAC is established. */ static struct hda_gnode *parse_output_jack(struct hda_codec *codec, struct hda_gspec *spec, int jack_type) { struct hda_gnode *node; int err; list_for_each_entry(node, &spec->nid_list, list) { if (node->type != AC_WID_PIN) continue; /* output capable? */ if (! (node->pin_caps & AC_PINCAP_OUT)) continue; if (defcfg_port_conn(node) == AC_JACK_PORT_NONE) continue; /* unconnected */ if (jack_type >= 0) { if (jack_type != defcfg_type(node)) continue; if (node->wid_caps & AC_WCAP_DIGITAL) continue; /* skip SPDIF */ } else { /* output as default? */ if (! (node->pin_ctl & AC_PINCTL_OUT_EN)) continue; } clear_check_flags(spec); err = parse_output_path(codec, spec, node, 0); if (err < 0) return NULL; if (! err && spec->out_pin_node[0]) { err = parse_output_path(codec, spec, node, 1); if (err < 0) return NULL; } if (err > 0) { /* unmute the PIN output */ unmute_output(codec, node); /* set PIN-Out enable */ snd_hda_codec_write_cache(codec, node->nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, AC_PINCTL_OUT_EN | ((node->pin_caps & AC_PINCAP_HP_DRV) ? 
AC_PINCTL_HP_EN : 0)); return node; } } return NULL; } /* * parse outputs */ static int parse_output(struct hda_codec *codec) { struct hda_gspec *spec = codec->spec; struct hda_gnode *node; /* * Look for the output PIN widget */ /* first, look for the line-out pin */ node = parse_output_jack(codec, spec, AC_JACK_LINE_OUT); if (node) /* found, remember the PIN node */ spec->out_pin_node[0] = node; else { /* if no line-out is found, try speaker out */ node = parse_output_jack(codec, spec, AC_JACK_SPEAKER); if (node) spec->out_pin_node[0] = node; } /* look for the HP-out pin */ node = parse_output_jack(codec, spec, AC_JACK_HP_OUT); if (node) { if (! spec->out_pin_node[0]) spec->out_pin_node[0] = node; else spec->out_pin_node[1] = node; } if (! spec->out_pin_node[0]) { /* no line-out or HP pins found, * then choose for the first output pin */ spec->out_pin_node[0] = parse_output_jack(codec, spec, -1); if (! spec->out_pin_node[0]) snd_printd("hda_generic: no proper output path found\n"); } return 0; } /* * input MUX */ /* control callbacks */ static int capture_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct hda_gspec *spec = codec->spec; return snd_hda_input_mux_info(&spec->input_mux, uinfo); } static int capture_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct hda_gspec *spec = codec->spec; ucontrol->value.enumerated.item[0] = spec->cur_cap_src; return 0; } static int capture_source_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct hda_gspec *spec = codec->spec; return snd_hda_input_mux_put(codec, &spec->input_mux, ucontrol, spec->adc_node->nid, &spec->cur_cap_src); } /* * return the string name of the given input PIN widget */ static const char *get_input_type(struct hda_gnode *node, unsigned int 
*pinctl) { unsigned int location = defcfg_location(node); switch (defcfg_type(node)) { case AC_JACK_LINE_IN: if ((location & 0x0f) == AC_JACK_LOC_FRONT) return "Front Line"; return "Line"; case AC_JACK_CD: #if 0 if (pinctl) *pinctl |= AC_PINCTL_VREF_GRD; #endif return "CD"; case AC_JACK_AUX: if ((location & 0x0f) == AC_JACK_LOC_FRONT) return "Front Aux"; return "Aux"; case AC_JACK_MIC_IN: if (pinctl && (node->pin_caps & (AC_PINCAP_VREF_80 << AC_PINCAP_VREF_SHIFT))) *pinctl |= AC_PINCTL_VREF_80; if ((location & 0x0f) == AC_JACK_LOC_FRONT) return "Front Mic"; return "Mic"; case AC_JACK_SPDIF_IN: return "SPDIF"; case AC_JACK_DIG_OTHER_IN: return "Digital"; } return NULL; } /* * parse the nodes recursively until reach to the input PIN * * returns 0 if not found, 1 if found, or a negative error code. */ static int parse_adc_sub_nodes(struct hda_codec *codec, struct hda_gspec *spec, struct hda_gnode *node, int idx) { int i, err; unsigned int pinctl; const char *type; if (node->checked) return 0; node->checked = 1; if (node->type != AC_WID_PIN) { for (i = 0; i < node->nconns; i++) { struct hda_gnode *child; child = hda_get_node(spec, node->conn_list[i]); if (! child) continue; err = parse_adc_sub_nodes(codec, spec, child, idx); if (err < 0) return err; if (err > 0) { /* found one, * select the path, unmute both input and output */ if (node->nconns > 1) select_input_connection(codec, node, i); unmute_input(codec, node, i); unmute_output(codec, node); return err; } } return 0; } /* input capable? */ if (! (node->pin_caps & AC_PINCAP_IN)) return 0; if (defcfg_port_conn(node) == AC_JACK_PORT_NONE) return 0; /* unconnected */ if (node->wid_caps & AC_WCAP_DIGITAL) return 0; /* skip SPDIF */ if (spec->input_mux.num_items >= HDA_MAX_NUM_INPUTS) { snd_printk(KERN_ERR "hda_generic: Too many items for capture\n"); return -EINVAL; } pinctl = AC_PINCTL_IN_EN; /* create a proper capture source label */ type = get_input_type(node, &pinctl); if (! type) { /* input as default? */ if (! 
(node->pin_ctl & AC_PINCTL_IN_EN)) return 0; type = "Input"; } snd_hda_add_imux_item(&spec->input_mux, type, idx, NULL); /* unmute the PIN external input */ unmute_input(codec, node, 0); /* index = 0? */ /* set PIN-In enable */ snd_hda_codec_write_cache(codec, node->nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl); return 1; /* found */ } /* * parse input */ static int parse_input_path(struct hda_codec *codec, struct hda_gnode *adc_node) { struct hda_gspec *spec = codec->spec; struct hda_gnode *node; int i, err; snd_printdd("AUD_IN = %x\n", adc_node->nid); clear_check_flags(spec); // awk added - fixed no recording due to muted widget unmute_input(codec, adc_node, 0); /* * check each connection of the ADC * if it reaches to a proper input PIN, add the path as the * input path. */ /* first, check the direct connections to PIN widgets */ for (i = 0; i < adc_node->nconns; i++) { node = hda_get_node(spec, adc_node->conn_list[i]); if (node && node->type == AC_WID_PIN) { err = parse_adc_sub_nodes(codec, spec, node, i); if (err < 0) return err; } } /* ... then check the rests, more complicated connections */ for (i = 0; i < adc_node->nconns; i++) { node = hda_get_node(spec, adc_node->conn_list[i]); if (node && node->type != AC_WID_PIN) { err = parse_adc_sub_nodes(codec, spec, node, i); if (err < 0) return err; } } if (! spec->input_mux.num_items) return 0; /* no input path found... */ snd_printdd("[Capture Source] NID=0x%x, #SRC=%d\n", adc_node->nid, spec->input_mux.num_items); for (i = 0; i < spec->input_mux.num_items; i++) snd_printdd(" [%s] IDX=0x%x\n", spec->input_mux.items[i].label, spec->input_mux.items[i].index); spec->adc_node = adc_node; return 1; } /* * parse input */ static int parse_input(struct hda_codec *codec) { struct hda_gspec *spec = codec->spec; struct hda_gnode *node; int err; /* * At first we look for an audio input widget. * If it reaches to certain input PINs, we take it as the * input path. 
*/ list_for_each_entry(node, &spec->nid_list, list) { if (node->wid_caps & AC_WCAP_DIGITAL) continue; /* skip SPDIF */ if (node->type == AC_WID_AUD_IN) { err = parse_input_path(codec, node); if (err < 0) return err; else if (err > 0) return 0; } } snd_printd("hda_generic: no proper input path found\n"); return 0; } #ifdef CONFIG_SND_HDA_POWER_SAVE static void add_input_loopback(struct hda_codec *codec, hda_nid_t nid, int dir, int idx) { struct hda_gspec *spec = codec->spec; struct hda_amp_list *p; if (spec->num_loopbacks >= MAX_LOOPBACK_AMPS) { snd_printk(KERN_ERR "hda_generic: Too many loopback ctls\n"); return; } p = &spec->loopback_list[spec->num_loopbacks++]; p->nid = nid; p->dir = dir; p->idx = idx; spec->loopback.amplist = spec->loopback_list; } #else #define add_input_loopback(codec,nid,dir,idx) #endif /* * create mixer controls if possible */ static int create_mixer(struct hda_codec *codec, struct hda_gnode *node, unsigned int index, const char *type, const char *dir_sfx, int is_loopback) { char name[32]; int err; int created = 0; struct snd_kcontrol_new knew; if (type) sprintf(name, "%s %s Switch", type, dir_sfx); else sprintf(name, "%s Switch", dir_sfx); if ((node->wid_caps & AC_WCAP_IN_AMP) && (node->amp_in_caps & AC_AMPCAP_MUTE)) { knew = (struct snd_kcontrol_new)HDA_CODEC_MUTE(name, node->nid, index, HDA_INPUT); if (is_loopback) add_input_loopback(codec, node->nid, HDA_INPUT, index); snd_printdd("[%s] NID=0x%x, DIR=IN, IDX=0x%x\n", name, node->nid, index); err = snd_hda_ctl_add(codec, node->nid, snd_ctl_new1(&knew, codec)); if (err < 0) return err; created = 1; } else if ((node->wid_caps & AC_WCAP_OUT_AMP) && (node->amp_out_caps & AC_AMPCAP_MUTE)) { knew = (struct snd_kcontrol_new)HDA_CODEC_MUTE(name, node->nid, 0, HDA_OUTPUT); if (is_loopback) add_input_loopback(codec, node->nid, HDA_OUTPUT, 0); snd_printdd("[%s] NID=0x%x, DIR=OUT\n", name, node->nid); err = snd_hda_ctl_add(codec, node->nid, snd_ctl_new1(&knew, codec)); if (err < 0) return err; 
created = 1; } if (type) sprintf(name, "%s %s Volume", type, dir_sfx); else sprintf(name, "%s Volume", dir_sfx); if ((node->wid_caps & AC_WCAP_IN_AMP) && (node->amp_in_caps & AC_AMPCAP_NUM_STEPS)) { knew = (struct snd_kcontrol_new)HDA_CODEC_VOLUME(name, node->nid, index, HDA_INPUT); snd_printdd("[%s] NID=0x%x, DIR=IN, IDX=0x%x\n", name, node->nid, index); err = snd_hda_ctl_add(codec, node->nid, snd_ctl_new1(&knew, codec)); if (err < 0) return err; created = 1; } else if ((node->wid_caps & AC_WCAP_OUT_AMP) && (node->amp_out_caps & AC_AMPCAP_NUM_STEPS)) { knew = (struct snd_kcontrol_new)HDA_CODEC_VOLUME(name, node->nid, 0, HDA_OUTPUT); snd_printdd("[%s] NID=0x%x, DIR=OUT\n", name, node->nid); err = snd_hda_ctl_add(codec, node->nid, snd_ctl_new1(&knew, codec)); if (err < 0) return err; created = 1; } return created; } /* * check whether the controls with the given name and direction suffix already exist */ static int check_existing_control(struct hda_codec *codec, const char *type, const char *dir) { struct snd_ctl_elem_id id; memset(&id, 0, sizeof(id)); sprintf(id.name, "%s %s Volume", type, dir); id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; if (snd_ctl_find_id(codec->bus->card, &id)) return 1; sprintf(id.name, "%s %s Switch", type, dir); id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; if (snd_ctl_find_id(codec->bus->card, &id)) return 1; return 0; } /* * build output mixer controls */ static int create_output_mixers(struct hda_codec *codec, const char * const *names) { struct hda_gspec *spec = codec->spec; int i, err; for (i = 0; i < spec->pcm_vol_nodes; i++) { err = create_mixer(codec, spec->pcm_vol[i].node, spec->pcm_vol[i].index, names[i], "Playback", 0); if (err < 0) return err; } return 0; } static int build_output_controls(struct hda_codec *codec) { struct hda_gspec *spec = codec->spec; static const char * const types_speaker[] = { "Speaker", "Headphone" }; static const char * const types_line[] = { "Front", "Headphone" }; switch (spec->pcm_vol_nodes) { case 1: return 
create_mixer(codec, spec->pcm_vol[0].node, spec->pcm_vol[0].index, "Master", "Playback", 0); case 2: if (defcfg_type(spec->out_pin_node[0]) == AC_JACK_SPEAKER) return create_output_mixers(codec, types_speaker); else return create_output_mixers(codec, types_line); } return 0; } /* create capture volume/switch */ static int build_input_controls(struct hda_codec *codec) { struct hda_gspec *spec = codec->spec; struct hda_gnode *adc_node = spec->adc_node; int i, err; static struct snd_kcontrol_new cap_sel = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Capture Source", .info = capture_source_info, .get = capture_source_get, .put = capture_source_put, }; if (! adc_node || ! spec->input_mux.num_items) return 0; /* not found */ spec->cur_cap_src = 0; select_input_connection(codec, adc_node, spec->input_mux.items[0].index); /* create capture volume and switch controls if the ADC has an amp */ /* do we have only a single item? */ if (spec->input_mux.num_items == 1) { err = create_mixer(codec, adc_node, spec->input_mux.items[0].index, NULL, "Capture", 0); if (err < 0) return err; return 0; } /* create input MUX if multiple sources are available */ err = snd_hda_ctl_add(codec, spec->adc_node->nid, snd_ctl_new1(&cap_sel, codec)); if (err < 0) return err; /* no volume control? */ if (! (adc_node->wid_caps & AC_WCAP_IN_AMP) || ! (adc_node->amp_in_caps & AC_AMPCAP_NUM_STEPS)) return 0; for (i = 0; i < spec->input_mux.num_items; i++) { struct snd_kcontrol_new knew; char name[32]; sprintf(name, "%s Capture Volume", spec->input_mux.items[i].label); knew = (struct snd_kcontrol_new) HDA_CODEC_VOLUME(name, adc_node->nid, spec->input_mux.items[i].index, HDA_INPUT); err = snd_hda_ctl_add(codec, adc_node->nid, snd_ctl_new1(&knew, codec)); if (err < 0) return err; } return 0; } /* * parse the nodes recursively until reach to the output PIN. 
 *
 * returns 0 - if not found,
 *         1 - if found, but no mixer is created
 *         2 - if found and mixer was already created, (just skip)
 *         a negative error code
 */
static int parse_loopback_path(struct hda_codec *codec, struct hda_gspec *spec,
			       struct hda_gnode *node,
			       struct hda_gnode *dest_node,
			       const char *type)
{
	int i, err;

	/* each node is visited at most once per pass (flags cleared by caller) */
	if (node->checked)
		return 0;
	node->checked = 1;
	if (node == dest_node) {
		/* loopback connection found */
		return 1;
	}
	for (i = 0; i < node->nconns; i++) {
		struct hda_gnode *child =
			hda_get_node(spec, node->conn_list[i]);
		if (! child)
			continue;
		err = parse_loopback_path(codec, spec, child,
					  dest_node, type);
		if (err < 0)
			return err;
		else if (err >= 1) {
			if (err == 1) {
				err = create_mixer(codec, node, i, type,
						   "Playback", 1);
				if (err < 0)
					return err;
				if (err > 0)
					return 2; /* ok, created */
				/* not created, maybe in the lower path */
				err = 1;
			}
			/* connect and unmute */
			if (node->nconns > 1)
				select_input_connection(codec, node, i);
			unmute_input(codec, node, i);
			unmute_output(codec, node);
			return err;
		}
	}
	return 0;
}

/*
 * parse the tree and build the loopback controls
 */
static int build_loopback_controls(struct hda_codec *codec)
{
	struct hda_gspec *spec = codec->spec;
	struct hda_gnode *node;
	int err;
	const char *type;

	if (! spec->out_pin_node[0])
		return 0;

	list_for_each_entry(node, &spec->nid_list, list) {
		if (node->type != AC_WID_PIN)
			continue;
		/* input capable? */
		if (! (node->pin_caps & AC_PINCAP_IN))
			return 0;
		type = get_input_type(node, NULL);
		if (type) {
			if (check_existing_control(codec, type, "Playback"))
				continue;
			clear_check_flags(spec);
			err = parse_loopback_path(codec, spec,
						  spec->out_pin_node[0],
						  node, type);
			if (err < 0)
				return err;
			if (!
err)
				continue;
		}
	}
	return 0;
}

/*
 * build mixer controls: input, output, then loopback; first failure wins
 */
static int build_generic_controls(struct hda_codec *codec)
{
	int err;

	if ((err = build_input_controls(codec)) < 0 ||
	    (err = build_output_controls(codec)) < 0 ||
	    (err = build_loopback_controls(codec)) < 0)
		return err;

	return 0;
}

/*
 * PCM
 */
static struct hda_pcm_stream generic_pcm_playback = {
	.substreams = 1,
	.channels_min = 2,
	.channels_max = 2,
};

/* prepare hook for the dual-DAC case: route the stream to both DACs */
static int generic_pcm2_prepare(struct hda_pcm_stream *hinfo,
				struct hda_codec *codec,
				unsigned int stream_tag,
				unsigned int format,
				struct snd_pcm_substream *substream)
{
	struct hda_gspec *spec = codec->spec;

	snd_hda_codec_setup_stream(codec, hinfo->nid, stream_tag, 0, format);
	snd_hda_codec_setup_stream(codec, spec->dac_node[1]->nid,
				   stream_tag, 0, format);
	return 0;
}

/* cleanup hook for the dual-DAC case: tear down both streams */
static int generic_pcm2_cleanup(struct hda_pcm_stream *hinfo,
				struct hda_codec *codec,
				struct snd_pcm_substream *substream)
{
	struct hda_gspec *spec = codec->spec;

	snd_hda_codec_cleanup_stream(codec, hinfo->nid);
	snd_hda_codec_cleanup_stream(codec, spec->dac_node[1]->nid);
	return 0;
}

static int build_generic_pcms(struct hda_codec *codec)
{
	struct hda_gspec *spec = codec->spec;
	struct hda_pcm *info = &spec->pcm_rec;

	if (! spec->dac_node[0] && !
spec->adc_node) {
		snd_printd("hda_generic: no PCM found\n");
		return 0;
	}

	codec->num_pcms = 1;
	codec->pcm_info = info;

	info->name = "HDA Generic";
	if (spec->dac_node[0]) {
		info->stream[0] = generic_pcm_playback;
		info->stream[0].nid = spec->dac_node[0]->nid;
		if (spec->dac_node[1]) {
			/* two DACs: install the dual-stream prepare/cleanup ops */
			info->stream[0].ops.prepare = generic_pcm2_prepare;
			info->stream[0].ops.cleanup = generic_pcm2_cleanup;
		}
	}
	if (spec->adc_node) {
		info->stream[1] = generic_pcm_playback;
		info->stream[1].nid = spec->adc_node->nid;
	}

	return 0;
}

#ifdef CONFIG_SND_HDA_POWER_SAVE
static int generic_check_power_status(struct hda_codec *codec, hda_nid_t nid)
{
	struct hda_gspec *spec = codec->spec;
	return snd_hda_check_amp_list_power(codec, &spec->loopback, nid);
}
#endif

/*
 * codec ops vtable for the generic parser
 */
static struct hda_codec_ops generic_patch_ops = {
	.build_controls = build_generic_controls,
	.build_pcms = build_generic_pcms,
	.free = snd_hda_generic_free,
#ifdef CONFIG_SND_HDA_POWER_SAVE
	.check_power_status = generic_check_power_status,
#endif
};

/*
 * the generic parser
 */
int snd_hda_parse_generic_codec(struct hda_codec *codec)
{
	struct hda_gspec *spec;
	int err;

	if(!codec->afg)
		return 0;

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (spec == NULL) {
		printk(KERN_ERR "hda_generic: can't allocate spec\n");
		return -ENOMEM;
	}
	codec->spec = spec;
	INIT_LIST_HEAD(&spec->nid_list);

	if ((err = build_afg_tree(codec)) < 0)
		goto error;

	if ((err = parse_input(codec)) < 0 ||
	    (err = parse_output(codec)) < 0)
		goto error;

	codec->patch_ops = generic_patch_ops;

	return 0;

 error:
	/* frees spec and the parsed node list */
	snd_hda_generic_free(codec);
	return err;
}
EXPORT_SYMBOL(snd_hda_parse_generic_codec);
gpl-2.0
xdajog/kernel_fx3q_aosp
drivers/infiniband/hw/ipath/ipath_mad.c
8515
42537
/* * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <rdma/ib_smi.h> #include <rdma/ib_pma.h> #include "ipath_kernel.h" #include "ipath_verbs.h" #include "ipath_common.h" #define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004) #define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008) #define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C) #define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C) static int reply(struct ib_smp *smp) { /* * The verbs framework will handle the directed/LID route * packet changes. 
 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static int recv_subn_get_nodedescription(struct ib_smp *smp,
					 struct ib_device *ibdev)
{
	/* NodeDescription takes no attribute modifier */
	if (smp->attr_mod)
		smp->status |= IB_SMP_INVALID_FIELD;

	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

	return reply(smp);
}

/* wire layout of the NodeInfo attribute (big-endian, packed) */
struct nodeinfo {
	u8 base_version;
	u8 class_version;
	u8 node_type;
	u8 num_ports;
	__be64 sys_guid;
	__be64 node_guid;
	__be64 port_guid;
	__be16 partition_cap;
	__be16 device_id;
	__be32 revision;
	u8 local_port_num;
	u8 vendor_id[3];
} __attribute__ ((packed));

static int recv_subn_get_nodeinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct nodeinfo *nip = (struct nodeinfo *)&smp->data;
	struct ipath_devdata *dd = to_idev(ibdev)->dd;
	u32 vendor, majrev, minrev;

	/* GUID 0 is illegal */
	if (smp->attr_mod || (dd->ipath_guid == 0))
		smp->status |= IB_SMP_INVALID_FIELD;

	nip->base_version = 1;
	nip->class_version = 1;
	nip->node_type = 1;	/* channel adapter */
	/*
	 * XXX The num_ports value will need a layer function to get
	 * the value if we ever have more than one IB port on a chip.
	 * We will also need to get the GUID for the port.
	 */
	nip->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	nip->sys_guid = to_idev(ibdev)->sys_image_guid;
	nip->node_guid = dd->ipath_guid;
	nip->port_guid = dd->ipath_guid;
	nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd));
	nip->device_id = cpu_to_be16(dd->ipath_deviceid);
	majrev = dd->ipath_majrev;
	minrev = dd->ipath_minrev;
	nip->revision = cpu_to_be32((majrev << 16) | minrev);
	nip->local_port_num = port;
	vendor = dd->ipath_vendorid;
	nip->vendor_id[0] = IPATH_SRC_OUI_1;
	nip->vendor_id[1] = IPATH_SRC_OUI_2;
	nip->vendor_id[2] = IPATH_SRC_OUI_3;

	return reply(smp);
}

static int recv_subn_get_guidinfo(struct ib_smp *smp,
				  struct ib_device *ibdev)
{
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;

	/* 32 blocks of 8 64-bit GUIDs per block */

	memset(smp->data, 0, sizeof(smp->data));

	/*
	 * We only support one GUID for now.  If this changes, the
	 * portinfo.guid_cap field needs to be updated too.
	 */
	if (startgx == 0) {
		__be64 g = to_idev(ibdev)->dd->ipath_guid;
		if (g == 0)
			/* GUID 0 is illegal */
			smp->status |= IB_SMP_INVALID_FIELD;
		else
			/* The first is a copy of the read-only HW GUID. */
			*p = g;
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

/* thin wrappers over the chip-specific IB config hook; results ignored */
static void set_link_width_enabled(struct ipath_devdata *dd, u32 w)
{
	(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, w);
}

static void set_link_speed_enabled(struct ipath_devdata *dd, u32 s)
{
	(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, s);
}

/* extract the overrun-threshold field from the cached IBC control value */
static int get_overrunthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
}

/**
 * set_overrunthreshold - set the overrun threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	/* only rewrite the register when the field actually changes */
	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}

/* extract the phy-error-threshold field from the cached IBC control value */
static int get_phyerrthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
}

/**
 * set_phyerrthreshold - set the physical error threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	/* only rewrite the register when the field actually changes */
	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}

/**
 * get_linkdowndefaultstate - get the default linkdown state
 * @dd: the infinipath device
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
static int get_linkdowndefaultstate(struct ipath_devdata *dd)
{
	return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
}

static int recv_subn_get_portinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct ipath_ibdev *dev;
	struct ipath_devdata *dd;
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	u16 lid;
	u8 ibcstat;
	u8 mtu;
	int ret;

	if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) {
		smp->status |= IB_SMP_INVALID_FIELD;
		ret = reply(smp);
		goto bail;
	}

	dev = to_idev(ibdev);
	dd = dev->dd;

	/* Clear all fields.  Only set the non-zero fields. */
	memset(smp->data, 0, sizeof(smp->data));

	/* Only return the mkey if the protection field allows it. */
	if (smp->method == IB_MGMT_METHOD_SET || dev->mkey == smp->mkey ||
	    dev->mkeyprot == 0)
		pip->mkey = dev->mkey;
	pip->gid_prefix = dev->gid_prefix;
	lid = dd->ipath_lid;
	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
	pip->sm_lid = cpu_to_be16(dev->sm_lid);
	pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
	/* pip->diag_code; */
	pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
	pip->local_port_num = port;
	pip->link_width_enabled = dd->ipath_link_width_enabled;
	pip->link_width_supported = dd->ipath_link_width_supported;
	pip->link_width_active = dd->ipath_link_width_active;
	pip->linkspeed_portstate = dd->ipath_link_speed_supported << 4;
	ibcstat = dd->ipath_lastibcstat;
	/* map LinkState to IB portinfo values.  */
	pip->linkspeed_portstate |= ipath_ib_linkstate(dd, ibcstat) + 1;

	pip->portphysstate_linkdown =
		(ipath_cvt_physportstate[ibcstat & dd->ibcs_lts_mask] << 4) |
		(get_linkdowndefaultstate(dd) ? 1 : 2);
	pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dd->ipath_lmc;
	pip->linkspeedactive_enabled = (dd->ipath_link_speed_active << 4) |
		dd->ipath_link_speed_enabled;
	switch (dd->ipath_ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:		/* oops, something is wrong */
		mtu = IB_MTU_2048;
		break;
	}
	pip->neighbormtu_mastersmsl = (mtu << 4) | dev->sm_sl;
	pip->vlcap_inittype = 0x10;	/* VLCap = VL0, InitType = 0 */
	pip->vl_high_limit = dev->vl_high_limit;
	/* pip->vl_arb_high_cap; // only one VL */
	/* pip->vl_arb_low_cap; // only one VL */
	/* InitTypeReply = 0 */
	/* our mtu cap depends on whether 4K MTU enabled or not */
	pip->inittypereply_mtucap = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
	/* HCAs ignore VLStallCount and HOQLife */
	/* pip->vlstallcnt_hoqlife; */
	pip->operationalvl_pei_peo_fpi_fpo = 0x10;	/* OVLs = 1 */
	pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
	/* P_KeyViolations are counted by hardware. */
	pip->pkey_violations =
		cpu_to_be16((ipath_get_cr_errpkey(dd) -
			     dev->z_pkey_violations) & 0xFFFF);
	pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
	/* Only the hardware GUID is supported for now */
	pip->guid_cap = 1;
	pip->clientrereg_resv_subnetto = dev->subnet_timeout;
	/* 32.768 usec. response time (guessing) */
	pip->resv_resptimevalue = 3;
	pip->localphyerrors_overrunerrors =
		(get_phyerrthreshold(dd) << 4) |
		get_overrunthreshold(dd);
	/* pip->max_credit_hint; */
	if (dev->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
		u32 v;

		v = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LINKLATENCY);
		pip->link_roundtrip_latency[0] = v >> 16;
		pip->link_roundtrip_latency[1] = v >> 8;
		pip->link_roundtrip_latency[2] = v;
	}

	ret = reply(smp);

bail:
	return ret;
}

/**
 * get_pkeys - return the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
{
	/* always a kernel port, no locking needed */
	struct ipath_portdata *pd = dd->ipath_pd[0];

	memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));

	return 0;
}

static int recv_subn_get_pkeytable(struct ib_smp *smp,
				   struct ib_device *ibdev)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	u16 *p = (u16 *) smp->data;
	__be16 *q = (__be16 *) smp->data;

	/* 64 blocks of 32 16-bit P_Key entries */

	memset(smp->data, 0, sizeof(smp->data));
	if (startpx == 0) {
		struct ipath_ibdev *dev = to_idev(ibdev);
		unsigned i, n = ipath_get_npkeys(dev->dd);

		get_pkeys(dev->dd, p);

		/* convert the CPU-order table to wire (big-endian) order in place */
		for (i = 0; i < n; i++)
			q[i] = cpu_to_be16(p[i]);
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static int recv_subn_set_guidinfo(struct ib_smp *smp,
				  struct ib_device *ibdev)
{
	/* The only GUID we support is the first read-only entry. */
	return recv_subn_get_guidinfo(smp, ibdev);
}

/**
 * set_linkdowndefaultstate - set the default linkdown state
 * @dd: the infinipath device
 * @sleep: the new state
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep)
{
	if (sleep)
		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	else
		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
			 dd->ipath_ibcctrl);
	return 0;
}

/**
 * recv_subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).
 */
static int recv_subn_set_portinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	struct ib_event event;
	struct ipath_ibdev *dev;
	struct ipath_devdata *dd;
	char clientrereg = 0;
	u16 lid, smlid;
	u8 lwe;
	u8 lse;
	u8 state;
	u16 lstate;
	u32 mtu;
	int ret, ore;

	if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt)
		goto err;

	dev = to_idev(ibdev);
	dd = dev->dd;
	event.device = ibdev;
	event.element.port_num = port;

	dev->mkey = pip->mkey;
	dev->gid_prefix = pip->gid_prefix;
	dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

	lid = be16_to_cpu(pip->lid);
	if (dd->ipath_lid != lid ||
	    dd->ipath_lmc != (pip->mkeyprot_resv_lmc & 7)) {
		/* Must be a valid unicast LID address. */
		if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE)
			goto err;
		ipath_set_lid(dd, lid, pip->mkeyprot_resv_lmc & 7);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	smlid = be16_to_cpu(pip->sm_lid);
	if (smlid != dev->sm_lid) {
		/* Must be a valid unicast LID address. */
		if (smlid == 0 || smlid >= IPATH_MULTICAST_LID_BASE)
			goto err;
		dev->sm_lid = smlid;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	/* Allow 1x or 4x to be set (see 14.2.6.6). */
	lwe = pip->link_width_enabled;
	if (lwe) {
		if (lwe == 0xFF)
			lwe = dd->ipath_link_width_supported;
		else if (lwe >= 16 ||
			 (lwe & ~dd->ipath_link_width_supported))
			goto err;
		set_link_width_enabled(dd, lwe);
	}

	/* Allow 2.5 or 5.0 Gbs.  */
	lse = pip->linkspeedactive_enabled & 0xF;
	if (lse) {
		if (lse == 15)
			lse = dd->ipath_link_speed_supported;
		else if (lse >= 8 ||
			 (lse & ~dd->ipath_link_speed_supported))
			goto err;
		set_link_speed_enabled(dd, lse);
	}

	/* Set link down default state. */
	switch (pip->portphysstate_linkdown & 0xF) {
	case 0: /* NOP */
		break;
	case 1: /* SLEEP */
		if (set_linkdowndefaultstate(dd, 1))
			goto err;
		break;
	case 2: /* POLL */
		if (set_linkdowndefaultstate(dd, 0))
			goto err;
		break;
	default:
		goto err;
	}

	dev->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
	dev->vl_high_limit = pip->vl_high_limit;

	switch ((pip->neighbormtu_mastersmsl >> 4) & 0xF) {
	case IB_MTU_256:
		mtu = 256;
		break;
	case IB_MTU_512:
		mtu = 512;
		break;
	case IB_MTU_1024:
		mtu = 1024;
		break;
	case IB_MTU_2048:
		mtu = 2048;
		break;
	case IB_MTU_4096:
		if (!ipath_mtu4096)
			goto err;
		mtu = 4096;
		break;
	default:
		/* XXX We have already partially updated our state! */
		goto err;
	}
	ipath_set_mtu(dd, mtu);

	dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;

	/* We only support VL0 */
	if (((pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF) > 1)
		goto err;

	/* writing zero to a violation counter resets it */
	if (pip->mkey_violations == 0)
		dev->mkey_violations = 0;

	/*
	 * Hardware counter can't be reset so snapshot and subtract
	 * later.
	 */
	if (pip->pkey_violations == 0)
		dev->z_pkey_violations = ipath_get_cr_errpkey(dd);

	if (pip->qkey_violations == 0)
		dev->qkey_violations = 0;

	ore = pip->localphyerrors_overrunerrors;
	if (set_phyerrthreshold(dd, (ore >> 4) & 0xF))
		goto err;

	if (set_overrunthreshold(dd, (ore & 0xF)))
		goto err;

	dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

	if (pip->clientrereg_resv_subnetto & 0x80) {
		clientrereg = 1;
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */
	state = pip->linkspeed_portstate & 0xF;
	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
		goto err;

	/*
	 * Only state changes of DOWN, ARM, and ACTIVE are valid
	 * and must be in the correct state to take effect (see 7.2.6).
	 */
	switch (state) {
	case IB_PORT_NOP:
		if (lstate == 0)
			break;
		/* FALLTHROUGH */
	case IB_PORT_DOWN:
		if (lstate == 0)
			lstate = IPATH_IB_LINKDOWN_ONLY;
		else if (lstate == 1)
			lstate = IPATH_IB_LINKDOWN_SLEEP;
		else if (lstate == 2)
			lstate = IPATH_IB_LINKDOWN;
		else if (lstate == 3)
			lstate = IPATH_IB_LINKDOWN_DISABLE;
		else
			goto err;
		ipath_set_linkstate(dd, lstate);
		if (lstate == IPATH_IB_LINKDOWN_DISABLE) {
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
			goto done;
		}
		ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED |
				     IPATH_LINKACTIVE, 1000);
		break;
	case IB_PORT_ARMED:
		ipath_set_linkstate(dd, IPATH_IB_LINKARM);
		break;
	case IB_PORT_ACTIVE:
		ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
		break;
	default:
		/* XXX We have already partially updated our state! */
		goto err;
	}

	ret = recv_subn_get_portinfo(smp, ibdev, port);

	if (clientrereg)
		pip->clientrereg_resv_subnetto |= 0x80;

	goto done;

err:
	smp->status |= IB_SMP_INVALID_FIELD;
	ret = recv_subn_get_portinfo(smp, ibdev, port);

done:
	return ret;
}

/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @dd: the infinipath device
 * @key: the PKEY index
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (dd->ipath_pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
			/* last reference gone: slot can be cleared in HW */
			dd->ipath_pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		break;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @dd: the infinipath device
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;	/* key without the membership bit */
	int any = 0;
	int ret;

	/* the invalid PKEY never occupies a slot */
	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (dd->ipath_pkeys[i] == key) {
			if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&dd->ipath_pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			/* for ipathstats, etc.
			 */
			ipath_stats.sps_pkeys[i] = lkey;
			dd->ipath_pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}

/**
 * set_pkeys - set the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
{
	struct ipath_portdata *pd;
	int i;
	int changed = 0;

	/* always a kernel port, no locking needed */
	pd = dd->ipath_pd[0];

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = pd->port_pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(dd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(dd, key);

			if (ret < 0)
				key = 0;	/* couldn't add: store empty */
			else
				changed |= ret;
		}
		pd->port_pkeys[i] = key;
	}
	if (changed) {
		u64 pkey;

		/* pack the four 16-bit keys into the partition-key register */
		pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}
	return 0;
}

static int recv_subn_set_pkeytable(struct ib_smp *smp,
				   struct ib_device *ibdev)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	__be16 *p = (__be16 *) smp->data;
	u16 *q = (u16 *) smp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	unsigned i, n = ipath_get_npkeys(dev->dd);

	/* convert the incoming big-endian table to CPU order in place */
	for (i = 0; i < n; i++)
		q[i] = be16_to_cpu(p[i]);

	if (startpx != 0 || set_pkeys(dev->dd, q) != 0)
		smp->status |= IB_SMP_INVALID_FIELD;

	return recv_subn_get_pkeytable(smp, ibdev);
}

static int recv_pma_get_classportinfo(struct ib_pma_mad *pmp)
{
	struct ib_class_port_info *p =
		(struct ib_class_port_info *)pmp->data;

	memset(pmp->data, 0, sizeof(pmp->data));

	if (pmp->mad_hdr.attr_mod != 0)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	/* Indicate AllPortSelect is valid (only one port anyway) */
	p->capability_mask = cpu_to_be16(1 << 8);
	p->base_version = 1;
	p->class_version = 1;
	/*
* Expected response time is 4.096 usec. * 2^18 == 1.073741824 * sec. */ p->resp_time_value = 18; return reply((struct ib_smp *) pmp); } /* * The PortSamplesControl.CounterMasks field is an array of 3 bit fields * which specify the N'th counter's capabilities. See ch. 16.1.3.2. * We support 5 counters which only count the mandatory quantities. */ #define COUNTER_MASK(q, n) (q << ((9 - n) * 3)) #define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \ COUNTER_MASK(1, 1) | \ COUNTER_MASK(1, 2) | \ COUNTER_MASK(1, 3) | \ COUNTER_MASK(1, 4)) static int recv_pma_get_portsamplescontrol(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portsamplescontrol *p = (struct ib_pma_portsamplescontrol *)pmp->data; struct ipath_ibdev *dev = to_idev(ibdev); struct ipath_cregs const *crp = dev->dd->ipath_cregs; unsigned long flags; u8 port_select = p->port_select; memset(pmp->data, 0, sizeof(pmp->data)); p->port_select = port_select; if (pmp->mad_hdr.attr_mod != 0 || (port_select != port && port_select != 0xFF)) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; /* * Ticks are 10x the link transfer period which for 2.5Gbs is 4 * nsec. 0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec. Sample * intervals are counted in ticks. Since we use Linux timers, that * count in jiffies, we can't sample for less than 1000 ticks if HZ * == 1000 (4000 ticks if HZ is 250). link_speed_active returns 2 for * DDR, 1 for SDR, set the tick to 1 for DDR, 0 for SDR on chips that * have hardware support for delaying packets. */ if (crp->cr_psstat) p->tick = dev->dd->ipath_link_speed_active - 1; else p->tick = 250; /* 1 usec. 
*/ p->counter_width = 4; /* 32 bit counters */ p->counter_mask0_9 = COUNTER_MASK0_9; spin_lock_irqsave(&dev->pending_lock, flags); if (crp->cr_psstat) p->sample_status = ipath_read_creg32(dev->dd, crp->cr_psstat); else p->sample_status = dev->pma_sample_status; p->sample_start = cpu_to_be32(dev->pma_sample_start); p->sample_interval = cpu_to_be32(dev->pma_sample_interval); p->tag = cpu_to_be16(dev->pma_tag); p->counter_select[0] = dev->pma_counter_select[0]; p->counter_select[1] = dev->pma_counter_select[1]; p->counter_select[2] = dev->pma_counter_select[2]; p->counter_select[3] = dev->pma_counter_select[3]; p->counter_select[4] = dev->pma_counter_select[4]; spin_unlock_irqrestore(&dev->pending_lock, flags); return reply((struct ib_smp *) pmp); } static int recv_pma_set_portsamplescontrol(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portsamplescontrol *p = (struct ib_pma_portsamplescontrol *)pmp->data; struct ipath_ibdev *dev = to_idev(ibdev); struct ipath_cregs const *crp = dev->dd->ipath_cregs; unsigned long flags; u8 status; int ret; if (pmp->mad_hdr.attr_mod != 0 || (p->port_select != port && p->port_select != 0xFF)) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; ret = reply((struct ib_smp *) pmp); goto bail; } spin_lock_irqsave(&dev->pending_lock, flags); if (crp->cr_psstat) status = ipath_read_creg32(dev->dd, crp->cr_psstat); else status = dev->pma_sample_status; if (status == IB_PMA_SAMPLE_STATUS_DONE) { dev->pma_sample_start = be32_to_cpu(p->sample_start); dev->pma_sample_interval = be32_to_cpu(p->sample_interval); dev->pma_tag = be16_to_cpu(p->tag); dev->pma_counter_select[0] = p->counter_select[0]; dev->pma_counter_select[1] = p->counter_select[1]; dev->pma_counter_select[2] = p->counter_select[2]; dev->pma_counter_select[3] = p->counter_select[3]; dev->pma_counter_select[4] = p->counter_select[4]; if (crp->cr_psstat) { ipath_write_creg(dev->dd, crp->cr_psinterval, dev->pma_sample_interval); ipath_write_creg(dev->dd, 
					 crp->cr_psstart,
					 dev->pma_sample_start);
		} else
			dev->pma_sample_status =
				IB_PMA_SAMPLE_STATUS_STARTED;
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
	return ret;
}

/*
 * Read one sampling counter: use the hardware counter register when the
 * chip provides one, otherwise fall back to the software shadow value.
 */
static u64 get_counter(struct ipath_ibdev *dev,
		       struct ipath_cregs const *crp,
		       __be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = (crp->cr_psxmitdatacount) ?
			ipath_read_creg32(dev->dd, crp->cr_psxmitdatacount) :
			dev->ipath_sword;
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = (crp->cr_psrcvdatacount) ?
			ipath_read_creg32(dev->dd, crp->cr_psrcvdatacount) :
			dev->ipath_rword;
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = (crp->cr_psxmitpktscount) ?
			ipath_read_creg32(dev->dd, crp->cr_psxmitpktscount) :
			dev->ipath_spkts;
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = (crp->cr_psrcvpktscount) ?
			ipath_read_creg32(dev->dd, crp->cr_psrcvpktscount) :
			dev->ipath_rpkts;
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = (crp->cr_psxmitwaitcount) ?
			ipath_read_creg32(dev->dd, crp->cr_psxmitwaitcount) :
			dev->ipath_xmit_wait;
		break;
	default:
		ret = 0;
	}

	return ret;
}

static int recv_pma_get_portsamplesresult(struct ib_pma_mad *pmp,
					  struct ib_device *ibdev)
{
	struct ib_pma_portsamplesresult *p =
		(struct ib_pma_portsamplesresult *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	p->tag = cpu_to_be16(dev->pma_tag);
	if (crp->cr_psstat)
		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		status = dev->pma_sample_status;
	p->sample_status = cpu_to_be16(status);
	/* counters read back as 0 until sampling is done */
	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ?
0 : cpu_to_be32( get_counter(dev, crp, dev->pma_counter_select[i])); return reply((struct ib_smp *) pmp); } static int recv_pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp, struct ib_device *ibdev) { struct ib_pma_portsamplesresult_ext *p = (struct ib_pma_portsamplesresult_ext *)pmp->data; struct ipath_ibdev *dev = to_idev(ibdev); struct ipath_cregs const *crp = dev->dd->ipath_cregs; u8 status; int i; memset(pmp->data, 0, sizeof(pmp->data)); p->tag = cpu_to_be16(dev->pma_tag); if (crp->cr_psstat) status = ipath_read_creg32(dev->dd, crp->cr_psstat); else status = dev->pma_sample_status; p->sample_status = cpu_to_be16(status); /* 64 bits */ p->extended_width = cpu_to_be32(0x80000000); for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++) p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 : cpu_to_be64( get_counter(dev, crp, dev->pma_counter_select[i])); return reply((struct ib_smp *) pmp); } static int recv_pma_get_portcounters(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) pmp->data; struct ipath_ibdev *dev = to_idev(ibdev); struct ipath_verbs_counters cntrs; u8 port_select = p->port_select; ipath_get_counters(dev->dd, &cntrs); /* Adjust counters for any resets done. 
*/ cntrs.symbol_error_counter -= dev->z_symbol_error_counter; cntrs.link_error_recovery_counter -= dev->z_link_error_recovery_counter; cntrs.link_downed_counter -= dev->z_link_downed_counter; cntrs.port_rcv_errors += dev->rcv_errors; cntrs.port_rcv_errors -= dev->z_port_rcv_errors; cntrs.port_rcv_remphys_errors -= dev->z_port_rcv_remphys_errors; cntrs.port_xmit_discards -= dev->z_port_xmit_discards; cntrs.port_xmit_data -= dev->z_port_xmit_data; cntrs.port_rcv_data -= dev->z_port_rcv_data; cntrs.port_xmit_packets -= dev->z_port_xmit_packets; cntrs.port_rcv_packets -= dev->z_port_rcv_packets; cntrs.local_link_integrity_errors -= dev->z_local_link_integrity_errors; cntrs.excessive_buffer_overrun_errors -= dev->z_excessive_buffer_overrun_errors; cntrs.vl15_dropped -= dev->z_vl15_dropped; cntrs.vl15_dropped += dev->n_vl15_dropped; memset(pmp->data, 0, sizeof(pmp->data)); p->port_select = port_select; if (pmp->mad_hdr.attr_mod != 0 || (port_select != port && port_select != 0xFF)) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; if (cntrs.symbol_error_counter > 0xFFFFUL) p->symbol_error_counter = cpu_to_be16(0xFFFF); else p->symbol_error_counter = cpu_to_be16((u16)cntrs.symbol_error_counter); if (cntrs.link_error_recovery_counter > 0xFFUL) p->link_error_recovery_counter = 0xFF; else p->link_error_recovery_counter = (u8)cntrs.link_error_recovery_counter; if (cntrs.link_downed_counter > 0xFFUL) p->link_downed_counter = 0xFF; else p->link_downed_counter = (u8)cntrs.link_downed_counter; if (cntrs.port_rcv_errors > 0xFFFFUL) p->port_rcv_errors = cpu_to_be16(0xFFFF); else p->port_rcv_errors = cpu_to_be16((u16) cntrs.port_rcv_errors); if (cntrs.port_rcv_remphys_errors > 0xFFFFUL) p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF); else p->port_rcv_remphys_errors = cpu_to_be16((u16)cntrs.port_rcv_remphys_errors); if (cntrs.port_xmit_discards > 0xFFFFUL) p->port_xmit_discards = cpu_to_be16(0xFFFF); else p->port_xmit_discards = cpu_to_be16((u16)cntrs.port_xmit_discards); if 
(cntrs.local_link_integrity_errors > 0xFUL) cntrs.local_link_integrity_errors = 0xFUL; if (cntrs.excessive_buffer_overrun_errors > 0xFUL) cntrs.excessive_buffer_overrun_errors = 0xFUL; p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) | cntrs.excessive_buffer_overrun_errors; if (cntrs.vl15_dropped > 0xFFFFUL) p->vl15_dropped = cpu_to_be16(0xFFFF); else p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped); if (cntrs.port_xmit_data > 0xFFFFFFFFUL) p->port_xmit_data = cpu_to_be32(0xFFFFFFFF); else p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data); if (cntrs.port_rcv_data > 0xFFFFFFFFUL) p->port_rcv_data = cpu_to_be32(0xFFFFFFFF); else p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data); if (cntrs.port_xmit_packets > 0xFFFFFFFFUL) p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF); else p->port_xmit_packets = cpu_to_be32((u32)cntrs.port_xmit_packets); if (cntrs.port_rcv_packets > 0xFFFFFFFFUL) p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF); else p->port_rcv_packets = cpu_to_be32((u32) cntrs.port_rcv_packets); return reply((struct ib_smp *) pmp); } static int recv_pma_get_portcounters_ext(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *)pmp->data; struct ipath_ibdev *dev = to_idev(ibdev); u64 swords, rwords, spkts, rpkts, xwait; u8 port_select = p->port_select; ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts, &rpkts, &xwait); /* Adjust counters for any resets done. 
*/ swords -= dev->z_port_xmit_data; rwords -= dev->z_port_rcv_data; spkts -= dev->z_port_xmit_packets; rpkts -= dev->z_port_rcv_packets; memset(pmp->data, 0, sizeof(pmp->data)); p->port_select = port_select; if (pmp->mad_hdr.attr_mod != 0 || (port_select != port && port_select != 0xFF)) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; p->port_xmit_data = cpu_to_be64(swords); p->port_rcv_data = cpu_to_be64(rwords); p->port_xmit_packets = cpu_to_be64(spkts); p->port_rcv_packets = cpu_to_be64(rpkts); p->port_unicast_xmit_packets = cpu_to_be64(dev->n_unicast_xmit); p->port_unicast_rcv_packets = cpu_to_be64(dev->n_unicast_rcv); p->port_multicast_xmit_packets = cpu_to_be64(dev->n_multicast_xmit); p->port_multicast_rcv_packets = cpu_to_be64(dev->n_multicast_rcv); return reply((struct ib_smp *) pmp); } static int recv_pma_set_portcounters(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) pmp->data; struct ipath_ibdev *dev = to_idev(ibdev); struct ipath_verbs_counters cntrs; /* * Since the HW doesn't support clearing counters, we save the * current count and subtract it from future responses. 
*/ ipath_get_counters(dev->dd, &cntrs); if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR) dev->z_symbol_error_counter = cntrs.symbol_error_counter; if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY) dev->z_link_error_recovery_counter = cntrs.link_error_recovery_counter; if (p->counter_select & IB_PMA_SEL_LINK_DOWNED) dev->z_link_downed_counter = cntrs.link_downed_counter; if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS) dev->z_port_rcv_errors = cntrs.port_rcv_errors + dev->rcv_errors; if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS) dev->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors; if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS) dev->z_port_xmit_discards = cntrs.port_xmit_discards; if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS) dev->z_local_link_integrity_errors = cntrs.local_link_integrity_errors; if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS) dev->z_excessive_buffer_overrun_errors = cntrs.excessive_buffer_overrun_errors; if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) { dev->n_vl15_dropped = 0; dev->z_vl15_dropped = cntrs.vl15_dropped; } if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA) dev->z_port_xmit_data = cntrs.port_xmit_data; if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA) dev->z_port_rcv_data = cntrs.port_rcv_data; if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS) dev->z_port_xmit_packets = cntrs.port_xmit_packets; if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS) dev->z_port_rcv_packets = cntrs.port_rcv_packets; return recv_pma_get_portcounters(pmp, ibdev, port); } static int recv_pma_set_portcounters_ext(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) pmp->data; struct ipath_ibdev *dev = to_idev(ibdev); u64 swords, rwords, spkts, rpkts, xwait; ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts, &rpkts, &xwait); if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA) dev->z_port_xmit_data = 
swords; if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA) dev->z_port_rcv_data = rwords; if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS) dev->z_port_xmit_packets = spkts; if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS) dev->z_port_rcv_packets = rpkts; if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS) dev->n_unicast_xmit = 0; if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS) dev->n_unicast_rcv = 0; if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS) dev->n_multicast_xmit = 0; if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS) dev->n_multicast_rcv = 0; return recv_pma_get_portcounters_ext(pmp, ibdev, port); } static int process_subn(struct ib_device *ibdev, int mad_flags, u8 port_num, struct ib_mad *in_mad, struct ib_mad *out_mad) { struct ib_smp *smp = (struct ib_smp *)out_mad; struct ipath_ibdev *dev = to_idev(ibdev); int ret; *out_mad = *in_mad; if (smp->class_version != 1) { smp->status |= IB_SMP_UNSUP_VERSION; ret = reply(smp); goto bail; } /* Is the mkey in the process of expiring? */ if (dev->mkey_lease_timeout && time_after_eq(jiffies, dev->mkey_lease_timeout)) { /* Clear timeout and mkey protection field. */ dev->mkey_lease_timeout = 0; dev->mkeyprot = 0; } /* * M_Key checking depends on * Portinfo:M_Key_protect_bits */ if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && dev->mkey != 0 && dev->mkey != smp->mkey && (smp->method == IB_MGMT_METHOD_SET || (smp->method == IB_MGMT_METHOD_GET && dev->mkeyprot >= 2))) { if (dev->mkey_violations != 0xFFFF) ++dev->mkey_violations; if (dev->mkey_lease_timeout || dev->mkey_lease_period == 0) { ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; goto bail; } dev->mkey_lease_timeout = jiffies + dev->mkey_lease_period * HZ; /* Future: Generate a trap notice. 
*/ ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; goto bail; } else if (dev->mkey_lease_timeout) dev->mkey_lease_timeout = 0; switch (smp->method) { case IB_MGMT_METHOD_GET: switch (smp->attr_id) { case IB_SMP_ATTR_NODE_DESC: ret = recv_subn_get_nodedescription(smp, ibdev); goto bail; case IB_SMP_ATTR_NODE_INFO: ret = recv_subn_get_nodeinfo(smp, ibdev, port_num); goto bail; case IB_SMP_ATTR_GUID_INFO: ret = recv_subn_get_guidinfo(smp, ibdev); goto bail; case IB_SMP_ATTR_PORT_INFO: ret = recv_subn_get_portinfo(smp, ibdev, port_num); goto bail; case IB_SMP_ATTR_PKEY_TABLE: ret = recv_subn_get_pkeytable(smp, ibdev); goto bail; case IB_SMP_ATTR_SM_INFO: if (dev->port_cap_flags & IB_PORT_SM_DISABLED) { ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; goto bail; } if (dev->port_cap_flags & IB_PORT_SM) { ret = IB_MAD_RESULT_SUCCESS; goto bail; } /* FALLTHROUGH */ default: smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply(smp); goto bail; } case IB_MGMT_METHOD_SET: switch (smp->attr_id) { case IB_SMP_ATTR_GUID_INFO: ret = recv_subn_set_guidinfo(smp, ibdev); goto bail; case IB_SMP_ATTR_PORT_INFO: ret = recv_subn_set_portinfo(smp, ibdev, port_num); goto bail; case IB_SMP_ATTR_PKEY_TABLE: ret = recv_subn_set_pkeytable(smp, ibdev); goto bail; case IB_SMP_ATTR_SM_INFO: if (dev->port_cap_flags & IB_PORT_SM_DISABLED) { ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; goto bail; } if (dev->port_cap_flags & IB_PORT_SM) { ret = IB_MAD_RESULT_SUCCESS; goto bail; } /* FALLTHROUGH */ default: smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply(smp); goto bail; } case IB_MGMT_METHOD_TRAP: case IB_MGMT_METHOD_REPORT: case IB_MGMT_METHOD_REPORT_RESP: case IB_MGMT_METHOD_TRAP_REPRESS: case IB_MGMT_METHOD_GET_RESP: /* * The ib_mad module will call us to process responses * before checking for other consumers. * Just tell the caller to process it normally. 
*/ ret = IB_MAD_RESULT_SUCCESS; goto bail; default: smp->status |= IB_SMP_UNSUP_METHOD; ret = reply(smp); } bail: return ret; } static int process_perf(struct ib_device *ibdev, u8 port_num, struct ib_mad *in_mad, struct ib_mad *out_mad) { struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad; int ret; *out_mad = *in_mad; if (pmp->mad_hdr.class_version != 1) { pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION; ret = reply((struct ib_smp *) pmp); goto bail; } switch (pmp->mad_hdr.method) { case IB_MGMT_METHOD_GET: switch (pmp->mad_hdr.attr_id) { case IB_PMA_CLASS_PORT_INFO: ret = recv_pma_get_classportinfo(pmp); goto bail; case IB_PMA_PORT_SAMPLES_CONTROL: ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port_num); goto bail; case IB_PMA_PORT_SAMPLES_RESULT: ret = recv_pma_get_portsamplesresult(pmp, ibdev); goto bail; case IB_PMA_PORT_SAMPLES_RESULT_EXT: ret = recv_pma_get_portsamplesresult_ext(pmp, ibdev); goto bail; case IB_PMA_PORT_COUNTERS: ret = recv_pma_get_portcounters(pmp, ibdev, port_num); goto bail; case IB_PMA_PORT_COUNTERS_EXT: ret = recv_pma_get_portcounters_ext(pmp, ibdev, port_num); goto bail; default: pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_smp *) pmp); goto bail; } case IB_MGMT_METHOD_SET: switch (pmp->mad_hdr.attr_id) { case IB_PMA_PORT_SAMPLES_CONTROL: ret = recv_pma_set_portsamplescontrol(pmp, ibdev, port_num); goto bail; case IB_PMA_PORT_COUNTERS: ret = recv_pma_set_portcounters(pmp, ibdev, port_num); goto bail; case IB_PMA_PORT_COUNTERS_EXT: ret = recv_pma_set_portcounters_ext(pmp, ibdev, port_num); goto bail; default: pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_smp *) pmp); goto bail; } case IB_MGMT_METHOD_GET_RESP: /* * The ib_mad module will call us to process responses * before checking for other consumers. * Just tell the caller to process it normally. 
*/ ret = IB_MAD_RESULT_SUCCESS; goto bail; default: pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD; ret = reply((struct ib_smp *) pmp); } bail: return ret; } /** * ipath_process_mad - process an incoming MAD packet * @ibdev: the infiniband device this packet came in on * @mad_flags: MAD flags * @port_num: the port number this packet came in on * @in_wc: the work completion entry for this packet * @in_grh: the global route header for this packet * @in_mad: the incoming MAD * @out_mad: any outgoing MAD reply * * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not * interested in processing. * * Note that the verbs framework has already done the MAD sanity checks, * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE * MADs. * * This is called by the ib_mad module. */ int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh, struct ib_mad *in_mad, struct ib_mad *out_mad) { int ret; switch (in_mad->mad_hdr.mgmt_class) { case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: case IB_MGMT_CLASS_SUBN_LID_ROUTED: ret = process_subn(ibdev, mad_flags, port_num, in_mad, out_mad); goto bail; case IB_MGMT_CLASS_PERF_MGMT: ret = process_perf(ibdev, port_num, in_mad, out_mad); goto bail; default: ret = IB_MAD_RESULT_SUCCESS; } bail: return ret; }
gpl-2.0
subtek/axiomk
arch/mn10300/unit-asb2305/pci-irq.c
8771
1300
/* PCI IRQ routing on the MN103E010 based ASB2305 * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. * * This is simple: All PCI interrupts route through the CPU's XIRQ1 pin [IRQ 35] */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <asm/io.h> #include <asm/smp.h> #include "pci-asb2305.h" void __init pcibios_irq_init(void) { } void __init pcibios_fixup_irqs(void) { struct pci_dev *dev = NULL; u8 line, pin; while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (pin) { dev->irq = XIRQ1; pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); } pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &line); } } void __init pcibios_penalize_isa_irq(int irq) { } void pcibios_enable_irq(struct pci_dev *dev) { pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); }
gpl-2.0
MoKee/android_kernel_htc_dlx
arch/mips/kernel/spinlock_test.c
8771
2381
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/spinlock.h>

/*
 * Single-thread benchmark: time 1,000,000 uncontended raw-spinlock
 * lock/unlock pairs on a stack-local lock.
 *
 * @data: unused (debugfs attribute cookie)
 * @val:  out - elapsed time in microseconds
 * Returns 0.
 */
static int ss_get(void *data, u64 *val)
{
	ktime_t start, finish;
	int loops;
	int cont;
	DEFINE_RAW_SPINLOCK(ss_spin);

	loops = 1000000;
	cont = 1;

	start = ktime_get();

	while (cont) {
		raw_spin_lock(&ss_spin);
		loops--;
		if (loops == 0)
			cont = 0;
		raw_spin_unlock(&ss_spin);
	}

	finish = ktime_get();

	*val = ktime_us_delta(finish, start);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n");

/* Shared state for the two-thread contended-lock benchmark. */
struct spin_multi_state {
	raw_spinlock_t lock;
	atomic_t start_wait;	/* barrier: both threads recorded start time */
	atomic_t enter_wait;	/* barrier: both threads have entered */
	atomic_t exit_wait;	/* barrier: both threads have finished */
	int loops;		/* lock/unlock iterations per thread */
};

struct spin_multi_per_thread {
	struct spin_multi_state *state;
	ktime_t start;		/* per-thread start timestamp */
};

/*
 * Worker for the contended benchmark; executed by both the spawned
 * kthread and the caller's own thread.  The atomic counters implement
 * crude two-party barriers so that both threads hammer the lock
 * concurrently over the same interval.
 */
static int multi_other(void *data)
{
	int loops;
	int cont;
	struct spin_multi_per_thread *pt = data;
	struct spin_multi_state *s = pt->state;

	loops = s->loops;
	cont = 1;

	atomic_dec(&s->enter_wait);

	while (atomic_read(&s->enter_wait))
		; /* spin */

	pt->start = ktime_get();

	atomic_dec(&s->start_wait);

	while (atomic_read(&s->start_wait))
		; /* spin */

	while (cont) {
		raw_spin_lock(&s->lock);
		loops--;
		if (loops == 0)
			cont = 0;
		raw_spin_unlock(&s->lock);
	}

	atomic_dec(&s->exit_wait);
	while (atomic_read(&s->exit_wait))
		; /* spin */

	return 0;
}

/*
 * Two-thread benchmark: time 1,000,000 contended lock/unlock pairs per
 * thread.
 *
 * @data: unused (debugfs attribute cookie)
 * @val:  out - elapsed time in microseconds
 * Returns 0 on success or the kthread_run() error code.
 */
static int multi_get(void *data, u64 *val)
{
	ktime_t finish;
	struct spin_multi_state ms;
	struct spin_multi_per_thread t1, t2;
	struct task_struct *other;

	ms.lock = __RAW_SPIN_LOCK_UNLOCKED("multi_get");
	ms.loops = 1000000;

	atomic_set(&ms.start_wait, 2);
	atomic_set(&ms.enter_wait, 2);
	atomic_set(&ms.exit_wait, 2);
	t1.state = &ms;
	t2.state = &ms;

	/*
	 * Bug fix: the kthread_run() result used to be ignored.  If thread
	 * creation failed, multi_other(&t1) below would spin forever on the
	 * enter_wait barrier waiting for a partner that never existed,
	 * locking up the CPU.  Propagate the error instead.
	 */
	other = kthread_run(multi_other, &t2, "multi_get");
	if (IS_ERR(other))
		return PTR_ERR(other);

	multi_other(&t1);

	finish = ktime_get();

	*val = ktime_us_delta(finish, t1.start);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n");

extern struct dentry *mips_debugfs_dir;

/* Register the two benchmark files under the MIPS debugfs directory. */
static int __init spinlock_test(void)
{
	struct dentry *d;

	if (!mips_debugfs_dir)
		return -ENODEV;

	d = debugfs_create_file("spin_single", S_IRUGO,
				mips_debugfs_dir, NULL, &fops_ss);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("spin_multi", S_IRUGO,
				mips_debugfs_dir, NULL, &fops_multi);
	if (!d)
		return -ENOMEM;

	return 0;
}
device_initcall(spinlock_test);
gpl-2.0
phonelab/android_kernel
net/llc/llc_s_ev.c
15683
3613
/* * llc_s_ev.c - Defines SAP component events * * The followed event functions are SAP component events which are described * in 802.2 LLC protocol standard document. * * Copyright (c) 1997 by Procom Technology, Inc. * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program can be redistributed or modified under the terms of the * GNU General Public License as published by the Free Software Foundation. * This program is distributed without any warranty or implied warranty * of merchantability or fitness for a particular purpose. * * See the GNU General Public License for more details. */ #include <linux/socket.h> #include <net/sock.h> #include <net/llc_if.h> #include <net/llc_s_ev.h> #include <net/llc_pdu.h> int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); return ev->type == LLC_SAP_EV_TYPE_SIMPLE && ev->prim_type == LLC_SAP_EV_ACTIVATION_REQ ? 0 : 1; } int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return ev->type == LLC_SAP_EV_TYPE_PDU && LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_UI ? 0 : 1; } int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); return ev->type == LLC_SAP_EV_TYPE_PRIM && ev->prim == LLC_DATAUNIT_PRIM && ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1; } int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); return ev->type == LLC_SAP_EV_TYPE_PRIM && ev->prim == LLC_XID_PRIM && ev->prim_type == LLC_PRIM_TYPE_REQ ? 
0 : 1; } int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return ev->type == LLC_SAP_EV_TYPE_PDU && LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID ? 0 : 1; } int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return ev->type == LLC_SAP_EV_TYPE_PDU && LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_RSP(pdu) == LLC_1_PDU_CMD_XID ? 0 : 1; } int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); return ev->type == LLC_SAP_EV_TYPE_PRIM && ev->prim == LLC_TEST_PRIM && ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1; } int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return ev->type == LLC_SAP_EV_TYPE_PDU && LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST ? 0 : 1; } int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return ev->type == LLC_SAP_EV_TYPE_PDU && LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_RSP(pdu) == LLC_1_PDU_CMD_TEST ? 0 : 1; } int llc_sap_ev_deactivation_req(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); return ev->type == LLC_SAP_EV_TYPE_SIMPLE && ev->prim_type == LLC_SAP_EV_DEACTIVATION_REQ ? 0 : 1; }
gpl-2.0
paulluo/linux
drivers/usb/isp1760/isp1760-if.c
68
7439
/*
 * Glue code for the ISP1760 driver and bus
 * Currently there is support for
 * - OpenFirmware
 * - PCI
 * - PDEV (generic platform device centralized driver model)
 *
 * (c) 2007 Sebastian Siewior <bigeasy@linutronix.de>
 *
 */

#include <linux/usb.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb/isp1760.h>
#include <linux/usb/hcd.h>

#include "isp1760-core.h"
#include "isp1760-regs.h"

#ifdef CONFIG_USB_PCI
#include <linux/pci.h>
#endif

#ifdef CONFIG_USB_PCI
/*
 * One-shot hardware probe/setup behind a PLX PCI bridge: verify the
 * controller answers via its scratch register and enable interrupt
 * pass-through on the bridge.  All regions mapped here are unmapped and
 * released again before returning; the caller maps BAR3 itself later.
 * Returns 0 on success or a negative errno.
 */
static int isp1761_pci_init(struct pci_dev *dev)
{
	resource_size_t mem_start;
	resource_size_t mem_length;
	u8 __iomem *iobase;
	u8 latency, limit;
	int retry_count;
	u32 reg_data;

	/* Grab the PLX PCI shared memory of the ISP 1761 we need */
	mem_start = pci_resource_start(dev, 3);
	mem_length = pci_resource_len(dev, 3);
	/* NOTE(review): rejects regions smaller than 64 KiB; the errno is
	 * -ENOMEM although -ENODEV would arguably describe it better. */
	if (mem_length < 0xffff) {
		printk(KERN_ERR "memory length for this resource is wrong\n");
		return -ENOMEM;
	}

	if (!request_mem_region(mem_start, mem_length, "ISP-PCI")) {
		printk(KERN_ERR "host controller already in use\n");
		return -EBUSY;
	}

	/* map available memory */
	iobase = ioremap_nocache(mem_start, mem_length);
	if (!iobase) {
		printk(KERN_ERR "Error ioremap failed\n");
		release_mem_region(mem_start, mem_length);
		return -ENOMEM;
	}

	/* bad pci latencies can contribute to overruns */
	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &latency);
	if (latency) {
		pci_read_config_byte(dev, PCI_MAX_LAT, &limit);
		if (limit && limit < latency)
			pci_write_config_byte(dev, PCI_LATENCY_TIMER, limit);
	}

	/*
	 * Try to check whether we can access the Scratch Register of the
	 * Host Controller or not.  The initial PCI access is retried until
	 * local init for the PCI bridge is completed.
	 */
	retry_count = 20;
	reg_data = 0;
	while ((reg_data != 0xFACE) && retry_count) {
		/* by default the host is in 16-bit mode, so I/O operations
		 * at this stage must be 16 bit */
		writel(0xface, iobase + HC_SCRATCH_REG);
		udelay(100);
		reg_data = readl(iobase + HC_SCRATCH_REG) & 0x0000ffff;
		retry_count--;
	}

	iounmap(iobase);
	release_mem_region(mem_start, mem_length);

	/* Host Controller presence is detected by writing to the scratch
	 * register and reading back and checking that the contents match */
	if (reg_data != 0xFACE) {
		dev_err(&dev->dev, "scratch register mismatch %x\n", reg_data);
		return -ENOMEM;
	}

	/* Grab the PLX PCI mem maped port start address we need */
	mem_start = pci_resource_start(dev, 0);
	mem_length = pci_resource_len(dev, 0);
	if (!request_mem_region(mem_start, mem_length, "ISP1761 IO MEM")) {
		printk(KERN_ERR "request region #1\n");
		return -EBUSY;
	}
	iobase = ioremap_nocache(mem_start, mem_length);
	if (!iobase) {
		printk(KERN_ERR "ioremap #1\n");
		release_mem_region(mem_start, mem_length);
		return -ENOMEM;
	}

	/* configure the PLX PCI chip to pass interrupts */
#define PLX_INT_CSR_REG 0x68
	reg_data = readl(iobase + PLX_INT_CSR_REG);
	reg_data |= 0x900;
	writel(reg_data, iobase + PLX_INT_CSR_REG);

	/* done with PLX IO access */
	iounmap(iobase);
	release_mem_region(mem_start, mem_length);

	return 0;
}

/*
 * PCI probe: enable the device, run the bridge init above, then hand
 * BAR3 to the core driver.  The device is disabled again on any failure.
 */
static int isp1761_pci_probe(struct pci_dev *dev,
			     const struct pci_device_id *id)
{
	unsigned int devflags = 0;
	int ret;

	if (!dev->irq)
		return -ENODEV;

	if (pci_enable_device(dev) < 0)
		return -ENODEV;

	ret = isp1761_pci_init(dev);
	if (ret < 0)
		goto error;

	pci_set_master(dev);

	/* no DMA through this path */
	dev->dev.dma_mask = NULL;
	ret = isp1760_register(&dev->resource[3], dev->irq, 0, &dev->dev,
			       devflags);
	if (ret < 0)
		goto error;

	return 0;

error:
	pci_disable_device(dev);
	return ret;
}

/* PCI remove: tear down the core registration, then disable the device. */
static void isp1761_pci_remove(struct pci_dev *dev)
{
	isp1760_unregister(&dev->dev);

	pci_disable_device(dev);
}

/* Shutdown hook only logs; there is no hardware quiesce here.
 * NOTE(review): the message says "ips1761", not "isp1761" - looks like a
 * typo in the original, kept verbatim since it is a runtime string. */
static void isp1761_pci_shutdown(struct pci_dev *dev)
{
	printk(KERN_ERR "ips1761_pci_shutdown\n");
}

/* Match only the PLX 9054 bridge carrying the ISP1761 (device 0x5406). */
static const struct pci_device_id isp1760_plx[] = {
	{
		.class = PCI_CLASS_BRIDGE_OTHER << 8,
		.class_mask = ~0,
		.vendor = PCI_VENDOR_ID_PLX,
		.device = 0x5406,
		.subvendor = PCI_VENDOR_ID_PLX,
		.subdevice = 0x9054,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, isp1760_plx);

static struct pci_driver isp1761_pci_driver = {
	.name = "isp1760",
	.id_table = isp1760_plx,
	.probe = isp1761_pci_probe,
	.remove = isp1761_pci_remove,
	.shutdown = isp1761_pci_shutdown,
};
#endif

/*
 * Platform probe: gather wiring/feature flags from the device tree (or
 * legacy platform data) and register with the core driver.  The IRQ
 * resource is mandatory; the trigger flags are forwarded as-is.
 */
static int isp1760_plat_probe(struct platform_device *pdev)
{
	unsigned long irqflags;
	unsigned int devflags = 0;
	struct resource *mem_res;
	struct resource *irq_res;
	int ret;

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res) {
		pr_warn("isp1760: IRQ resource not available\n");
		return -ENODEV;
	}

	irqflags = irq_res->flags & IRQF_TRIGGER_MASK;

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		struct device_node *dp = pdev->dev.of_node;
		u32 bus_width = 0;

		if (of_device_is_compatible(dp, "nxp,usb-isp1761"))
			devflags |= ISP1760_FLAG_ISP1761;

		/* Some systems wire up only 16 of the 32 data lines */
		of_property_read_u32(dp, "bus-width", &bus_width);
		if (bus_width == 16)
			devflags |= ISP1760_FLAG_BUS_WIDTH_16;

		if (of_property_read_bool(dp, "port1-otg"))
			devflags |= ISP1760_FLAG_OTG_EN;

		if (of_property_read_bool(dp, "analog-oc"))
			devflags |= ISP1760_FLAG_ANALOG_OC;

		if (of_property_read_bool(dp, "dack-polarity"))
			devflags |= ISP1760_FLAG_DACK_POL_HIGH;

		if (of_property_read_bool(dp, "dreq-polarity"))
			devflags |= ISP1760_FLAG_DREQ_POL_HIGH;
	} else if (dev_get_platdata(&pdev->dev)) {
		/* legacy board-file configuration path */
		struct isp1760_platform_data *pdata =
			dev_get_platdata(&pdev->dev);

		if (pdata->is_isp1761)
			devflags |= ISP1760_FLAG_ISP1761;
		if (pdata->bus_width_16)
			devflags |= ISP1760_FLAG_BUS_WIDTH_16;
		if (pdata->port1_otg)
			devflags |= ISP1760_FLAG_OTG_EN;
		if (pdata->analog_oc)
			devflags |= ISP1760_FLAG_ANALOG_OC;
		if (pdata->dack_polarity_high)
			devflags |= ISP1760_FLAG_DACK_POL_HIGH;
		if (pdata->dreq_polarity_high)
			devflags |= ISP1760_FLAG_DREQ_POL_HIGH;
	}

	ret = isp1760_register(mem_res, irq_res->start, irqflags, &pdev->dev,
			       devflags);
	if (ret < 0)
		return ret;

	pr_info("ISP1760 USB device initialised\n");
	return 0;
}

/* Platform remove: undo the core registration. */
static int isp1760_plat_remove(struct platform_device *pdev)
{
	isp1760_unregister(&pdev->dev);

	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id isp1760_of_match[] = {
	{ .compatible = "nxp,usb-isp1760", },
	{ .compatible = "nxp,usb-isp1761", },
	{ },
};
MODULE_DEVICE_TABLE(of, isp1760_of_match);
#endif

static struct platform_driver isp1760_plat_driver = {
	.probe	= isp1760_plat_probe,
	.remove	= isp1760_plat_remove,
	.driver	= {
		.name	= "isp1760",
		.of_match_table = of_match_ptr(isp1760_of_match),
	},
};

/*
 * Module init: succeed if at least one of the two bus glues (platform,
 * PCI) registered; only free the slab caches when both failed.
 */
static int __init isp1760_init(void)
{
	int ret, any_ret = -ENODEV;

	isp1760_init_kmem_once();

	ret = platform_driver_register(&isp1760_plat_driver);
	if (!ret)
		any_ret = 0;
#ifdef CONFIG_USB_PCI
	ret = pci_register_driver(&isp1761_pci_driver);
	if (!ret)
		any_ret = 0;
#endif

	if (any_ret)
		isp1760_deinit_kmem_cache();
	return any_ret;
}
module_init(isp1760_init);

static void __exit isp1760_exit(void)
{
	platform_driver_unregister(&isp1760_plat_driver);
#ifdef CONFIG_USB_PCI
	pci_unregister_driver(&isp1761_pci_driver);
#endif
	isp1760_deinit_kmem_cache();
}
module_exit(isp1760_exit);
gpl-2.0
sdwuyawen/linux2.6.22_helper2416
arch/um/sys-i386/ptrace.c
68
10832
/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include <linux/compiler.h>
#include "linux/sched.h"
#include "linux/mm.h"
#include "asm/elf.h"
#include "asm/ptrace.h"
#include "asm/uaccess.h"
#include "asm/unistd.h"
#include "sysdep/ptrace.h"
#include "sysdep/sigcontext.h"
#include "sysdep/sc.h"

/* Context-switch hook for TT mode: refresh debug registers and TLS. */
void arch_switch_to_tt(struct task_struct *from, struct task_struct *to)
{
	update_debugregs(to->thread.arch.debugregs_seq);
	arch_switch_tls_tt(from, to);
}

/* Context-switch hook for SKAS mode: switch TLS, warning on failures
 * other than the expected -EINVAL (no TLS set up yet). */
void arch_switch_to_skas(struct task_struct *from, struct task_struct *to)
{
	int err = arch_switch_tls_skas(from, to);
	if (!err)
		return;

	if (err != -EINVAL)
		printk(KERN_WARNING "arch_switch_tls_skas failed, errno %d, not EINVAL\n", -err);
	else
		printk(KERN_WARNING "arch_switch_tls_skas failed, errno = EINVAL\n");
}

/*
 * Return non-zero if the instruction at the given userspace address is a
 * system-call entry ("int 0x80" or "sysenter", read as little-endian
 * 16-bit opcodes).  On an unreadable address it conservatively reports 1.
 */
int is_syscall(unsigned long addr)
{
	unsigned short instr;
	int n;

	n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
	if (n) {
		/* access_process_vm() grants access to vsyscall and stub,
		 * while copy_from_user doesn't. Maybe access_process_vm is
		 * slow, but that doesn't matter, since it will be called only
		 * in case of singlestepping, if copy_from_user failed.
		 */
		n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
		if (n != sizeof(instr)) {
			printk("is_syscall : failed to read instruction from "
			       "0x%lx\n", addr);
			return(1);
		}
	}

	/* int 0x80 or sysenter */
	return((instr == 0x80cd) || (instr == 0x340f));
}

/* determines which EFLAGS bits the user has access to:
 * 1 = access, 0 = no access */
#define FLAG_MASK 0x00044dd5

/*
 * Write one user-visible register of a traced child.  @regno is a byte
 * offset into the register file (hence the >>= 2).  Segment registers are
 * validated (RPL must be 3 for a non-zero selector) and EFLAGS writes are
 * restricted to the bits in FLAG_MASK.  Returns 0 or -EIO.
 */
int putreg(struct task_struct *child, int regno, unsigned long value)
{
	regno >>= 2;
	switch (regno) {
	case FS:
		if (value && (value & 3) != 3)
			return -EIO;
		PT_REGS_FS(&child->thread.regs) = value;
		return 0;
	case GS:
		if (value && (value & 3) != 3)
			return -EIO;
		PT_REGS_GS(&child->thread.regs) = value;
		return 0;
	case DS:
	case ES:
		if (value && (value & 3) != 3)
			return -EIO;
		value &= 0xffff;
		break;
	case SS:
	case CS:
		if ((value & 3) != 3)
			return -EIO;
		value &= 0xffff;
		break;
	case EFL:
		/* keep only user-writable flag bits, merge with current */
		value &= FLAG_MASK;
		value |= PT_REGS_EFLAGS(&child->thread.regs);
		break;
	}
	PT_REGS_SET(&child->thread.regs, regno, value);
	return 0;
}

/*
 * PTRACE_POKEUSR backend: write a word into the USER area at @addr.
 * Regular registers go through putreg(); debug registers 0-3 and 6-7 are
 * stored directly (4 and 5 are reserved).  Returns 0 or -EIO.
 */
int poke_user(struct task_struct *child, long addr, long data)
{
	if ((addr & 3) || addr < 0)
		return -EIO;

	if (addr < MAX_REG_OFFSET)
		return putreg(child, addr, data);

	else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
		 (addr <= offsetof(struct user, u_debugreg[7]))) {
		addr -= offsetof(struct user, u_debugreg[0]);
		addr = addr >> 2;
		if ((addr == 4) || (addr == 5))
			return -EIO;
		child->thread.arch.debugregs[addr] = data;
		return 0;
	}
	return -EIO;
}

/*
 * Read one user-visible register of a traced child.  Segment registers
 * are masked to 16 bits (the cases fall through into the default read).
 */
unsigned long getreg(struct task_struct *child, int regno)
{
	unsigned long retval = ~0UL;

	regno >>= 2;
	switch (regno) {
	case FS:
	case GS:
	case DS:
	case ES:
	case SS:
	case CS:
		retval = 0xffff;
		/* fall through */
	default:
		retval &= PT_REG(&child->thread.regs, regno);
	}
	return retval;
}

/* read the word at location addr in the USER area.
 * NOTE(review): @data is a userspace destination pointer smuggled through
 * a long (ptrace ABI) - confirm callers always pass a valid user pointer. */
int peek_user(struct task_struct *child, long addr, long data)
{
	unsigned long tmp;

	if ((addr & 3) || addr < 0)
		return -EIO;

	tmp = 0;  /* Default return condition */
	if (addr < MAX_REG_OFFSET) {
		tmp = getreg(child, addr);
	}
	else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
		 (addr <= offsetof(struct user, u_debugreg[7]))) {
		addr -= offsetof(struct user, u_debugreg[0]);
		addr = addr >> 2;
		tmp = child->thread.arch.debugregs[addr];
	}
	return put_user(tmp, (unsigned long __user *) data);
}

/* In-kernel mirror of the hardware FXSAVE area layout. */
struct i387_fxsave_struct {
	unsigned short	cwd;
	unsigned short	swd;
	unsigned short	twd;
	unsigned short	fop;
	long	fip;
	long	fcs;
	long	foo;
	long	fos;
	long	mxcsr;
	long	reserved;
	long	st_space[32];	 /* 8*16 bytes for each FP-reg = 128 bytes */
	long	xmm_space[32];	 /* 8*16 bytes for each XMM-reg = 128 bytes */
	long	padding[56];
};

/*
 * FPU tag word conversions.
 */

/* Compress the full i387 tag word (2 bits per register) into the FXSR
 * form (1 bit per register: 1 = valid, 0 = empty). */
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
	return tmp;
}

/* Expand the 1-bit-per-register FXSR tag word back into the full i387
 * 2-bit form by classifying each valid register's contents. */
static inline unsigned long twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
	struct _fpxreg *st = NULL;
	unsigned long twd = (unsigned long) fxsave->twd;
	unsigned long tag;
	unsigned long ret = 0xffff0000;
	int i;

/* NOTE(review): this macro ends with a stray semicolon (CERT PRE11-C);
 * it only works because every use is a complete expression statement,
 * leaving a harmless empty statement behind.  Kept byte-identical here. */
#define FPREG_ADDR(f, n)	((char *)&(f)->st_space + (n) * 16);

	for (i = 0; i < 8; i++) {
		if (twd & 0x1) {
			st = (struct _fpxreg *) FPREG_ADDR(fxsave, i);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = 2;		/* Special */
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3]) {
					tag = 1;	/* Zero */
				} else {
					tag = 2;	/* Special */
				}
				break;
			default:
				if (st->significand[3] & 0x8000) {
					tag = 0;	/* Valid */
				} else {
					tag = 2;	/* Special */
				}
				break;
			}
		} else {
			tag = 3;			/* Empty */
		}
		ret |= (tag << (2 * i));
		twd = twd >> 1;
	}
	return ret;
}

/*
 * FXSR floating point environment conversions.
 */

#ifdef CONFIG_MODE_TT
/* Convert the child's FXSR state into the legacy user _fpstate layout.
 * Returns 0 on success, 1 on a failed copy to userspace. */
static inline int convert_fxsr_to_user_tt(struct _fpstate __user *buf,
					  struct pt_regs *regs)
{
	struct i387_fxsave_struct *fxsave = SC_FXSR_ENV(PT_REGS_SC(regs));
	unsigned long env[7];
	struct _fpreg __user *to;
	struct _fpxreg *from;
	int i;

	env[0] = (unsigned long)fxsave->cwd | 0xffff0000;
	env[1] = (unsigned long)fxsave->swd | 0xffff0000;
	env[2] = twd_fxsr_to_i387(fxsave);
	env[3] = fxsave->fip;
	env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16);
	env[5] = fxsave->foo;
	env[6] = fxsave->fos;

	if (__copy_to_user(buf, env, 7 * sizeof(unsigned long)))
		return 1;

	to = &buf->_st[0];
	from = (struct _fpxreg *) &fxsave->st_space[0];
	for (i = 0; i < 8; i++, to++, from++) {
		if (__copy_to_user(to, from, sizeof(*to)))
			return 1;
	}
	return 0;
}
#endif

/* Mode dispatcher: in SKAS mode this is a no-op returning 0. */
static inline int convert_fxsr_to_user(struct _fpstate __user *buf,
				       struct pt_regs *regs)
{
	return(CHOOSE_MODE(convert_fxsr_to_user_tt(buf, regs), 0));
}

#ifdef CONFIG_MODE_TT
/* Inverse of convert_fxsr_to_user_tt(): load legacy user _fpstate data
 * into the child's FXSR area.  Returns 0 on success, 1 on copy failure. */
static inline int convert_fxsr_from_user_tt(struct pt_regs *regs,
					    struct _fpstate __user *buf)
{
	struct i387_fxsave_struct *fxsave = SC_FXSR_ENV(PT_REGS_SC(regs));
	unsigned long env[7];
	struct _fpxreg *to;
	struct _fpreg __user *from;
	int i;

	if (__copy_from_user(env, buf, 7 * sizeof(long)))
		return 1;

	fxsave->cwd = (unsigned short)(env[0] & 0xffff);
	fxsave->swd = (unsigned short)(env[1] & 0xffff);
	fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
	fxsave->fip = env[3];
	fxsave->fop = (unsigned short)((env[4] & 0xffff0000) >> 16);
	fxsave->fcs = (env[4] & 0xffff);
	fxsave->foo = env[5];
	fxsave->fos = env[6];

	to = (struct _fpxreg *) &fxsave->st_space[0];
	from = &buf->_st[0];
	for (i = 0; i < 8; i++, to++, from++) {
		if (__copy_from_user(to, from, sizeof(*from)))
			return 1;
	}
	return 0;
}
#endif

/* Mode dispatcher: in SKAS mode this is a no-op returning 0. */
static inline int convert_fxsr_from_user(struct pt_regs *regs,
					 struct _fpstate __user *buf)
{
	return(CHOOSE_MODE(convert_fxsr_from_user_tt(regs, buf), 0));
}

/* PTRACE_GETFPREGS backend: @buf is a user pointer passed as a long. */
int get_fpregs(unsigned long buf, struct task_struct *child)
{
	int err;

	err = convert_fxsr_to_user((struct _fpstate __user *) buf,
				   &child->thread.regs);
	if (err)
		return(-EFAULT);
	else
		return(0);
}

/* PTRACE_SETFPREGS backend: @buf is a user pointer passed as a long. */
int set_fpregs(unsigned long buf, struct task_struct *child)
{
	int err;

	err = convert_fxsr_from_user(&child->thread.regs,
				     (struct _fpstate __user *) buf);
	if (err)
		return(-EFAULT);
	else
		return(0);
}

#ifdef CONFIG_MODE_TT
/* Copy the raw FXSR block to userspace (PTRACE_GETFPXREGS, TT mode). */
int get_fpxregs_tt(unsigned long buf, struct task_struct *tsk)
{
	struct pt_regs *regs = &tsk->thread.regs;
	struct i387_fxsave_struct *fxsave = SC_FXSR_ENV(PT_REGS_SC(regs));
	int err;

	err = __copy_to_user((void __user *) buf, fxsave,
			     sizeof(struct user_fxsr_struct));
	if (err)
		return -EFAULT;
	else
		return 0;
}
#endif

/* Mode dispatcher for PTRACE_GETFPXREGS. */
int get_fpxregs(unsigned long buf, struct task_struct *tsk)
{
	return(CHOOSE_MODE(get_fpxregs_tt(buf, tsk), 0));
}

#ifdef CONFIG_MODE_TT
/* Load the raw FXSR block from userspace (PTRACE_SETFPXREGS, TT mode). */
int set_fpxregs_tt(unsigned long buf, struct task_struct *tsk)
{
	struct pt_regs *regs = &tsk->thread.regs;
	struct i387_fxsave_struct *fxsave = SC_FXSR_ENV(PT_REGS_SC(regs));
	int err;

	err = __copy_from_user(fxsave, (void __user *) buf,
			       sizeof(struct user_fxsr_struct) );
	if (err)
		return -EFAULT;
	else
		return 0;
}
#endif

/* Mode dispatcher for PTRACE_SETFPXREGS. */
int set_fpxregs(unsigned long buf, struct task_struct *tsk)
{
	return(CHOOSE_MODE(set_fpxregs_tt(buf, tsk), 0));
}

#ifdef notdef
/* Disabled legacy dump_fpu() implementation, kept for reference. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	fpu->cwd = (((SC_FP_CW(PT_REGS_SC(regs)) & 0xffff) << 16) |
		    (SC_FP_SW(PT_REGS_SC(regs)) & 0xffff));
	fpu->swd = SC_FP_CSSEL(PT_REGS_SC(regs)) & 0xffff;
	fpu->twd = SC_FP_IPOFF(PT_REGS_SC(regs));
	fpu->fip = SC_FP_CSSEL(PT_REGS_SC(regs)) & 0xffff;
	fpu->fcs = SC_FP_DATAOFF(PT_REGS_SC(regs));
	fpu->foo = SC_FP_DATASEL(PT_REGS_SC(regs));
	fpu->fos = 0;
	memcpy(fpu->st_space, (void *) SC_FP_ST(PT_REGS_SC(regs)),
	       sizeof(fpu->st_space));
	return(1);
}
#endif

#ifdef CONFIG_MODE_TT
/* Repack FXSR state into the classic i387 user layout: the 7-long
 * environment header, then each 16-byte FX register narrowed to the
 * 10-byte (5 x u16) i387 register slot. */
static inline void copy_fpu_fxsave_tt(struct pt_regs *regs,
				      struct user_i387_struct *buf)
{
	struct i387_fxsave_struct *fpu = SC_FXSR_ENV(PT_REGS_SC(regs));
	unsigned short *to;
	unsigned short *from;
	int i;

	memcpy(buf, fpu, 7 * sizeof(long));

	to = (unsigned short *) &buf->st_space[0];
	from = (unsigned short *) &fpu->st_space[0];
	for (i = 0; i < 8; i++, to += 5, from += 8) {
		memcpy(to, from, 5 * sizeof(unsigned short));
	}
}
#endif

/* Mode dispatcher for the FXSR-to-i387 repack; no-op in SKAS mode. */
static inline void copy_fpu_fxsave(struct pt_regs *regs,
				   struct user_i387_struct *buf)
{
	(void) CHOOSE_MODE(copy_fpu_fxsave_tt(regs, buf), 0);
}

/* ELF core-dump hook: emit the FPU registers; returns 1 (registers
 * present). */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu )
{
	copy_fpu_fxsave(regs, (struct user_i387_struct *) fpu);
	return(1);
}

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */
gpl-2.0
olegfusion/IM-A830S_kernel
drivers/media/video/msm/sensors/imx074_v4l2.c
68
9641
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include "msm_sensor.h" #define SENSOR_NAME "imx074" #define PLATFORM_DRIVER_NAME "msm_camera_imx074" #define imx074_obj imx074_##obj DEFINE_MUTEX(imx074_mut); static struct msm_sensor_ctrl_t imx074_s_ctrl; static struct msm_camera_i2c_reg_conf imx074_start_settings[] = { {0x0100, 0x01}, }; static struct msm_camera_i2c_reg_conf imx074_stop_settings[] = { {0x0100, 0x00}, }; static struct msm_camera_i2c_reg_conf imx074_groupon_settings[] = { {0x104, 0x01}, }; static struct msm_camera_i2c_reg_conf imx074_groupoff_settings[] = { {0x104, 0x00}, }; static struct msm_camera_i2c_reg_conf imx074_prev_settings[] = { {0x0307, 0x2D}, /*pll_multiplier*/ {0x0340, 0x06}, /*frame_length_lines_hi*/ {0x0341, 0x2D}, /*frame_length_lines_lo*/ {0x0342, 0x11}, /*line_length_pclk_hi*/ {0x0343, 0x78}, /*line_length_pclk_lo*/ {0x0347, 0x00}, /*y_addr_start*/ {0x034b, 0x2F}, /*y_add_end*/ {0x034c, 0x08}, /*x_output_size_msb*/ {0x034d, 0x38}, /*x_output_size_lsb*/ {0x034e, 0x06}, /*y_output_size_msb*/ {0x034f, 0x18}, /*y_output_size_lsb*/ {0x0381, 0x01}, /*x_even_inc*/ {0x0383, 0x03}, /*x_odd_inc*/ {0x0385, 0x01}, /*y_even_inc*/ {0x0387, 0x03}, /*y_odd_inc*/ {0x3001, 0x80}, /*hmodeadd*/ {0x3016, 0x16}, /*vmodeadd*/ {0x3069, 0x24}, /*vapplinepos_start*/ {0x306b, 0x53}, /*vapplinepos_end*/ {0x3086, 0x00}, /*shutter*/ {0x30e8, 0x80}, /*haddave*/ {0x3301, 0x83}, /*lanesel*/ }; static struct msm_camera_i2c_reg_conf imx074_snap_settings[] = { {0x0307, 0x26}, /*pll_multiplier*/ {0x0340, 
0x0C}, /*frame_length_lines_hi*/ {0x0341, 0x90}, /*frame_length_lines_lo*/ {0x0342, 0x11}, /*line_length_pclk_hi*/ {0x0343, 0x78}, /*line_length_pclk_lo*/ {0x0347, 0x00}, /*y_addr_start*/ {0x034b, 0x2F}, /*y_add_end*/ {0x034c, 0x10}, /*x_output_size_msb*/ {0x034d, 0x70}, /*x_output_size_lsb*/ {0x034e, 0x0c}, /*y_output_size_msb*/ {0x034f, 0x30}, /*y_output_size_lsb*/ {0x0381, 0x01}, /*x_even_inc*/ {0x0383, 0x01}, /*x_odd_inc*/ {0x0385, 0x01}, /*y_even_inc*/ {0x0387, 0x01}, /*y_odd_inc*/ {0x3001, 0x00}, /*hmodeadd*/ {0x3016, 0x06}, /*vmodeadd*/ {0x3069, 0x24}, /*vapplinepos_start*/ {0x306b, 0x53}, /*vapplinepos_end*/ {0x3086, 0x00}, /*shutter*/ {0x30e8, 0x00}, /*haddave*/ {0x3301, 0x03}, /*lanesel*/ }; static struct msm_camera_i2c_reg_conf imx074_recommend_settings[] = { {0x0305, 0x02}, {0x302b, 0x4B}, {0x3024, 0x03}, {0x0101, 0x00}, {0x300a, 0x80}, {0x3014, 0x08}, {0x3015, 0x37}, {0x301c, 0x01}, {0x302c, 0x05}, {0x3031, 0x26}, {0x3041, 0x60}, {0x3051, 0x24}, {0x3053, 0x34}, {0x3057, 0xc0}, {0x305c, 0x09}, {0x305d, 0x07}, {0x3060, 0x30}, {0x3065, 0x00}, {0x30aa, 0x08}, {0x30ab, 0x1c}, {0x30b0, 0x32}, {0x30b2, 0x83}, {0x30d3, 0x04}, {0x3106, 0x78}, {0x310c, 0x82}, {0x3304, 0x05}, {0x3305, 0x04}, {0x3306, 0x11}, {0x3307, 0x02}, {0x3308, 0x0c}, {0x3309, 0x06}, {0x330a, 0x08}, {0x330b, 0x04}, {0x330c, 0x08}, {0x330d, 0x06}, {0x330f, 0x01}, {0x3381, 0x00}, }; static struct v4l2_subdev_info imx074_subdev_info[] = { { .code = V4L2_MBUS_FMT_SBGGR10_1X10, .colorspace = V4L2_COLORSPACE_JPEG, .fmt = 1, .order = 0, }, /* more can be supported, to be added later */ }; static struct msm_camera_i2c_conf_array imx074_init_conf[] = { {&imx074_recommend_settings[0], ARRAY_SIZE(imx074_recommend_settings), 0, MSM_CAMERA_I2C_BYTE_DATA} }; static struct msm_camera_i2c_conf_array imx074_confs[] = { {&imx074_snap_settings[0], ARRAY_SIZE(imx074_snap_settings), 0, MSM_CAMERA_I2C_BYTE_DATA}, {&imx074_prev_settings[0], ARRAY_SIZE(imx074_prev_settings), 0, MSM_CAMERA_I2C_BYTE_DATA}, }; static 
struct msm_sensor_output_info_t imx074_dimensions[] = { { .x_output = 0x1070, .y_output = 0xC30, .line_length_pclk = 0x1178, .frame_length_lines = 0xC90, .vt_pixel_clk = 182400000, .op_pixel_clk = 182400000, .binning_factor = 1, }, { .x_output = 0x838, .y_output = 0x618, .line_length_pclk = 0x1178, .frame_length_lines = 0x62D, .vt_pixel_clk = 216000000, .op_pixel_clk = 216000000, .binning_factor = 2, }, }; static struct msm_camera_csid_vc_cfg imx074_cid_cfg[] = { {0, CSI_RAW10, CSI_DECODE_10BIT}, {1, CSI_EMBED_DATA, CSI_DECODE_8BIT}, }; static struct msm_camera_csi2_params imx074_csi_params = { .csid_params = { .lane_assign = 0xe4, .lane_cnt = 4, .lut_params = { .num_cid = 2, .vc_cfg = imx074_cid_cfg, }, }, .csiphy_params = { .lane_cnt = 4, .settle_cnt = 0x1B, }, }; static struct msm_camera_csi2_params *imx074_csi_params_array[] = { &imx074_csi_params, &imx074_csi_params, }; static struct msm_sensor_output_reg_addr_t imx074_reg_addr = { .x_output = 0x34C, .y_output = 0x34E, .line_length_pclk = 0x342, .frame_length_lines = 0x340, }; static struct msm_sensor_id_info_t imx074_id_info = { .sensor_id_reg_addr = 0x0, .sensor_id = 0x0074, }; static struct msm_sensor_exp_gain_info_t imx074_exp_gain_info = { .coarse_int_time_addr = 0x202, .global_gain_addr = 0x204, .vert_offset = 3, }; static struct sensor_calib_data imx074_calib_data; static const struct i2c_device_id imx074_i2c_id[] = { {SENSOR_NAME, (kernel_ulong_t)&imx074_s_ctrl}, { } }; static struct i2c_driver imx074_i2c_driver = { .id_table = imx074_i2c_id, .probe = msm_sensor_i2c_probe, .driver = { .name = SENSOR_NAME, }, }; static struct msm_camera_i2c_client imx074_sensor_i2c_client = { .addr_type = MSM_CAMERA_I2C_WORD_ADDR, }; static struct msm_camera_i2c_client imx074_eeprom_i2c_client = { .addr_type = MSM_CAMERA_I2C_BYTE_ADDR, }; static struct msm_camera_eeprom_read_t imx074_eeprom_read_tbl[] = { {0x10, &imx074_calib_data.r_over_g, 2, 1}, {0x12, &imx074_calib_data.b_over_g, 2, 1}, {0x14, 
&imx074_calib_data.gr_over_gb, 2, 1}, }; static struct msm_camera_eeprom_data_t imx074_eeprom_data_tbl[] = { {&imx074_calib_data, sizeof(struct sensor_calib_data)}, }; static struct msm_camera_eeprom_client imx074_eeprom_client = { .i2c_client = &imx074_eeprom_i2c_client, .i2c_addr = 0xA4, .func_tbl = { .eeprom_set_dev_addr = NULL, .eeprom_init = msm_camera_eeprom_init, .eeprom_release = msm_camera_eeprom_release, .eeprom_get_data = msm_camera_eeprom_get_data, }, .read_tbl = imx074_eeprom_read_tbl, .read_tbl_size = ARRAY_SIZE(imx074_eeprom_read_tbl), .data_tbl = imx074_eeprom_data_tbl, .data_tbl_size = ARRAY_SIZE(imx074_eeprom_data_tbl), }; static int __init msm_sensor_init_module(void) { return i2c_add_driver(&imx074_i2c_driver); } static struct v4l2_subdev_core_ops imx074_subdev_core_ops = { .ioctl = msm_sensor_subdev_ioctl, .s_power = msm_sensor_power, }; static struct v4l2_subdev_video_ops imx074_subdev_video_ops = { .enum_mbus_fmt = msm_sensor_v4l2_enum_fmt, }; static struct v4l2_subdev_ops imx074_subdev_ops = { .core = &imx074_subdev_core_ops, .video = &imx074_subdev_video_ops, }; static struct msm_sensor_fn_t imx074_func_tbl = { .sensor_start_stream = msm_sensor_start_stream, .sensor_stop_stream = msm_sensor_stop_stream, .sensor_group_hold_on = msm_sensor_group_hold_on, .sensor_group_hold_off = msm_sensor_group_hold_off, .sensor_set_fps = msm_sensor_set_fps, .sensor_write_exp_gain = msm_sensor_write_exp_gain1, .sensor_write_snapshot_exp_gain = msm_sensor_write_exp_gain1, .sensor_setting = msm_sensor_setting, .sensor_set_sensor_mode = msm_sensor_set_sensor_mode, .sensor_mode_init = msm_sensor_mode_init, .sensor_get_output_info = msm_sensor_get_output_info, .sensor_config = msm_sensor_config, .sensor_power_up = msm_sensor_power_up, .sensor_power_down = msm_sensor_power_down, }; static struct msm_sensor_reg_t imx074_regs = { .default_data_type = MSM_CAMERA_I2C_BYTE_DATA, .start_stream_conf = imx074_start_settings, .start_stream_conf_size = 
ARRAY_SIZE(imx074_start_settings), .stop_stream_conf = imx074_stop_settings, .stop_stream_conf_size = ARRAY_SIZE(imx074_stop_settings), .group_hold_on_conf = imx074_groupon_settings, .group_hold_on_conf_size = ARRAY_SIZE(imx074_groupon_settings), .group_hold_off_conf = imx074_groupoff_settings, .group_hold_off_conf_size = ARRAY_SIZE(imx074_groupoff_settings), .init_settings = &imx074_init_conf[0], .init_size = ARRAY_SIZE(imx074_init_conf), .mode_settings = &imx074_confs[0], .output_settings = &imx074_dimensions[0], .num_conf = ARRAY_SIZE(imx074_confs), }; static struct msm_sensor_ctrl_t imx074_s_ctrl = { .msm_sensor_reg = &imx074_regs, .sensor_i2c_client = &imx074_sensor_i2c_client, .sensor_i2c_addr = 0x34, .sensor_eeprom_client = &imx074_eeprom_client, .sensor_output_reg_addr = &imx074_reg_addr, .sensor_id_info = &imx074_id_info, .sensor_exp_gain_info = &imx074_exp_gain_info, .cam_mode = MSM_SENSOR_MODE_INVALID, .csi_params = &imx074_csi_params_array[0], .msm_sensor_mutex = &imx074_mut, .sensor_i2c_driver = &imx074_i2c_driver, .sensor_v4l2_subdev_info = imx074_subdev_info, .sensor_v4l2_subdev_info_size = ARRAY_SIZE(imx074_subdev_info), .sensor_v4l2_subdev_ops = &imx074_subdev_ops, .func_tbl = &imx074_func_tbl, .clk_rate = MSM_SENSOR_MCLK_24HZ, }; module_init(msm_sensor_init_module); MODULE_DESCRIPTION("Sony 13MP Bayer sensor driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
mupuf/linux-nouveau-pm
drivers/usb/host/ohci-omap3.c
68
6491
/* * ohci-omap3.c - driver for OHCI on OMAP3 and later processors * * Bus Glue for OMAP3 USBHOST 3 port OHCI controller * This controller is also used in later OMAPs and AM35x chips * * Copyright (C) 2007-2010 Texas Instruments, Inc. * Author: Vikram Pandita <vikram.pandita@ti.com> * Author: Anand Gadiyar <gadiyar@ti.com> * Author: Keshava Munegowda <keshava_mgowda@ti.com> * * Based on ehci-omap.c and some other ohci glue layers * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * TODO (last updated Feb 27, 2011): * - add kernel-doc */ #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/of.h> #include <linux/dma-mapping.h> /*-------------------------------------------------------------------------*/ static int ohci_omap3_init(struct usb_hcd *hcd) { dev_dbg(hcd->self.controller, "starting OHCI controller\n"); return ohci_init(hcd_to_ohci(hcd)); } /*-------------------------------------------------------------------------*/ static int ohci_omap3_start(struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci(hcd); int ret; /* * RemoteWakeupConnected has to be set explicitly before * calling ohci_run. The reset value of RWC is 0. 
*/ ohci->hc_control = OHCI_CTRL_RWC; writel(OHCI_CTRL_RWC, &ohci->regs->control); ret = ohci_run(ohci); if (ret < 0) { dev_err(hcd->self.controller, "can't start\n"); ohci_stop(hcd); } return ret; } /*-------------------------------------------------------------------------*/ static const struct hc_driver ohci_omap3_hc_driver = { .description = hcd_name, .product_desc = "OMAP3 OHCI Host Controller", .hcd_priv_size = sizeof(struct ohci_hcd), /* * generic hardware linkage */ .irq = ohci_irq, .flags = HCD_USB11 | HCD_MEMORY, /* * basic lifecycle operations */ .reset = ohci_omap3_init, .start = ohci_omap3_start, .stop = ohci_stop, .shutdown = ohci_shutdown, /* * managing i/o requests and associated device resources */ .urb_enqueue = ohci_urb_enqueue, .urb_dequeue = ohci_urb_dequeue, .endpoint_disable = ohci_endpoint_disable, /* * scheduling support */ .get_frame_number = ohci_get_frame, /* * root hub support */ .hub_status_data = ohci_hub_status_data, .hub_control = ohci_hub_control, #ifdef CONFIG_PM .bus_suspend = ohci_bus_suspend, .bus_resume = ohci_bus_resume, #endif .start_port_reset = ohci_start_port_reset, }; /*-------------------------------------------------------------------------*/ /* * configure so an HC device and id are always provided * always called with process context; sleeping is OK */ /** * ohci_hcd_omap3_probe - initialize OMAP-based HCDs * * Allocates basic resources for this USB host controller, and * then invokes the start() method for the HCD associated with it * through the hotplug entry's driver_data. 
*/ static int ohci_hcd_omap3_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct usb_hcd *hcd = NULL; void __iomem *regs = NULL; struct resource *res; int ret = -ENODEV; int irq; if (usb_disabled()) return -ENODEV; if (!dev->parent) { dev_err(dev, "Missing parent device\n"); return -ENODEV; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(dev, "OHCI irq failed\n"); return -ENODEV; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(dev, "UHH OHCI get resource failed\n"); return -ENOMEM; } regs = ioremap(res->start, resource_size(res)); if (!regs) { dev_err(dev, "UHH OHCI ioremap failed\n"); return -ENOMEM; } /* * Right now device-tree probed devices don't get dma_mask set. * Since shared usb code relies on it, set it here for now. * Once we have dma capability bindings this can go away. */ if (!dev->dma_mask) dev->dma_mask = &dev->coherent_dma_mask; if (!dev->coherent_dma_mask) dev->coherent_dma_mask = DMA_BIT_MASK(32); hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev, dev_name(dev)); if (!hcd) { dev_err(dev, "usb_create_hcd failed\n"); goto err_io; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); hcd->regs = regs; pm_runtime_enable(dev); pm_runtime_get_sync(dev); ohci_hcd_init(hcd_to_ohci(hcd)); ret = usb_add_hcd(hcd, irq, 0); if (ret) { dev_dbg(dev, "failed to add hcd with err %d\n", ret); goto err_add_hcd; } return 0; err_add_hcd: pm_runtime_put_sync(dev); usb_put_hcd(hcd); err_io: iounmap(regs); return ret; } /* * may be called without controller electrically present * may be called with controller, bus, and devices active */ /** * ohci_hcd_omap3_remove - shutdown processing for OHCI HCDs * @pdev: USB Host Controller being removed * * Reverses the effect of ohci_hcd_omap3_probe(), first invoking * the HCD's stop() method. It is always called from a thread * context, normally "rmmod", "apmd", or something similar. 
*/ static int ohci_hcd_omap3_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct usb_hcd *hcd = dev_get_drvdata(dev); iounmap(hcd->regs); usb_remove_hcd(hcd); pm_runtime_put_sync(dev); pm_runtime_disable(dev); usb_put_hcd(hcd); return 0; } static void ohci_hcd_omap3_shutdown(struct platform_device *pdev) { struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev); if (hcd->driver->shutdown) hcd->driver->shutdown(hcd); } static const struct of_device_id omap_ohci_dt_ids[] = { { .compatible = "ti,ohci-omap3" }, { } }; MODULE_DEVICE_TABLE(of, omap_ohci_dt_ids); static struct platform_driver ohci_hcd_omap3_driver = { .probe = ohci_hcd_omap3_probe, .remove = ohci_hcd_omap3_remove, .shutdown = ohci_hcd_omap3_shutdown, .driver = { .name = "ohci-omap3", .of_match_table = omap_ohci_dt_ids, }, }; MODULE_ALIAS("platform:ohci-omap3"); MODULE_AUTHOR("Anand Gadiyar <gadiyar@ti.com>");
gpl-2.0
vasudev-33/secureModule
drivers/acpi/acpica/utownerid.c
324
6911
/******************************************************************************* * * Module Name: utownerid - Support for Table/Method Owner IDs * ******************************************************************************/ /* * Copyright (C) 2000 - 2013, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utownerid") /******************************************************************************* * * FUNCTION: acpi_ut_allocate_owner_id * * PARAMETERS: owner_id - Where the new owner ID is returned * * RETURN: Status * * DESCRIPTION: Allocate a table or method owner ID. The owner ID is used to * track objects created by the table or method, to be deleted * when the method exits or the table is unloaded. * ******************************************************************************/ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id) { u32 i; u32 j; u32 k; acpi_status status; ACPI_FUNCTION_TRACE(ut_allocate_owner_id); /* Guard against multiple allocations of ID to the same location */ if (*owner_id) { ACPI_ERROR((AE_INFO, "Owner ID [0x%2.2X] already exists", *owner_id)); return_ACPI_STATUS(AE_ALREADY_EXISTS); } /* Mutex for the global ID mask */ status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* * Find a free owner ID, cycle through all possible IDs on repeated * allocations. (ACPI_NUM_OWNERID_MASKS + 1) because first index may have * to be scanned twice. 
*/ for (i = 0, j = acpi_gbl_last_owner_id_index; i < (ACPI_NUM_OWNERID_MASKS + 1); i++, j++) { if (j >= ACPI_NUM_OWNERID_MASKS) { j = 0; /* Wraparound to start of mask array */ } for (k = acpi_gbl_next_owner_id_offset; k < 32; k++) { if (acpi_gbl_owner_id_mask[j] == ACPI_UINT32_MAX) { /* There are no free IDs in this mask */ break; } if (!(acpi_gbl_owner_id_mask[j] & (1 << k))) { /* * Found a free ID. The actual ID is the bit index plus one, * making zero an invalid Owner ID. Save this as the last ID * allocated and update the global ID mask. */ acpi_gbl_owner_id_mask[j] |= (1 << k); acpi_gbl_last_owner_id_index = (u8)j; acpi_gbl_next_owner_id_offset = (u8)(k + 1); /* * Construct encoded ID from the index and bit position * * Note: Last [j].k (bit 255) is never used and is marked * permanently allocated (prevents +1 overflow) */ *owner_id = (acpi_owner_id) ((k + 1) + ACPI_MUL_32(j)); ACPI_DEBUG_PRINT((ACPI_DB_VALUES, "Allocated OwnerId: %2.2X\n", (unsigned int)*owner_id)); goto exit; } } acpi_gbl_next_owner_id_offset = 0; } /* * All owner_ids have been allocated. This typically should * not happen since the IDs are reused after deallocation. The IDs are * allocated upon table load (one per table) and method execution, and * they are released when a table is unloaded or a method completes * execution. * * If this error happens, there may be very deep nesting of invoked control * methods, or there may be a bug where the IDs are not released. */ status = AE_OWNER_ID_LIMIT; ACPI_ERROR((AE_INFO, "Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT")); exit: (void)acpi_ut_release_mutex(ACPI_MTX_CACHES); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ut_release_owner_id * * PARAMETERS: owner_id_ptr - Pointer to a previously allocated owner_ID * * RETURN: None. No error is returned because we are either exiting a * control method or unloading a table. 
Either way, we would * ignore any error anyway. * * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255 * ******************************************************************************/ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr) { acpi_owner_id owner_id = *owner_id_ptr; acpi_status status; u32 index; u32 bit; ACPI_FUNCTION_TRACE_U32(ut_release_owner_id, owner_id); /* Always clear the input owner_id (zero is an invalid ID) */ *owner_id_ptr = 0; /* Zero is not a valid owner_ID */ if (owner_id == 0) { ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id)); return_VOID; } /* Mutex for the global ID mask */ status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES); if (ACPI_FAILURE(status)) { return_VOID; } /* Normalize the ID to zero */ owner_id--; /* Decode ID to index/offset pair */ index = ACPI_DIV_32(owner_id); bit = 1 << ACPI_MOD_32(owner_id); /* Free the owner ID only if it is valid */ if (acpi_gbl_owner_id_mask[index] & bit) { acpi_gbl_owner_id_mask[index] ^= bit; } else { ACPI_ERROR((AE_INFO, "Release of non-allocated OwnerId: 0x%2.2X", owner_id + 1)); } (void)acpi_ut_release_mutex(ACPI_MTX_CACHES); return_VOID; }
gpl-2.0
shankarathi07/linux_lg_lollipop
drivers/crypto/msm/qce.c
580
73815
/* Qualcomm Crypto Engine driver. * * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/device.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/crypto.h> #include <crypto/hash.h> #include <crypto/sha.h> #include <linux/qcedev.h> #include <linux/qcota.h> #include <mach/dma.h> #include "qce.h" #include "qcryptohw_30.h" #include "qce_ota.h" /* ADM definitions */ #define LI_SG_CMD (1 << 31) /* last index in the scatter gather cmd */ #define SRC_INDEX_SG_CMD(index) ((index & 0x3fff) << 16) #define DST_INDEX_SG_CMD(index) (index & 0x3fff) #define ADM_DESC_LAST (1 << 31) /* Data xfer between DM and CE in blocks of 16 bytes */ #define ADM_CE_BLOCK_SIZE 16 #define QCE_FIFO_SIZE 0x8000 /* Data xfer between DM and CE in blocks of 64 bytes */ #define ADM_SHA_BLOCK_SIZE 64 #define ADM_DESC_LENGTH_MASK 0xffff #define ADM_DESC_LENGTH(x) (x & ADM_DESC_LENGTH_MASK) struct dmov_desc { uint32_t addr; uint32_t len; }; #define ADM_STATUS_OK 0x80000002 /* Misc definitions */ /* QCE max number of descriptor in a descriptor list */ #define QCE_MAX_NUM_DESC 128 /* State of DM channel */ enum qce_chan_st_enum { QCE_CHAN_STATE_IDLE = 0, QCE_CHAN_STATE_IN_PROG = 1, QCE_CHAN_STATE_COMP = 2, QCE_CHAN_STATE_LAST }; /* * CE HW device structure. 
 * Each engine has an instance of the structure.
 * Each engine can only handle one crypto operation at one time. It is up to
 * the sw above to ensure single threading of operation on an engine.
 */
struct qce_device {
	struct device *pdev;		/* Handle to platform_device structure */
	unsigned char *coh_vmem;	/* Allocated coherent virtual memory */
	dma_addr_t coh_pmem;		/* Allocated coherent physical memory */
	void __iomem *iobase;		/* Virtual io base of CE HW  */
	unsigned int phy_iobase;	/* Physical io base of CE HW    */
	struct clk *ce_clk;		/* Handle to CE clk */
	unsigned int crci_in;		/* CRCI for CE DM IN Channel   */
	unsigned int crci_out;		/* CRCI for CE DM OUT Channel   */
	unsigned int crci_hash;		/* CRCI for CE HASH   */
	unsigned int chan_ce_in;	/* ADM channel used for CE input
					 * and auth result if authentication
					 * only operation. */
	unsigned int chan_ce_out;	/* ADM channel used for CE output,
					 * and icv for esp */
	/*
	 * Command pointer lists, command lists and descriptor arrays for the
	 * two DM channels.  They all live in the single coherent region
	 * carved up by _setup_cmd_template(); each phy_* member is the bus
	 * address of the buffer declared just above it.
	 */
	unsigned int *cmd_pointer_list_ce_in;
	dma_addr_t  phy_cmd_pointer_list_ce_in;

	unsigned int *cmd_pointer_list_ce_out;
	dma_addr_t  phy_cmd_pointer_list_ce_out;

	unsigned char *cmd_list_ce_in;
	dma_addr_t  phy_cmd_list_ce_in;

	unsigned char *cmd_list_ce_out;
	dma_addr_t  phy_cmd_list_ce_out;

	struct dmov_desc *ce_out_src_desc;
	dma_addr_t  phy_ce_out_src_desc;

	struct dmov_desc *ce_out_dst_desc;
	dma_addr_t  phy_ce_out_dst_desc;

	struct dmov_desc *ce_in_src_desc;
	dma_addr_t  phy_ce_in_src_desc;

	struct dmov_desc *ce_in_dst_desc;
	dma_addr_t  phy_ce_in_dst_desc;

	unsigned char *ce_out_ignore;	/* sink for CE output bytes the
					 * caller does not want */
	dma_addr_t phy_ce_out_ignore;

	unsigned char *ce_pad;		/* pad block for partial transfers */
	dma_addr_t phy_ce_pad;

	struct msm_dmov_cmd *chan_ce_in_cmd;
	struct msm_dmov_cmd *chan_ce_out_cmd;

	uint32_t ce_out_ignore_size;

	/* current fill positions within the four descriptor arrays */
	int ce_out_dst_desc_index;
	int ce_in_dst_desc_index;
	int ce_out_src_desc_index;
	int ce_in_src_desc_index;

	enum qce_chan_st_enum chan_ce_in_state;		/* chan ce_in state */
	enum qce_chan_st_enum chan_ce_out_state;	/* chan ce_out state */
	int chan_ce_in_status;		/* chan ce_in status      */
	int chan_ce_out_status;		/* chan ce_out status */

	unsigned char *dig_result;	/* digest/ICV result buffer (coherent) */
	dma_addr_t phy_dig_result;

	/* cached aes key */
	uint32_t aeskey[AES256_KEY_SIZE/sizeof(uint32_t)];

	uint32_t aes_key_size;		/* cached aes key size in bytes */
	int fastaes;			/* ce supports fast aes */
	int hmac;			/* ce support hmac-sha1 */
	bool ota;			/* ce support ota */

	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */

	int assoc_nents;
	int src_nents;
	int dst_nents;

	void *areq;			/* request currently being serviced */
	enum qce_cipher_mode_enum mode;	/* cipher mode of current request */

	dma_addr_t phy_iv_in;
	dma_addr_t phy_ota_src;
	dma_addr_t phy_ota_dst;
	unsigned int ota_size;
	int err;			/* count of CRYPTO_SW_ERR occurrences */
};

/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha1[] = {
	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
};

/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha256[] = {
	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
	0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};

/* Source: FIPS 197, Figure 7. S-box: substitution values for the byte xy */
static const uint32_t _s_box[256] = {
	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
	0x46, 0xee, 0xb8, 0x14, 0xde,
	0x5e, 0x0b, 0xdb,
	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
};

/*
 * Source: FIPS 197, Sec 5.2 Key Expansion, Figure 11. Pseudo Code for Key
 * Expansion.
 *
 * Software AES key schedule, used only when the engine lacks "fast AES"
 * (pce_dev->fastaes == 0): the expanded round keys are then written to the
 * CRYPTO_AES_RNDKEYn registers by _ce_setup().
 *
 * keysize:     key length in BITS (128/192/256; anything else -> 128)
 * AES_KEY:     input key as big-endian 32-bit words
 * AES_RND_KEY: output array of 4*(Nr+1) round-key words
 */
static void _aes_expand_key_schedule(uint32_t keysize, uint32_t *AES_KEY,
		uint32_t *AES_RND_KEY)
{
	uint32_t i;
	uint32_t Nk;		/* key length in words */
	uint32_t Nr, rot_data;	/* Nr: number of rounds */
	uint32_t Rcon = 0x01000000;
	uint32_t temp;
	uint32_t data_in;
	uint32_t MSB_store;
	uint32_t byte_for_sub;
	uint32_t word_sub[4];

	switch (keysize) {
	case 192:
		Nk = 6;
		Nr = 12;
		break;

	case 256:
		Nk = 8;
		Nr = 14;
		break;

	case 128:
	default:
		/* default to AES128 */
		Nk = 4;
		Nr = 10;
		break;
	}

	/* key expansion */
	i = 0;
	while (i < Nk) {
		AES_RND_KEY[i] = AES_KEY[i];
		i = i + 1;
	}

	i = Nk;
	while (i < (4 * (Nr + 1))) {
		temp = AES_RND_KEY[i-1];
		/*
		 * Rcon is selected by a lookup on the word index rather than
		 * computed iteratively; the tables below enumerate the round
		 * constants of FIPS 197 Sec 5.2 for each key size.
		 */
		if (Nr == 14) {
			switch (i) {
			case 8:
				Rcon = 0x01000000;
				break;

			case 16:
				Rcon = 0x02000000;
				break;

			case 24:
				Rcon = 0x04000000;
				break;

			case 32:
				Rcon = 0x08000000;
				break;

			case 40:
				Rcon = 0x10000000;
				break;

			case 48:
				Rcon = 0x20000000;
				break;

			case 56:
				Rcon = 0x40000000;
				break;
			}
		} else if (Nr == 12) {
			switch (i) {
			case 6:
				Rcon = 0x01000000;
				break;

			case 12:
				Rcon = 0x02000000;
				break;

			case 18:
				Rcon = 0x04000000;
				break;

			case 24:
				Rcon = 0x08000000;
				break;

			case 30:
				Rcon = 0x10000000;
				break;

			case 36:
				Rcon = 0x20000000;
				break;

			case 42:
				Rcon = 0x40000000;
				break;

			case 48:
				Rcon = 0x80000000;
				break;
			}
		} else if (Nr == 10) {
			switch (i) {
			case 4:
				Rcon = 0x01000000;
				break;

			case 8:
				Rcon = 0x02000000;
				break;

			case 12:
				Rcon = 0x04000000;
				break;

			case 16:
				Rcon = 0x08000000;
				break;

			case 20:
				Rcon = 0x10000000;
				break;

			case 24:
				Rcon = 0x20000000;
				break;

			case 28:
				Rcon = 0x40000000;
				break;

			case 32:
				Rcon = 0x80000000;
				break;

			case 36:
				Rcon = 0x1b000000;
				break;

			case 40:
				Rcon = 0x36000000;
				break;
			}
		}

		if ((i % Nk) == 0) {
			/* temp = SubWord(RotWord(temp)) xor Rcon */
			data_in   = temp;
			MSB_store = (data_in >> 24 & 0xff);
			rot_data  = (data_in << 8) | MSB_store;
			byte_for_sub = rot_data;
			word_sub[0] = _s_box[(byte_for_sub & 0xff)];
			word_sub[1] = (_s_box[((byte_for_sub & 0xff00) >> 8)]
							<< 8);
			word_sub[2] = (_s_box[((byte_for_sub & 0xff0000) >> 16)]
							<< 16);
			word_sub[3] = (_s_box[((byte_for_sub & 0xff000000) >>
							24)] << 24);
			word_sub[0] = word_sub[0] | word_sub[1] | word_sub[2] |
							word_sub[3];
			temp = word_sub[0] ^ Rcon;
		} else if ((Nk > 6) && ((i % Nk) == 4)) {
			/* AES-256 only: temp = SubWord(temp) */
			byte_for_sub = temp;
			word_sub[0] = _s_box[(byte_for_sub & 0xff)];
			word_sub[1] = (_s_box[((byte_for_sub & 0xff00) >> 8)]
							<< 8);
			word_sub[2] = (_s_box[((byte_for_sub & 0xff0000) >> 16)]
							<< 16);
			word_sub[3] = (_s_box[((byte_for_sub & 0xff000000) >>
							24)] << 24);
			word_sub[0] = word_sub[0] | word_sub[1] | word_sub[2] |
							word_sub[3];
			temp = word_sub[0];
		}

		AES_RND_KEY[i] = AES_RND_KEY[i-Nk]^temp;
		i = i+1;
	}
}

/*
 * Pack a byte stream into big-endian (network order) 32-bit words.
 * A trailing partial word (len % 4 bytes) is packed into the high bytes of
 * one final word; the remaining low bytes of that word are NOT written.
 */
static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned n;

	n = len / sizeof(uint32_t) ;
	for (; n > 0; n--) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000) |
				(((*(b+2)) << 8) & 0xff00) |
				(*(b+3) & 0xff);
		b += sizeof(uint32_t);
		iv++;
	}

	n = len % sizeof(uint32_t);
	if (n == 3) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000) |
				(((*(b+2)) << 8) & 0xff00) ;
	} else if (n == 2) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000) ;
	} else if (n == 1) {
		*iv = ((*b << 24) & 0xff000000) ;
	}
}

/*
 * Inverse of _byte_stream_to_net_words(): unpack big-endian 32-bit words
 * back into a byte stream, emitting only len bytes.
 */
static void _net_words_to_byte_stream(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned n = len / sizeof(uint32_t);

	for (; n > 0; n--) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b++ = (unsigned char) ((*iv >> 16) & 0xff);
		*b++ = (unsigned
char) ((*iv >> 8) & 0xff);
		*b++ = (unsigned char) (*iv & 0xff);
		iv++;
	}
	n = len % sizeof(uint32_t);
	if (n == 3) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b++ = (unsigned char) ((*iv >> 16) & 0xff);
		*b = (unsigned char) ((*iv >> 8) & 0xff);
	} else if (n == 2) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b = (unsigned char) ((*iv >> 16) & 0xff);
	} else if (n == 1) {
		*b = (unsigned char) ((*iv >> 24) & 0xff);
	}
}

/*
 * Count the scatterlist entries needed to cover nbytes.
 * NOTE(review): assumes the list holds at least nbytes in total; if it is
 * shorter, sg_next() would eventually return NULL and the next iteration
 * would dereference it — callers must guarantee the length.
 */
static int count_sg(struct scatterlist *sg, int nbytes)
{
	int i;

	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
		nbytes -= sg->length;
	return i;
}

/*
 * Fill the dma_address of each scatterlist entry directly from the PMEM
 * buffer info, bypassing the IOMMU/dma API mapping step.
 * NOTE(review): uses pmem->offset verbatim as the bus address — presumably
 * the PMEM region is physically contiguous and offset is already a physical
 * address; confirm against the qcedev PMEM ioctl path.
 */
static int dma_map_pmem_sg(struct buf_info *pmem, unsigned entries,
						struct scatterlist *sg)
{
	int i = 0;

	for (i = 0; i < entries; i++) {
		sg->dma_address = (dma_addr_t)pmem->offset;
		sg++;
		pmem++;
	}
	return 0;
}

/*
 * Identify the CE core and cache its capabilities (fast AES, HMAC-SHA1,
 * OTA f8/f9) in *pce_dev.  Returns 0 on success, -EIO if the status
 * register does not identify a known crypto core.
 */
static int _probe_ce_engine(struct qce_device *pce_dev)
{
	unsigned int val;
	unsigned int rev;
	unsigned int eng_availability;	/* engine available functions    */

	val = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if ((val & 0xfffffff) != 0x0200004) {
		dev_err(pce_dev->pdev,
				"unknown Qualcomm crypto device at 0x%x 0x%x\n",
				pce_dev->phy_iobase, val);
		return -EIO;
	};
	rev = (val & CRYPTO_CORE_REV_MASK) >> CRYPTO_CORE_REV;
	if (rev == 0x2) {
		dev_info(pce_dev->pdev,
				"Qualcomm Crypto 3e device found at 0x%x\n",
				pce_dev->phy_iobase);
	} else if (rev == 0x1) {
		dev_info(pce_dev->pdev,
				"Qualcomm Crypto 3 device found at 0x%x\n",
				pce_dev->phy_iobase);
	} else if (rev == 0x0) {
		dev_info(pce_dev->pdev,
				"Qualcomm Crypto 2 device found at 0x%x\n",
				pce_dev->phy_iobase);
	} else {
		dev_err(pce_dev->pdev,
				"unknown Qualcomm crypto device at 0x%x\n",
				pce_dev->phy_iobase);
		return -EIO;
	}

	eng_availability = readl_relaxed(pce_dev->iobase +
			CRYPTO_ENGINES_AVAIL);

	if (((eng_availability & CRYPTO_AES_SEL_MASK) >> CRYPTO_AES_SEL)
			== CRYPTO_AES_SEL_FAST)
		pce_dev->fastaes = 1;
	else
		pce_dev->fastaes = 0;

	if (eng_availability & (1 << CRYPTO_HMAC_SEL))
		pce_dev->hmac = 1;
	else
		pce_dev->hmac = 0;

	/* OTA support requires both the f9 (integrity) and f8 (cipher)
	 * engines to be present. */
	if ((eng_availability & (1 << CRYPTO_F9_SEL)) &&
			(eng_availability & (1 << CRYPTO_F8_SEL)))
		pce_dev->ota = true;
	else
		pce_dev->ota = false;

	/* invalidate the AES key cache */
	pce_dev->aes_key_size = 0;

	return 0;
};

/*
 * Reset the CE, program the interrupt-mask configuration and re-probe the
 * core.  Reading back CRYPTO_CONFIG_REG verifies the write took effect.
 * Returns 0 on success, -EIO on failure.
 */
static int _init_ce_engine(struct qce_device *pce_dev)
{
	unsigned int val;

	/* reset qce */
	writel_relaxed(1 << CRYPTO_SW_RST, pce_dev->iobase +
			CRYPTO_CONFIG_REG);

	/* Ensure previous instruction (write to reset bit)
	 * was completed.
	 */
	mb();

	/* configure ce */
	val = (1 << CRYPTO_MASK_DOUT_INTR) | (1 << CRYPTO_MASK_DIN_INTR) |
			(1 << CRYPTO_MASK_AUTH_DONE_INTR) |
			(1 << CRYPTO_MASK_ERR_INTR);
	writel_relaxed(val, pce_dev->iobase + CRYPTO_CONFIG_REG);

	if (_probe_ce_engine(pce_dev) < 0)
		return -EIO;
	if (readl_relaxed(pce_dev->iobase + CRYPTO_CONFIG_REG) != val) {
		dev_err(pce_dev->pdev,
				"unknown Qualcomm crypto device at 0x%x\n",
				pce_dev->phy_iobase);
		return -EIO;
	};
	return 0;
};

/*
 * Program the CE registers for one SHA-1/SHA-256 hash pass and kick the
 * engine (GO).  The data itself is delivered afterwards by DMA.
 * Enables the CE clock; the completion path disables it.
 * Returns 0 on success, -EIO/-EINVAL on bad request, or clk_enable() error.
 */
static int _sha_ce_setup(struct qce_device *pce_dev, struct qce_sha_req *sreq)
{
	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
	uint32_t diglen;
	int rc;
	int i;
	uint32_t cfg = 0;

	/* if not the last, the size has to be on the block boundary */
	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
		return -EIO;

	switch (sreq->alg) {
	case QCE_HASH_SHA1:
		diglen = SHA1_DIGEST_SIZE;
		break;
	case QCE_HASH_SHA256:
		diglen = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * write 20/32 bytes, 5/8 words into auth_iv
	 * for SHA1/SHA256
	 */
	if (sreq->first_blk) {
		/* first block: standard FIPS 180-2 initial hash value */
		if (sreq->alg == QCE_HASH_SHA1) {
			for (i = 0; i < 5; i++)
				auth32[i] = _std_init_vector_sha1[i];
		} else {
			for (i = 0; i < 8; i++)
				auth32[i] = _std_init_vector_sha256[i];
		}
	} else
		/* continuation: resume from the caller-supplied digest */
		_byte_stream_to_net_words(auth32, sreq->digest, diglen);

	rc = clk_enable(pce_dev->ce_clk);
	if (rc)
		return rc;

	writel_relaxed(auth32[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
	writel_relaxed(auth32[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
	writel_relaxed(auth32[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
	writel_relaxed(auth32[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
	writel_relaxed(auth32[4],
			pce_dev->iobase + CRYPTO_AUTH_IV4_REG);

	if (sreq->alg == QCE_HASH_SHA256) {
		writel_relaxed(auth32[5], pce_dev->iobase +
							CRYPTO_AUTH_IV5_REG);
		writel_relaxed(auth32[6], pce_dev->iobase +
							CRYPTO_AUTH_IV6_REG);
		writel_relaxed(auth32[7], pce_dev->iobase +
							CRYPTO_AUTH_IV7_REG);
	}

	/* write auth_bytecnt 0/1, start with 0 */
	writel_relaxed(sreq->auth_data[0], pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT0_REG);
	writel_relaxed(sreq->auth_data[1], pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT1_REG);

	/* write auth_seg_cfg */
	writel_relaxed(sreq->size << CRYPTO_AUTH_SEG_SIZE,
			pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/*
	 * write seg_cfg
	 * NOTE(review): the SHA256 branch uses '=' where the SHA1 branch uses
	 * '|='; behaviorally equivalent only because cfg is still 0 here.
	 */
	if (sreq->alg == QCE_HASH_SHA1)
		cfg |= (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE);
	else
		cfg = (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE);
	if (sreq->first_blk)
		cfg |= 1 << CRYPTO_FIRST;
	if (sreq->last_blk)
		cfg |= 1 << CRYPTO_LAST;
	cfg |= CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG;
	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);

	/* write seg_size   */
	writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* issue go to crypto   */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);

	/* Ensure previous instructions (setting the GO register)
	 * was completed before issuing a DMA transfer request
	 */
	mb();
	return 0;
}

/*
 * Program the CE registers for one cipher (or AEAD) operation and kick the
 * engine (GO).  Handles DES/3DES/AES in ECB/CBC/CTR, the HMAC-SHA1 auth
 * setup for AEAD, the all-zero-key -> hardware-key convention, and the
 * cached AES round-key optimization (keys are only rewritten to the engine
 * when they change; expansion is done in software unless fastaes).
 *
 * totallen: total segment size; coffset: cipher offset within the segment.
 * Enables the CE clock; the completion path disables it.
 */
static int _ce_setup(struct qce_device *pce_dev, struct qce_req *q_req,
		uint32_t totallen, uint32_t coffset)
{
	uint32_t hmackey[HMAC_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0};
	uint32_t enckey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0};
	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
			0, 0, 0, 0};
	uint32_t enck_size_in_word = q_req->encklen / sizeof(uint32_t);
	int aes_key_chg;
	int i, rc;
	uint32_t aes_round_key[CRYPTO_AES_RNDKEYS];
	uint32_t cfg;
	uint32_t ivsize = q_req->ivsize;

	rc = clk_enable(pce_dev->ce_clk);
	if (rc)
		return rc;

	cfg = (1 << CRYPTO_FIRST) | (1 << CRYPTO_LAST);
	if (q_req->op == QCE_REQ_AEAD) {

		/* do authentication setup */
		cfg |= (CRYPTO_AUTH_SIZE_HMAC_SHA1 << CRYPTO_AUTH_SIZE)|
				(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG);

		/* write sha1 init vector */
		writel_relaxed(_std_init_vector_sha1[0],
				pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
		writel_relaxed(_std_init_vector_sha1[1],
				pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
		writel_relaxed(_std_init_vector_sha1[2],
				pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
		writel_relaxed(_std_init_vector_sha1[3],
				pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
		writel_relaxed(_std_init_vector_sha1[4],
				pce_dev->iobase + CRYPTO_AUTH_IV4_REG);

		/* write hmac key (occupies AUTH_IV5..IV9) */
		_byte_stream_to_net_words(hmackey, q_req->authkey,
						q_req->authklen);
		writel_relaxed(hmackey[0], pce_dev->iobase +
							CRYPTO_AUTH_IV5_REG);
		writel_relaxed(hmackey[1], pce_dev->iobase +
							CRYPTO_AUTH_IV6_REG);
		writel_relaxed(hmackey[2], pce_dev->iobase +
							CRYPTO_AUTH_IV7_REG);
		writel_relaxed(hmackey[3], pce_dev->iobase +
							CRYPTO_AUTH_IV8_REG);
		writel_relaxed(hmackey[4], pce_dev->iobase +
							CRYPTO_AUTH_IV9_REG);
		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);

		/* write auth_seg_cfg */
		writel_relaxed((totallen << CRYPTO_AUTH_SEG_SIZE) & 0xffff0000,
				pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
	}

	_byte_stream_to_net_words(enckey32, q_req->enckey, q_req->encklen);

	switch (q_req->mode) {
	case QCE_MODE_ECB:
		cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_CBC:
		cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_CTR:
	default:
		cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
		break;
	}
	pce_dev->mode = q_req->mode;

	switch (q_req->alg) {
	case CIPHER_ALG_DES:
		if (q_req->mode !=  QCE_MODE_ECB) {
			/* DES IV is 64 bits -> two counter/IV registers */
			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
						CRYPTO_CNTR1_IV1_REG);
		}
		writel_relaxed(enckey32[0], pce_dev->iobase +
							CRYPTO_DES_KEY0_REG);
		writel_relaxed(enckey32[1], pce_dev->iobase +
							CRYPTO_DES_KEY1_REG);
		cfg |= ((CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
		break;

	case CIPHER_ALG_3DES:
		if (q_req->mode !=  QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
						CRYPTO_CNTR1_IV1_REG);
		}
		/* three 64-bit DES keys back to back */
		writel_relaxed(enckey32[0], pce_dev->iobase +
							CRYPTO_DES_KEY0_REG);
		writel_relaxed(enckey32[1], pce_dev->iobase +
							CRYPTO_DES_KEY1_REG);
		writel_relaxed(enckey32[2], pce_dev->iobase +
							CRYPTO_DES_KEY2_REG);
		writel_relaxed(enckey32[3], pce_dev->iobase +
							CRYPTO_DES_KEY3_REG);
		writel_relaxed(enckey32[4], pce_dev->iobase +
							CRYPTO_DES_KEY4_REG);
		writel_relaxed(enckey32[5], pce_dev->iobase +
							CRYPTO_DES_KEY5_REG);
		cfg |= ((CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
		break;

	case CIPHER_ALG_AES:
	default:
		if (q_req->mode !=  QCE_MODE_ECB) {
			/* AES IV/counter is 128 bits -> four registers */
			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
						CRYPTO_CNTR1_IV1_REG);
			writel_relaxed(enciv32[2], pce_dev->iobase +
						CRYPTO_CNTR2_IV2_REG);
			writel_relaxed(enciv32[3], pce_dev->iobase +
						CRYPTO_CNTR3_IV3_REG);
		}
		/* set number of counter bits */
		writel_relaxed(0xffff, pce_dev->iobase + CRYPTO_CNTR_MASK_REG);

		if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
			cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
					CRYPTO_ENCR_KEY_SZ);
			cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
		} else {
			switch (q_req->encklen) {
			case AES128_KEY_SIZE:
				cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
						CRYPTO_ENCR_KEY_SZ);
				break;

			case AES192_KEY_SIZE:
				cfg |= (CRYPTO_ENCR_KEY_SZ_AES192 <<
						CRYPTO_ENCR_KEY_SZ);
				break;

			case AES256_KEY_SIZE:
			default:
				cfg |= (CRYPTO_ENCR_KEY_SZ_AES256 <<
						CRYPTO_ENCR_KEY_SZ);

				/* check for null key. If null, use hw key*/
				for (i = 0; i < enck_size_in_word; i++) {
					if (enckey32[i] != 0)
						break;
				}
				if (i == enck_size_in_word)
					cfg |= 1 << CRYPTO_USE_HW_KEY;
				break;
			} /* end of switch (q_req->encklen) */

			cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;

			/* compare against the cached key to decide whether
			 * the (expensive) round-key programming is needed */
			if (pce_dev->aes_key_size != q_req->encklen)
				aes_key_chg = 1;
			else {
				for (i = 0; i < enck_size_in_word; i++) {
					if (enckey32[i] != pce_dev->aeskey[i])
						break;
				}
				aes_key_chg = (i == enck_size_in_word) ? 0 : 1;
			}

			if (aes_key_chg) {
				if (pce_dev->fastaes) {
					/* fast AES expands in hardware: raw
					 * key words go straight in */
					for (i = 0; i < enck_size_in_word;
							i++) {
						writel_relaxed(enckey32[i],
							pce_dev->iobase +
							CRYPTO_AES_RNDKEY0 +
							(i * sizeof(uint32_t)));
					}
				} else {
					/* size in bit */
					_aes_expand_key_schedule(
						q_req->encklen * 8,
						enckey32, aes_round_key);
					for (i = 0; i < CRYPTO_AES_RNDKEYS;
							i++) {
						writel_relaxed(aes_round_key[i],
							pce_dev->iobase +
							CRYPTO_AES_RNDKEY0 +
							(i * sizeof(uint32_t)));
					}
				}

				/* refresh the key cache */
				pce_dev->aes_key_size = q_req->encklen;
				for (i = 0; i < enck_size_in_word; i++)
					pce_dev->aeskey[i] = enckey32[i];
			} /*if (aes_key_chg) { */
		} /* else of if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
		break;
	} /* end of switch (q_req->mode)  */

	if (q_req->dir == QCE_ENCRYPT)
		cfg |= (1 << CRYPTO_AUTH_POS);
	cfg |= ((q_req->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;

	/* write encr seg cfg */
	writel_relaxed((q_req->cryptlen << CRYPTO_ENCR_SEG_SIZE) |
			(coffset & 0xffff),      /* cipher offset */
			pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* write seg cfg and size */
	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
	writel_relaxed(totallen, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* issue go to crypto   */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);

	/* Ensure previous instructions (setting the GO register)
	 * was completed before issuing a DMA transfer request
	 */
	mb();
	return 0;
};

/*
 * Completion handler for an AEAD request: unmap all DMA buffers, check the
 * CE error status, read back the output IV (non-ECB) and invoke the client
 * callback.  Disables the CE clock enabled by _ce_setup().
 */
static int _aead_complete(struct qce_device *pce_dev)
{
	struct aead_request *areq;
	struct crypto_aead *aead;
	uint32_t ivsize;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];
	uint32_t status;

	areq = (struct aead_request *) pce_dev->areq;
	aead = crypto_aead_reqtfm(areq);
	ivsize = crypto_aead_ivsize(aead);

	if (areq->src != areq->dst) {
		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
					DMA_FROM_DEVICE);
	}
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ?
					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in, ivsize,
			DMA_TO_DEVICE);
	dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
			DMA_TO_DEVICE);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status%x\n",
			pce_dev->phy_iobase, status);
		/* re-initialize the engine after a hardware error */
		_init_ce_engine(pce_dev);
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO);
		return 0;
	};

	/* get iv out */
	if (pce_dev->mode == QCE_MODE_ECB) {
		/* ECB has no chaining value to return */
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
				pce_dev->chan_ce_in_status |
				pce_dev->chan_ce_out_status);
	} else {

		iv_out[0] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR0_IV0_REG);
		iv_out[1] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR1_IV1_REG);
		iv_out[2] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR2_IV2_REG);
		iv_out[3] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR3_IV3_REG);

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, iv,
				pce_dev->chan_ce_in_status |
				pce_dev->chan_ce_out_status);
	};
	return 0;
};

/*
 * Completion handler for a hash request: unmap the source, check CE error
 * status, read back the running byte counts and hand the digest (left in
 * the coherent dig_result buffer by DMA) to the client callback.
 */
static void _sha_complete(struct qce_device *pce_dev)
{
	struct ahash_request *areq;
	uint32_t auth_data[2];
	uint32_t status;

	areq = (struct ahash_request *) pce_dev->areq;
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
				DMA_TO_DEVICE);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status%x\n",
			pce_dev->phy_iobase, status);
		_init_ce_engine(pce_dev);
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO);
		return;
	};

	auth_data[0] = readl_relaxed(pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT0_REG);
	auth_data[1] = readl_relaxed(pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT1_REG);
	/* Ensure previous instruction (retrieving byte count information)
	 * was completed before disabling the clk.
	 */
	mb();
	clk_disable(pce_dev->ce_clk);
	pce_dev->qce_cb(areq,  pce_dev->dig_result, (unsigned char *)auth_data,
				pce_dev->chan_ce_in_status);
};

/*
 * Completion handler for an ablkcipher request (scatterlist path): unmap
 * the DMA buffers, check CE error status, read back the output IV
 * (non-ECB) and invoke the client callback.
 */
static int _ablk_cipher_complete(struct qce_device *pce_dev)
{
	struct ablkcipher_request *areq;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];
	uint32_t status;

	areq = (struct ablkcipher_request *) pce_dev->areq;

	if (areq->src != areq->dst) {
		dma_unmap_sg(pce_dev->pdev, areq->dst,
			pce_dev->dst_nents, DMA_FROM_DEVICE);
	}
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status%x\n",
			pce_dev->phy_iobase, status);
		_init_ce_engine(pce_dev);
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, NULL, -ENXIO);
		return 0;
	};

	/* get iv out */
	if (pce_dev->mode == QCE_MODE_ECB) {
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	} else {
		iv_out[0] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR0_IV0_REG);
		iv_out[1] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR1_IV1_REG);
		iv_out[2] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR2_IV2_REG);
		iv_out[3] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR3_IV3_REG);

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	}
	return 0;
};

/*
 * Completion handler for the PMEM ablkcipher path.  Identical to
 * _ablk_cipher_complete() except no dma_unmap is needed: the PMEM buffers
 * were never mapped through the DMA API (see dma_map_pmem_sg()).
 */
static int _ablk_cipher_use_pmem_complete(struct qce_device *pce_dev)
{
	struct ablkcipher_request *areq;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];
	uint32_t status;

	areq = (struct ablkcipher_request *) pce_dev->areq;

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase +
CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status%x\n",
			pce_dev->phy_iobase, status);
		_init_ce_engine(pce_dev);
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, NULL, -ENXIO);
		return 0;
	};

	/* get iv out */
	if (pce_dev->mode == QCE_MODE_ECB) {
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	} else {
		iv_out[0] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR0_IV0_REG);
		iv_out[1] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR1_IV1_REG);
		iv_out[2] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR2_IV2_REG);
		iv_out[3] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR3_IV3_REG);

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	}
	return 0;
}

/*
 * Split a run longer than QCE_FIFO_SIZE across several consecutive
 * descriptors, advancing *index as new descriptors are consumed.
 * paddr == 0 means "keep each descriptor's existing addr" (used when the
 * caller pre-programmed addresses); otherwise addresses advance by
 * QCE_FIFO_SIZE per descriptor.  Returns -ENOMEM when the descriptor
 * array (QCE_MAX_NUM_DESC) would overflow.
 */
static int qce_split_and_insert_dm_desc(struct dmov_desc *pdesc,
		unsigned int plen, unsigned int paddr, int *index)
{
	while (plen > QCE_FIFO_SIZE) {
		pdesc->len = QCE_FIFO_SIZE;
		if (paddr > 0) {
			pdesc->addr = paddr;
			paddr += QCE_FIFO_SIZE;
		}
		plen -= pdesc->len;
		if (plen > 0) {
			*index = (*index) + 1;
			if ((*index) >= QCE_MAX_NUM_DESC)
				return -ENOMEM;
			pdesc++;
		}
	}
	if ((plen > 0) && (plen <= QCE_FIFO_SIZE)) {
		pdesc->len = plen;
		if (paddr > 0)
			pdesc->addr = paddr;
	}
	return 0;
}

/*
 * Append nbytes from a mapped scatterlist to the ce_in source descriptor
 * chain.  Physically adjacent chunks are merged into the previous
 * descriptor; over-long descriptors are split at QCE_FIFO_SIZE.
 * Returns 0, -ENOMEM on descriptor-array overflow, or -EIO if splitting
 * fails.
 */
static int _chain_sg_buffer_in(struct qce_device *pce_dev,
		struct scatterlist *sg, unsigned int nbytes)
{
	unsigned int len;
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
	/*
	 * Two consecutive chunks may be handled by the old
	 * buffer descriptor.
	 */
	while (nbytes > 0) {
		len = min(nbytes, sg_dma_len(sg));
		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
		nbytes -= len;
		if (dlen == 0) {
			/* fresh descriptor */
			pdesc->addr  = sg_dma_address(sg);
			pdesc->len = len;
			if (pdesc->len > QCE_FIFO_SIZE) {
				if (qce_split_and_insert_dm_desc(pdesc,
						pdesc->len, sg_dma_address(sg),
						&pce_dev->ce_in_src_desc_index))
					return -EIO;
			}
		} else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
			/* contiguous with previous chunk: extend in place */
			pdesc->len  = dlen + len;
			if (pdesc->len > QCE_FIFO_SIZE) {
				if (qce_split_and_insert_dm_desc(pdesc,
						pdesc->len, pdesc->addr,
						&pce_dev->ce_in_src_desc_index))
					return -EIO;
			}
		} else {
			pce_dev->ce_in_src_desc_index++;
			if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
				return -ENOMEM;
			pdesc++;
			pdesc->len = len;
			pdesc->addr = sg_dma_address(sg);
			if (pdesc->len > QCE_FIFO_SIZE) {
				if (qce_split_and_insert_dm_desc(pdesc,
						pdesc->len, sg_dma_address(sg),
						&pce_dev->ce_in_src_desc_index))
					return -EIO;
			}
		}
		if (nbytes > 0)
			sg = sg_next(sg);
	}
	return 0;
}

/*
 * Append a single physically contiguous (PMEM) region to the ce_in source
 * descriptor chain, merging with the previous descriptor when adjacent.
 * NOTE(review): returns -ENOMEM on overflow where the out-direction twin
 * (_chain_pm_buffer_out) returns -EIO for the same condition.
 */
static int _chain_pm_buffer_in(struct qce_device *pce_dev,
		unsigned int pmem, unsigned int nbytes)
{
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
	dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
	if (dlen == 0) {
		pdesc->addr  = pmem;
		pdesc->len = nbytes;
	} else if (pmem == (pdesc->addr + dlen)) {
		pdesc->len  = dlen + nbytes;
	} else {
		pce_dev->ce_in_src_desc_index++;
		if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
			return -ENOMEM;
		pdesc++;
		pdesc->len = nbytes;
		pdesc->addr = pmem;
	}
	return 0;
}

/* Reset the ce_in descriptor chain before building a new transfer. */
static void _chain_buffer_in_init(struct qce_device *pce_dev)
{
	struct dmov_desc *pdesc;

	pce_dev->ce_in_src_desc_index = 0;
	pce_dev->ce_in_dst_desc_index = 0;
	pdesc = pce_dev->ce_in_src_desc;
	pdesc->len = 0;
}

/*
 * Finalize the ce_in chain: mark the last source and destination
 * descriptors with ADM_DESC_LAST, program the destination side for `total`
 * bytes into the CE, and set/clear CMD_LC on the command list depending on
 * whether one or two DM commands are used.
 */
static void _ce_in_final(struct qce_device *pce_dev, int ncmd, unsigned total)
{
	struct dmov_desc *pdesc;
	dmov_sg *pcmd;

	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
	pdesc->len |= ADM_DESC_LAST;

	pdesc = pce_dev->ce_in_dst_desc;
	if (total > QCE_FIFO_SIZE) {
		qce_split_and_insert_dm_desc(pdesc, total, 0,
				&pce_dev->ce_in_dst_desc_index);
		pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
		pdesc->len |= ADM_DESC_LAST;
	} else
		pdesc->len = ADM_DESC_LAST | total;

	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
	if (ncmd == 1)
		pcmd->cmd |= CMD_LC;
	else {
		dmov_s *pscmd;

		/* two commands: LC moves to the trailing simple command */
		pcmd->cmd &= ~CMD_LC;
		pcmd++;
		pscmd = (dmov_s *)pcmd;
		pscmd->cmd |= CMD_LC;
	}

#ifdef QCE_DEBUG
	dev_info(pce_dev->pdev, "_ce_in_final %d\n",
					pce_dev->ce_in_src_desc_index);
#endif
}

#ifdef QCE_DEBUG
/* Dump the ce_in src/dst descriptor chains (debug builds only). */
static void _ce_in_dump(struct qce_device *pce_dev)
{
	int i;
	struct dmov_desc *pdesc;

	dev_info(pce_dev->pdev, "_ce_in_dump: src\n");
	for (i = 0; i <= pce_dev->ce_in_src_desc_index; i++) {
		pdesc = pce_dev->ce_in_src_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}
	dev_info(pce_dev->pdev, "_ce_in_dump: dst\n");
	for (i = 0; i <= pce_dev->ce_in_dst_desc_index; i++) {
		pdesc = pce_dev->ce_in_dst_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}
};

/* Dump the ce_out src/dst descriptor chains (debug builds only). */
static void _ce_out_dump(struct qce_device *pce_dev)
{
	int i;
	struct dmov_desc *pdesc;

	dev_info(pce_dev->pdev, "_ce_out_dump: src\n");
	for (i = 0; i <= pce_dev->ce_out_src_desc_index; i++) {
		pdesc = pce_dev->ce_out_src_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}
	dev_info(pce_dev->pdev, "_ce_out_dump: dst\n");
	for (i = 0; i <= pce_dev->ce_out_dst_desc_index; i++) {
		pdesc = pce_dev->ce_out_dst_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}
};
#endif

/*
 * Append nbytes from a mapped scatterlist to the ce_out destination
 * descriptor chain.  Mirror of _chain_sg_buffer_in() for the output
 * direction; note overflow here returns -EIO, not -ENOMEM.
 */
static int _chain_sg_buffer_out(struct qce_device *pce_dev,
		struct scatterlist *sg, unsigned int nbytes)
{
	unsigned int len;
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
	/*
	 * Two consecutive chunks may be handled by the old
	 * buffer descriptor.
	 */
	while (nbytes > 0) {
		len = min(nbytes, sg_dma_len(sg));
		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
		nbytes -= len;
		if (dlen == 0) {
			pdesc->addr  = sg_dma_address(sg);
			pdesc->len = len;
			if (pdesc->len > QCE_FIFO_SIZE) {
				if (qce_split_and_insert_dm_desc(pdesc,
						pdesc->len, sg_dma_address(sg),
						&pce_dev->ce_out_dst_desc_index))
					return -EIO;
			}
		} else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
			pdesc->len  = dlen + len;
			if (pdesc->len > QCE_FIFO_SIZE) {
				if (qce_split_and_insert_dm_desc(pdesc,
						pdesc->len, pdesc->addr,
						&pce_dev->ce_out_dst_desc_index))
					return -EIO;
			}
		} else {
			pce_dev->ce_out_dst_desc_index++;
			if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
				return -EIO;
			pdesc++;
			pdesc->len = len;
			pdesc->addr = sg_dma_address(sg);
			if (pdesc->len > QCE_FIFO_SIZE) {
				if (qce_split_and_insert_dm_desc(pdesc,
						pdesc->len, sg_dma_address(sg),
						&pce_dev->ce_out_dst_desc_index))
					return -EIO;
			}
		}
		if (nbytes > 0)
			sg = sg_next(sg);
	}
	return 0;
}

/*
 * Append a single physically contiguous (PMEM) region to the ce_out
 * destination descriptor chain.  Mirror of _chain_pm_buffer_in().
 */
static int _chain_pm_buffer_out(struct qce_device *pce_dev,
		unsigned int pmem, unsigned int nbytes)
{
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
	dlen = pdesc->len & ADM_DESC_LENGTH_MASK;

	if (dlen == 0) {
		pdesc->addr  = pmem;
		pdesc->len = nbytes;
	} else if (pmem == (pdesc->addr + dlen)) {
		pdesc->len  = dlen + nbytes;
	} else {
		pce_dev->ce_out_dst_desc_index++;
		if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
			return -EIO;
		pdesc++;
		pdesc->len = nbytes;
		pdesc->addr = pmem;
	}
	return 0;
};

/* Reset the ce_out descriptor chain before building a new transfer. */
static void _chain_buffer_out_init(struct qce_device *pce_dev)
{
	struct dmov_desc *pdesc;

	pce_dev->ce_out_dst_desc_index = 0;
	pce_dev->ce_out_src_desc_index = 0;
	pdesc = pce_dev->ce_out_dst_desc;
	pdesc->len = 0;
};

/*
 * Finalize the ce_out chain: mark the last destination and source
 * descriptors with ADM_DESC_LAST, program the source side for `total`
 * bytes from the CE, and fix up CMD_LC on the command list.
 */
static void _ce_out_final(struct qce_device *pce_dev, int ncmd, unsigned total)
{
	struct dmov_desc *pdesc;
	dmov_sg *pcmd;

	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
	pdesc->len |= ADM_DESC_LAST;

	pdesc = pce_dev->ce_out_src_desc;
	if (total >
QCE_FIFO_SIZE) {
		qce_split_and_insert_dm_desc(pdesc, total, 0,
				&pce_dev->ce_out_src_desc_index);
		pdesc = pce_dev->ce_out_src_desc +
				pce_dev->ce_out_src_desc_index;
		pdesc->len |= ADM_DESC_LAST;
	} else
		pdesc->len = ADM_DESC_LAST | total;

	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
	if (ncmd == 1)
		pcmd->cmd |= CMD_LC;
	else {
		dmov_s *pscmd;

		/* two commands: LC moves to the trailing simple command */
		pcmd->cmd &= ~CMD_LC;
		pcmd++;
		pscmd = (dmov_s *)pcmd;
		pscmd->cmd |= CMD_LC;
	}
#ifdef QCE_DEBUG
	dev_info(pce_dev->pdev, "_ce_out_final %d\n",
			pce_dev->ce_out_dst_desc_index);
#endif
};

/*
 * ADM completion callback for the AEAD input channel.  Records the channel
 * status; the request completes only once BOTH channels have reported,
 * whichever callback fires second runs _aead_complete().
 */
static void _aead_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
							result);
		pce_dev->chan_ce_in_status = -1;
	} else
		pce_dev->chan_ce_in_status = 0;

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_aead_complete(pce_dev);
	}
};

/* ADM completion callback for the AEAD output channel (see above). */
static void _aead_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
							result);
		pce_dev->chan_ce_out_status = -1;
	} else {
		pce_dev->chan_ce_out_status = 0;
	};

	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_aead_complete(pce_dev);
	}
};

/*
 * ADM completion callback for hashing.  Hash uses only the input channel
 * (the digest is part of the input-channel destination), so completion is
 * immediate.
 */
static void _sha_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
						result);
		pce_dev->chan_ce_in_status = -1;
	} else
		pce_dev->chan_ce_in_status = 0;
	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
	_sha_complete(pce_dev);
};

/*
 * ADM completion callback for the ablkcipher input channel; completes the
 * request when the output channel has also finished.
 */
static void _ablk_cipher_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
						result);
		pce_dev->chan_ce_in_status = -1;
	} else
		pce_dev->chan_ce_in_status = 0;

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_ablk_cipher_complete(pce_dev);
	}
};

/* ADM completion callback for the ablkcipher output channel (see above). */
static void _ablk_cipher_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
						result);
		pce_dev->chan_ce_out_status = -1;
	} else {
		pce_dev->chan_ce_out_status = 0;
	};

	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_ablk_cipher_complete(pce_dev);
	}
};

/*
 * ADM completion callback for the ablkcipher input channel, PMEM variant;
 * routes completion to _ablk_cipher_use_pmem_complete().
 */
static void _ablk_cipher_ce_in_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
						result);
		pce_dev->chan_ce_in_status = -1;
	} else
		pce_dev->chan_ce_in_status = 0;

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state =
QCE_CHAN_STATE_IDLE; /* done */ _ablk_cipher_use_pmem_complete(pce_dev); } }; static void _ablk_cipher_ce_out_call_back_pmem(struct msm_dmov_cmd *cmd_ptr, unsigned int result, struct msm_dmov_errdata *err) { struct qce_device *pce_dev; pce_dev = (struct qce_device *) cmd_ptr->user; if (result != ADM_STATUS_OK) { dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", result); pce_dev->chan_ce_out_status = -1; } else { pce_dev->chan_ce_out_status = 0; }; pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP; if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) { pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE; pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE; /* done */ _ablk_cipher_use_pmem_complete(pce_dev); } }; static int _setup_cmd_template(struct qce_device *pce_dev) { dmov_sg *pcmd; dmov_s *pscmd; struct dmov_desc *pdesc; unsigned char *vaddr; int i = 0; /* Divide up the 4K coherent memory */ /* 1. ce_in channel 1st command src descriptors, 128 entries */ vaddr = pce_dev->coh_vmem; vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16); pce_dev->ce_in_src_desc = (struct dmov_desc *) vaddr; pce_dev->phy_ce_in_src_desc = pce_dev->coh_pmem + (vaddr - pce_dev->coh_vmem); vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC); /* 2. ce_in channel 1st command dst descriptor, 1 entry */ vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16); pce_dev->ce_in_dst_desc = (struct dmov_desc *) vaddr; pce_dev->phy_ce_in_dst_desc = pce_dev->coh_pmem + (vaddr - pce_dev->coh_vmem); vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC); /* * 3. ce_in channel command list of one scatter gather command * and one simple command. */ pce_dev->cmd_list_ce_in = vaddr; pce_dev->phy_cmd_list_ce_in = pce_dev->coh_pmem + (vaddr - pce_dev->coh_vmem); vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg); /* 4. authentication result. 
*/ pce_dev->dig_result = vaddr; pce_dev->phy_dig_result = pce_dev->coh_pmem + (vaddr - pce_dev->coh_vmem); vaddr = vaddr + SHA256_DIGESTSIZE; /* * 5. ce_out channel command list of one scatter gather command * and one simple command. */ pce_dev->cmd_list_ce_out = vaddr; pce_dev->phy_cmd_list_ce_out = pce_dev->coh_pmem + (vaddr - pce_dev->coh_vmem); vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg); /* 6. ce_out channel command src descriptors, 1 entry */ vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16); pce_dev->ce_out_src_desc = (struct dmov_desc *) vaddr; pce_dev->phy_ce_out_src_desc = pce_dev->coh_pmem + (vaddr - pce_dev->coh_vmem); vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC); /* 7. ce_out channel command dst descriptors, 128 entries. */ vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16); pce_dev->ce_out_dst_desc = (struct dmov_desc *) vaddr; pce_dev->phy_ce_out_dst_desc = pce_dev->coh_pmem + (vaddr - pce_dev->coh_vmem); vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC); /* 8. pad area. */ pce_dev->ce_pad = vaddr; pce_dev->phy_ce_pad = pce_dev->coh_pmem + (vaddr - pce_dev->coh_vmem); vaddr = vaddr + ADM_CE_BLOCK_SIZE; /* 9. ce_in channel command pointer list. */ vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 16); pce_dev->cmd_pointer_list_ce_in = (unsigned int *) vaddr; pce_dev->phy_cmd_pointer_list_ce_in = pce_dev->coh_pmem + (vaddr - pce_dev->coh_vmem); vaddr = vaddr + sizeof(unsigned char *); /* 10. ce_ou channel command pointer list. */ vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 16); pce_dev->cmd_pointer_list_ce_out = (unsigned int *) vaddr; pce_dev->phy_cmd_pointer_list_ce_out = pce_dev->coh_pmem + (vaddr - pce_dev->coh_vmem); vaddr = vaddr + sizeof(unsigned char *); /* 11. throw away area to store by-pass data from ce_out. 
*/ pce_dev->ce_out_ignore = (unsigned char *) vaddr; pce_dev->phy_ce_out_ignore = pce_dev->coh_pmem + (vaddr - pce_dev->coh_vmem); pce_dev->ce_out_ignore_size = (2 * PAGE_SIZE) - (vaddr - pce_dev->coh_vmem); /* at least 1.5 K of space */ /* * The first command of command list ce_in is for the input of * concurrent operation of encrypt/decrypt or for the input * of authentication. */ pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in; /* swap byte and half word , dst crci , scatter gather */ pcmd->cmd = CMD_DST_SWAP_BYTES | CMD_DST_SWAP_SHORTS | CMD_DST_CRCI(pce_dev->crci_in) | CMD_MODE_SG; pdesc = pce_dev->ce_in_src_desc; pdesc->addr = 0; /* to be filled in each operation */ pdesc->len = 0; /* to be filled in each operation */ pcmd->src_dscr = (unsigned) pce_dev->phy_ce_in_src_desc; pdesc = pce_dev->ce_in_dst_desc; for (i = 0; i < QCE_MAX_NUM_DESC; i++) { pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase); pdesc->len = 0; /* to be filled in each operation */ pdesc++; } pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_in_dst_desc; pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) | DST_INDEX_SG_CMD(0); pcmd++; /* * The second command is for the digested data of * hashing operation only. For others, this command is not used. */ pscmd = (dmov_s *) pcmd; /* last command, swap byte, half word, src crci, single */ pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS | CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE; pscmd->src = (unsigned) (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase); pscmd->len = SHA256_DIGESTSIZE; /* to be filled. 
*/ pscmd->dst = (unsigned) pce_dev->phy_dig_result; /* setup command pointer list */ *(pce_dev->cmd_pointer_list_ce_in) = (CMD_PTR_LP | DMOV_CMD_LIST | DMOV_CMD_ADDR((unsigned int) pce_dev->phy_cmd_list_ce_in)); pce_dev->chan_ce_in_cmd->user = (void *) pce_dev; pce_dev->chan_ce_in_cmd->exec_func = NULL; pce_dev->chan_ce_in_cmd->cmdptr = DMOV_CMD_ADDR( (unsigned int) pce_dev->phy_cmd_pointer_list_ce_in); /* * The first command in the command list ce_out. * It is for encry/decryp output. * If hashing only, ce_out is not used. */ pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out; /* swap byte, half word, source crci, scatter gather */ pcmd->cmd = CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS | CMD_SRC_CRCI(pce_dev->crci_out) | CMD_MODE_SG; pdesc = pce_dev->ce_out_src_desc; for (i = 0; i < QCE_MAX_NUM_DESC; i++) { pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase); pdesc->len = 0; /* to be filled in each operation */ pdesc++; } pcmd->src_dscr = (unsigned) pce_dev->phy_ce_out_src_desc; pdesc = pce_dev->ce_out_dst_desc; pdesc->addr = 0; /* to be filled in each operation */ pdesc->len = 0; /* to be filled in each operation */ pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_out_dst_desc; pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) | DST_INDEX_SG_CMD(0); pcmd++; /* * The second command is for digested data of esp operation. * For ciphering, this command is not used. 
*/ pscmd = (dmov_s *) pcmd; /* last command, swap byte, half word, src crci, single */ pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS | CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE; pscmd->src = (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase); pscmd->len = SHA1_DIGESTSIZE; /* we only support hmac(sha1) */ pscmd->dst = (unsigned) pce_dev->phy_dig_result; /* setup command pointer list */ *(pce_dev->cmd_pointer_list_ce_out) = (CMD_PTR_LP | DMOV_CMD_LIST | DMOV_CMD_ADDR((unsigned int)pce_dev-> phy_cmd_list_ce_out)); pce_dev->chan_ce_out_cmd->user = pce_dev; pce_dev->chan_ce_out_cmd->exec_func = NULL; pce_dev->chan_ce_out_cmd->cmdptr = DMOV_CMD_ADDR( (unsigned int) pce_dev->phy_cmd_pointer_list_ce_out); return 0; }; static int _qce_start_dma(struct qce_device *pce_dev, bool ce_in, bool ce_out) { if (ce_in) pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IN_PROG; else pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP; if (ce_out) pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IN_PROG; else pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP; if (ce_in) msm_dmov_enqueue_cmd(pce_dev->chan_ce_in, pce_dev->chan_ce_in_cmd); if (ce_out) msm_dmov_enqueue_cmd(pce_dev->chan_ce_out, pce_dev->chan_ce_out_cmd); return 0; }; static void _f9_complete(struct qce_device *pce_dev) { uint32_t mac_i; uint32_t status; dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, pce_dev->ota_size, DMA_TO_DEVICE); /* check ce error status */ status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG); if (status & (1 << CRYPTO_SW_ERR)) { pce_dev->err++; dev_err(pce_dev->pdev, "Qualcomm Crypto Error at 0x%x, status%x\n", pce_dev->phy_iobase, status); _init_ce_engine(pce_dev); pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO); return; }; mac_i = readl_relaxed(pce_dev->iobase + CRYPTO_AUTH_IV0_REG); pce_dev->qce_cb(pce_dev->areq, (void *) mac_i, NULL, pce_dev->chan_ce_in_status); }; static void _f8_complete(struct qce_device *pce_dev) { uint32_t status; if (pce_dev->phy_ota_dst != 0) 
dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, pce_dev->ota_size, DMA_FROM_DEVICE); if (pce_dev->phy_ota_src != 0) dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, pce_dev->ota_size, (pce_dev->phy_ota_dst) ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL); /* check ce error status */ status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG); if (status & (1 << CRYPTO_SW_ERR)) { pce_dev->err++; dev_err(pce_dev->pdev, "Qualcomm Crypto Error at 0x%x, status%x\n", pce_dev->phy_iobase, status); _init_ce_engine(pce_dev); pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO); return; }; pce_dev->qce_cb(pce_dev->areq, NULL, NULL, pce_dev->chan_ce_in_status | pce_dev->chan_ce_out_status); }; static void _f9_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr, unsigned int result, struct msm_dmov_errdata *err) { struct qce_device *pce_dev; pce_dev = (struct qce_device *) cmd_ptr->user; if (result != ADM_STATUS_OK) { dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", result); pce_dev->chan_ce_in_status = -1; } else pce_dev->chan_ce_in_status = 0; pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE; _f9_complete(pce_dev); }; static void _f8_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr, unsigned int result, struct msm_dmov_errdata *err) { struct qce_device *pce_dev; pce_dev = (struct qce_device *) cmd_ptr->user; if (result != ADM_STATUS_OK) { dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", result); pce_dev->chan_ce_in_status = -1; } else pce_dev->chan_ce_in_status = 0; pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP; if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) { pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE; pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE; /* done */ _f8_complete(pce_dev); } }; static void _f8_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr, unsigned int result, struct msm_dmov_errdata *err) { struct qce_device *pce_dev; pce_dev = (struct qce_device *) cmd_ptr->user; if (result != ADM_STATUS_OK) { dev_err(pce_dev->pdev, "Qualcomm ADM status error 
%x\n", result); pce_dev->chan_ce_out_status = -1; } else { pce_dev->chan_ce_out_status = 0; }; pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP; if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) { pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE; pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE; /* done */ _f8_complete(pce_dev); } }; static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req * req) { uint32_t cfg; uint32_t ikey[OTA_KEY_SIZE/sizeof(uint32_t)]; _byte_stream_to_net_words(ikey, &req->ikey[0], OTA_KEY_SIZE); writel_relaxed(ikey[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG); writel_relaxed(ikey[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG); writel_relaxed(ikey[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG); writel_relaxed(ikey[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG); writel_relaxed(req->last_bits, pce_dev->iobase + CRYPTO_AUTH_IV4_REG); writel_relaxed(req->fresh, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG); writel_relaxed(req->count_i, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG); /* write auth_seg_cfg */ writel_relaxed((uint32_t)req->msize << CRYPTO_AUTH_SEG_SIZE, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG); /* write seg_cfg */ cfg = (CRYPTO_AUTH_ALG_F9 << CRYPTO_AUTH_ALG) | (1 << CRYPTO_FIRST) | (1 << CRYPTO_LAST); if (req->algorithm == QCE_OTA_ALGO_KASUMI) cfg |= (CRYPTO_AUTH_SIZE_UIA1 << CRYPTO_AUTH_SIZE); else cfg |= (CRYPTO_AUTH_SIZE_UIA2 << CRYPTO_AUTH_SIZE) ; if (req->direction == QCE_OTA_DIR_DOWNLINK) cfg |= 1 << CRYPTO_F9_DIRECTION; writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG); /* write seg_size */ writel_relaxed(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG); /* issue go to crypto */ writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG); /* * barrier to ensure previous instructions * (including GO) to CE finish before issue DMA transfer * request. 
*/ mb(); return 0; }; static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req, bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset, uint16_t cipher_size) { uint32_t cfg; uint32_t ckey[OTA_KEY_SIZE/sizeof(uint32_t)]; if ((key_stream_mode && (req->data_len & 0xf || npkts > 1)) || (req->bearer >= QCE_OTA_MAX_BEARER)) return -EINVAL; /* write seg_cfg */ cfg = (CRYPTO_ENCR_ALG_F8 << CRYPTO_ENCR_ALG) | (1 << CRYPTO_FIRST) | (1 << CRYPTO_LAST); if (req->algorithm == QCE_OTA_ALGO_KASUMI) cfg |= (CRYPTO_ENCR_KEY_SZ_UEA1 << CRYPTO_ENCR_KEY_SZ); else cfg |= (CRYPTO_ENCR_KEY_SZ_UEA2 << CRYPTO_ENCR_KEY_SZ) ; if (key_stream_mode) cfg |= 1 << CRYPTO_F8_KEYSTREAM_ENABLE; if (req->direction == QCE_OTA_DIR_DOWNLINK) cfg |= 1 << CRYPTO_F8_DIRECTION; writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG); /* write seg_size */ writel_relaxed(req->data_len, pce_dev->iobase + CRYPTO_SEG_SIZE_REG); /* write 0 to auth_size, auth_offset */ writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG); /* write encr_seg_cfg seg_size, seg_offset */ writel_relaxed((((uint32_t) cipher_size) << CRYPTO_ENCR_SEG_SIZE) | (cipher_offset & 0xffff), pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG); /* write keys */ _byte_stream_to_net_words(ckey, &req->ckey[0], OTA_KEY_SIZE); writel_relaxed(ckey[0], pce_dev->iobase + CRYPTO_DES_KEY0_REG); writel_relaxed(ckey[1], pce_dev->iobase + CRYPTO_DES_KEY1_REG); writel_relaxed(ckey[2], pce_dev->iobase + CRYPTO_DES_KEY2_REG); writel_relaxed(ckey[3], pce_dev->iobase + CRYPTO_DES_KEY3_REG); /* write cntr0_iv0 for countC */ writel_relaxed(req->count_c, pce_dev->iobase + CRYPTO_CNTR0_IV0_REG); /* write cntr1_iv1 for nPkts, and bearer */ if (npkts == 1) npkts = 0; writel_relaxed(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER | npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT, pce_dev->iobase + CRYPTO_CNTR1_IV1_REG); /* issue go to crypto */ writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG); /* * barrier to ensure previous instructions 
* (including GO) to CE finish before issue DMA transfer * request. */ mb(); return 0; }; int qce_aead_req(void *handle, struct qce_req *q_req) { struct qce_device *pce_dev = (struct qce_device *) handle; struct aead_request *areq = (struct aead_request *) q_req->areq; struct crypto_aead *aead = crypto_aead_reqtfm(areq); uint32_t ivsize = crypto_aead_ivsize(aead); uint32_t totallen; uint32_t pad_len; uint32_t authsize = crypto_aead_authsize(aead); int rc = 0; q_req->ivsize = ivsize; if (q_req->dir == QCE_ENCRYPT) q_req->cryptlen = areq->cryptlen; else q_req->cryptlen = areq->cryptlen - authsize; totallen = q_req->cryptlen + ivsize + areq->assoclen; pad_len = ALIGN(totallen, ADM_CE_BLOCK_SIZE) - totallen; _chain_buffer_in_init(pce_dev); _chain_buffer_out_init(pce_dev); pce_dev->assoc_nents = 0; pce_dev->phy_iv_in = 0; pce_dev->src_nents = 0; pce_dev->dst_nents = 0; pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen); dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents, DMA_TO_DEVICE); if (_chain_sg_buffer_in(pce_dev, areq->assoc, areq->assoclen) < 0) { rc = -ENOMEM; goto bad; } /* cipher iv for input */ pce_dev->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv, ivsize, DMA_TO_DEVICE); if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_iv_in, ivsize) < 0) { rc = -ENOMEM; goto bad; } /* for output, ignore associated data and cipher iv */ if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_out_ignore, ivsize + areq->assoclen) < 0) { rc = -ENOMEM; goto bad; } /* cipher input */ pce_dev->src_nents = count_sg(areq->src, q_req->cryptlen); dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (_chain_sg_buffer_in(pce_dev, areq->src, q_req->cryptlen) < 0) { rc = -ENOMEM; goto bad; } /* cipher output */ if (areq->src != areq->dst) { pce_dev->dst_nents = count_sg(areq->dst, q_req->cryptlen); dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, DMA_FROM_DEVICE); }; if (_chain_sg_buffer_out(pce_dev, areq->dst, q_req->cryptlen) < 0) { rc = -ENOMEM; goto bad; } /* pad data */ if (pad_len) { if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad, pad_len) < 0) { rc = -ENOMEM; goto bad; } if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad, pad_len) < 0) { rc = -ENOMEM; goto bad; } } /* finalize the ce_in and ce_out channels command lists */ _ce_in_final(pce_dev, 1, ALIGN(totallen, ADM_CE_BLOCK_SIZE)); _ce_out_final(pce_dev, 2, ALIGN(totallen, ADM_CE_BLOCK_SIZE)); /* set up crypto device */ rc = _ce_setup(pce_dev, q_req, totallen, ivsize + areq->assoclen); if (rc < 0) goto bad; /* setup for callback, and issue command to adm */ pce_dev->areq = q_req->areq; pce_dev->qce_cb = q_req->qce_cb; pce_dev->chan_ce_in_cmd->complete_func = _aead_ce_in_call_back; pce_dev->chan_ce_out_cmd->complete_func = _aead_ce_out_call_back; rc = _qce_start_dma(pce_dev, true, true); if (rc == 0) return 0; bad: if (pce_dev->assoc_nents) { dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents, DMA_TO_DEVICE); } if (pce_dev->phy_iv_in) { dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in, ivsize, DMA_TO_DEVICE); } if (pce_dev->src_nents) { dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); } if (pce_dev->dst_nents) { dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, DMA_FROM_DEVICE); } return rc; } EXPORT_SYMBOL(qce_aead_req); int qce_ablk_cipher_req(void *handle, struct qce_req *c_req) { int rc = 0; struct qce_device *pce_dev = (struct qce_device *) handle; struct ablkcipher_request *areq = (struct ablkcipher_request *) c_req->areq; uint32_t pad_len = ALIGN(areq->nbytes, ADM_CE_BLOCK_SIZE) - areq->nbytes; _chain_buffer_in_init(pce_dev); _chain_buffer_out_init(pce_dev); pce_dev->src_nents = 0; pce_dev->dst_nents = 0; /* cipher input */ pce_dev->src_nents = count_sg(areq->src, areq->nbytes); if (c_req->use_pmem != 1) dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); else dma_map_pmem_sg(&c_req->pmem->src[0], pce_dev->src_nents, areq->src); if (_chain_sg_buffer_in(pce_dev, areq->src, areq->nbytes) < 0) { rc = -ENOMEM; goto bad; } /* cipher output */ if (areq->src != areq->dst) { pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes); if (c_req->use_pmem != 1) dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, DMA_FROM_DEVICE); else dma_map_pmem_sg(&c_req->pmem->dst[0], pce_dev->dst_nents, areq->dst); }; if (_chain_sg_buffer_out(pce_dev, areq->dst, areq->nbytes) < 0) { rc = -ENOMEM; goto bad; } /* pad data */ if (pad_len) { if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad, pad_len) < 0) { rc = -ENOMEM; goto bad; } if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad, pad_len) < 0) { rc = -ENOMEM; goto bad; } } /* finalize the ce_in and ce_out channels command lists */ _ce_in_final(pce_dev, 1, areq->nbytes + pad_len); _ce_out_final(pce_dev, 1, areq->nbytes + pad_len); #ifdef QCE_DEBUG _ce_in_dump(pce_dev); _ce_out_dump(pce_dev); #endif /* set up crypto device */ rc = _ce_setup(pce_dev, c_req, areq->nbytes, 0); if (rc < 0) goto bad; /* setup for callback, and issue command to adm */ pce_dev->areq = areq; pce_dev->qce_cb = 
c_req->qce_cb; if (c_req->use_pmem == 1) { pce_dev->chan_ce_in_cmd->complete_func = _ablk_cipher_ce_in_call_back_pmem; pce_dev->chan_ce_out_cmd->complete_func = _ablk_cipher_ce_out_call_back_pmem; } else { pce_dev->chan_ce_in_cmd->complete_func = _ablk_cipher_ce_in_call_back; pce_dev->chan_ce_out_cmd->complete_func = _ablk_cipher_ce_out_call_back; } rc = _qce_start_dma(pce_dev, true, true); if (rc == 0) return 0; bad: if (c_req->use_pmem != 1) { if (pce_dev->dst_nents) { dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, DMA_FROM_DEVICE); } if (pce_dev->src_nents) { dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); } } return rc; } EXPORT_SYMBOL(qce_ablk_cipher_req); int qce_process_sha_req(void *handle, struct qce_sha_req *sreq) { struct qce_device *pce_dev = (struct qce_device *) handle; int rc; uint32_t pad_len = ALIGN(sreq->size, ADM_CE_BLOCK_SIZE) - sreq->size; struct ahash_request *areq = (struct ahash_request *)sreq->areq; _chain_buffer_in_init(pce_dev); pce_dev->src_nents = count_sg(sreq->src, sreq->size); dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents, DMA_TO_DEVICE); if (_chain_sg_buffer_in(pce_dev, sreq->src, sreq->size) < 0) { rc = -ENOMEM; goto bad; } if (pad_len) { if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad, pad_len) < 0) { rc = -ENOMEM; goto bad; } } _ce_in_final(pce_dev, 2, sreq->size + pad_len); #ifdef QCE_DEBUG _ce_in_dump(pce_dev); #endif rc = _sha_ce_setup(pce_dev, sreq); if (rc < 0) goto bad; pce_dev->areq = areq; pce_dev->qce_cb = sreq->qce_cb; pce_dev->chan_ce_in_cmd->complete_func = _sha_ce_in_call_back; rc = _qce_start_dma(pce_dev, true, false); if (rc == 0) return 0; bad: if (pce_dev->src_nents) { dma_unmap_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents, DMA_TO_DEVICE); } return rc; } EXPORT_SYMBOL(qce_process_sha_req); /* * crypto engine open function. 
*/ void *qce_open(struct platform_device *pdev, int *rc) { struct qce_device *pce_dev; struct resource *resource; struct clk *ce_clk; pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL); if (!pce_dev) { *rc = -ENOMEM; dev_err(&pdev->dev, "Can not allocate memory\n"); return NULL; } pce_dev->pdev = &pdev->dev; ce_clk = clk_get(pce_dev->pdev, "core_clk"); if (IS_ERR(ce_clk)) { kfree(pce_dev); *rc = PTR_ERR(ce_clk); return NULL; } pce_dev->ce_clk = ce_clk; *rc = clk_enable(pce_dev->ce_clk); if (*rc) { kfree(pce_dev); return NULL; } resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!resource) { *rc = -ENXIO; dev_err(pce_dev->pdev, "Missing MEM resource\n"); goto err; }; pce_dev->phy_iobase = resource->start; pce_dev->iobase = ioremap_nocache(resource->start, resource->end - resource->start + 1); if (!pce_dev->iobase) { *rc = -ENOMEM; dev_err(pce_dev->pdev, "Can not map io memory\n"); goto err; } pce_dev->chan_ce_in_cmd = kzalloc(sizeof(struct msm_dmov_cmd), GFP_KERNEL); pce_dev->chan_ce_out_cmd = kzalloc(sizeof(struct msm_dmov_cmd), GFP_KERNEL); if (pce_dev->chan_ce_in_cmd == NULL || pce_dev->chan_ce_out_cmd == NULL) { dev_err(pce_dev->pdev, "Can not allocate memory\n"); *rc = -ENOMEM; goto err; } resource = platform_get_resource_byname(pdev, IORESOURCE_DMA, "crypto_channels"); if (!resource) { *rc = -ENXIO; dev_err(pce_dev->pdev, "Missing DMA channel resource\n"); goto err; }; pce_dev->chan_ce_in = resource->start; pce_dev->chan_ce_out = resource->end; resource = platform_get_resource_byname(pdev, IORESOURCE_DMA, "crypto_crci_in"); if (!resource) { *rc = -ENXIO; dev_err(pce_dev->pdev, "Missing DMA crci in resource\n"); goto err; }; pce_dev->crci_in = resource->start; resource = platform_get_resource_byname(pdev, IORESOURCE_DMA, "crypto_crci_out"); if (!resource) { *rc = -ENXIO; dev_err(pce_dev->pdev, "Missing DMA crci out resource\n"); goto err; }; pce_dev->crci_out = resource->start; resource = platform_get_resource_byname(pdev, IORESOURCE_DMA, 
"crypto_crci_hash"); if (!resource) { *rc = -ENXIO; dev_err(pce_dev->pdev, "Missing DMA crci hash resource\n"); goto err; }; pce_dev->crci_hash = resource->start; pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev, 2*PAGE_SIZE, &pce_dev->coh_pmem, GFP_KERNEL); if (pce_dev->coh_vmem == NULL) { *rc = -ENOMEM; dev_err(pce_dev->pdev, "Can not allocate coherent memory.\n"); goto err; } _setup_cmd_template(pce_dev); pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE; pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE; if (_init_ce_engine(pce_dev)) { *rc = -ENXIO; clk_disable(pce_dev->ce_clk); goto err; } *rc = 0; clk_disable(pce_dev->ce_clk); pce_dev->err = 0; return pce_dev; err: if (pce_dev) qce_close(pce_dev); return NULL; } EXPORT_SYMBOL(qce_open); /* * crypto engine close function. */ int qce_close(void *handle) { struct qce_device *pce_dev = (struct qce_device *) handle; if (handle == NULL) return -ENODEV; if (pce_dev->iobase) iounmap(pce_dev->iobase); if (pce_dev->coh_vmem) dma_free_coherent(pce_dev->pdev, 2*PAGE_SIZE, pce_dev->coh_vmem, pce_dev->coh_pmem); kfree(pce_dev->chan_ce_in_cmd); kfree(pce_dev->chan_ce_out_cmd); clk_put(pce_dev->ce_clk); kfree(handle); return 0; } EXPORT_SYMBOL(qce_close); int qce_hw_support(void *handle, struct ce_hw_support *ce_support) { struct qce_device *pce_dev = (struct qce_device *) handle; if (ce_support == NULL) return -EINVAL; if (pce_dev->hmac == 1) ce_support->sha1_hmac_20 = true; else ce_support->sha1_hmac_20 = false; ce_support->sha1_hmac = false; ce_support->sha256_hmac = false; ce_support->sha_hmac = false; ce_support->cmac = false; ce_support->aes_key_192 = true; ce_support->aes_xts = false; ce_support->aes_ccm = false; ce_support->ota = pce_dev->ota; ce_support->aligned_only = false; ce_support->bam = false; return 0; } EXPORT_SYMBOL(qce_hw_support); int qce_f8_req(void *handle, struct qce_f8_req *req, void *cookie, qce_comp_func_ptr_t qce_cb) { struct qce_device *pce_dev = (struct qce_device *) handle; bool 
key_stream_mode; dma_addr_t dst; int rc; uint32_t pad_len = ALIGN(req->data_len, ADM_CE_BLOCK_SIZE) - req->data_len; _chain_buffer_in_init(pce_dev); _chain_buffer_out_init(pce_dev); key_stream_mode = (req->data_in == NULL); /* F8 cipher input */ if (key_stream_mode) pce_dev->phy_ota_src = 0; else { pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, req->data_in, req->data_len, (req->data_in == req->data_out) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src, req->data_len) < 0) { pce_dev->phy_ota_dst = 0; rc = -ENOMEM; goto bad; } } /* F8 cipher output */ if (req->data_in != req->data_out) { dst = dma_map_single(pce_dev->pdev, req->data_out, req->data_len, DMA_FROM_DEVICE); pce_dev->phy_ota_dst = dst; } else { dst = pce_dev->phy_ota_src; pce_dev->phy_ota_dst = 0; } if (_chain_pm_buffer_out(pce_dev, dst, req->data_len) < 0) { rc = -ENOMEM; goto bad; } pce_dev->ota_size = req->data_len; /* pad data */ if (pad_len) { if (!key_stream_mode && _chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad, pad_len) < 0) { rc = -ENOMEM; goto bad; } if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad, pad_len) < 0) { rc = -ENOMEM; goto bad; } } /* finalize the ce_in and ce_out channels command lists */ if (!key_stream_mode) _ce_in_final(pce_dev, 1, req->data_len + pad_len); _ce_out_final(pce_dev, 1, req->data_len + pad_len); /* set up crypto device */ rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0, req->data_len); if (rc < 0) goto bad; /* setup for callback, and issue command to adm */ pce_dev->areq = cookie; pce_dev->qce_cb = qce_cb; if (!key_stream_mode) pce_dev->chan_ce_in_cmd->complete_func = _f8_ce_in_call_back; pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back; rc = _qce_start_dma(pce_dev, !(key_stream_mode), true); if (rc == 0) return 0; bad: if (pce_dev->phy_ota_dst != 0) dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, req->data_len, DMA_FROM_DEVICE); if (pce_dev->phy_ota_src != 0) 
dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, req->data_len, (req->data_in == req->data_out) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); return rc; } EXPORT_SYMBOL(qce_f8_req); int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq, void *cookie, qce_comp_func_ptr_t qce_cb) { struct qce_device *pce_dev = (struct qce_device *) handle; uint16_t num_pkt = mreq->num_pkt; uint16_t cipher_start = mreq->cipher_start; uint16_t cipher_size = mreq->cipher_size; struct qce_f8_req *req = &mreq->qce_f8_req; uint32_t total; uint32_t pad_len; dma_addr_t dst = 0; int rc = 0; total = num_pkt * req->data_len; pad_len = ALIGN(total, ADM_CE_BLOCK_SIZE) - total; _chain_buffer_in_init(pce_dev); _chain_buffer_out_init(pce_dev); /* F8 cipher input */ pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, req->data_in, total, (req->data_in == req->data_out) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src, total) < 0) { pce_dev->phy_ota_dst = 0; rc = -ENOMEM; goto bad; } /* F8 cipher output */ if (req->data_in != req->data_out) { dst = dma_map_single(pce_dev->pdev, req->data_out, total, DMA_FROM_DEVICE); pce_dev->phy_ota_dst = dst; } else { dst = pce_dev->phy_ota_src; pce_dev->phy_ota_dst = 0; } if (_chain_pm_buffer_out(pce_dev, dst, total) < 0) { rc = -ENOMEM; goto bad; } pce_dev->ota_size = total; /* pad data */ if (pad_len) { if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad, pad_len) < 0) { rc = -ENOMEM; goto bad; } if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad, pad_len) < 0) { rc = -ENOMEM; goto bad; } } /* finalize the ce_in and ce_out channels command lists */ _ce_in_final(pce_dev, 1, total + pad_len); _ce_out_final(pce_dev, 1, total + pad_len); /* set up crypto device */ rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start, cipher_size); if (rc) goto bad ; /* setup for callback, and issue command to adm */ pce_dev->areq = cookie; pce_dev->qce_cb = qce_cb; pce_dev->chan_ce_in_cmd->complete_func = 
_f8_ce_in_call_back; pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back; rc = _qce_start_dma(pce_dev, true, true); if (rc == 0) return 0; bad: if (pce_dev->phy_ota_dst) dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, total, DMA_FROM_DEVICE); dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, total, (req->data_in == req->data_out) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); return rc; } EXPORT_SYMBOL(qce_f8_multi_pkt_req); int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie, qce_comp_func_ptr_t qce_cb) { struct qce_device *pce_dev = (struct qce_device *) handle; int rc; uint32_t pad_len = ALIGN(req->msize, ADM_CE_BLOCK_SIZE) - req->msize; pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, req->message, req->msize, DMA_TO_DEVICE); _chain_buffer_in_init(pce_dev); rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src, req->msize); if (rc < 0) { rc = -ENOMEM; goto bad; } pce_dev->ota_size = req->msize; if (pad_len) { rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad, pad_len); if (rc < 0) { rc = -ENOMEM; goto bad; } } _ce_in_final(pce_dev, 2, req->msize + pad_len); rc = _ce_f9_setup(pce_dev, req); if (rc < 0) goto bad; /* setup for callback, and issue command to adm */ pce_dev->areq = cookie; pce_dev->qce_cb = qce_cb; pce_dev->chan_ce_in_cmd->complete_func = _f9_ce_in_call_back; rc = _qce_start_dma(pce_dev, true, false); if (rc == 0) return 0; bad: dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, req->msize, DMA_TO_DEVICE); return rc; } EXPORT_SYMBOL(qce_f9_req); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Crypto Engine driver");
gpl-2.0
GEWangLiang/linux
drivers/parport/parport_ax88796.c
1348
9493
/* linux/drivers/parport/parport_ax88796.c * * (c) 2005,2006 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/parport.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/irq.h> #define AX_SPR_BUSY (1<<7) #define AX_SPR_ACK (1<<6) #define AX_SPR_PE (1<<5) #define AX_SPR_SLCT (1<<4) #define AX_SPR_ERR (1<<3) #define AX_CPR_nDOE (1<<5) #define AX_CPR_SLCTIN (1<<3) #define AX_CPR_nINIT (1<<2) #define AX_CPR_ATFD (1<<1) #define AX_CPR_STRB (1<<0) struct ax_drvdata { struct parport *parport; struct parport_state suspend; struct device *dev; struct resource *io; unsigned char irq_enabled; void __iomem *base; void __iomem *spp_data; void __iomem *spp_spr; void __iomem *spp_cpr; }; static inline struct ax_drvdata *pp_to_drv(struct parport *p) { return p->private_data; } static unsigned char parport_ax88796_read_data(struct parport *p) { struct ax_drvdata *dd = pp_to_drv(p); return readb(dd->spp_data); } static void parport_ax88796_write_data(struct parport *p, unsigned char data) { struct ax_drvdata *dd = pp_to_drv(p); writeb(data, dd->spp_data); } static unsigned char parport_ax88796_read_control(struct parport *p) { struct ax_drvdata *dd = pp_to_drv(p); unsigned int cpr = readb(dd->spp_cpr); unsigned int ret = 0; if (!(cpr & AX_CPR_STRB)) ret |= PARPORT_CONTROL_STROBE; if (!(cpr & AX_CPR_ATFD)) ret |= PARPORT_CONTROL_AUTOFD; if (cpr & AX_CPR_nINIT) ret |= PARPORT_CONTROL_INIT; if (!(cpr & AX_CPR_SLCTIN)) ret |= PARPORT_CONTROL_SELECT; return ret; } static void parport_ax88796_write_control(struct parport *p, unsigned char control) { struct ax_drvdata *dd = pp_to_drv(p); unsigned int cpr = readb(dd->spp_cpr); cpr &= 
AX_CPR_nDOE; if (!(control & PARPORT_CONTROL_STROBE)) cpr |= AX_CPR_STRB; if (!(control & PARPORT_CONTROL_AUTOFD)) cpr |= AX_CPR_ATFD; if (control & PARPORT_CONTROL_INIT) cpr |= AX_CPR_nINIT; if (!(control & PARPORT_CONTROL_SELECT)) cpr |= AX_CPR_SLCTIN; dev_dbg(dd->dev, "write_control: ctrl=%02x, cpr=%02x\n", control, cpr); writeb(cpr, dd->spp_cpr); if (parport_ax88796_read_control(p) != control) { dev_err(dd->dev, "write_control: read != set (%02x, %02x)\n", parport_ax88796_read_control(p), control); } } static unsigned char parport_ax88796_read_status(struct parport *p) { struct ax_drvdata *dd = pp_to_drv(p); unsigned int status = readb(dd->spp_spr); unsigned int ret = 0; if (status & AX_SPR_BUSY) ret |= PARPORT_STATUS_BUSY; if (status & AX_SPR_ACK) ret |= PARPORT_STATUS_ACK; if (status & AX_SPR_ERR) ret |= PARPORT_STATUS_ERROR; if (status & AX_SPR_SLCT) ret |= PARPORT_STATUS_SELECT; if (status & AX_SPR_PE) ret |= PARPORT_STATUS_PAPEROUT; return ret; } static unsigned char parport_ax88796_frob_control(struct parport *p, unsigned char mask, unsigned char val) { struct ax_drvdata *dd = pp_to_drv(p); unsigned char old = parport_ax88796_read_control(p); dev_dbg(dd->dev, "frob: mask=%02x, val=%02x, old=%02x\n", mask, val, old); parport_ax88796_write_control(p, (old & ~mask) | val); return old; } static void parport_ax88796_enable_irq(struct parport *p) { struct ax_drvdata *dd = pp_to_drv(p); unsigned long flags; local_irq_save(flags); if (!dd->irq_enabled) { enable_irq(p->irq); dd->irq_enabled = 1; } local_irq_restore(flags); } static void parport_ax88796_disable_irq(struct parport *p) { struct ax_drvdata *dd = pp_to_drv(p); unsigned long flags; local_irq_save(flags); if (dd->irq_enabled) { disable_irq(p->irq); dd->irq_enabled = 0; } local_irq_restore(flags); } static void parport_ax88796_data_forward(struct parport *p) { struct ax_drvdata *dd = pp_to_drv(p); void __iomem *cpr = dd->spp_cpr; writeb((readb(cpr) & ~AX_CPR_nDOE), cpr); } static void 
parport_ax88796_data_reverse(struct parport *p) { struct ax_drvdata *dd = pp_to_drv(p); void __iomem *cpr = dd->spp_cpr; writeb(readb(cpr) | AX_CPR_nDOE, cpr); } static void parport_ax88796_init_state(struct pardevice *d, struct parport_state *s) { struct ax_drvdata *dd = pp_to_drv(d->port); memset(s, 0, sizeof(struct parport_state)); dev_dbg(dd->dev, "init_state: %p: state=%p\n", d, s); s->u.ax88796.cpr = readb(dd->spp_cpr); } static void parport_ax88796_save_state(struct parport *p, struct parport_state *s) { struct ax_drvdata *dd = pp_to_drv(p); dev_dbg(dd->dev, "save_state: %p: state=%p\n", p, s); s->u.ax88796.cpr = readb(dd->spp_cpr); } static void parport_ax88796_restore_state(struct parport *p, struct parport_state *s) { struct ax_drvdata *dd = pp_to_drv(p); dev_dbg(dd->dev, "restore_state: %p: state=%p\n", p, s); writeb(s->u.ax88796.cpr, dd->spp_cpr); } static struct parport_operations parport_ax88796_ops = { .write_data = parport_ax88796_write_data, .read_data = parport_ax88796_read_data, .write_control = parport_ax88796_write_control, .read_control = parport_ax88796_read_control, .frob_control = parport_ax88796_frob_control, .read_status = parport_ax88796_read_status, .enable_irq = parport_ax88796_enable_irq, .disable_irq = parport_ax88796_disable_irq, .data_forward = parport_ax88796_data_forward, .data_reverse = parport_ax88796_data_reverse, .init_state = parport_ax88796_init_state, .save_state = parport_ax88796_save_state, .restore_state = parport_ax88796_restore_state, .epp_write_data = parport_ieee1284_epp_write_data, .epp_read_data = parport_ieee1284_epp_read_data, .epp_write_addr = parport_ieee1284_epp_write_addr, .epp_read_addr = parport_ieee1284_epp_read_addr, .ecp_write_data = parport_ieee1284_ecp_write_data, .ecp_read_data = parport_ieee1284_ecp_read_data, .ecp_write_addr = parport_ieee1284_ecp_write_addr, .compat_write_data = parport_ieee1284_write_compat, .nibble_read_data = parport_ieee1284_read_nibble, .byte_read_data = 
parport_ieee1284_read_byte, .owner = THIS_MODULE, }; static int parport_ax88796_probe(struct platform_device *pdev) { struct device *_dev = &pdev->dev; struct ax_drvdata *dd; struct parport *pp = NULL; struct resource *res; unsigned long size; int spacing; int irq; int ret; dd = kzalloc(sizeof(struct ax_drvdata), GFP_KERNEL); if (dd == NULL) { dev_err(_dev, "no memory for private data\n"); return -ENOMEM; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(_dev, "no MEM specified\n"); ret = -ENXIO; goto exit_mem; } size = resource_size(res); spacing = size / 3; dd->io = request_mem_region(res->start, size, pdev->name); if (dd->io == NULL) { dev_err(_dev, "cannot reserve memory\n"); ret = -ENXIO; goto exit_mem; } dd->base = ioremap(res->start, size); if (dd->base == NULL) { dev_err(_dev, "cannot ioremap region\n"); ret = -ENXIO; goto exit_res; } irq = platform_get_irq(pdev, 0); if (irq <= 0) irq = PARPORT_IRQ_NONE; pp = parport_register_port((unsigned long)dd->base, irq, PARPORT_DMA_NONE, &parport_ax88796_ops); if (pp == NULL) { dev_err(_dev, "failed to register parallel port\n"); ret = -ENOMEM; goto exit_unmap; } pp->private_data = dd; dd->parport = pp; dd->dev = _dev; dd->spp_data = dd->base; dd->spp_spr = dd->base + (spacing * 1); dd->spp_cpr = dd->base + (spacing * 2); /* initialise the port controls */ writeb(AX_CPR_STRB, dd->spp_cpr); if (irq >= 0) { /* request irq */ ret = request_irq(irq, parport_irq_handler, IRQF_TRIGGER_FALLING, pdev->name, pp); if (ret < 0) goto exit_port; dd->irq_enabled = 1; } platform_set_drvdata(pdev, pp); dev_info(_dev, "attached parallel port driver\n"); parport_announce_port(pp); return 0; exit_port: parport_remove_port(pp); exit_unmap: iounmap(dd->base); exit_res: release_resource(dd->io); kfree(dd->io); exit_mem: kfree(dd); return ret; } static int parport_ax88796_remove(struct platform_device *pdev) { struct parport *p = platform_get_drvdata(pdev); struct ax_drvdata *dd = pp_to_drv(p); 
free_irq(p->irq, p); parport_remove_port(p); iounmap(dd->base); release_resource(dd->io); kfree(dd->io); kfree(dd); return 0; } #ifdef CONFIG_PM static int parport_ax88796_suspend(struct platform_device *dev, pm_message_t state) { struct parport *p = platform_get_drvdata(dev); struct ax_drvdata *dd = pp_to_drv(p); parport_ax88796_save_state(p, &dd->suspend); writeb(AX_CPR_nDOE | AX_CPR_STRB, dd->spp_cpr); return 0; } static int parport_ax88796_resume(struct platform_device *dev) { struct parport *p = platform_get_drvdata(dev); struct ax_drvdata *dd = pp_to_drv(p); parport_ax88796_restore_state(p, &dd->suspend); return 0; } #else #define parport_ax88796_suspend NULL #define parport_ax88796_resume NULL #endif MODULE_ALIAS("platform:ax88796-pp"); static struct platform_driver axdrv = { .driver = { .name = "ax88796-pp", }, .probe = parport_ax88796_probe, .remove = parport_ax88796_remove, .suspend = parport_ax88796_suspend, .resume = parport_ax88796_resume, }; module_platform_driver(axdrv); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); MODULE_DESCRIPTION("AX88796 Parport parallel port driver"); MODULE_LICENSE("GPL");
gpl-2.0
x456/linux
drivers/mmc/host/mmci_qcom_dml.c
1604
5406
/* * * Copyright (c) 2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/of.h> #include <linux/of_dma.h> #include <linux/bitops.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> #include "mmci.h" /* Registers */ #define DML_CONFIG 0x00 #define PRODUCER_CRCI_MSK GENMASK(1, 0) #define PRODUCER_CRCI_DISABLE 0 #define PRODUCER_CRCI_X_SEL BIT(0) #define PRODUCER_CRCI_Y_SEL BIT(1) #define CONSUMER_CRCI_MSK GENMASK(3, 2) #define CONSUMER_CRCI_DISABLE 0 #define CONSUMER_CRCI_X_SEL BIT(2) #define CONSUMER_CRCI_Y_SEL BIT(3) #define PRODUCER_TRANS_END_EN BIT(4) #define BYPASS BIT(16) #define DIRECT_MODE BIT(17) #define INFINITE_CONS_TRANS BIT(18) #define DML_SW_RESET 0x08 #define DML_PRODUCER_START 0x0c #define DML_CONSUMER_START 0x10 #define DML_PRODUCER_PIPE_LOGICAL_SIZE 0x14 #define DML_CONSUMER_PIPE_LOGICAL_SIZE 0x18 #define DML_PIPE_ID 0x1c #define PRODUCER_PIPE_ID_SHFT 0 #define PRODUCER_PIPE_ID_MSK GENMASK(4, 0) #define CONSUMER_PIPE_ID_SHFT 16 #define CONSUMER_PIPE_ID_MSK GENMASK(20, 16) #define DML_PRODUCER_BAM_BLOCK_SIZE 0x24 #define DML_PRODUCER_BAM_TRANS_SIZE 0x28 /* other definitions */ #define PRODUCER_PIPE_LOGICAL_SIZE 4096 #define CONSUMER_PIPE_LOGICAL_SIZE 4096 #define DML_OFFSET 0x800 void dml_start_xfer(struct mmci_host *host, struct mmc_data *data) { u32 config; void __iomem *base = host->base + DML_OFFSET; if (data->flags & MMC_DATA_READ) { /* Read operation: configure DML for producer operation */ /* Set producer CRCI-x and disable consumer CRCI */ config = readl_relaxed(base + 
DML_CONFIG); config = (config & ~PRODUCER_CRCI_MSK) | PRODUCER_CRCI_X_SEL; config = (config & ~CONSUMER_CRCI_MSK) | CONSUMER_CRCI_DISABLE; writel_relaxed(config, base + DML_CONFIG); /* Set the Producer BAM block size */ writel_relaxed(data->blksz, base + DML_PRODUCER_BAM_BLOCK_SIZE); /* Set Producer BAM Transaction size */ writel_relaxed(data->blocks * data->blksz, base + DML_PRODUCER_BAM_TRANS_SIZE); /* Set Producer Transaction End bit */ config = readl_relaxed(base + DML_CONFIG); config |= PRODUCER_TRANS_END_EN; writel_relaxed(config, base + DML_CONFIG); /* Trigger producer */ writel_relaxed(1, base + DML_PRODUCER_START); } else { /* Write operation: configure DML for consumer operation */ /* Set consumer CRCI-x and disable producer CRCI*/ config = readl_relaxed(base + DML_CONFIG); config = (config & ~CONSUMER_CRCI_MSK) | CONSUMER_CRCI_X_SEL; config = (config & ~PRODUCER_CRCI_MSK) | PRODUCER_CRCI_DISABLE; writel_relaxed(config, base + DML_CONFIG); /* Clear Producer Transaction End bit */ config = readl_relaxed(base + DML_CONFIG); config &= ~PRODUCER_TRANS_END_EN; writel_relaxed(config, base + DML_CONFIG); /* Trigger consumer */ writel_relaxed(1, base + DML_CONSUMER_START); } /* make sure the dml is configured before dma is triggered */ wmb(); } static int of_get_dml_pipe_index(struct device_node *np, const char *name) { int index; struct of_phandle_args dma_spec; index = of_property_match_string(np, "dma-names", name); if (index < 0) return -ENODEV; if (of_parse_phandle_with_args(np, "dmas", "#dma-cells", index, &dma_spec)) return -ENODEV; if (dma_spec.args_count) return dma_spec.args[0]; return -ENODEV; } /* Initialize the dml hardware connected to SD Card controller */ int dml_hw_init(struct mmci_host *host, struct device_node *np) { u32 config; void __iomem *base; int consumer_id, producer_id; consumer_id = of_get_dml_pipe_index(np, "tx"); producer_id = of_get_dml_pipe_index(np, "rx"); if (producer_id < 0 || consumer_id < 0) return -ENODEV; base = host->base + 
DML_OFFSET; /* Reset the DML block */ writel_relaxed(1, base + DML_SW_RESET); /* Disable the producer and consumer CRCI */ config = (PRODUCER_CRCI_DISABLE | CONSUMER_CRCI_DISABLE); /* * Disable the bypass mode. Bypass mode will only be used * if data transfer is to happen in PIO mode and don't * want the BAM interface to connect with SDCC-DML. */ config &= ~BYPASS; /* * Disable direct mode as we don't DML to MASTER the AHB bus. * BAM connected with DML should MASTER the AHB bus. */ config &= ~DIRECT_MODE; /* * Disable infinite mode transfer as we won't be doing any * infinite size data transfers. All data transfer will be * of finite data size. */ config &= ~INFINITE_CONS_TRANS; writel_relaxed(config, base + DML_CONFIG); /* * Initialize the logical BAM pipe size for producer * and consumer. */ writel_relaxed(PRODUCER_PIPE_LOGICAL_SIZE, base + DML_PRODUCER_PIPE_LOGICAL_SIZE); writel_relaxed(CONSUMER_PIPE_LOGICAL_SIZE, base + DML_CONSUMER_PIPE_LOGICAL_SIZE); /* Initialize Producer/consumer pipe id */ writel_relaxed(producer_id | (consumer_id << CONSUMER_PIPE_ID_SHFT), base + DML_PIPE_ID); /* Make sure dml intialization is finished */ mb(); return 0; }
gpl-2.0
zanezam/boeffla-kernel-omnirom-s3
arch/arm/mach-omap2/pm.c
2116
6127
/* * pm.c - Common OMAP2+ power management-related code * * Copyright (C) 2010 Texas Instruments, Inc. * Copyright (C) 2010 Nokia Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/err.h> #include <linux/opp.h> #include <plat/omap-pm.h> #include <plat/omap_device.h> #include <plat/common.h> #include "voltage.h" #include "powerdomain.h" #include "clockdomain.h" #include "pm.h" static struct omap_device_pm_latency *pm_lats; static struct device *mpu_dev; static struct device *iva_dev; static struct device *l3_dev; static struct device *dsp_dev; struct device *omap2_get_mpuss_device(void) { WARN_ON_ONCE(!mpu_dev); return mpu_dev; } struct device *omap2_get_iva_device(void) { WARN_ON_ONCE(!iva_dev); return iva_dev; } struct device *omap2_get_l3_device(void) { WARN_ON_ONCE(!l3_dev); return l3_dev; } struct device *omap4_get_dsp_device(void) { WARN_ON_ONCE(!dsp_dev); return dsp_dev; } EXPORT_SYMBOL(omap4_get_dsp_device); /* static int _init_omap_device(struct omap_hwmod *oh, void *user) */ static int _init_omap_device(char *name, struct device **new_dev) { struct omap_hwmod *oh; struct omap_device *od; oh = omap_hwmod_lookup(name); if (WARN(!oh, "%s: could not find omap_hwmod for %s\n", __func__, name)) return -ENODEV; od = omap_device_build(oh->name, 0, oh, NULL, 0, pm_lats, 0, false); if (WARN(IS_ERR(od), "%s: could not build omap_device for %s\n", __func__, name)) return -ENODEV; *new_dev = &od->pdev.dev; return 0; } /* * Build omap_devices for processors and bus. 
*/ static void omap2_init_processor_devices(void) { _init_omap_device("mpu", &mpu_dev); if (omap3_has_iva()) _init_omap_device("iva", &iva_dev); if (cpu_is_omap44xx()) { _init_omap_device("l3_main_1", &l3_dev); _init_omap_device("dsp", &dsp_dev); _init_omap_device("iva", &iva_dev); } else { _init_omap_device("l3_main", &l3_dev); } } /* Types of sleep_switch used in omap_set_pwrdm_state */ #define FORCEWAKEUP_SWITCH 0 #define LOWPOWERSTATE_SWITCH 1 /* * This sets pwrdm state (other than mpu & core. Currently only ON & * RET are supported. */ int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state) { u32 cur_state; int sleep_switch = 0; int ret = 0; if (pwrdm == NULL || IS_ERR(pwrdm)) return -EINVAL; while (!(pwrdm->pwrsts & (1 << state))) { if (state == PWRDM_POWER_OFF) return ret; state--; } cur_state = pwrdm_read_next_pwrst(pwrdm); if (cur_state == state) return ret; if (pwrdm_read_pwrst(pwrdm) < PWRDM_POWER_ON) { if ((pwrdm_read_pwrst(pwrdm) > state) && (pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE)) { sleep_switch = LOWPOWERSTATE_SWITCH; } else { clkdm_wakeup(pwrdm->pwrdm_clkdms[0]); pwrdm_wait_transition(pwrdm); sleep_switch = FORCEWAKEUP_SWITCH; } } ret = pwrdm_set_next_pwrst(pwrdm, state); if (ret) { printk(KERN_ERR "Unable to set state of powerdomain: %s\n", pwrdm->name); goto err; } switch (sleep_switch) { case FORCEWAKEUP_SWITCH: if (pwrdm->pwrdm_clkdms[0]->flags & CLKDM_CAN_ENABLE_AUTO) clkdm_allow_idle(pwrdm->pwrdm_clkdms[0]); else clkdm_sleep(pwrdm->pwrdm_clkdms[0]); break; case LOWPOWERSTATE_SWITCH: pwrdm_set_lowpwrstchange(pwrdm); break; default: return ret; } pwrdm_wait_transition(pwrdm); pwrdm_state_switch(pwrdm); err: return ret; } /* * This API is to be called during init to put the various voltage * domains to the voltage as per the opp table. Typically we boot up * at the nominal voltage. 
So this function finds out the rate of * the clock associated with the voltage domain, finds out the correct * opp entry and puts the voltage domain to the voltage specifies * in the opp entry */ static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name, struct device *dev) { struct voltagedomain *voltdm; struct clk *clk; struct opp *opp; unsigned long freq, bootup_volt; if (!vdd_name || !clk_name || !dev) { printk(KERN_ERR "%s: Invalid parameters!\n", __func__); goto exit; } voltdm = omap_voltage_domain_lookup(vdd_name); if (IS_ERR(voltdm)) { printk(KERN_ERR "%s: Unable to get vdd pointer for vdd_%s\n", __func__, vdd_name); goto exit; } clk = clk_get(NULL, clk_name); if (IS_ERR(clk)) { printk(KERN_ERR "%s: unable to get clk %s\n", __func__, clk_name); goto exit; } freq = clk->rate; clk_put(clk); opp = opp_find_freq_ceil(dev, &freq); if (IS_ERR(opp)) { printk(KERN_ERR "%s: unable to find boot up OPP for vdd_%s\n", __func__, vdd_name); goto exit; } bootup_volt = opp_get_voltage(opp); if (!bootup_volt) { printk(KERN_ERR "%s: unable to find voltage corresponding" "to the bootup OPP for vdd_%s\n", __func__, vdd_name); goto exit; } omap_voltage_scale_vdd(voltdm, bootup_volt); return 0; exit: printk(KERN_ERR "%s: Unable to put vdd_%s to its init voltage\n\n", __func__, vdd_name); return -EINVAL; } static void __init omap3_init_voltages(void) { if (!cpu_is_omap34xx()) return; omap2_set_init_voltage("mpu", "dpll1_ck", mpu_dev); omap2_set_init_voltage("core", "l3_ick", l3_dev); } static void __init omap4_init_voltages(void) { if (!cpu_is_omap44xx()) return; omap2_set_init_voltage("mpu", "dpll_mpu_ck", mpu_dev); omap2_set_init_voltage("core", "l3_div_ck", l3_dev); omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", iva_dev); } static int __init omap2_common_pm_init(void) { omap2_init_processor_devices(); omap_pm_if_init(); return 0; } postcore_initcall(omap2_common_pm_init); static int __init omap2_common_pm_late_init(void) { /* Init the OMAP TWL parameters */ 
omap3_twl_init(); omap4_twl_init(); /* Init the voltage layer */ omap_voltage_late_init(); /* Initialize the voltages */ omap3_init_voltages(); omap4_init_voltages(); /* Smartreflex device init */ omap_devinit_smartreflex(); return 0; } late_initcall(omap2_common_pm_late_init);
gpl-2.0
jcadduono/nethunter_kernel_g5
drivers/input/touchscreen/gunze.c
2116
4683
/* * Copyright (c) 2000-2001 Vojtech Pavlik */ /* * Gunze AHL-51S touchscreen driver for Linux */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/serio.h> #define DRIVER_DESC "Gunze AHL-51S touchscreen driver" MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* * Definitions & global arrays. */ #define GUNZE_MAX_LENGTH 10 /* * Per-touchscreen data. 
*/ struct gunze { struct input_dev *dev; struct serio *serio; int idx; unsigned char data[GUNZE_MAX_LENGTH]; char phys[32]; }; static void gunze_process_packet(struct gunze* gunze) { struct input_dev *dev = gunze->dev; if (gunze->idx != GUNZE_MAX_LENGTH || gunze->data[5] != ',' || (gunze->data[0] != 'T' && gunze->data[0] != 'R')) { printk(KERN_WARNING "gunze.c: bad packet: >%.*s<\n", GUNZE_MAX_LENGTH, gunze->data); return; } input_report_abs(dev, ABS_X, simple_strtoul(gunze->data + 1, NULL, 10)); input_report_abs(dev, ABS_Y, 1024 - simple_strtoul(gunze->data + 6, NULL, 10)); input_report_key(dev, BTN_TOUCH, gunze->data[0] == 'T'); input_sync(dev); } static irqreturn_t gunze_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct gunze* gunze = serio_get_drvdata(serio); if (data == '\r') { gunze_process_packet(gunze); gunze->idx = 0; } else { if (gunze->idx < GUNZE_MAX_LENGTH) gunze->data[gunze->idx++] = data; } return IRQ_HANDLED; } /* * gunze_disconnect() is the opposite of gunze_connect() */ static void gunze_disconnect(struct serio *serio) { struct gunze *gunze = serio_get_drvdata(serio); input_get_device(gunze->dev); input_unregister_device(gunze->dev); serio_close(serio); serio_set_drvdata(serio, NULL); input_put_device(gunze->dev); kfree(gunze); } /* * gunze_connect() is the routine that is called when someone adds a * new serio device that supports Gunze protocol and registers it as * an input device. 
*/ static int gunze_connect(struct serio *serio, struct serio_driver *drv) { struct gunze *gunze; struct input_dev *input_dev; int err; gunze = kzalloc(sizeof(struct gunze), GFP_KERNEL); input_dev = input_allocate_device(); if (!gunze || !input_dev) { err = -ENOMEM; goto fail1; } gunze->serio = serio; gunze->dev = input_dev; snprintf(gunze->phys, sizeof(serio->phys), "%s/input0", serio->phys); input_dev->name = "Gunze AHL-51S TouchScreen"; input_dev->phys = gunze->phys; input_dev->id.bustype = BUS_RS232; input_dev->id.vendor = SERIO_GUNZE; input_dev->id.product = 0x0051; input_dev->id.version = 0x0100; input_dev->dev.parent = &serio->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(input_dev, ABS_X, 24, 1000, 0, 0); input_set_abs_params(input_dev, ABS_Y, 24, 1000, 0, 0); serio_set_drvdata(serio, gunze); err = serio_open(serio, drv); if (err) goto fail2; err = input_register_device(gunze->dev); if (err) goto fail3; return 0; fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(gunze); return err; } /* * The serio driver structure. */ static struct serio_device_id gunze_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_GUNZE, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, gunze_serio_ids); static struct serio_driver gunze_drv = { .driver = { .name = "gunze", }, .description = DRIVER_DESC, .id_table = gunze_serio_ids, .interrupt = gunze_interrupt, .connect = gunze_connect, .disconnect = gunze_disconnect, }; module_serio_driver(gunze_drv);
gpl-2.0
dkthompson/bricked-pyramid-3.0_own
drivers/usb/host/ehci-ppc-of.c
2372
6387
/* * EHCI HCD (Host Controller Driver) for USB. * * Bus Glue for PPC On-Chip EHCI driver on the of_platform bus * Tested on AMCC PPC 440EPx * * Valentine Barshak <vbarshak@ru.mvista.com> * * Based on "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de> * and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com> * * This file is licenced under the GPL. */ #include <linux/signal.h> #include <linux/of.h> #include <linux/of_platform.h> /* called during probe() after chip reset completes */ static int ehci_ppc_of_setup(struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); int retval; retval = ehci_halt(ehci); if (retval) return retval; retval = ehci_init(hcd); if (retval) return retval; ehci->sbrn = 0x20; return ehci_reset(ehci); } static const struct hc_driver ehci_ppc_of_hc_driver = { .description = hcd_name, .product_desc = "OF EHCI", .hcd_priv_size = sizeof(struct ehci_hcd), /* * generic hardware linkage */ .irq = ehci_irq, .flags = HCD_MEMORY | HCD_USB2, /* * basic lifecycle operations */ .reset = ehci_ppc_of_setup, .start = ehci_run, .stop = ehci_stop, .shutdown = ehci_shutdown, /* * managing i/o requests and associated device resources */ .urb_enqueue = ehci_urb_enqueue, .urb_dequeue = ehci_urb_dequeue, .endpoint_disable = ehci_endpoint_disable, .endpoint_reset = ehci_endpoint_reset, /* * scheduling support */ .get_frame_number = ehci_get_frame, /* * root hub support */ .hub_status_data = ehci_hub_status_data, .hub_control = ehci_hub_control, #ifdef CONFIG_PM .bus_suspend = ehci_bus_suspend, .bus_resume = ehci_bus_resume, #endif .relinquish_port = ehci_relinquish_port, .port_handed_over = ehci_port_handed_over, .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, }; /* * 440EPx Errata USBH_3 * Fix: Enable Break Memory Transfer (BMT) in INSNREG3 */ #define PPC440EPX_EHCI0_INSREG_BMT (0x1 << 0) static int __devinit ppc44x_enable_bmt(struct device_node *dn) { __iomem u32 *insreg_virt; insreg_virt = of_iomap(dn, 1); if (!insreg_virt) return -EINVAL; 
out_be32(insreg_virt + 3, PPC440EPX_EHCI0_INSREG_BMT); iounmap(insreg_virt); return 0; } static int __devinit ehci_hcd_ppc_of_probe(struct platform_device *op) { struct device_node *dn = op->dev.of_node; struct usb_hcd *hcd; struct ehci_hcd *ehci = NULL; struct resource res; int irq; int rv; struct device_node *np; if (usb_disabled()) return -ENODEV; dev_dbg(&op->dev, "initializing PPC-OF USB Controller\n"); rv = of_address_to_resource(dn, 0, &res); if (rv) return rv; hcd = usb_create_hcd(&ehci_ppc_of_hc_driver, &op->dev, "PPC-OF USB"); if (!hcd) return -ENOMEM; hcd->rsrc_start = res.start; hcd->rsrc_len = res.end - res.start + 1; if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__); rv = -EBUSY; goto err_rmr; } irq = irq_of_parse_and_map(dn, 0); if (irq == NO_IRQ) { printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__); rv = -EBUSY; goto err_irq; } hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { printk(KERN_ERR "%s: ioremap failed\n", __FILE__); rv = -ENOMEM; goto err_ioremap; } ehci = hcd_to_ehci(hcd); np = of_find_compatible_node(NULL, NULL, "ibm,usb-ohci-440epx"); if (np != NULL) { /* claim we really affected by usb23 erratum */ if (!of_address_to_resource(np, 0, &res)) ehci->ohci_hcctrl_reg = ioremap(res.start + OHCI_HCCTRL_OFFSET, OHCI_HCCTRL_LEN); else pr_debug("%s: no ohci offset in fdt\n", __FILE__); if (!ehci->ohci_hcctrl_reg) { pr_debug("%s: ioremap for ohci hcctrl failed\n", __FILE__); } else { ehci->has_amcc_usb23 = 1; } } if (of_get_property(dn, "big-endian", NULL)) { ehci->big_endian_mmio = 1; ehci->big_endian_desc = 1; } if (of_get_property(dn, "big-endian-regs", NULL)) ehci->big_endian_mmio = 1; if (of_get_property(dn, "big-endian-desc", NULL)) ehci->big_endian_desc = 1; ehci->caps = hcd->regs; ehci->regs = hcd->regs + HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); /* cache this readonly data; minimize chip reads */ 
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); if (of_device_is_compatible(dn, "ibm,usb-ehci-440epx")) { rv = ppc44x_enable_bmt(dn); ehci_dbg(ehci, "Break Memory Transfer (BMT) is %senabled!\n", rv ? "NOT ": ""); } rv = usb_add_hcd(hcd, irq, 0); if (rv) goto err_ehci; return 0; err_ehci: if (ehci->has_amcc_usb23) iounmap(ehci->ohci_hcctrl_reg); iounmap(hcd->regs); err_ioremap: irq_dispose_mapping(irq); err_irq: release_mem_region(hcd->rsrc_start, hcd->rsrc_len); err_rmr: usb_put_hcd(hcd); return rv; } static int ehci_hcd_ppc_of_remove(struct platform_device *op) { struct usb_hcd *hcd = dev_get_drvdata(&op->dev); struct ehci_hcd *ehci = hcd_to_ehci(hcd); struct device_node *np; struct resource res; dev_set_drvdata(&op->dev, NULL); dev_dbg(&op->dev, "stopping PPC-OF USB Controller\n"); usb_remove_hcd(hcd); iounmap(hcd->regs); irq_dispose_mapping(hcd->irq); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); /* use request_mem_region to test if the ohci driver is loaded. if so * ensure the ohci core is operational. 
*/ if (ehci->has_amcc_usb23) { np = of_find_compatible_node(NULL, NULL, "ibm,usb-ohci-440epx"); if (np != NULL) { if (!of_address_to_resource(np, 0, &res)) if (!request_mem_region(res.start, 0x4, hcd_name)) set_ohci_hcfs(ehci, 1); else release_mem_region(res.start, 0x4); else pr_debug("%s: no ohci offset in fdt\n", __FILE__); of_node_put(np); } iounmap(ehci->ohci_hcctrl_reg); } usb_put_hcd(hcd); return 0; } static void ehci_hcd_ppc_of_shutdown(struct platform_device *op) { struct usb_hcd *hcd = dev_get_drvdata(&op->dev); if (hcd->driver->shutdown) hcd->driver->shutdown(hcd); } static const struct of_device_id ehci_hcd_ppc_of_match[] = { { .compatible = "usb-ehci", }, {}, }; MODULE_DEVICE_TABLE(of, ehci_hcd_ppc_of_match); static struct platform_driver ehci_hcd_ppc_of_driver = { .probe = ehci_hcd_ppc_of_probe, .remove = ehci_hcd_ppc_of_remove, .shutdown = ehci_hcd_ppc_of_shutdown, .driver = { .name = "ppc-of-ehci", .owner = THIS_MODULE, .of_match_table = ehci_hcd_ppc_of_match, }, };
gpl-2.0
kbc-developers/android_kernel_samsung_exynos4210jpn
drivers/media/dvb/frontends/dib7000m.c
2372
42500
/* * Linux-DVB Driver for DiBcom's DiB7000M and * first generation DiB7000P-demodulator-family. * * Copyright (C) 2005-7 DiBcom (http://www.dibcom.fr/) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/mutex.h> #include "dvb_frontend.h" #include "dib7000m.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "turn on debugging (default: 0)"); #define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB7000M: "); printk(args); printk("\n"); } } while (0) struct dib7000m_state { struct dvb_frontend demod; struct dib7000m_config cfg; u8 i2c_addr; struct i2c_adapter *i2c_adap; struct dibx000_i2c_master i2c_master; /* offset is 1 in case of the 7000MC */ u8 reg_offs; u16 wbd_ref; u8 current_band; fe_bandwidth_t current_bandwidth; struct dibx000_agc_config *current_agc; u32 timf; u32 timf_default; u32 internal_clk; u8 div_force_off : 1; u8 div_state : 1; u16 div_sync_wait; u16 revision; u8 agc_state; /* for the I2C transfer */ struct i2c_msg msg[2]; u8 i2c_write_buffer[4]; u8 i2c_read_buffer[2]; struct mutex i2c_buffer_lock; }; enum dib7000m_power_mode { DIB7000M_POWER_ALL = 0, DIB7000M_POWER_NO, DIB7000M_POWER_INTERF_ANALOG_AGC, DIB7000M_POWER_COR4_DINTLV_ICIRM_EQUAL_CFROD, DIB7000M_POWER_COR4_CRY_ESRAM_MOUT_NUD, DIB7000M_POWER_INTERFACE_ONLY, }; static u16 dib7000m_read_word(struct dib7000m_state *state, u16 reg) { u16 ret; if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) { dprintk("could not acquire lock"); return 0; } state->i2c_write_buffer[0] = (reg >> 8) | 0x80; state->i2c_write_buffer[1] = reg & 0xff; memset(state->msg, 0, 2 * sizeof(struct i2c_msg)); state->msg[0].addr = state->i2c_addr >> 1; state->msg[0].flags = 0; state->msg[0].buf = state->i2c_write_buffer; state->msg[0].len = 2; 
/* Continuation of dib7000m_read_word(): the register address has already been
 * queued in msg[0]; here the 16-bit read-back message is set up and the
 * two-message write/read transfer executed. (Function head lies before this
 * chunk.) */
	state->msg[1].addr = state->i2c_addr >> 1;
	state->msg[1].flags = I2C_M_RD;
	state->msg[1].buf = state->i2c_read_buffer;
	state->msg[1].len = 2;

	if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2)
		dprintk("i2c read error on %d",reg);

	/* registers are big-endian 16-bit values */
	ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
	mutex_unlock(&state->i2c_buffer_lock);
	return ret;
}

/*
 * Write one 16-bit demod register over I2C.
 * Returns 0 on success, -EREMOTEIO on transfer failure, -EINVAL if the
 * buffer lock could not be taken.
 */
static int dib7000m_write_word(struct dib7000m_state *state, u16 reg, u16 val)
{
	int ret;

	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
		dprintk("could not acquire lock");
		return -EINVAL;
	}

	/* big-endian register address followed by big-endian value */
	state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
	state->i2c_write_buffer[1] = reg & 0xff;
	state->i2c_write_buffer[2] = (val >> 8) & 0xff;
	state->i2c_write_buffer[3] = val & 0xff;

	memset(&state->msg[0], 0, sizeof(struct i2c_msg));
	state->msg[0].addr = state->i2c_addr >> 1;
	state->msg[0].flags = 0;
	state->msg[0].buf = state->i2c_write_buffer;
	state->msg[0].len = 4;

	ret = (i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ? -EREMOTEIO : 0);
	mutex_unlock(&state->i2c_buffer_lock);
	return ret;
}

/*
 * Write a table of register values. The table format is a sequence of
 * (count, start-register, count x values) runs, terminated by count == 0
 * (see dib7000m_defaults_common below).
 */
static void dib7000m_write_tab(struct dib7000m_state *state, u16 *buf)
{
	u16 l = 0, r, *n;
	n = buf;
	l = *n++;
	while (l) {
		r = *n++;

		if (state->reg_offs && (r >= 112 && r <= 331)) // compensate for 7000MC
			r++;

		do {
			dib7000m_write_word(state, r, *n++);
			r++;
		} while (--l);
		l = *n++;
	}
}

/*
 * Configure the transport-stream output interface (parallel/serial/FIFO/
 * diversity/high-Z). Returns the OR of the write results (0 on success).
 */
static int dib7000m_set_output_mode(struct dib7000m_state *state, int mode)
{
	int ret = 0;
	u16 outreg, fifo_threshold, smo_mode,
		sram = 0x0005; /* by default SRAM output is disabled */

	outreg = 0;
	fifo_threshold = 1792;
	smo_mode = (dib7000m_read_word(state, 294 + state->reg_offs) & 0x0010) | (1 << 1);

	dprintk( "setting output mode for demod %p to %d", &state->demod, mode);

	switch (mode) {
		case OUTMODE_MPEG2_PAR_GATED_CLK:   // STBs with parallel gated clock
			outreg = (1 << 10);  /* 0x0400 */
			break;
		case OUTMODE_MPEG2_PAR_CONT_CLK:    // STBs with parallel continues clock
			outreg = (1 << 10) | (1 << 6); /* 0x0440 */
			break;
		case OUTMODE_MPEG2_SERIAL:          // STBs with serial input
			outreg = (1 << 10) | (2 << 6) | (0 << 1); /* 0x0482 */
			break;
		case OUTMODE_DIVERSITY:
			if (state->cfg.hostbus_diversity)
				outreg = (1 << 10) | (4 << 6); /* 0x0500 */
			else
				sram |= 0x0c00;
			break;
		case OUTMODE_MPEG2_FIFO:            // e.g. USB feeding
			smo_mode |= (3 << 1);
			fifo_threshold = 512;
			outreg = (1 << 10) | (5 << 6);
			break;
		case OUTMODE_HIGH_Z:  // disable
			outreg = 0;
			break;
		default:
			dprintk( "Unhandled output_mode passed to be set for demod %p",&state->demod);
			break;
	}

	if (state->cfg.output_mpeg2_in_188_bytes)
		smo_mode |= (1 << 5) ;

	ret |= dib7000m_write_word(state, 294 + state->reg_offs, smo_mode);
	ret |= dib7000m_write_word(state, 295 + state->reg_offs, fifo_threshold); /* synchronous fread */
	ret |= dib7000m_write_word(state, 1795, outreg);
	ret |= dib7000m_write_word(state, 1805, sram);

	if (state->revision == 0x4003) {
		u16 clk_cfg1 = dib7000m_read_word(state, 909) & 0xfffd;
		if (mode == OUTMODE_DIVERSITY)
			clk_cfg1 |= (1 << 1); // P_O_CLK_en
		dib7000m_write_word(state, 909, clk_cfg1);
	}

	return ret;
}

/*
 * Select which demod sub-blocks are powered, via registers 903..906
 * (shifted by one on the 7000MC revision). Bits SET in the registers mean
 * powered DOWN, hence the 0xffff defaults.
 */
static void dib7000m_set_power_mode(struct dib7000m_state *state, enum dib7000m_power_mode mode)
{
	/* by default everything is going to be powered off */
	u16 reg_903 = 0xffff, reg_904 = 0xffff, reg_905 = 0xffff, reg_906 = 0x3fff;
	u8 offset = 0;

	/* now, depending on the requested mode, we power on */
	switch (mode) {
		/* power up everything in the demod */
		case DIB7000M_POWER_ALL:
			reg_903 = 0x0000; reg_904 = 0x0000; reg_905 = 0x0000; reg_906 = 0x0000;
			break;

		/* just leave power on the control-interfaces: GPIO and (I2C or SDIO or SRAM) */
		case DIB7000M_POWER_INTERFACE_ONLY: /* TODO power up either SDIO or I2C or SRAM */
			reg_905 &= ~((1 << 7) | (1 << 6) | (1 << 5) | (1 << 2));
			break;

		case DIB7000M_POWER_INTERF_ANALOG_AGC:
			reg_903 &= ~((1 << 15) | (1 << 14) | (1 << 11) | (1 << 10));
			reg_905 &= ~((1 << 7) | (1 << 6) | (1 << 5) | (1 << 4) | (1 << 2));
			reg_906 &= ~((1 << 0));
			break;

		case DIB7000M_POWER_COR4_DINTLV_ICIRM_EQUAL_CFROD:
			reg_903 = 0x0000; reg_904 = 0x801f; reg_905 = 0x0000; reg_906 = 0x0000;
			break;

		case DIB7000M_POWER_COR4_CRY_ESRAM_MOUT_NUD:
			reg_903 = 0x0000; reg_904 = 0x8000; reg_905 = 0x010b; reg_906 = 0x0000;
			break;
		case DIB7000M_POWER_NO:
			break;
	}

	/* always power down unused parts */
	if (!state->cfg.mobile_mode)
		reg_904 |= (1 << 7) | (1 << 6) | (1 << 4) | (1 << 2) | (1 << 1);

	/* P_sdio_select_clk = 0 on MC and after */
	if (state->revision != 0x4000)
		reg_906 <<= 1;

	if (state->revision == 0x4003)
		offset = 1;

	dib7000m_write_word(state, 903 + offset, reg_903);
	dib7000m_write_word(state, 904 + offset, reg_904);
	dib7000m_write_word(state, 905 + offset, reg_905);
	dib7000m_write_word(state, 906 + offset, reg_906);
}

/*
 * Drive the ADC state machine (slow ADC, main ADC, bandgap voltage) via
 * registers 913/914. Returns the OR of the write results.
 */
static int dib7000m_set_adc_state(struct dib7000m_state *state, enum dibx000_adc_states no)
{
	int ret = 0;
	u16 reg_913 = dib7000m_read_word(state, 913),
	    reg_914 = dib7000m_read_word(state, 914);

	switch (no) {
		case DIBX000_SLOW_ADC_ON:
			reg_914 |= (1 << 1) | (1 << 0);
			ret |= dib7000m_write_word(state, 914, reg_914);
			reg_914 &= ~(1 << 1);
			break;

		case DIBX000_SLOW_ADC_OFF:
			reg_914 |= (1 << 1) | (1 << 0);
			break;

		case DIBX000_ADC_ON:
			if (state->revision == 0x4000) { // workaround for PA/MA
				// power-up ADC
				dib7000m_write_word(state, 913, 0);
				dib7000m_write_word(state, 914, reg_914 & 0x3);
				// power-down bandgag
				dib7000m_write_word(state, 913, (1 << 15));
				dib7000m_write_word(state, 914, reg_914 & 0x3);
			}

			reg_913 &= 0x0fff;
			reg_914 &= 0x0003;
			break;

		case DIBX000_ADC_OFF: // leave the VBG voltage on
			reg_913 |= (1 << 14) | (1 << 13) | (1 << 12);
			reg_914 |= (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2);
			break;

		case DIBX000_VBG_ENABLE:
			reg_913 &= ~(1 << 15);
			break;

		case DIBX000_VBG_DISABLE:
			reg_913 |= (1 << 15);
			break;

		default:
			break;
	}

//	dprintk( "913: %x, 914: %x", reg_913, reg_914);
	ret |= dib7000m_write_word(state, 913, reg_913);
	ret |= dib7000m_write_word(state, 914, reg_914);

	return ret;
}

/*
 * Program the timing-frequency registers (23/24) for the given channel
 * bandwidth in kHz. (Continues in the next chunk.)
 */
static int dib7000m_set_bandwidth(struct dib7000m_state *state, u32 bw)
{
	u32 timf;

	/* store the current bandwidth for later use */
	state->current_bandwidth = bw;

	/* use the tracked timf if a lock ever updated it, else the default */
	if (state->timf == 0) {
		dprintk( "using default timf");
		timf = state->timf_default;
	} else {
		dprintk( "using updated timf");
		timf = state->timf;
	}

	/* scale timf to the requested bandwidth (reference is 8 MHz = 160*50) */
	timf = timf * (bw / 50) / 160;

	dib7000m_write_word(state, 23, (u16) ((timf >> 16) & 0xffff));
	dib7000m_write_word(state, 24, (u16) ((timf      ) & 0xffff));

	return 0;
}

/*
 * Enable/disable diversity combination input. Honors div_force_off, which
 * is set when the COFDM parameters forbid diversity.
 */
static int dib7000m_set_diversity_in(struct dvb_frontend *demod, int onoff)
{
	struct dib7000m_state *state = demod->demodulator_priv;

	if (state->div_force_off) {
		dprintk( "diversity combination deactivated - forced by COFDM parameters");
		onoff = 0;
	}
	state->div_state = (u8)onoff;

	if (onoff) {
		dib7000m_write_word(state, 263 + state->reg_offs, 6);
		dib7000m_write_word(state, 264 + state->reg_offs, 6);
		dib7000m_write_word(state, 266 + state->reg_offs,
				(state->div_sync_wait << 4) | (1 << 2) | (2 << 0));
	} else {
		dib7000m_write_word(state, 263 + state->reg_offs, 1);
		dib7000m_write_word(state, 264 + state->reg_offs, 0);
		dib7000m_write_word(state, 266 + state->reg_offs, 0);
	}

	return 0;
}

/* Run the SAD (slow ADC) calibration by pulsing the calibration bit. */
static int dib7000m_sad_calib(struct dib7000m_state *state)
{
	/* internal */
//	dib7000m_write_word(state, 928, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is writting in set_bandwidth
	dib7000m_write_word(state, 929, (0 << 1) | (0 << 0));
	dib7000m_write_word(state, 930, 776); // 0.625*3.3 / 4096

	/* do the calibration */
	dib7000m_write_word(state, 929, (1 << 0));
	dib7000m_write_word(state, 929, (0 << 0));

	msleep(1);
	return 0;
}

/* Write the PLL settings shared by the 7000M and 7000MC variants. */
static void dib7000m_reset_pll_common(struct dib7000m_state *state, const struct dibx000_bandwidth_config *bw)
{
	dib7000m_write_word(state, 18, (u16) (((bw->internal*1000) >> 16) & 0xffff));
	dib7000m_write_word(state, 19, (u16) ( (bw->internal*1000)       & 0xffff));
	dib7000m_write_word(state, 21, (u16) ( (bw->ifreq >> 16) & 0xffff));
	dib7000m_write_word(state, 22, (u16) (  bw->ifreq        & 0xffff));

	dib7000m_write_word(state, 928, bw->sad_cfg);
}

/* PLL setup for the original DiB7000M (revision 0x4000). */
static void dib7000m_reset_pll(struct dib7000m_state *state)
{
	const struct dibx000_bandwidth_config *bw = state->cfg.bw;
	u16 reg_907,reg_910;

	/* default */
	reg_907 = (bw->pll_bypass << 15) | (bw->modulo << 7) |
		(bw->ADClkSrc << 6) | (bw->IO_CLK_en_core << 5) |
		(bw->bypclk_div << 2) | (bw->enable_refdiv << 1) | (0 << 0);
	reg_910 = (((bw->pll_ratio >> 6) & 0x3) << 3) | (bw->pll_range << 1) | bw->pll_reset;

	// for this oscillator frequency should be 30 MHz for the Master (default values in the board_parameters give that value)
	// this is only working only for 30 MHz crystals
	if (!state->cfg.quartz_direct) {
		reg_910 |= (1 << 5);  // forcing the predivider to 1

		// if the previous front-end is baseband, its output frequency is 15 MHz (prev freq divided by 2)
		if (state->cfg.input_clk_is_div_2)
			reg_907 |= (16 << 9);
		else // otherwise the previous front-end puts out its input (default 30MHz) - no extra division necessary
			reg_907 |= (8 << 9);
	} else {
		reg_907 |= (bw->pll_ratio & 0x3f) << 9;
		reg_910 |= (bw->pll_prediv << 5);
	}

	dib7000m_write_word(state, 910, reg_910); // pll cfg
	dib7000m_write_word(state, 907, reg_907); // clk cfg0
	dib7000m_write_word(state, 908, 0x0006);  // clk_cfg1

	dib7000m_reset_pll_common(state, bw);
}

/* PLL setup for the DiB7000MC family (revisions after 0x4000). */
static void dib7000mc_reset_pll(struct dib7000m_state *state)
{
	const struct dibx000_bandwidth_config *bw = state->cfg.bw;
	u16 clk_cfg1;

	// clk_cfg0
	dib7000m_write_word(state, 907, (bw->pll_prediv << 8) | (bw->pll_ratio << 0));

	// clk_cfg1
	//dib7000m_write_word(state, 908, (1 << 14) | (3 << 12) |(0 << 11) |
	clk_cfg1 = (0 << 14) | (3 << 12) | (0 << 11) |
			(bw->IO_CLK_en_core << 10) | (bw->bypclk_div << 5) |
			(bw->enable_refdiv << 4) | (1 << 3) |
			(bw->pll_range << 1) | (bw->pll_reset << 0);

	dib7000m_write_word(state, 908, clk_cfg1);
	/* apply pll_bypass only after the first write, keeping the rest intact */
	clk_cfg1 = (clk_cfg1 & 0xfff7) | (bw->pll_bypass << 3);
	dib7000m_write_word(state, 908, clk_cfg1);

	// smpl_cfg
	dib7000m_write_word(state, 910, (1 << 12) | (2 << 10) | (bw->modulo << 8) | (bw->ADClkSrc << 7));

	dib7000m_reset_pll_common(state, bw);
}

/* Program the GPIO direction/value/PWM registers from the board config. */
static int dib7000m_reset_gpio(struct dib7000m_state *st)
{
	/* reset the GPIOs */
	dib7000m_write_word(st, 773, st->cfg.gpio_dir);
	dib7000m_write_word(st, 774, st->cfg.gpio_val);

	/* TODO 782 is P_gpio_od */

	dib7000m_write_word(st, 775, st->cfg.gpio_pwm_pos);

	dib7000m_write_word(st, 780, st->cfg.pwm_freq_div);
	return 0;
}

/*
 * Default register table, (count, start-register, values...) runs
 * terminated by 0 — consumed by dib7000m_write_tab().
 */
static u16 dib7000m_defaults_common[] = {
	// auto search configuration
	3, 2,
		0x0004,
		0x1000,
		0x0814,

	12, 6,
		0x001b,
		0x7740,
		0x005b,
		0x8d80,
		0x01c9,
		0xc380,
		0x0000,
		0x0080,
		0x0000,
		0x0090,
		0x0001,
		0xd4c0,

	1, 26,
		0x6680, // P_corm_thres

	// Lock algorithms configuration
	1, 170,
		0x0410, // P_palf_alpha_regul, P_palf_filter_freeze, P_palf_filter_on

	8, 173,
		0,
		0,
		0,
		0,
		0,
		0,
		0,
		0,

	1, 182,
		8192, // P_fft_nb_to_cut

	2, 195,
		0x0ccd, // P_pha3_thres
		0,      // P_cti_use_cpe, P_cti_use_prog

	1, 205,
		0x200f, // P_cspu_regul, P_cspu_win_cut

	5, 214,
		0x023d, // P_adp_regul_cnt
		0x00a4, // P_adp_noise_cnt
		0x00a4, // P_adp_regul_ext
		0x7ff0, // P_adp_noise_ext
		0x3ccc, // P_adp_fil

	1, 226,
		0, // P_2d_byp_ti_num

	1, 255,
		0x800, // P_equal_thres_wgn

	1, 263,
		0x0001,

	1, 281,
		0x0010, // P_fec_*

	1, 294,
		0x0062, // P_smo_mode, P_smo_rs_discard, P_smo_fifo_flush, P_smo_pid_parse, P_smo_error_discard

	0
};

static u16 dib7000m_defaults[] = {
	/* set ADC level to -16 */
	11, 76,
		(1 << 13) - 825 - 117,
		(1 << 13) - 837 - 117,
		(1 << 13) - 811 - 117,
		(1 << 13) - 766 - 117,
		(1 << 13) - 737 - 117,
		(1 << 13) - 693 - 117,
		(1 << 13) - 648 - 117,
		(1 << 13) - 619 - 117,
		(1 << 13) - 575 - 117,
		(1 << 13) - 531 - 117,
		(1 << 13) - 501 - 117,

	// Tuner IO bank: max drive (14mA)
	1, 912,
		0x2c8a,

	1, 1817,
		1,

	0,
};

/*
 * Full cold reset of the demod: power everything on, pulse the restart
 * registers, re-program the PLL, GPIOs, output mode and default register
 * tables, then drop back to interface-only power.
 */
static int dib7000m_demod_reset(struct dib7000m_state *state)
{
	dib7000m_set_power_mode(state, DIB7000M_POWER_ALL);

	/* always leave the VBG voltage on - it consumes almost nothing but takes a long time to start */
	dib7000m_set_adc_state(state, DIBX000_VBG_ENABLE);

	/* restart all parts */
	dib7000m_write_word(state, 898, 0xffff);
	dib7000m_write_word(state, 899, 0xffff);
	dib7000m_write_word(state, 900, 0xff0f);
	dib7000m_write_word(state, 901, 0xfffc);
	dib7000m_write_word(state, 898, 0);
	dib7000m_write_word(state, 899, 0);
	dib7000m_write_word(state, 900, 0);
	dib7000m_write_word(state, 901, 0);

	if (state->revision == 0x4000)
		dib7000m_reset_pll(state);
	else
		dib7000mc_reset_pll(state);

	if (dib7000m_reset_gpio(state) != 0)
		dprintk( "GPIO reset was not successful.");

	if (dib7000m_set_output_mode(state, OUTMODE_HIGH_Z) != 0)
		dprintk( "OUTPUT_MODE could not be reset.");

	/* unforce divstr regardless whether i2c enumeration was done or not */
	dib7000m_write_word(state, 1794, dib7000m_read_word(state, 1794) & ~(1 << 1) );

	dib7000m_set_bandwidth(state, 8000);

	/* slow-ADC calibration sequence */
	dib7000m_set_adc_state(state, DIBX000_SLOW_ADC_ON);
	dib7000m_sad_calib(state);
	dib7000m_set_adc_state(state, DIBX000_SLOW_ADC_OFF);

	if (state->cfg.dvbt_mode)
		dib7000m_write_word(state, 1796, 0x0); // select DVB-T output

	if (state->cfg.mobile_mode)
		dib7000m_write_word(state, 261 + state->reg_offs, 2);
	else
		dib7000m_write_word(state, 224 + state->reg_offs, 1);

	// P_iqc_alpha_pha, P_iqc_alpha_amp, P_iqc_dcc_alpha, ...
	if (state->cfg.tuner_is_baseband)
		dib7000m_write_word(state, 36, 0x0755);
	else
		dib7000m_write_word(state, 36, 0x1f55);

	// P_divclksel=3 P_divbitsel=1
	if (state->revision == 0x4000)
		dib7000m_write_word(state, 909, (3 << 10) | (1 << 6));
	else
		dib7000m_write_word(state, 909, (3 << 4) | 1);

	dib7000m_write_tab(state, dib7000m_defaults_common);
	dib7000m_write_tab(state, dib7000m_defaults);

	dib7000m_set_power_mode(state, DIB7000M_POWER_INTERFACE_ONLY);

	state->internal_clk = state->cfg.bw->internal;

	return 0;
}

/* Pulse P_restart_iqc & P_restart_agc to restart the AGC loop. */
static void dib7000m_restart_agc(struct dib7000m_state *state)
{
	// P_restart_iqc & P_restart_agc
	dib7000m_write_word(state, 898, 0x0c00);
	dib7000m_write_word(state, 898, 0x0000);
}

/*
 * Compute and apply the AGC soft-split offset from the current global AGC
 * value (register 390), interpolating between the configured thresholds.
 */
static int dib7000m_agc_soft_split(struct dib7000m_state *state)
{
	u16 agc,split_offset;

	if (!state->current_agc || !state->current_agc->perform_agc_softsplit ||
			state->current_agc->split.max == 0)
		return 0;

	// n_agc_global
	agc = dib7000m_read_word(state, 390);

	if (agc > state->current_agc->split.min_thres)
		split_offset = state->current_agc->split.min;
	else if (agc < state->current_agc->split.max_thres)
		split_offset = state->current_agc->split.max;
	else
		split_offset = state->current_agc->split.max *
			(agc - state->current_agc->split.min_thres) /
			(state->current_agc->split.max_thres - state->current_agc->split.min_thres);

	dprintk( "AGC split_offset: %d",split_offset);

	// P_agc_force_split and P_agc_split_offset
	return dib7000m_write_word(state, 103, (dib7000m_read_word(state, 103) & 0xff00) | split_offset);
}

/*
 * Ask the board callback whether the LNA needs to change for the current
 * gain; returns 1 (and restarts the AGC) if it did, 0 otherwise.
 */
static int dib7000m_update_lna(struct dib7000m_state *state)
{
	u16 dyn_gain;

	if (state->cfg.update_lna) {
		// read dyn_gain here (because it is demod-dependent and not fe)
		dyn_gain = dib7000m_read_word(state, 390);

		if (state->cfg.update_lna(&state->demod,dyn_gain)) { // LNA has changed
			dib7000m_restart_agc(state);
			return 1;
		}
	}
	return 0;
}

/*
 * Select and program the AGC configuration matching the given RF band.
 * (Continues in the next chunk.)
 */
static int dib7000m_set_agc_config(struct dib7000m_state *state, u8 band)
{
	struct dibx000_agc_config *agc = NULL;
	int i;

	if (state->current_band ==
band && state->current_agc != NULL)
		return 0; /* this band is already configured */
	state->current_band = band;

	/* pick the first AGC config whose band capabilities cover this band */
	for (i = 0; i < state->cfg.agc_config_count; i++)
		if (state->cfg.agc[i].band_caps & band) {
			agc = &state->cfg.agc[i];
			break;
		}

	if (agc == NULL) {
		dprintk( "no valid AGC configuration found for band 0x%02x",band);
		return -EINVAL;
	}

	state->current_agc = agc;

	/* AGC */
	dib7000m_write_word(state, 72 ,  agc->setup);
	dib7000m_write_word(state, 73 ,  agc->inv_gain);
	dib7000m_write_word(state, 74 ,  agc->time_stabiliz);
	dib7000m_write_word(state, 97 , (agc->alpha_level << 12) | agc->thlock);

	// Demod AGC loop configuration
	dib7000m_write_word(state, 98, (agc->alpha_mant << 5) | agc->alpha_exp);
	dib7000m_write_word(state, 99, (agc->beta_mant  << 6) | agc->beta_exp);

	dprintk( "WBD: ref: %d, sel: %d, active: %d, alpha: %d",
		state->wbd_ref != 0 ? state->wbd_ref : agc->wbd_ref, agc->wbd_sel, !agc->perform_agc_softsplit, agc->wbd_sel);

	/* AGC continued */
	if (state->wbd_ref != 0)
		dib7000m_write_word(state, 102, state->wbd_ref);
	else // use default
		dib7000m_write_word(state, 102, agc->wbd_ref);

	dib7000m_write_word(state, 103, (agc->wbd_alpha << 9) | (agc->perform_agc_softsplit << 8) );
	dib7000m_write_word(state, 104,  agc->agc1_max);
	dib7000m_write_word(state, 105,  agc->agc1_min);
	dib7000m_write_word(state, 106,  agc->agc2_max);
	dib7000m_write_word(state, 107,  agc->agc2_min);
	dib7000m_write_word(state, 108, (agc->agc1_pt1    << 8) | agc->agc1_pt2 );
	dib7000m_write_word(state, 109, (agc->agc1_slope1 << 8) | agc->agc1_slope2);
	dib7000m_write_word(state, 110, (agc->agc2_pt1    << 8) | agc->agc2_pt2);
	dib7000m_write_word(state, 111, (agc->agc2_slope1 << 8) | agc->agc2_slope2);

	if (state->revision > 0x4000) { // settings for the MC
		dib7000m_write_word(state, 71,   agc->agc1_pt3);
//		dprintk( "929: %x %d %d",
//			(dib7000m_read_word(state, 929) & 0xffe3) | (agc->wbd_inv << 4) | (agc->wbd_sel << 2), agc->wbd_inv, agc->wbd_sel);
		dib7000m_write_word(state, 929, (dib7000m_read_word(state, 929) & 0xffe3) | (agc->wbd_inv << 4) | (agc->wbd_sel << 2));
	} else {
		// wrong default values
		u16 b[9] = { 676, 696, 717, 737, 758, 778, 799, 819, 840 };
		for (i = 0; i < 9; i++)
			dib7000m_write_word(state, 88 + i, b[i]);
	}
	return 0;
}

/*
 * Read the timing-frequency tracked by the demod (registers 436/437),
 * normalize it back to the 8 MHz reference and cache it in state->timf.
 */
static void dib7000m_update_timf(struct dib7000m_state *state)
{
	u32 timf = (dib7000m_read_word(state, 436) << 16) | dib7000m_read_word(state, 437);
	state->timf = timf * 160 / (state->current_bandwidth / 50);
	dib7000m_write_word(state, 23, (u16) (timf >> 16));
	dib7000m_write_word(state, 24, (u16) (timf & 0xffff));
	dprintk( "updated timf_frequency: %d (default: %d)",state->timf, state->timf_default);
}

/*
 * AGC startup state machine, driven repeatedly by the caller. Returns the
 * number of milliseconds to wait before the next call, or -1 when done.
 * state->agc_state holds the current step.
 */
static int dib7000m_agc_startup(struct dvb_frontend *demod, struct dvb_frontend_parameters *ch)
{
	struct dib7000m_state *state = demod->demodulator_priv;
	u16 cfg_72 = dib7000m_read_word(state, 72);
	int ret = -1;
	u8 *agc_state = &state->agc_state;
	u8 agc_split;

	switch (state->agc_state) {
		case 0:
			// set power-up level: interf+analog+AGC
			dib7000m_set_power_mode(state, DIB7000M_POWER_INTERF_ANALOG_AGC);
			dib7000m_set_adc_state(state, DIBX000_ADC_ON);

			if (dib7000m_set_agc_config(state, BAND_OF_FREQUENCY(ch->frequency/1000)) != 0)
				return -1;

			ret = 7; /* ADC power up */
			(*agc_state)++;
			break;

		case 1:
			/* AGC initialization */
			if (state->cfg.agc_control)
				state->cfg.agc_control(&state->demod, 1);

			dib7000m_write_word(state, 75, 32768);
			if (!state->current_agc->perform_agc_softsplit) {
				/* we are using the wbd - so slow AGC startup */
				dib7000m_write_word(state, 103, 1 << 8); /* force 0 split on WBD and restart AGC */
				(*agc_state)++;
				ret = 5;
			} else {
				/* default AGC startup */
				(*agc_state) = 4;
				/* wait AGC rough lock time */
				ret = 7;
			}

			dib7000m_restart_agc(state);
			break;

		case 2: /* fast split search path after 5sec */
			dib7000m_write_word(state,  72, cfg_72 | (1 << 4)); /* freeze AGC loop */
			dib7000m_write_word(state, 103, 2 << 9);            /* fast split search 0.25kHz */
			(*agc_state)++;
			ret = 14;
			break;

	case 3: /* split search ended */
			agc_split = (u8)dib7000m_read_word(state, 392); /* store the
split value for the next time */
			dib7000m_write_word(state, 75, dib7000m_read_word(state, 390)); /* set AGC gain start value */

			dib7000m_write_word(state, 72, cfg_72 & ~(1 << 4));   /* std AGC loop */
			dib7000m_write_word(state, 103, (state->current_agc->wbd_alpha << 9) | agc_split); /* standard split search */

			dib7000m_restart_agc(state);

			dprintk( "SPLIT %p: %hd", demod, agc_split);

			(*agc_state)++;
			ret = 5;
			break;

		case 4: /* LNA startup */
			/* wait AGC accurate lock time */
			ret = 7;

			if (dib7000m_update_lna(state))
				// wait only AGC rough lock time
				ret = 5;
			else
				(*agc_state)++;
			break;

		case 5:
			dib7000m_agc_soft_split(state);

			if (state->cfg.agc_control)
				state->cfg.agc_control(&state->demod, 0);
			(*agc_state)++;
			break;

		default:
			break;
	}
	return ret;
}

/*
 * Program the demod for a specific channel: TPS parameters, offset-loop
 * constants, diversity sync wait and channel-estimation coefficients, then
 * switch to autosearch power level. seq selects the autosearch sequence.
 */
static void dib7000m_set_channel(struct dib7000m_state *state, struct dvb_frontend_parameters *ch, u8 seq)
{
	u16 value, est[4];

	dib7000m_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->u.ofdm.bandwidth));

	/* nfft, guard, qam, alpha */
	value = 0;
	switch (ch->u.ofdm.transmission_mode) {
		case TRANSMISSION_MODE_2K: value |= (0 << 7); break;
		case TRANSMISSION_MODE_4K: value |= (2 << 7); break;
		default:
		case TRANSMISSION_MODE_8K: value |= (1 << 7); break;
	}
	switch (ch->u.ofdm.guard_interval) {
		case GUARD_INTERVAL_1_32: value |= (0 << 5); break;
		case GUARD_INTERVAL_1_16: value |= (1 << 5); break;
		case GUARD_INTERVAL_1_4:  value |= (3 << 5); break;
		default:
		case GUARD_INTERVAL_1_8:  value |= (2 << 5); break;
	}
	switch (ch->u.ofdm.constellation) {
		case QPSK:   value |= (0 << 3); break;
		case QAM_16: value |= (1 << 3); break;
		default:
		case QAM_64: value |= (2 << 3); break;
	}
	/* NOTE(review): hierarchy is deliberately hard-coded to HIERARCHY_1
	 * here, so only the default branch is ever taken */
	switch (HIERARCHY_1) {
		case HIERARCHY_2: value |= 2; break;
		case HIERARCHY_4: value |= 4; break;
		default:
		case HIERARCHY_1: value |= 1; break;
	}
	dib7000m_write_word(state, 0, value);
	dib7000m_write_word(state, 5, (seq << 4));

	/* P_dintl_native, P_dintlv_inv, P_hrch, P_code_rate, P_select_hp */
	/* NOTE(review): the literal 1s below hard-code native interleaver and
	 * HP stream selection */
	value = 0;
	if (1 != 0)
		value |= (1 << 6);
	if (ch->u.ofdm.hierarchy_information == 1)
		value |= (1 << 4);
	if (1 == 1)
		value |= 1;
	switch ((ch->u.ofdm.hierarchy_information == 0 || 1 == 1) ? ch->u.ofdm.code_rate_HP : ch->u.ofdm.code_rate_LP) {
		case FEC_2_3: value |= (2 << 1); break;
		case FEC_3_4: value |= (3 << 1); break;
		case FEC_5_6: value |= (5 << 1); break;
		case FEC_7_8: value |= (7 << 1); break;
		default:
		case FEC_1_2: value |= (1 << 1); break;
	}
	dib7000m_write_word(state, 267 + state->reg_offs, value);

	/* offset loop parameters */

	/* P_timf_alpha = 6, P_corm_alpha=6, P_corm_thres=0x80 */
	dib7000m_write_word(state, 26, (6 << 12) | (6 << 8) | 0x80);

	/* P_ctrl_inh_cor=0, P_ctrl_alpha_cor=4, P_ctrl_inh_isi=1, P_ctrl_alpha_isi=3, P_ctrl_inh_cor4=1, P_ctrl_alpha_cor4=3 */
	dib7000m_write_word(state, 29, (0 << 14) | (4 << 10) | (1 << 9) | (3 << 5) | (1 << 4) | (0x3));

	/* P_ctrl_freeze_pha_shift=0, P_ctrl_pha_off_max=3 */
	dib7000m_write_word(state, 32, (0 << 4) | 0x3);

	/* P_ctrl_sfreq_inh=0, P_ctrl_sfreq_step=5 */
	dib7000m_write_word(state, 33, (0 << 4) | 0x5);

	/* P_dvsy_sync_wait */
	switch (ch->u.ofdm.transmission_mode) {
		case TRANSMISSION_MODE_8K: value = 256; break;
		case TRANSMISSION_MODE_4K: value = 128; break;
		case TRANSMISSION_MODE_2K:
		default: value = 64; break;
	}
	switch (ch->u.ofdm.guard_interval) {
		case GUARD_INTERVAL_1_16: value *= 2; break;
		case GUARD_INTERVAL_1_8:  value *= 4; break;
		case GUARD_INTERVAL_1_4:  value *= 8; break;
		default:
		case GUARD_INTERVAL_1_32: value *= 1; break;
	}
	state->div_sync_wait = (value * 3) / 2 + 32; // add 50% SFN margin + compensate for one DVSY-fifo TODO

	/* deactive the possibility of diversity reception if extended interleave - not for 7000MC */
	/* P_dvsy_sync_mode = 0, P_dvsy_sync_enable=1, P_dvcb_comb_mode=2 */
	if (1 == 1 || state->revision > 0x4000)
		state->div_force_off = 0;
	else
		state->div_force_off = 1;
	dib7000m_set_diversity_in(&state->demod, state->div_state);

	/* channel estimation fine configuration */
	switch (ch->u.ofdm.constellation) {
		case QAM_64:
			est[0] = 0x0148; /* P_adp_regul_cnt 0.04 */
			est[1] = 0xfff0; /* P_adp_noise_cnt -0.002 */
			est[2] = 0x00a4; /* P_adp_regul_ext 0.02 */
			est[3] = 0xfff8; /* P_adp_noise_ext -0.001 */
			break;
		case QAM_16:
			est[0] = 0x023d; /* P_adp_regul_cnt 0.07 */
			est[1] = 0xffdf; /* P_adp_noise_cnt -0.004 */
			est[2] = 0x00a4; /* P_adp_regul_ext 0.02 */
			est[3] = 0xfff0; /* P_adp_noise_ext -0.002 */
			break;
		default:
			est[0] = 0x099a; /* P_adp_regul_cnt 0.3 */
			est[1] = 0xffae; /* P_adp_noise_cnt -0.01 */
			est[2] = 0x0333; /* P_adp_regul_ext 0.1 */
			est[3] = 0xfff8; /* P_adp_noise_ext -0.002 */
			break;
	}
	for (value = 0; value < 4; value++)
		dib7000m_write_word(state, 214 + value + state->reg_offs, est[value]);

	// set power-up level: autosearch
	dib7000m_set_power_mode(state, DIB7000M_POWER_COR4_DINTLV_ICIRM_EQUAL_CFROD);
}

/*
 * Kick off a blind parameter search: program a worst-case channel (QAM-64,
 * 8K, GI 1/32) and the per-stage lock wait times, then trigger the search.
 * (Continues in the next chunk.)
 */
static int dib7000m_autosearch_start(struct dvb_frontend *demod, struct dvb_frontend_parameters *ch)
{
	struct dib7000m_state *state = demod->demodulator_priv;
	struct dvb_frontend_parameters schan;
	int ret = 0;
	u32 value, factor;

	schan = *ch;

	schan.u.ofdm.constellation         = QAM_64;
	schan.u.ofdm.guard_interval        = GUARD_INTERVAL_1_32;
	schan.u.ofdm.transmission_mode     = TRANSMISSION_MODE_8K;
	schan.u.ofdm.code_rate_HP          = FEC_2_3;
	schan.u.ofdm.code_rate_LP          = FEC_3_4;
	schan.u.ofdm.hierarchy_information = 0;

	dib7000m_set_channel(state, &schan, 7);

	factor = BANDWIDTH_TO_KHZ(ch->u.ofdm.bandwidth);
	if (factor >= 5000)
		factor = 1;
	else // always use the setting for 8MHz here lock_time for 7,6 MHz are longer
		factor = 6;

	value = 30 * state->internal_clk * factor;
	ret |= dib7000m_write_word(state, 6,  (u16) ((value >> 16) & 0xffff)); // lock0 wait time
	ret |= dib7000m_write_word(state, 7,  (u16)  (value        & 0xffff)); // lock0 wait time
	value = 100 * state->internal_clk * factor;
	ret |= dib7000m_write_word(state, 8,  (u16) ((value >> 16) & 0xffff)); // lock1 wait time
	ret |= dib7000m_write_word(state, 9,  (u16)  (value        & 0xffff)); // lock1 wait time
	value = 500 * state->internal_clk * factor;
	ret |= dib7000m_write_word(state, 10, (u16) ((value >> 16) &
0xffff)); // lock2 wait time ret |= dib7000m_write_word(state, 11, (u16) (value & 0xffff)); // lock2 wait time // start search value = dib7000m_read_word(state, 0); ret |= dib7000m_write_word(state, 0, (u16) (value | (1 << 9))); /* clear n_irq_pending */ if (state->revision == 0x4000) dib7000m_write_word(state, 1793, 0); else dib7000m_read_word(state, 537); ret |= dib7000m_write_word(state, 0, (u16) value); return ret; } static int dib7000m_autosearch_irq(struct dib7000m_state *state, u16 reg) { u16 irq_pending = dib7000m_read_word(state, reg); if (irq_pending & 0x1) { // failed dprintk( "autosearch failed"); return 1; } if (irq_pending & 0x2) { // succeeded dprintk( "autosearch succeeded"); return 2; } return 0; // still pending } static int dib7000m_autosearch_is_irq(struct dvb_frontend *demod) { struct dib7000m_state *state = demod->demodulator_priv; if (state->revision == 0x4000) return dib7000m_autosearch_irq(state, 1793); else return dib7000m_autosearch_irq(state, 537); } static int dib7000m_tune(struct dvb_frontend *demod, struct dvb_frontend_parameters *ch) { struct dib7000m_state *state = demod->demodulator_priv; int ret = 0; u16 value; // we are already tuned - just resuming from suspend if (ch != NULL) dib7000m_set_channel(state, ch, 0); else return -EINVAL; // restart demod ret |= dib7000m_write_word(state, 898, 0x4000); ret |= dib7000m_write_word(state, 898, 0x0000); msleep(45); dib7000m_set_power_mode(state, DIB7000M_POWER_COR4_CRY_ESRAM_MOUT_NUD); /* P_ctrl_inh_cor=0, P_ctrl_alpha_cor=4, P_ctrl_inh_isi=0, P_ctrl_alpha_isi=3, P_ctrl_inh_cor4=1, P_ctrl_alpha_cor4=3 */ ret |= dib7000m_write_word(state, 29, (0 << 14) | (4 << 10) | (0 << 9) | (3 << 5) | (1 << 4) | (0x3)); // never achieved a lock before - wait for timfreq to update if (state->timf == 0) msleep(200); //dump_reg(state); /* P_timf_alpha, P_corm_alpha=6, P_corm_thres=0x80 */ value = (6 << 8) | 0x80; switch (ch->u.ofdm.transmission_mode) { case TRANSMISSION_MODE_2K: value |= (7 << 12); break; 
case TRANSMISSION_MODE_4K: value |= (8 << 12); break; default: case TRANSMISSION_MODE_8K: value |= (9 << 12); break; } ret |= dib7000m_write_word(state, 26, value); /* P_ctrl_freeze_pha_shift=0, P_ctrl_pha_off_max */ value = (0 << 4); switch (ch->u.ofdm.transmission_mode) { case TRANSMISSION_MODE_2K: value |= 0x6; break; case TRANSMISSION_MODE_4K: value |= 0x7; break; default: case TRANSMISSION_MODE_8K: value |= 0x8; break; } ret |= dib7000m_write_word(state, 32, value); /* P_ctrl_sfreq_inh=0, P_ctrl_sfreq_step */ value = (0 << 4); switch (ch->u.ofdm.transmission_mode) { case TRANSMISSION_MODE_2K: value |= 0x6; break; case TRANSMISSION_MODE_4K: value |= 0x7; break; default: case TRANSMISSION_MODE_8K: value |= 0x8; break; } ret |= dib7000m_write_word(state, 33, value); // we achieved a lock - it's time to update the timf freq if ((dib7000m_read_word(state, 535) >> 6) & 0x1) dib7000m_update_timf(state); dib7000m_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->u.ofdm.bandwidth)); return ret; } static int dib7000m_wakeup(struct dvb_frontend *demod) { struct dib7000m_state *state = demod->demodulator_priv; dib7000m_set_power_mode(state, DIB7000M_POWER_ALL); if (dib7000m_set_adc_state(state, DIBX000_SLOW_ADC_ON) != 0) dprintk( "could not start Slow ADC"); return 0; } static int dib7000m_sleep(struct dvb_frontend *demod) { struct dib7000m_state *st = demod->demodulator_priv; dib7000m_set_output_mode(st, OUTMODE_HIGH_Z); dib7000m_set_power_mode(st, DIB7000M_POWER_INTERFACE_ONLY); return dib7000m_set_adc_state(st, DIBX000_SLOW_ADC_OFF) | dib7000m_set_adc_state(st, DIBX000_ADC_OFF); } static int dib7000m_identify(struct dib7000m_state *state) { u16 value; if ((value = dib7000m_read_word(state, 896)) != 0x01b3) { dprintk( "wrong Vendor ID (0x%x)",value); return -EREMOTEIO; } state->revision = dib7000m_read_word(state, 897); if (state->revision != 0x4000 && state->revision != 0x4001 && state->revision != 0x4002 && state->revision != 0x4003) { dprintk( "wrong Device ID 
(0x%x)",value); return -EREMOTEIO; } /* protect this driver to be used with 7000PC */ if (state->revision == 0x4000 && dib7000m_read_word(state, 769) == 0x4000) { dprintk( "this driver does not work with DiB7000PC"); return -EREMOTEIO; } switch (state->revision) { case 0x4000: dprintk( "found DiB7000MA/PA/MB/PB"); break; case 0x4001: state->reg_offs = 1; dprintk( "found DiB7000HC"); break; case 0x4002: state->reg_offs = 1; dprintk( "found DiB7000MC"); break; case 0x4003: state->reg_offs = 1; dprintk( "found DiB9000"); break; } return 0; } static int dib7000m_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *fep) { struct dib7000m_state *state = fe->demodulator_priv; u16 tps = dib7000m_read_word(state,480); fep->inversion = INVERSION_AUTO; fep->u.ofdm.bandwidth = state->current_bandwidth; switch ((tps >> 8) & 0x3) { case 0: fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_2K; break; case 1: fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_8K; break; /* case 2: fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_4K; break; */ } switch (tps & 0x3) { case 0: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_32; break; case 1: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_16; break; case 2: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_8; break; case 3: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_4; break; } switch ((tps >> 14) & 0x3) { case 0: fep->u.ofdm.constellation = QPSK; break; case 1: fep->u.ofdm.constellation = QAM_16; break; case 2: default: fep->u.ofdm.constellation = QAM_64; break; } /* as long as the frontend_param structure is fixed for hierarchical transmission I refuse to use it */ /* (tps >> 13) & 0x1 == hrch is used, (tps >> 10) & 0x7 == alpha */ fep->u.ofdm.hierarchy_information = HIERARCHY_NONE; switch ((tps >> 5) & 0x7) { case 1: fep->u.ofdm.code_rate_HP = FEC_1_2; break; case 2: fep->u.ofdm.code_rate_HP = FEC_2_3; break; case 3: fep->u.ofdm.code_rate_HP = FEC_3_4; break; case 5: fep->u.ofdm.code_rate_HP = FEC_5_6; break; case 7: 
default: fep->u.ofdm.code_rate_HP = FEC_7_8; break; } switch ((tps >> 2) & 0x7) { case 1: fep->u.ofdm.code_rate_LP = FEC_1_2; break; case 2: fep->u.ofdm.code_rate_LP = FEC_2_3; break; case 3: fep->u.ofdm.code_rate_LP = FEC_3_4; break; case 5: fep->u.ofdm.code_rate_LP = FEC_5_6; break; case 7: default: fep->u.ofdm.code_rate_LP = FEC_7_8; break; } /* native interleaver: (dib7000m_read_word(state, 481) >> 5) & 0x1 */ return 0; } static int dib7000m_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *fep) { struct dib7000m_state *state = fe->demodulator_priv; int time, ret; dib7000m_set_output_mode(state, OUTMODE_HIGH_Z); state->current_bandwidth = fep->u.ofdm.bandwidth; dib7000m_set_bandwidth(state, BANDWIDTH_TO_KHZ(fep->u.ofdm.bandwidth)); if (fe->ops.tuner_ops.set_params) fe->ops.tuner_ops.set_params(fe, fep); /* start up the AGC */ state->agc_state = 0; do { time = dib7000m_agc_startup(fe, fep); if (time != -1) msleep(time); } while (time != -1); if (fep->u.ofdm.transmission_mode == TRANSMISSION_MODE_AUTO || fep->u.ofdm.guard_interval == GUARD_INTERVAL_AUTO || fep->u.ofdm.constellation == QAM_AUTO || fep->u.ofdm.code_rate_HP == FEC_AUTO) { int i = 800, found; dib7000m_autosearch_start(fe, fep); do { msleep(1); found = dib7000m_autosearch_is_irq(fe); } while (found == 0 && i--); dprintk("autosearch returns: %d",found); if (found == 0 || found == 1) return 0; // no channel found dib7000m_get_frontend(fe, fep); } ret = dib7000m_tune(fe, fep); /* make this a config parameter */ dib7000m_set_output_mode(state, OUTMODE_MPEG2_FIFO); return ret; } static int dib7000m_read_status(struct dvb_frontend *fe, fe_status_t *stat) { struct dib7000m_state *state = fe->demodulator_priv; u16 lock = dib7000m_read_word(state, 535); *stat = 0; if (lock & 0x8000) *stat |= FE_HAS_SIGNAL; if (lock & 0x3000) *stat |= FE_HAS_CARRIER; if (lock & 0x0100) *stat |= FE_HAS_VITERBI; if (lock & 0x0010) *stat |= FE_HAS_SYNC; if (lock & 0x0008) *stat |= FE_HAS_LOCK; return 0; } 
static int dib7000m_read_ber(struct dvb_frontend *fe, u32 *ber) { struct dib7000m_state *state = fe->demodulator_priv; *ber = (dib7000m_read_word(state, 526) << 16) | dib7000m_read_word(state, 527); return 0; } static int dib7000m_read_unc_blocks(struct dvb_frontend *fe, u32 *unc) { struct dib7000m_state *state = fe->demodulator_priv; *unc = dib7000m_read_word(state, 534); return 0; } static int dib7000m_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { struct dib7000m_state *state = fe->demodulator_priv; u16 val = dib7000m_read_word(state, 390); *strength = 65535 - val; return 0; } static int dib7000m_read_snr(struct dvb_frontend* fe, u16 *snr) { *snr = 0x0000; return 0; } static int dib7000m_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings *tune) { tune->min_delay_ms = 1000; return 0; } static void dib7000m_release(struct dvb_frontend *demod) { struct dib7000m_state *st = demod->demodulator_priv; dibx000_exit_i2c_master(&st->i2c_master); kfree(st); } struct i2c_adapter * dib7000m_get_i2c_master(struct dvb_frontend *demod, enum dibx000_i2c_interface intf, int gating) { struct dib7000m_state *st = demod->demodulator_priv; return dibx000_get_i2c_adapter(&st->i2c_master, intf, gating); } EXPORT_SYMBOL(dib7000m_get_i2c_master); int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff) { struct dib7000m_state *state = fe->demodulator_priv; u16 val = dib7000m_read_word(state, 294 + state->reg_offs) & 0xffef; val |= (onoff & 0x1) << 4; dprintk("PID filter enabled %d", onoff); return dib7000m_write_word(state, 294 + state->reg_offs, val); } EXPORT_SYMBOL(dib7000m_pid_filter_ctrl); int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff) { struct dib7000m_state *state = fe->demodulator_priv; dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff); return dib7000m_write_word(state, 300 + state->reg_offs + id, onoff ? 
(1 << 13) | pid : 0); } EXPORT_SYMBOL(dib7000m_pid_filter); #if 0 /* used with some prototype boards */ int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, struct dib7000m_config cfg[]) { struct dib7000m_state st = { .i2c_adap = i2c }; int k = 0; u8 new_addr = 0; for (k = no_of_demods-1; k >= 0; k--) { st.cfg = cfg[k]; /* designated i2c address */ new_addr = (0x40 + k) << 1; st.i2c_addr = new_addr; if (dib7000m_identify(&st) != 0) { st.i2c_addr = default_addr; if (dib7000m_identify(&st) != 0) { dprintk("DiB7000M #%d: not identified", k); return -EIO; } } /* start diversity to pull_down div_str - just for i2c-enumeration */ dib7000m_set_output_mode(&st, OUTMODE_DIVERSITY); dib7000m_write_word(&st, 1796, 0x0); // select DVB-T output /* set new i2c address and force divstart */ dib7000m_write_word(&st, 1794, (new_addr << 2) | 0x2); dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr); } for (k = 0; k < no_of_demods; k++) { st.cfg = cfg[k]; st.i2c_addr = (0x40 + k) << 1; // unforce divstr dib7000m_write_word(&st,1794, st.i2c_addr << 2); /* deactivate div - it was just for i2c-enumeration */ dib7000m_set_output_mode(&st, OUTMODE_HIGH_Z); } return 0; } EXPORT_SYMBOL(dib7000m_i2c_enumeration); #endif static struct dvb_frontend_ops dib7000m_ops; struct dvb_frontend * dib7000m_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000m_config *cfg) { struct dvb_frontend *demod; struct dib7000m_state *st; st = kzalloc(sizeof(struct dib7000m_state), GFP_KERNEL); if (st == NULL) return NULL; memcpy(&st->cfg, cfg, sizeof(struct dib7000m_config)); st->i2c_adap = i2c_adap; st->i2c_addr = i2c_addr; demod = &st->demod; demod->demodulator_priv = st; memcpy(&st->demod.ops, &dib7000m_ops, sizeof(struct dvb_frontend_ops)); mutex_init(&st->i2c_buffer_lock); st->timf_default = cfg->bw->timf; if (dib7000m_identify(st) != 0) goto error; if (st->revision == 0x4000) dibx000_init_i2c_master(&st->i2c_master, DIB7000, st->i2c_adap, 
st->i2c_addr); else dibx000_init_i2c_master(&st->i2c_master, DIB7000MC, st->i2c_adap, st->i2c_addr); dib7000m_demod_reset(st); return demod; error: kfree(st); return NULL; } EXPORT_SYMBOL(dib7000m_attach); static struct dvb_frontend_ops dib7000m_ops = { .info = { .name = "DiBcom 7000MA/MB/PA/PB/MC", .type = FE_OFDM, .frequency_min = 44250000, .frequency_max = 867250000, .frequency_stepsize = 62500, .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_RECOVER | FE_CAN_HIERARCHY_AUTO, }, .release = dib7000m_release, .init = dib7000m_wakeup, .sleep = dib7000m_sleep, .set_frontend = dib7000m_set_frontend, .get_tune_settings = dib7000m_fe_get_tune_settings, .get_frontend = dib7000m_get_frontend, .read_status = dib7000m_read_status, .read_ber = dib7000m_read_ber, .read_signal_strength = dib7000m_read_signal_strength, .read_snr = dib7000m_read_snr, .read_ucblocks = dib7000m_read_unc_blocks, }; MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); MODULE_DESCRIPTION("Driver for the DiBcom 7000MA/MB/PA/PB/MC COFDM demodulator"); MODULE_LICENSE("GPL");
gpl-2.0
qtekfun/kernel_htc_msm8939
arch/mips/jz4740/gpio.c
3396
13857
/* * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> * JZ4740 platform GPIO support * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <asm/mach-jz4740/base.h> #include "irq.h" #define JZ4740_GPIO_BASE_A (32*0) #define JZ4740_GPIO_BASE_B (32*1) #define JZ4740_GPIO_BASE_C (32*2) #define JZ4740_GPIO_BASE_D (32*3) #define JZ4740_GPIO_NUM_A 32 #define JZ4740_GPIO_NUM_B 32 #define JZ4740_GPIO_NUM_C 31 #define JZ4740_GPIO_NUM_D 32 #define JZ4740_IRQ_GPIO_BASE_A (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_A) #define JZ4740_IRQ_GPIO_BASE_B (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_B) #define JZ4740_IRQ_GPIO_BASE_C (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_C) #define JZ4740_IRQ_GPIO_BASE_D (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_D) #define JZ_REG_GPIO_PIN 0x00 #define JZ_REG_GPIO_DATA 0x10 #define JZ_REG_GPIO_DATA_SET 0x14 #define JZ_REG_GPIO_DATA_CLEAR 0x18 #define JZ_REG_GPIO_MASK 0x20 #define JZ_REG_GPIO_MASK_SET 0x24 #define JZ_REG_GPIO_MASK_CLEAR 0x28 #define JZ_REG_GPIO_PULL 0x30 #define JZ_REG_GPIO_PULL_SET 0x34 #define JZ_REG_GPIO_PULL_CLEAR 0x38 #define JZ_REG_GPIO_FUNC 0x40 #define JZ_REG_GPIO_FUNC_SET 0x44 #define JZ_REG_GPIO_FUNC_CLEAR 0x48 #define JZ_REG_GPIO_SELECT 0x50 #define JZ_REG_GPIO_SELECT_SET 0x54 #define JZ_REG_GPIO_SELECT_CLEAR 0x58 #define JZ_REG_GPIO_DIRECTION 0x60 #define 
JZ_REG_GPIO_DIRECTION_SET 0x64 #define JZ_REG_GPIO_DIRECTION_CLEAR 0x68 #define JZ_REG_GPIO_TRIGGER 0x70 #define JZ_REG_GPIO_TRIGGER_SET 0x74 #define JZ_REG_GPIO_TRIGGER_CLEAR 0x78 #define JZ_REG_GPIO_FLAG 0x80 #define JZ_REG_GPIO_FLAG_CLEAR 0x14 #define GPIO_TO_BIT(gpio) BIT(gpio & 0x1f) #define GPIO_TO_REG(gpio, reg) (gpio_to_jz_gpio_chip(gpio)->base + (reg)) #define CHIP_TO_REG(chip, reg) (gpio_chip_to_jz_gpio_chip(chip)->base + (reg)) struct jz_gpio_chip { unsigned int irq; unsigned int irq_base; uint32_t edge_trigger_both; void __iomem *base; struct gpio_chip gpio_chip; }; static struct jz_gpio_chip jz4740_gpio_chips[]; static inline struct jz_gpio_chip *gpio_to_jz_gpio_chip(unsigned int gpio) { return &jz4740_gpio_chips[gpio >> 5]; } static inline struct jz_gpio_chip *gpio_chip_to_jz_gpio_chip(struct gpio_chip *gpio_chip) { return container_of(gpio_chip, struct jz_gpio_chip, gpio_chip); } static inline struct jz_gpio_chip *irq_to_jz_gpio_chip(struct irq_data *data) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); return gc->private; } static inline void jz_gpio_write_bit(unsigned int gpio, unsigned int reg) { writel(GPIO_TO_BIT(gpio), GPIO_TO_REG(gpio, reg)); } int jz_gpio_set_function(int gpio, enum jz_gpio_function function) { if (function == JZ_GPIO_FUNC_NONE) { jz_gpio_write_bit(gpio, JZ_REG_GPIO_FUNC_CLEAR); jz_gpio_write_bit(gpio, JZ_REG_GPIO_SELECT_CLEAR); jz_gpio_write_bit(gpio, JZ_REG_GPIO_TRIGGER_CLEAR); } else { jz_gpio_write_bit(gpio, JZ_REG_GPIO_FUNC_SET); jz_gpio_write_bit(gpio, JZ_REG_GPIO_TRIGGER_CLEAR); switch (function) { case JZ_GPIO_FUNC1: jz_gpio_write_bit(gpio, JZ_REG_GPIO_SELECT_CLEAR); break; case JZ_GPIO_FUNC3: jz_gpio_write_bit(gpio, JZ_REG_GPIO_TRIGGER_SET); case JZ_GPIO_FUNC2: /* Falltrough */ jz_gpio_write_bit(gpio, JZ_REG_GPIO_SELECT_SET); break; default: BUG(); break; } } return 0; } EXPORT_SYMBOL_GPL(jz_gpio_set_function); int jz_gpio_bulk_request(const struct jz_gpio_bulk_request *request, size_t num) { size_t 
i; int ret; for (i = 0; i < num; ++i, ++request) { ret = gpio_request(request->gpio, request->name); if (ret) goto err; jz_gpio_set_function(request->gpio, request->function); } return 0; err: for (--request; i > 0; --i, --request) { gpio_free(request->gpio); jz_gpio_set_function(request->gpio, JZ_GPIO_FUNC_NONE); } return ret; } EXPORT_SYMBOL_GPL(jz_gpio_bulk_request); void jz_gpio_bulk_free(const struct jz_gpio_bulk_request *request, size_t num) { size_t i; for (i = 0; i < num; ++i, ++request) { gpio_free(request->gpio); jz_gpio_set_function(request->gpio, JZ_GPIO_FUNC_NONE); } } EXPORT_SYMBOL_GPL(jz_gpio_bulk_free); void jz_gpio_bulk_suspend(const struct jz_gpio_bulk_request *request, size_t num) { size_t i; for (i = 0; i < num; ++i, ++request) { jz_gpio_set_function(request->gpio, JZ_GPIO_FUNC_NONE); jz_gpio_write_bit(request->gpio, JZ_REG_GPIO_DIRECTION_CLEAR); jz_gpio_write_bit(request->gpio, JZ_REG_GPIO_PULL_SET); } } EXPORT_SYMBOL_GPL(jz_gpio_bulk_suspend); void jz_gpio_bulk_resume(const struct jz_gpio_bulk_request *request, size_t num) { size_t i; for (i = 0; i < num; ++i, ++request) jz_gpio_set_function(request->gpio, request->function); } EXPORT_SYMBOL_GPL(jz_gpio_bulk_resume); void jz_gpio_enable_pullup(unsigned gpio) { jz_gpio_write_bit(gpio, JZ_REG_GPIO_PULL_CLEAR); } EXPORT_SYMBOL_GPL(jz_gpio_enable_pullup); void jz_gpio_disable_pullup(unsigned gpio) { jz_gpio_write_bit(gpio, JZ_REG_GPIO_PULL_SET); } EXPORT_SYMBOL_GPL(jz_gpio_disable_pullup); static int jz_gpio_get_value(struct gpio_chip *chip, unsigned gpio) { return !!(readl(CHIP_TO_REG(chip, JZ_REG_GPIO_PIN)) & BIT(gpio)); } static void jz_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value) { uint32_t __iomem *reg = CHIP_TO_REG(chip, JZ_REG_GPIO_DATA_SET); reg += !value; writel(BIT(gpio), reg); } static int jz_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int value) { writel(BIT(gpio), CHIP_TO_REG(chip, JZ_REG_GPIO_DIRECTION_SET)); jz_gpio_set_value(chip, gpio, value); 
return 0; } static int jz_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) { writel(BIT(gpio), CHIP_TO_REG(chip, JZ_REG_GPIO_DIRECTION_CLEAR)); return 0; } int jz_gpio_port_direction_input(int port, uint32_t mask) { writel(mask, GPIO_TO_REG(port, JZ_REG_GPIO_DIRECTION_CLEAR)); return 0; } EXPORT_SYMBOL(jz_gpio_port_direction_input); int jz_gpio_port_direction_output(int port, uint32_t mask) { writel(mask, GPIO_TO_REG(port, JZ_REG_GPIO_DIRECTION_SET)); return 0; } EXPORT_SYMBOL(jz_gpio_port_direction_output); void jz_gpio_port_set_value(int port, uint32_t value, uint32_t mask) { writel(~value & mask, GPIO_TO_REG(port, JZ_REG_GPIO_DATA_CLEAR)); writel(value & mask, GPIO_TO_REG(port, JZ_REG_GPIO_DATA_SET)); } EXPORT_SYMBOL(jz_gpio_port_set_value); uint32_t jz_gpio_port_get_value(int port, uint32_t mask) { uint32_t value = readl(GPIO_TO_REG(port, JZ_REG_GPIO_PIN)); return value & mask; } EXPORT_SYMBOL(jz_gpio_port_get_value); int gpio_to_irq(unsigned gpio) { return JZ4740_IRQ_GPIO(0) + gpio; } EXPORT_SYMBOL_GPL(gpio_to_irq); int irq_to_gpio(unsigned irq) { return irq - JZ4740_IRQ_GPIO(0); } EXPORT_SYMBOL_GPL(irq_to_gpio); #define IRQ_TO_BIT(irq) BIT(irq_to_gpio(irq) & 0x1f) static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int irq) { uint32_t value; void __iomem *reg; uint32_t mask = IRQ_TO_BIT(irq); if (!(chip->edge_trigger_both & mask)) return; reg = chip->base; value = readl(chip->base + JZ_REG_GPIO_PIN); if (value & mask) reg += JZ_REG_GPIO_DIRECTION_CLEAR; else reg += JZ_REG_GPIO_DIRECTION_SET; writel(mask, reg); } static void jz_gpio_irq_demux_handler(unsigned int irq, struct irq_desc *desc) { uint32_t flag; unsigned int gpio_irq; struct jz_gpio_chip *chip = irq_desc_get_handler_data(desc); flag = readl(chip->base + JZ_REG_GPIO_FLAG); if (!flag) return; gpio_irq = chip->irq_base + __fls(flag); jz_gpio_check_trigger_both(chip, gpio_irq); generic_handle_irq(gpio_irq); }; static inline void jz_gpio_set_irq_bit(struct irq_data 
*data, unsigned int reg) { struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data); writel(IRQ_TO_BIT(data->irq), chip->base + reg); } static void jz_gpio_irq_unmask(struct irq_data *data) { struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data); jz_gpio_check_trigger_both(chip, data->irq); irq_gc_unmask_enable_reg(data); }; /* TODO: Check if function is gpio */ static unsigned int jz_gpio_irq_startup(struct irq_data *data) { jz_gpio_set_irq_bit(data, JZ_REG_GPIO_SELECT_SET); jz_gpio_irq_unmask(data); return 0; } static void jz_gpio_irq_shutdown(struct irq_data *data) { irq_gc_mask_disable_reg(data); /* Set direction to input */ jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR); jz_gpio_set_irq_bit(data, JZ_REG_GPIO_SELECT_CLEAR); } static int jz_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type) { struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data); unsigned int irq = data->irq; if (flow_type == IRQ_TYPE_EDGE_BOTH) { uint32_t value = readl(chip->base + JZ_REG_GPIO_PIN); if (value & IRQ_TO_BIT(irq)) flow_type = IRQ_TYPE_EDGE_FALLING; else flow_type = IRQ_TYPE_EDGE_RISING; chip->edge_trigger_both |= IRQ_TO_BIT(irq); } else { chip->edge_trigger_both &= ~IRQ_TO_BIT(irq); } switch (flow_type) { case IRQ_TYPE_EDGE_RISING: jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_SET); jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_SET); break; case IRQ_TYPE_EDGE_FALLING: jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR); jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_SET); break; case IRQ_TYPE_LEVEL_HIGH: jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_SET); jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_CLEAR); break; case IRQ_TYPE_LEVEL_LOW: jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR); jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_CLEAR); break; default: return -EINVAL; } return 0; } static int jz_gpio_irq_set_wake(struct irq_data *data, unsigned int on) { struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data); irq_gc_set_wake(data, on); 
irq_set_irq_wake(chip->irq, on); return 0; } #define JZ4740_GPIO_CHIP(_bank) { \ .irq_base = JZ4740_IRQ_GPIO_BASE_ ## _bank, \ .gpio_chip = { \ .label = "Bank " # _bank, \ .owner = THIS_MODULE, \ .set = jz_gpio_set_value, \ .get = jz_gpio_get_value, \ .direction_output = jz_gpio_direction_output, \ .direction_input = jz_gpio_direction_input, \ .base = JZ4740_GPIO_BASE_ ## _bank, \ .ngpio = JZ4740_GPIO_NUM_ ## _bank, \ }, \ } static struct jz_gpio_chip jz4740_gpio_chips[] = { JZ4740_GPIO_CHIP(A), JZ4740_GPIO_CHIP(B), JZ4740_GPIO_CHIP(C), JZ4740_GPIO_CHIP(D), }; static void jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id) { struct irq_chip_generic *gc; struct irq_chip_type *ct; chip->base = ioremap(JZ4740_GPIO_BASE_ADDR + (id * 0x100), 0x100); chip->irq = JZ4740_IRQ_INTC_GPIO(id); irq_set_handler_data(chip->irq, chip); irq_set_chained_handler(chip->irq, jz_gpio_irq_demux_handler); gc = irq_alloc_generic_chip(chip->gpio_chip.label, 1, chip->irq_base, chip->base, handle_level_irq); gc->wake_enabled = IRQ_MSK(chip->gpio_chip.ngpio); gc->private = chip; ct = gc->chip_types; ct->regs.enable = JZ_REG_GPIO_MASK_CLEAR; ct->regs.disable = JZ_REG_GPIO_MASK_SET; ct->regs.ack = JZ_REG_GPIO_FLAG_CLEAR; ct->chip.name = "GPIO"; ct->chip.irq_mask = irq_gc_mask_disable_reg; ct->chip.irq_unmask = jz_gpio_irq_unmask; ct->chip.irq_ack = irq_gc_ack_set_bit; ct->chip.irq_suspend = jz4740_irq_suspend; ct->chip.irq_resume = jz4740_irq_resume; ct->chip.irq_startup = jz_gpio_irq_startup; ct->chip.irq_shutdown = jz_gpio_irq_shutdown; ct->chip.irq_set_type = jz_gpio_irq_set_type; ct->chip.irq_set_wake = jz_gpio_irq_set_wake; ct->chip.flags = IRQCHIP_SET_TYPE_MASKED; irq_setup_generic_chip(gc, IRQ_MSK(chip->gpio_chip.ngpio), IRQ_GC_INIT_NESTED_LOCK, 0, IRQ_NOPROBE | IRQ_LEVEL); gpiochip_add(&chip->gpio_chip); } static int __init jz4740_gpio_init(void) { unsigned int i; for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i) jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i); 
printk(KERN_INFO "JZ4740 GPIO initialized\n"); return 0; } arch_initcall(jz4740_gpio_init); #ifdef CONFIG_DEBUG_FS static inline void gpio_seq_reg(struct seq_file *s, struct jz_gpio_chip *chip, const char *name, unsigned int reg) { seq_printf(s, "\t%s: %08x\n", name, readl(chip->base + reg)); } static int gpio_regs_show(struct seq_file *s, void *unused) { struct jz_gpio_chip *chip = jz4740_gpio_chips; int i; for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i, ++chip) { seq_printf(s, "==GPIO %d==\n", i); gpio_seq_reg(s, chip, "Pin", JZ_REG_GPIO_PIN); gpio_seq_reg(s, chip, "Data", JZ_REG_GPIO_DATA); gpio_seq_reg(s, chip, "Mask", JZ_REG_GPIO_MASK); gpio_seq_reg(s, chip, "Pull", JZ_REG_GPIO_PULL); gpio_seq_reg(s, chip, "Func", JZ_REG_GPIO_FUNC); gpio_seq_reg(s, chip, "Select", JZ_REG_GPIO_SELECT); gpio_seq_reg(s, chip, "Direction", JZ_REG_GPIO_DIRECTION); gpio_seq_reg(s, chip, "Trigger", JZ_REG_GPIO_TRIGGER); gpio_seq_reg(s, chip, "Flag", JZ_REG_GPIO_FLAG); } return 0; } static int gpio_regs_open(struct inode *inode, struct file *file) { return single_open(file, gpio_regs_show, NULL); } static const struct file_operations gpio_regs_operations = { .open = gpio_regs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init gpio_debugfs_init(void) { (void) debugfs_create_file("jz_regs_gpio", S_IFREG | S_IRUGO, NULL, NULL, &gpio_regs_operations); return 0; } subsys_initcall(gpio_debugfs_init); #endif
gpl-2.0
TEAM-Gummy/android_kernel_samsung_smdk4412
arch/powerpc/platforms/83xx/mpc836x_rdk.c
4676
2495
/* * MPC8360E-RDK board file. * * Copyright (c) 2006 Freescale Semicondutor, Inc. * Copyright (c) 2007-2008 MontaVista Software, Inc. * * Author: Anton Vorontsov <avorontsov@ru.mvista.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/of_platform.h> #include <linux/io.h> #include <asm/prom.h> #include <asm/time.h> #include <asm/ipic.h> #include <asm/udbg.h> #include <asm/qe.h> #include <asm/qe_ic.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc83xx.h" static struct of_device_id __initdata mpc836x_rdk_ids[] = { { .compatible = "simple-bus", }, {}, }; static int __init mpc836x_rdk_declare_of_platform_devices(void) { return of_platform_bus_probe(NULL, mpc836x_rdk_ids, NULL); } machine_device_initcall(mpc836x_rdk, mpc836x_rdk_declare_of_platform_devices); static void __init mpc836x_rdk_setup_arch(void) { #ifdef CONFIG_PCI struct device_node *np; #endif if (ppc_md.progress) ppc_md.progress("mpc836x_rdk_setup_arch()", 0); #ifdef CONFIG_PCI for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") mpc83xx_add_bridge(np); #endif #ifdef CONFIG_QUICC_ENGINE qe_reset(); #endif } static void __init mpc836x_rdk_init_IRQ(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,ipic"); if (!np) return; ipic_init(np, 0); /* * Initialize the default interrupt mapping priorities, * in case the boot rom changed something on us. */ ipic_set_default_priority(); of_node_put(np); #ifdef CONFIG_QUICC_ENGINE np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); if (!np) return; qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic); of_node_put(np); #endif } /* * Called very early, MMU is off, device-tree isn't unflattened. 
*/ static int __init mpc836x_rdk_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "fsl,mpc8360rdk"); } define_machine(mpc836x_rdk) { .name = "MPC836x RDK", .probe = mpc836x_rdk_probe, .setup_arch = mpc836x_rdk_setup_arch, .init_IRQ = mpc836x_rdk_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, };
gpl-2.0
justin0406/HD-GEE
sound/i2c/other/ak4113.c
4676
17986
/*
 *  Routines for control of the AK4113 via I2C/4-wire serial interface
 *  IEC958 (S/PDIF) receiver by Asahi Kasei
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Pavel Hofman <pavel.hofman@ivitera.com>
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/pcm.h>
#include <sound/ak4113.h>
#include <sound/asoundef.h>
#include <sound/info.h>

MODULE_AUTHOR("Pavel Hofman <pavel.hofman@ivitera.com>");
MODULE_DESCRIPTION("AK4113 IEC958 (S/PDIF) receiver by Asahi Kasei");
MODULE_LICENSE("GPL");

#define AK4113_ADDR 0x00 /* fixed address */

static void ak4113_stats(struct work_struct *work);
static void ak4113_init_regs(struct ak4113 *chip);

/*
 * Write @val to chip register @reg through the caller-supplied write
 * callback, and mirror the value in the regmap[] shadow cache when the
 * register index fits inside the cache (i.e. it is a writable register).
 */
static void reg_write(struct ak4113 *ak4113, unsigned char reg,
		unsigned char val)
{
	ak4113->write(ak4113->private_data, reg, val);
	if (reg < sizeof(ak4113->regmap))
		ak4113->regmap[reg] = val;
}

/* Read register @reg through the caller-supplied read callback. */
static inline unsigned char reg_read(struct ak4113 *ak4113, unsigned char reg)
{
	return ak4113->read(ak4113->private_data, reg);
}

/*
 * Release a chip instance: stop the periodic statistics worker and free
 * the structure.  Setting init = 1 prevents ak4113_stats() from doing any
 * further work; the barrier orders that store before the cancel.
 */
static void snd_ak4113_free(struct ak4113 *chip)
{
	chip->init = 1;	/* don't schedule new work */
	mb();
	cancel_delayed_work_sync(&chip->work);
	kfree(chip);
}

/* snd_device_ops dev_free hook: forwards to snd_ak4113_free(). */
static int snd_ak4113_dev_free(struct snd_device *device)
{
	struct ak4113 *chip = device->device_data;
	snd_ak4113_free(chip);
	return 0;
}

/*
 * Allocate and initialize an AK4113 instance.
 *
 * @card:	the sound card this receiver belongs to
 * @read:	register read callback (bus access is provided by the caller)
 * @write:	register write callback
 * @pgm:	initial values for the AK4113_WRITABLE_REGS registers
 * @private_data: opaque pointer passed back to @read/@write
 * @r_ak4113:	optional output pointer for the new instance
 *
 * Programs the chip (via ak4113_init_regs()), snapshots the RCS0..RCS2
 * status registers for later change detection, and registers the instance
 * as a SNDRV_DEV_LOWLEVEL device so it is freed with the card.
 *
 * Returns 0 on success or a negative error code; on failure the partially
 * constructed instance is freed here.
 */
int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read,
		ak4113_write_t *write, const unsigned char *pgm,
		void *private_data, struct ak4113 **r_ak4113)
{
	struct ak4113 *chip;
	int err = 0;
	unsigned char reg;
	static struct snd_device_ops ops = {
		.dev_free =     snd_ak4113_dev_free,
	};

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL)
		return -ENOMEM;
	spin_lock_init(&chip->lock);
	chip->card = card;
	chip->read = read;
	chip->write = write;
	chip->private_data = private_data;
	INIT_DELAYED_WORK(&chip->work, ak4113_stats);

	for (reg = 0; reg < AK4113_WRITABLE_REGS ; reg++)
		chip->regmap[reg] = pgm[reg];
	ak4113_init_regs(chip);

	/* remember the current status, masking out the interrupt/transient
	 * bits of RCS0 so the first poll does not report stale events */
	chip->rcs0 = reg_read(chip, AK4113_REG_RCS0) & ~(AK4113_QINT |
			AK4113_CINT | AK4113_STC);
	chip->rcs1 = reg_read(chip, AK4113_REG_RCS1);
	chip->rcs2 = reg_read(chip, AK4113_REG_RCS2);
	err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
	if (err < 0)
		goto __fail;

	if (r_ak4113)
		*r_ak4113 = chip;
	return 0;

__fail:
	snd_ak4113_free(chip);
	return err < 0 ? err : -EIO;
}
EXPORT_SYMBOL_GPL(snd_ak4113_create);

/*
 * Read-modify-write a writable chip register: the bits selected by @mask
 * are cleared from the cached value and replaced by @val.  Out-of-range
 * register indices are silently ignored.
 */
void snd_ak4113_reg_write(struct ak4113 *chip, unsigned char reg,
		unsigned char mask, unsigned char val)
{
	if (reg >= AK4113_WRITABLE_REGS)
		return;
	reg_write(chip, reg, (chip->regmap[reg] & ~mask) | val);
}
EXPORT_SYMBOL_GPL(snd_ak4113_reg_write);

/*
 * Program all writable registers from the regmap[] cache, following the
 * chip's reset/power-up sequence (reset+powerdown, release reset, write
 * registers, release powerdown).  The udelay(200) settle times bound the
 * sequence; caller context must therefore allow busy-waiting.
 */
static void ak4113_init_regs(struct ak4113 *chip)
{
	unsigned char old = chip->regmap[AK4113_REG_PWRDN], reg;

	/* bring the chip to reset state and powerdown state */
	reg_write(chip, AK4113_REG_PWRDN, old & ~(AK4113_RST|AK4113_PWN));
	udelay(200);
	/* release reset, but leave powerdown */
	reg_write(chip, AK4113_REG_PWRDN, (old | AK4113_RST) & ~AK4113_PWN);
	udelay(200);
	for (reg = 1; reg < AK4113_WRITABLE_REGS; reg++)
		reg_write(chip, reg, chip->regmap[reg]);
	/* release powerdown, everything is initialized now */
	reg_write(chip, AK4113_REG_PWRDN, old | AK4113_RST | AK4113_PWN);
}

/*
 * Re-run the chip init sequence (e.g. after the codec lost state) and
 * restart the statistics worker.  Must be called from sleepable context:
 * flush_delayed_work_sync() waits for a running worker to finish.
 */
void snd_ak4113_reinit(struct ak4113 *chip)
{
	chip->init = 1;
	mb();
	flush_delayed_work_sync(&chip->work);
	ak4113_init_regs(chip);
	/* bring up statistics / event queueing */
	chip->init = 0;
	if (chip->kctls[0])
		schedule_delayed_work(&chip->work, HZ / 10);
}
EXPORT_SYMBOL_GPL(snd_ak4113_reinit);

/*
 * Decode the FS0..FS3 sample-rate field of the RCS1 status register into
 * a rate in Hz; 0 means "unknown/not detected".
 */
static unsigned int external_rate(unsigned char rcs1)
{
	switch (rcs1 & (AK4113_FS0|AK4113_FS1|AK4113_FS2|AK4113_FS3)) {
	case AK4113_FS_8000HZ:
		return 8000;
	case AK4113_FS_11025HZ:
		return 11025;
	case AK4113_FS_16000HZ:
		return 16000;
	case AK4113_FS_22050HZ:
		return 22050;
	case AK4113_FS_24000HZ:
		return 24000;
	case AK4113_FS_32000HZ:
		return 32000;
	case AK4113_FS_44100HZ:
		return 44100;
	case AK4113_FS_48000HZ:
		return 48000;
	case AK4113_FS_64000HZ:
		return 64000;
	case AK4113_FS_88200HZ:
		return 88200;
	case AK4113_FS_96000HZ:
		return 96000;
	case AK4113_FS_176400HZ:
		return 176400;
	case AK4113_FS_192000HZ:
		return 192000;
	default:
		return 0;
	}
}

/* Info callback for the error counters: 0..LONG_MAX integer. */
static int snd_ak4113_in_error_info(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = LONG_MAX;
	return 0;
}

/*
 * Get callback for the error counters.  private_value holds the offsetof()
 * of a long counter inside struct ak4113; reading the control returns the
 * counter and resets it to zero (read-and-clear semantics).
 */
static int snd_ak4113_in_error_get(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct ak4113 *chip = snd_kcontrol_chip(kcontrol);
	long *ptr;

	spin_lock_irq(&chip->lock);
	ptr = (long *)(((char *)chip) + kcontrol->private_value);
	ucontrol->value.integer.value[0] = *ptr;
	*ptr = 0;
	spin_unlock_irq(&chip->lock);
	return 0;
}

#define snd_ak4113_in_bit_info		snd_ctl_boolean_mono_info

/*
 * Get callback for single-status-bit booleans.  private_value encodes:
 * bits 0..7 = register index, bits 8..15 = bit number, bit 31 = invert.
 */
static int snd_ak4113_in_bit_get(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct ak4113 *chip = snd_kcontrol_chip(kcontrol);
	unsigned char reg = kcontrol->private_value & 0xff;
	unsigned char bit = (kcontrol->private_value >> 8) & 0xff;
	unsigned char inv = (kcontrol->private_value >> 31) & 1;

	ucontrol->value.integer.value[0] =
		((reg_read(chip, reg) & (1 << bit)) ? 1 : 0) ^ inv;
	return 0;
}

/* Info callback for the input selector: integer 0..5. */
static int snd_ak4113_rx_info(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 5;
	return 0;
}

/* Return the currently selected input (IPS field of the cached IO1 reg). */
static int snd_ak4113_rx_get(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct ak4113 *chip = snd_kcontrol_chip(kcontrol);

	ucontrol->value.integer.value[0] =
		(AK4113_IPS(chip->regmap[AK4113_REG_IO1]));
	return 0;
}

/*
 * Select the receiver input: rewrite only the IPS bits of IO1, keeping all
 * other IO1 bits intact.  Returns 1 when the value actually changed.
 */
static int snd_ak4113_rx_put(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct ak4113 *chip = snd_kcontrol_chip(kcontrol);
	int change;
	u8 old_val;

	spin_lock_irq(&chip->lock);
	old_val = chip->regmap[AK4113_REG_IO1];
	change = ucontrol->value.integer.value[0] != AK4113_IPS(old_val);
	if (change)
		reg_write(chip, AK4113_REG_IO1,
				(old_val & (~AK4113_IPS(0xff))) |
				(AK4113_IPS(ucontrol->value.integer.value[0])));
	spin_unlock_irq(&chip->lock);
	return change;
}

/* Info callback for the detected external rate: integer 0..192000 Hz. */
static int snd_ak4113_rate_info(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 192000;
	return 0;
}

/* Read RCS1 from the chip and report the decoded incoming sample rate. */
static int snd_ak4113_rate_get(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct ak4113 *chip = snd_kcontrol_chip(kcontrol);

	ucontrol->value.integer.value[0] = external_rate(reg_read(chip,
				AK4113_REG_RCS1));
	return 0;
}

/* Info callback for IEC958 channel-status controls. */
static int snd_ak4113_spdif_info(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

/* Copy the received channel-status bytes (RXCSB0..) to user space. */
static int snd_ak4113_spdif_get(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct ak4113 *chip = snd_kcontrol_chip(kcontrol);
	unsigned i;

	for (i = 0; i < AK4113_REG_RXCSB_SIZE; i++)
		ucontrol->value.iec958.status[i] = reg_read(chip,
				AK4113_REG_RXCSB0 + i);
	return 0;
}

/* Info callback for the channel-status mask control. */
static int snd_ak4113_spdif_mask_info(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

/* All channel-status bits are readable: report an all-ones mask. */
static int snd_ak4113_spdif_mask_get(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	memset(ucontrol->value.iec958.status, 0xff, AK4113_REG_RXCSB_SIZE);
	return 0;
}

/* Info callback for the 4-word burst-preamble control (each 0..0xffff). */
static int snd_ak4113_spdif_pinfo(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 0xffff;
	uinfo->count = 4;
	return 0;
}

/*
 * Report the burst preamble: words 0 and 1 are the fixed sync constants
 * 0xf8f2/0x4e1f; words 2 and 3 are assembled little-endian from the chip's
 * Pc0/Pc1 and Pd0/Pd1 registers.
 */
static int snd_ak4113_spdif_pget(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct ak4113 *chip = snd_kcontrol_chip(kcontrol);
	unsigned short tmp;

	ucontrol->value.integer.value[0] = 0xf8f2;
	ucontrol->value.integer.value[1] = 0x4e1f;
	tmp = reg_read(chip, AK4113_REG_Pc0) |
		(reg_read(chip, AK4113_REG_Pc1) << 8);
	ucontrol->value.integer.value[2] = tmp;
	tmp = reg_read(chip, AK4113_REG_Pd0) |
		(reg_read(chip, AK4113_REG_Pd1) << 8);
	ucontrol->value.integer.value[3] = tmp;
	return 0;
}

/* Info callback for the Q-subcode bytes control. */
static int snd_ak4113_spdif_qinfo(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
	uinfo->count = AK4113_REG_QSUB_SIZE;
	return 0;
}

/* Copy the Q-subcode bytes from the chip to user space. */
static int snd_ak4113_spdif_qget(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct ak4113 *chip = snd_kcontrol_chip(kcontrol);
	unsigned i;

	for (i = 0; i < AK4113_REG_QSUB_SIZE; i++)
		ucontrol->value.bytes.data[i] = reg_read(chip,
				AK4113_REG_QSUB_ADDR + i);
	return 0;
}

/*
 * Control table.  The array order matters: snd_ak4113_check_rate_and_errors()
 * notifies controls by fixed kctls[] index.
 * Don't forget to change AK4113_CONTROLS define!!!
 */
static struct snd_kcontrol_new snd_ak4113_iec958_controls[] = {
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 Parity Errors",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ |
		SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4113_in_error_info,
	.get =		snd_ak4113_in_error_get,
	.private_value = offsetof(struct ak4113, parity_errors),
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 V-Bit Errors",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ |
		SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4113_in_error_info,
	.get =		snd_ak4113_in_error_get,
	.private_value = offsetof(struct ak4113, v_bit_errors),
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 C-CRC Errors",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ |
		SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4113_in_error_info,
	.get =		snd_ak4113_in_error_get,
	.private_value = offsetof(struct ak4113, ccrc_errors),
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 Q-CRC Errors",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ |
		SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4113_in_error_info,
	.get =		snd_ak4113_in_error_get,
	.private_value = offsetof(struct ak4113, qcrc_errors),
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 External Rate",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ |
		SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4113_rate_info,
	.get =		snd_ak4113_rate_get,
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("", CAPTURE, MASK),
	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
	.info =		snd_ak4113_spdif_mask_info,
	.get =		snd_ak4113_spdif_mask_get,
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("", CAPTURE, DEFAULT),
	.access =	SNDRV_CTL_ELEM_ACCESS_READ |
		SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4113_spdif_info,
	.get =		snd_ak4113_spdif_get,
},
{
	/* note: "Preample" typo is kept — the control name is user ABI */
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 Preample Capture Default",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ |
		SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4113_spdif_pinfo,
	.get =		snd_ak4113_spdif_pget,
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 Q-subcode Capture Default",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ |
		SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4113_spdif_qinfo,
	.get =		snd_ak4113_spdif_qget,
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 Audio",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ |
		SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4113_in_bit_info,
	.get =		snd_ak4113_in_bit_get,
	/* inverted AUDION bit (bit 1) of RCS0 */
	.private_value = (1<<31) | (1<<8) | AK4113_REG_RCS0,
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 Non-PCM Bitstream",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ |
		SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4113_in_bit_info,
	.get =		snd_ak4113_in_bit_get,
	/* bit 0 of RCS1 */
	.private_value = (0<<8) | AK4113_REG_RCS1,
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"IEC958 DTS Bitstream",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ |
		SNDRV_CTL_ELEM_ACCESS_VOLATILE,
	.info =		snd_ak4113_in_bit_info,
	.get =		snd_ak4113_in_bit_get,
	/* bit 1 of RCS1 */
	.private_value = (1<<8) | AK4113_REG_RCS1,
},
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		"AK4113 Input Select",
	.access =	SNDRV_CTL_ELEM_ACCESS_READ |
		SNDRV_CTL_ELEM_ACCESS_WRITE,
	.info =		snd_ak4113_rx_info,
	.get =		snd_ak4113_rx_get,
	.put =		snd_ak4113_rx_put,
}
};

/* procfs dump of all chip registers. */
static void snd_ak4113_proc_regs_read(struct snd_info_entry *entry,
		struct snd_info_buffer *buffer)
{
	struct ak4113 *ak4113 = entry->private_data;
	int reg, val;

	/* all ak4113 registers 0x00 - 0x1c */
	for (reg = 0; reg < 0x1d; reg++) {
		val = reg_read(ak4113, reg);
		snd_iprintf(buffer, "0x%02x = 0x%02x\n", reg, val);
	}
}

/* Register the "ak4113" proc entry on the card. */
static void snd_ak4113_proc_init(struct ak4113 *ak4113)
{
	struct snd_info_entry *entry;

	if (!snd_card_proc_new(ak4113->card, "ak4113", &entry))
		snd_info_set_text_ops(entry, ak4113, snd_ak4113_proc_regs_read);
}

/*
 * Create all mixer controls for the given capture substream, set up the
 * proc entry and start the periodic statistics worker.
 *
 * Returns 0 on success or a negative error code from control creation.
 * NOTE(review): on a mid-loop failure the already-created controls stay
 * registered; they are released together with the card.
 */
int snd_ak4113_build(struct ak4113 *ak4113,
		struct snd_pcm_substream *cap_substream)
{
	struct snd_kcontrol *kctl;
	unsigned int idx;
	int err;

	if (snd_BUG_ON(!cap_substream))
		return -EINVAL;
	ak4113->substream = cap_substream;
	for (idx = 0; idx < AK4113_CONTROLS; idx++) {
		kctl = snd_ctl_new1(&snd_ak4113_iec958_controls[idx], ak4113);
		if (kctl == NULL)
			return -ENOMEM;
		kctl->id.device = cap_substream->pcm->device;
		kctl->id.subdevice = cap_substream->number;
		err = snd_ctl_add(ak4113->card, kctl);
		if (err < 0)
			return err;
		ak4113->kctls[idx] = kctl;
	}
	snd_ak4113_proc_init(ak4113);
	/* trigger workq */
	schedule_delayed_work(&ak4113->work, HZ / 10);
	return 0;
}
EXPORT_SYMBOL_GPL(snd_ak4113_build);

/* Return the currently detected incoming sample rate in Hz (0 = unknown). */
int snd_ak4113_external_rate(struct ak4113 *ak4113)
{
	unsigned char rcs1;

	rcs1 = reg_read(ak4113, AK4113_REG_RCS1);
	return external_rate(rcs1);
}
EXPORT_SYMBOL_GPL(snd_ak4113_external_rate);

/*
 * Poll the receiver status registers, update error counters, emit control
 * notifications for changed status bits, and react to sample-rate changes.
 *
 * @flags: AK4113_CHECK_NO_STAT skips the statistics/notification pass;
 *         AK4113_CHECK_NO_RATE skips the rate-vs-stream comparison.
 *
 * Returns the detected external rate in Hz, or 1 when a running capture
 * stream was stopped (draining) because its rate no longer matches.
 */
int snd_ak4113_check_rate_and_errors(struct ak4113 *ak4113, unsigned int flags)
{
	struct snd_pcm_runtime *runtime =
		ak4113->substream ? ak4113->substream->runtime : NULL;
	unsigned long _flags;
	int res = 0;
	unsigned char rcs0, rcs1, rcs2;
	unsigned char c0, c1;

	rcs1 = reg_read(ak4113, AK4113_REG_RCS1);
	if (flags & AK4113_CHECK_NO_STAT)
		goto __rate;
	rcs0 = reg_read(ak4113, AK4113_REG_RCS0);
	rcs2 = reg_read(ak4113, AK4113_REG_RCS2);
	spin_lock_irqsave(&ak4113->lock, _flags);
	if (rcs0 & AK4113_PAR)
		ak4113->parity_errors++;
	if (rcs0 & AK4113_V)
		ak4113->v_bit_errors++;
	if (rcs2 & AK4113_CCRC)
		ak4113->ccrc_errors++;
	if (rcs2 & AK4113_QCRC)
		ak4113->qcrc_errors++;
	/* c0/c1 hold the bits that changed since the previous poll */
	c0 = (ak4113->rcs0 & (AK4113_QINT | AK4113_CINT | AK4113_STC |
				AK4113_AUDION | AK4113_AUTO | AK4113_UNLCK)) ^
		(rcs0 & (AK4113_QINT | AK4113_CINT | AK4113_STC |
			 AK4113_AUDION | AK4113_AUTO | AK4113_UNLCK));
	c1 = (ak4113->rcs1 & (AK4113_DTSCD | AK4113_NPCM | AK4113_PEM |
				AK4113_DAT | 0xf0)) ^
		(rcs1 & (AK4113_DTSCD | AK4113_NPCM | AK4113_PEM |
			 AK4113_DAT | 0xf0));
	ak4113->rcs0 = rcs0 & ~(AK4113_QINT | AK4113_CINT | AK4113_STC);
	ak4113->rcs1 = rcs1;
	ak4113->rcs2 = rcs2;
	spin_unlock_irqrestore(&ak4113->lock, _flags);

	/* notifications by fixed index into snd_ak4113_iec958_controls[] */
	if (rcs0 & AK4113_PAR)
		snd_ctl_notify(ak4113->card, SNDRV_CTL_EVENT_MASK_VALUE,
				&ak4113->kctls[0]->id);
	if (rcs0 & AK4113_V)
		snd_ctl_notify(ak4113->card, SNDRV_CTL_EVENT_MASK_VALUE,
				&ak4113->kctls[1]->id);
	if (rcs2 & AK4113_CCRC)
		snd_ctl_notify(ak4113->card, SNDRV_CTL_EVENT_MASK_VALUE,
				&ak4113->kctls[2]->id);
	if (rcs2 & AK4113_QCRC)
		snd_ctl_notify(ak4113->card, SNDRV_CTL_EVENT_MASK_VALUE,
				&ak4113->kctls[3]->id);

	/* rate change */
	if (c1 & 0xf0)
		snd_ctl_notify(ak4113->card, SNDRV_CTL_EVENT_MASK_VALUE,
				&ak4113->kctls[4]->id);

	if ((c1 & AK4113_PEM) | (c0 & AK4113_CINT))
		snd_ctl_notify(ak4113->card, SNDRV_CTL_EVENT_MASK_VALUE,
				&ak4113->kctls[6]->id);
	if (c0 & AK4113_QINT)
		snd_ctl_notify(ak4113->card, SNDRV_CTL_EVENT_MASK_VALUE,
				&ak4113->kctls[8]->id);

	if (c0 & AK4113_AUDION)
		snd_ctl_notify(ak4113->card, SNDRV_CTL_EVENT_MASK_VALUE,
				&ak4113->kctls[9]->id);
	if (c1 & AK4113_NPCM)
		snd_ctl_notify(ak4113->card, SNDRV_CTL_EVENT_MASK_VALUE,
				&ak4113->kctls[10]->id);
	if (c1 & AK4113_DTSCD)
		snd_ctl_notify(ak4113->card, SNDRV_CTL_EVENT_MASK_VALUE,
				&ak4113->kctls[11]->id);

	if (ak4113->change_callback && (c0 | c1) != 0)
		ak4113->change_callback(ak4113, c0, c1);

__rate:
	/* compare rate */
	res = external_rate(rcs1);
	if (!(flags & AK4113_CHECK_NO_RATE) && runtime &&
			(runtime->rate != res)) {
		snd_pcm_stream_lock_irqsave(ak4113->substream, _flags);
		if (snd_pcm_running(ak4113->substream)) {
			/*printk(KERN_DEBUG "rate changed (%i <- %i)\n",
			 * runtime->rate, res); */
			snd_pcm_stop(ak4113->substream,
					SNDRV_PCM_STATE_DRAINING);
			wake_up(&runtime->sleep);
			res = 1;
		}
		snd_pcm_stream_unlock_irqrestore(ak4113->substream, _flags);
	}
	return res;
}
EXPORT_SYMBOL_GPL(snd_ak4113_check_rate_and_errors);

/*
 * Periodic worker (every HZ/10): run the status/rate check unless the
 * instance is in init/teardown (chip->init), then reschedule itself.
 */
static void ak4113_stats(struct work_struct *work)
{
	struct ak4113 *chip = container_of(work, struct ak4113, work.work);

	if (!chip->init)
		snd_ak4113_check_rate_and_errors(chip, chip->check_flags);

	schedule_delayed_work(&chip->work, HZ / 10);
}
gpl-2.0
mseskir/android_kernel_vestel_g55
drivers/tty/serial/8250/serial_cs.c
4932
30102
/*====================================================================== A driver for PCMCIA serial devices serial_cs.c 1.134 2002/05/04 05:48:53 The contents of this file are subject to the Mozilla Public License Version 1.1 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.mozilla.org/MPL/ Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. The initial developer of the original code is David A. Hinds <dahinds@users.sourceforge.net>. Portions created by David A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights Reserved. Alternatively, the contents of this file may be used under the terms of the GNU General Public License version 2 (the "GPL"), in which case the provisions of the GPL are applicable instead of the above. If you wish to allow the use of your version of this file only under the terms of the GPL and not to allow others to use your version of this file under the MPL, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by the GPL. If you do not delete the provisions above, a recipient may use your version of this file under either the MPL or the GPL. 
======================================================================*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/serial_core.h> #include <linux/delay.h> #include <linux/major.h> #include <asm/io.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> #include <pcmcia/cisreg.h> #include "8250.h" /*====================================================================*/ /* Parameters that can be set with 'insmod' */ /* Enable the speaker? */ static int do_sound = 1; /* Skip strict UART tests? */ static int buggy_uart; module_param(do_sound, int, 0444); module_param(buggy_uart, int, 0444); /*====================================================================*/ /* Table of multi-port card ID's */ struct serial_quirk { unsigned int manfid; unsigned int prodid; int multi; /* 1 = multifunction, > 1 = # ports */ void (*config)(struct pcmcia_device *); void (*setup)(struct pcmcia_device *, struct uart_port *); void (*wakeup)(struct pcmcia_device *); int (*post)(struct pcmcia_device *); }; struct serial_info { struct pcmcia_device *p_dev; int ndev; int multi; int slave; int manfid; int prodid; int c950ctrl; int line[4]; const struct serial_quirk *quirk; }; struct serial_cfg_mem { tuple_t tuple; cisparse_t parse; u_char buf[256]; }; /* * vers_1 5.0, "Brain Boxes", "2-Port RS232 card", "r6" * manfid 0x0160, 0x0104 * This card appears to have a 14.7456MHz clock. 
*/ /* Generic Modem: MD55x (GPRS/EDGE) have * Elan VPU16551 UART with 14.7456MHz oscillator * manfid 0x015D, 0x4C45 */ static void quirk_setup_brainboxes_0104(struct pcmcia_device *link, struct uart_port *port) { port->uartclk = 14745600; } static int quirk_post_ibm(struct pcmcia_device *link) { u8 val; int ret; ret = pcmcia_read_config_byte(link, 0x800, &val); if (ret) goto failed; ret = pcmcia_write_config_byte(link, 0x800, val | 1); if (ret) goto failed; return 0; failed: return -ENODEV; } /* * Nokia cards are not really multiport cards. Shouldn't this * be handled by setting the quirk entry .multi = 0 | 1 ? */ static void quirk_config_nokia(struct pcmcia_device *link) { struct serial_info *info = link->priv; if (info->multi > 1) info->multi = 1; } static void quirk_wakeup_oxsemi(struct pcmcia_device *link) { struct serial_info *info = link->priv; if (info->c950ctrl) outb(12, info->c950ctrl + 1); } /* request_region? oxsemi branch does no request_region too... */ /* * This sequence is needed to properly initialize MC45 attached to OXCF950. * I tried decreasing these msleep()s, but it worked properly (survived * 1000 stop/start operations) with these timeouts (or bigger). 
*/ static void quirk_wakeup_possio_gcc(struct pcmcia_device *link) { struct serial_info *info = link->priv; unsigned int ctrl = info->c950ctrl; outb(0xA, ctrl + 1); msleep(100); outb(0xE, ctrl + 1); msleep(300); outb(0xC, ctrl + 1); msleep(100); outb(0xE, ctrl + 1); msleep(200); outb(0xF, ctrl + 1); msleep(100); outb(0xE, ctrl + 1); msleep(100); outb(0xC, ctrl + 1); } /* * Socket Dual IO: this enables irq's for second port */ static void quirk_config_socket(struct pcmcia_device *link) { struct serial_info *info = link->priv; if (info->multi) link->config_flags |= CONF_ENABLE_ESR; } static const struct serial_quirk quirks[] = { { .manfid = 0x0160, .prodid = 0x0104, .multi = -1, .setup = quirk_setup_brainboxes_0104, }, { .manfid = 0x015D, .prodid = 0x4C45, .multi = -1, .setup = quirk_setup_brainboxes_0104, }, { .manfid = MANFID_IBM, .prodid = ~0, .multi = -1, .post = quirk_post_ibm, }, { .manfid = MANFID_INTEL, .prodid = PRODID_INTEL_DUAL_RS232, .multi = 2, }, { .manfid = MANFID_NATINST, .prodid = PRODID_NATINST_QUAD_RS232, .multi = 4, }, { .manfid = MANFID_NOKIA, .prodid = ~0, .multi = -1, .config = quirk_config_nokia, }, { .manfid = MANFID_OMEGA, .prodid = PRODID_OMEGA_QSP_100, .multi = 4, }, { .manfid = MANFID_OXSEMI, .prodid = ~0, .multi = -1, .wakeup = quirk_wakeup_oxsemi, }, { .manfid = MANFID_POSSIO, .prodid = PRODID_POSSIO_GCC, .multi = -1, .wakeup = quirk_wakeup_possio_gcc, }, { .manfid = MANFID_QUATECH, .prodid = PRODID_QUATECH_DUAL_RS232, .multi = 2, }, { .manfid = MANFID_QUATECH, .prodid = PRODID_QUATECH_DUAL_RS232_D1, .multi = 2, }, { .manfid = MANFID_QUATECH, .prodid = PRODID_QUATECH_DUAL_RS232_G, .multi = 2, }, { .manfid = MANFID_QUATECH, .prodid = PRODID_QUATECH_QUAD_RS232, .multi = 4, }, { .manfid = MANFID_SOCKET, .prodid = PRODID_SOCKET_DUAL_RS232, .multi = 2, .config = quirk_config_socket, }, { .manfid = MANFID_SOCKET, .prodid = ~0, .multi = -1, .config = quirk_config_socket, } }; static int serial_config(struct pcmcia_device * link); static void 
serial_remove(struct pcmcia_device *link) { struct serial_info *info = link->priv; int i; dev_dbg(&link->dev, "serial_release\n"); /* * Recheck to see if the device is still configured. */ for (i = 0; i < info->ndev; i++) serial8250_unregister_port(info->line[i]); if (!info->slave) pcmcia_disable_device(link); } static int serial_suspend(struct pcmcia_device *link) { struct serial_info *info = link->priv; int i; for (i = 0; i < info->ndev; i++) serial8250_suspend_port(info->line[i]); return 0; } static int serial_resume(struct pcmcia_device *link) { struct serial_info *info = link->priv; int i; for (i = 0; i < info->ndev; i++) serial8250_resume_port(info->line[i]); if (info->quirk && info->quirk->wakeup) info->quirk->wakeup(link); return 0; } static int serial_probe(struct pcmcia_device *link) { struct serial_info *info; dev_dbg(&link->dev, "serial_attach()\n"); /* Create new serial device */ info = kzalloc(sizeof (*info), GFP_KERNEL); if (!info) return -ENOMEM; info->p_dev = link; link->priv = info; link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; if (do_sound) link->config_flags |= CONF_ENABLE_SPKR; return serial_config(link); } static void serial_detach(struct pcmcia_device *link) { struct serial_info *info = link->priv; dev_dbg(&link->dev, "serial_detach\n"); /* * Ensure that the ports have been released. 
*/ serial_remove(link); /* free bits */ kfree(info); } /*====================================================================*/ static int setup_serial(struct pcmcia_device *handle, struct serial_info * info, unsigned int iobase, int irq) { struct uart_port port; int line; memset(&port, 0, sizeof (struct uart_port)); port.iobase = iobase; port.irq = irq; port.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ; port.uartclk = 1843200; port.dev = &handle->dev; if (buggy_uart) port.flags |= UPF_BUGGY_UART; if (info->quirk && info->quirk->setup) info->quirk->setup(handle, &port); line = serial8250_register_port(&port); if (line < 0) { printk(KERN_NOTICE "serial_cs: serial8250_register_port() at " "0x%04lx, irq %d failed\n", (u_long)iobase, irq); return -EINVAL; } info->line[info->ndev] = line; info->ndev++; return 0; } /*====================================================================*/ static int pfc_config(struct pcmcia_device *p_dev) { unsigned int port = 0; struct serial_info *info = p_dev->priv; if ((p_dev->resource[1]->end != 0) && (resource_size(p_dev->resource[1]) == 8)) { port = p_dev->resource[1]->start; info->slave = 1; } else if ((info->manfid == MANFID_OSITECH) && (resource_size(p_dev->resource[0]) == 0x40)) { port = p_dev->resource[0]->start + 0x28; info->slave = 1; } if (info->slave) return setup_serial(p_dev, info, port, p_dev->irq); dev_warn(&p_dev->dev, "no usable port range found, giving up\n"); return -ENODEV; } static int simple_config_check(struct pcmcia_device *p_dev, void *priv_data) { static const int size_table[2] = { 8, 16 }; int *try = priv_data; if (p_dev->resource[0]->start == 0) return -ENODEV; if ((*try & 0x1) == 0) p_dev->io_lines = 16; if (p_dev->resource[0]->end != size_table[(*try >> 1)]) return -ENODEV; p_dev->resource[0]->end = 8; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; return pcmcia_request_io(p_dev); } static int simple_config_check_notpicky(struct 
pcmcia_device *p_dev, void *priv_data) { static const unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; int j; if (p_dev->io_lines > 3) return -ENODEV; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; p_dev->resource[0]->end = 8; for (j = 0; j < 5; j++) { p_dev->resource[0]->start = base[j]; p_dev->io_lines = base[j] ? 16 : 3; if (!pcmcia_request_io(p_dev)) return 0; } return -ENODEV; } static int simple_config(struct pcmcia_device *link) { struct serial_info *info = link->priv; int i = -ENODEV, try; /* First pass: look for a config entry that looks normal. * Two tries: without IO aliases, then with aliases */ link->config_flags |= CONF_AUTO_SET_VPP; for (try = 0; try < 4; try++) if (!pcmcia_loop_config(link, simple_config_check, &try)) goto found_port; /* Second pass: try to find an entry that isn't picky about its base address, then try to grab any standard serial port address, and finally try to get any free port. */ if (!pcmcia_loop_config(link, simple_config_check_notpicky, NULL)) goto found_port; dev_warn(&link->dev, "no usable port range found, giving up\n"); return -1; found_port: if (info->multi && (info->manfid == MANFID_3COM)) link->config_index &= ~(0x08); /* * Apply any configuration quirks. 
*/ if (info->quirk && info->quirk->config) info->quirk->config(link); i = pcmcia_enable_device(link); if (i != 0) return -1; return setup_serial(link, info, link->resource[0]->start, link->irq); } static int multi_config_check(struct pcmcia_device *p_dev, void *priv_data) { int *multi = priv_data; if (p_dev->resource[1]->end) return -EINVAL; /* The quad port cards have bad CIS's, so just look for a window larger than 8 ports and assume it will be right */ if (p_dev->resource[0]->end <= 8) return -EINVAL; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; p_dev->resource[0]->end = *multi * 8; if (pcmcia_request_io(p_dev)) return -ENODEV; return 0; } static int multi_config_check_notpicky(struct pcmcia_device *p_dev, void *priv_data) { int *base2 = priv_data; if (!p_dev->resource[0]->end || !p_dev->resource[1]->end || p_dev->resource[0]->start + 8 != p_dev->resource[1]->start) return -ENODEV; p_dev->resource[0]->end = p_dev->resource[1]->end = 8; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; if (pcmcia_request_io(p_dev)) return -ENODEV; *base2 = p_dev->resource[0]->start + 8; return 0; } static int multi_config(struct pcmcia_device *link) { struct serial_info *info = link->priv; int i, base2 = 0; /* First, look for a generic full-sized window */ if (!pcmcia_loop_config(link, multi_config_check, &info->multi)) base2 = link->resource[0]->start + 8; else { /* If that didn't work, look for two windows */ info->multi = 2; if (pcmcia_loop_config(link, multi_config_check_notpicky, &base2)) { dev_warn(&link->dev, "no usable port range " "found, giving up\n"); return -ENODEV; } } if (!link->irq) dev_warn(&link->dev, "no usable IRQ found, continuing...\n"); /* * Apply any configuration quirks. 
*/ if (info->quirk && info->quirk->config) info->quirk->config(link); i = pcmcia_enable_device(link); if (i != 0) return -ENODEV; /* The Oxford Semiconductor OXCF950 cards are in fact single-port: * 8 registers are for the UART, the others are extra registers. * Siemen's MC45 PCMCIA (Possio's GCC) is OXCF950 based too. */ if (info->manfid == MANFID_OXSEMI || (info->manfid == MANFID_POSSIO && info->prodid == PRODID_POSSIO_GCC)) { int err; if (link->config_index == 1 || link->config_index == 3) { err = setup_serial(link, info, base2, link->irq); base2 = link->resource[0]->start; } else { err = setup_serial(link, info, link->resource[0]->start, link->irq); } info->c950ctrl = base2; /* * FIXME: We really should wake up the port prior to * handing it over to the serial layer. */ if (info->quirk && info->quirk->wakeup) info->quirk->wakeup(link); return 0; } setup_serial(link, info, link->resource[0]->start, link->irq); for (i = 0; i < info->multi - 1; i++) setup_serial(link, info, base2 + (8 * i), link->irq); return 0; } static int serial_check_for_multi(struct pcmcia_device *p_dev, void *priv_data) { struct serial_info *info = p_dev->priv; if (!p_dev->resource[0]->end) return -EINVAL; if ((!p_dev->resource[1]->end) && (p_dev->resource[0]->end % 8 == 0)) info->multi = p_dev->resource[0]->end >> 3; if ((p_dev->resource[1]->end) && (p_dev->resource[0]->end == 8) && (p_dev->resource[1]->end == 8)) info->multi = 2; return 0; /* break */ } static int serial_config(struct pcmcia_device * link) { struct serial_info *info = link->priv; int i; dev_dbg(&link->dev, "serial_config\n"); /* Is this a compliant multifunction card? */ info->multi = (link->socket->functions > 1); /* Is this a multiport card? 
*/ info->manfid = link->manf_id; info->prodid = link->card_id; for (i = 0; i < ARRAY_SIZE(quirks); i++) if ((quirks[i].manfid == ~0 || quirks[i].manfid == info->manfid) && (quirks[i].prodid == ~0 || quirks[i].prodid == info->prodid)) { info->quirk = &quirks[i]; break; } /* Another check for dual-serial cards: look for either serial or multifunction cards that ask for appropriate IO port ranges */ if ((info->multi == 0) && (link->has_func_id) && (link->socket->pcmcia_pfc == 0) && ((link->func_id == CISTPL_FUNCID_MULTI) || (link->func_id == CISTPL_FUNCID_SERIAL))) pcmcia_loop_config(link, serial_check_for_multi, info); /* * Apply any multi-port quirk. */ if (info->quirk && info->quirk->multi != -1) info->multi = info->quirk->multi; dev_info(&link->dev, "trying to set up [0x%04x:0x%04x] (pfc: %d, multi: %d, quirk: %p)\n", link->manf_id, link->card_id, link->socket->pcmcia_pfc, info->multi, info->quirk); if (link->socket->pcmcia_pfc) i = pfc_config(link); else if (info->multi > 1) i = multi_config(link); else i = simple_config(link); if (i || info->ndev == 0) goto failed; /* * Apply any post-init quirk. FIXME: This should really happen * before we register the port, since it might already be in use. 
*/ if (info->quirk && info->quirk->post) if (info->quirk->post(link)) goto failed; return 0; failed: dev_warn(&link->dev, "failed to initialize\n"); serial_remove(link); return -ENODEV; } static const struct pcmcia_device_id serial_ids[] = { PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0057, 0x0021), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0089, 0x110a), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0104, 0x000a), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0105, 0x0d0a), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0105, 0x0e0a), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0105, 0xea15), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0109, 0x0501), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0138, 0x110a), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0140, 0x000a), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0143, 0x3341), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0143, 0xc0ab), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x016c, 0x0081), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x021b, 0x0101), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x08a1, 0xc0ab), PCMCIA_PFC_DEVICE_PROD_ID123(1, "MEGAHERTZ", "CC/XJEM3288", "DATA/FAX/CELL ETHERNET MODEM", 0xf510db04, 0x04cd2988, 0x46a52d63), PCMCIA_PFC_DEVICE_PROD_ID123(1, "MEGAHERTZ", "CC/XJEM3336", "DATA/FAX/CELL ETHERNET MODEM", 0xf510db04, 0x0143b773, 0x46a52d63), PCMCIA_PFC_DEVICE_PROD_ID123(1, "MEGAHERTZ", "EM1144T", "PCMCIA MODEM", 0xf510db04, 0x856d66c8, 0xbd6c43ef), PCMCIA_PFC_DEVICE_PROD_ID123(1, "MEGAHERTZ", "XJEM1144/CCEM1144", "PCMCIA MODEM", 0xf510db04, 0x52d21e1e, 0xbd6c43ef), PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "CEM28", 0x2e3ee845, 0x0ea978ea), PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "CEM33", 0x2e3ee845, 0x80609023), PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "CEM56", 0x2e3ee845, 0xa650c32a), PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "REM10", 0x2e3ee845, 0x76df1d29), PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "XEM5600", 0x2e3ee845, 0xf1403719), PCMCIA_PFC_DEVICE_PROD_ID12(1, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4), PCMCIA_PFC_DEVICE_PROD_ID12(1, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e), PCMCIA_PFC_DEVICE_PROD_ID12(1, "D-Link", 
"DME336T", 0x1a424a1c, 0xb23897ff), PCMCIA_PFC_DEVICE_PROD_ID12(1, "Gateway 2000", "XJEM3336", 0xdd9989be, 0x662c394c), PCMCIA_PFC_DEVICE_PROD_ID12(1, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae), PCMCIA_PFC_DEVICE_PROD_ID12(1, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033), PCMCIA_PFC_DEVICE_PROD_ID12(1, "LINKSYS", "PCMLM336", 0xf7cb0b07, 0x7a821b58), PCMCIA_PFC_DEVICE_PROD_ID12(1, "MEGAHERTZ", "XJEM1144/CCEM1144", 0xf510db04, 0x52d21e1e), PCMCIA_PFC_DEVICE_PROD_ID12(1, "MICRO RESEARCH", "COMBO-L/M-336", 0xb2ced065, 0x3ced0555), PCMCIA_PFC_DEVICE_PROD_ID12(1, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064), PCMCIA_PFC_DEVICE_PROD_ID12(1, "Ositech", "Trumpcard:Jack of Diamonds Modem+Ethernet", 0xc2f80cd, 0x656947b9), PCMCIA_PFC_DEVICE_PROD_ID12(1, "Ositech", "Trumpcard:Jack of Hearts Modem+Ethernet", 0xc2f80cd, 0xdc9ba5ed), PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc), PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f), PCMCIA_PFC_DEVICE_PROD_ID12(1, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed), PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070), PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562), PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0104, 0x0070), PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x016c, 0x0020), PCMCIA_MFC_DEVICE_PROD_ID123(1, "APEX DATA", "MULTICARD", "ETHERNET-MODEM", 0x11c2da09, 0x7289dc5d, 0xaad95e1f), PCMCIA_MFC_DEVICE_PROD_ID12(1, "IBM", "Home and Away 28.8 PC Card ", 0xb569a6e5, 0x5bd4ff2c), PCMCIA_MFC_DEVICE_PROD_ID12(1, "IBM", "Home and Away Credit Card Adapter", 0xb569a6e5, 0x4bdf15c3), PCMCIA_MFC_DEVICE_PROD_ID12(1, "IBM", "w95 Home and Away Credit Card ", 0xb569a6e5, 
0xae911c15), PCMCIA_MFC_DEVICE_PROD_ID1(1, "Motorola MARQUIS", 0xf03e4e77), PCMCIA_MFC_DEVICE_PROD_ID2(1, "FAX/Modem/Ethernet Combo Card ", 0x1ed59302), PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0301), PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x0276), PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0039), PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0006), PCMCIA_DEVICE_MANF_CARD(0x0105, 0x0101), /* TDK DF2814 */ PCMCIA_DEVICE_MANF_CARD(0x0105, 0x100a), /* Xircom CM-56G */ PCMCIA_DEVICE_MANF_CARD(0x0105, 0x3e0a), /* TDK DF5660 */ PCMCIA_DEVICE_MANF_CARD(0x0105, 0x410a), PCMCIA_DEVICE_MANF_CARD(0x0107, 0x0002), /* USRobotics 14,400 */ PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0d50), PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0d51), PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0d52), PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0d53), PCMCIA_DEVICE_MANF_CARD(0x010b, 0xd180), PCMCIA_DEVICE_MANF_CARD(0x0115, 0x3330), /* USRobotics/SUN 14,400 */ PCMCIA_DEVICE_MANF_CARD(0x0124, 0x0100), /* Nokia DTP-2 ver II */ PCMCIA_DEVICE_MANF_CARD(0x0134, 0x5600), /* LASAT COMMUNICATIONS A/S */ PCMCIA_DEVICE_MANF_CARD(0x0137, 0x000e), PCMCIA_DEVICE_MANF_CARD(0x0137, 0x001b), PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0025), PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0045), PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0052), PCMCIA_DEVICE_MANF_CARD(0x016c, 0x0006), /* Psion 56K+Fax */ PCMCIA_DEVICE_MANF_CARD(0x0200, 0x0001), /* MultiMobile */ PCMCIA_DEVICE_PROD_ID134("ADV", "TECH", "COMpad-32/85", 0x67459937, 0x916d02ba, 0x8fbe92ae), PCMCIA_DEVICE_PROD_ID124("GATEWAY2000", "CC3144", "PCMCIA MODEM", 0x506bccae, 0xcb3685f1, 0xbd6c43ef), PCMCIA_DEVICE_PROD_ID14("MEGAHERTZ", "PCMCIA MODEM", 0xf510db04, 0xbd6c43ef), PCMCIA_DEVICE_PROD_ID124("TOSHIBA", "T144PF", "PCMCIA MODEM", 0xb4585a1a, 0x7271409c, 0xbd6c43ef), PCMCIA_DEVICE_PROD_ID123("FUJITSU", "FC14F ", "MBH10213", 0x6ee5a3d8, 0x30ead12b, 0xb00f05a0), PCMCIA_DEVICE_PROD_ID123("Novatel Wireless", "Merlin UMTS Modem", "U630", 0x32607776, 0xd9e73b13, 0xe87332e), PCMCIA_DEVICE_PROD_ID13("MEGAHERTZ", "V.34 PCMCIA MODEM", 0xf510db04, 0xbb2cce4a), 
PCMCIA_DEVICE_PROD_ID12("Brain Boxes", "Bluetooth PC Card", 0xee138382, 0xd4ce9b02), PCMCIA_DEVICE_PROD_ID12("CIRRUS LOGIC", "FAX MODEM", 0xe625f451, 0xcecd6dfa), PCMCIA_DEVICE_PROD_ID12("COMPAQ", "PCMCIA 28800 FAX/DATA MODEM", 0xa3a3062c, 0x8cbd7c76), PCMCIA_DEVICE_PROD_ID12("COMPAQ", "PCMCIA 33600 FAX/DATA MODEM", 0xa3a3062c, 0x5a00ce95), PCMCIA_DEVICE_PROD_ID12("Computerboards, Inc.", "PCM-COM422", 0xd0b78f51, 0x7e2d49ed), PCMCIA_DEVICE_PROD_ID12("Dr. Neuhaus", "FURY CARD 14K4", 0x76942813, 0x8b96ce65), PCMCIA_DEVICE_PROD_ID12("IBM", "ISDN/56K/GSM", 0xb569a6e5, 0xfee5297b), PCMCIA_DEVICE_PROD_ID12("Intelligent", "ANGIA FAX/MODEM", 0xb496e65e, 0xf31602a6), PCMCIA_DEVICE_PROD_ID12("Intel", "MODEM 2400+", 0x816cc815, 0x412729fb), PCMCIA_DEVICE_PROD_ID12("Intertex", "IX34-PCMCIA", 0xf8a097e3, 0x97880447), PCMCIA_DEVICE_PROD_ID12("IOTech Inc ", "PCMCIA Dual RS-232 Serial Port Card", 0x3bd2d898, 0x92abc92f), PCMCIA_DEVICE_PROD_ID12("MACRONIX", "FAX/MODEM", 0x668388b3, 0x3f9bdf2f), PCMCIA_DEVICE_PROD_ID12("Multi-Tech", "MT1432LT", 0x5f73be51, 0x0b3e2383), PCMCIA_DEVICE_PROD_ID12("Multi-Tech", "MT2834LT", 0x5f73be51, 0x4cd7c09e), PCMCIA_DEVICE_PROD_ID12("OEM ", "C288MX ", 0xb572d360, 0xd2385b7a), PCMCIA_DEVICE_PROD_ID12("Option International", "V34bis GSM/PSTN Data/Fax Modem", 0x9d7cd6f5, 0x5cb8bf41), PCMCIA_DEVICE_PROD_ID12("PCMCIA ", "C336MX ", 0x99bcafe9, 0xaa25bcab), PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "PCMCIA Dual RS-232 Serial Port Card", 0xc4420b35, 0x92abc92f), PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "Dual RS-232 Serial Port PC Card", 0xc4420b35, 0x031a380d), PCMCIA_DEVICE_PROD_ID12("Telia", "SurfinBird 560P/A+", 0xe2cdd5e, 0xc9314b38), PCMCIA_DEVICE_PROD_ID1("Smart Serial Port", 0x2d8ce292), PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "PCMCIA", "EN2218-LAN/MODEM", 0x281f1c5d, 0x570f348e, "cis/PCMLM28.cis"), PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "PCMCIA", "UE2218-LAN/MODEM", 0x281f1c5d, 0x6fdcacee, "cis/PCMLM28.cis"), PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", 
"Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "TOSHIBA", "Modem/LAN Card", 0xb4585a1a, 0x53f922f8, "cis/PCMLM28.cis"), PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0556, "cis/3CCFEM556.cis"), PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "cis/DP83903.cis"), PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"), PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */ PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC860", 0xd85f6206, 0x698f93db, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC860 3G Network Adapter R1 */ PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC710/AC750", 0xd85f6206, 0x761b11e0, "cis/SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */ PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */ PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */ PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "cis/MT5634ZLX.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "cis/COMpad2.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"), PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", 
"COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"), PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"), PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "cis/GLOBETROTTER.cis"), PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b), PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100",0x19ca78af,0x71d98e83), PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232 1.00.",0x19ca78af,0x69fb7490), PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232",0x19ca78af,0xb6bc0235), PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.","SERIAL CARD: CF232",0x63f2e0bd,0xb9e175d3), PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.","SERIAL CARD: CF232-5",0x63f2e0bd,0xfce33442), PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF232",0x3beb8cf2,0x171e7190), PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF232-5",0x3beb8cf2,0x20da4262), PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF428",0x3beb8cf2,0xea5dd57d), PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF500",0x3beb8cf2,0xd77255fa), PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: IC232",0x3beb8cf2,0x6a709903), PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: SL232",0x3beb8cf2,0x18430676), PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: XL232",0x3beb8cf2,0x6f933767), PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: CF332",0x3beb8cf2,0x16dc1ba7), PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL332",0x3beb8cf2,0x19816c41), PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL385",0x3beb8cf2,0x64112029), PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4), PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial+Parallel Port: SP230",0x3beb8cf2,0xdb9e58bc), PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: CF332",0x3beb8cf2,0x16dc1ba7), PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL332",0x3beb8cf2,0x19816c41), 
PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL385",0x3beb8cf2,0x64112029), PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4), PCMCIA_MFC_DEVICE_PROD_ID12(2,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4), PCMCIA_MFC_DEVICE_PROD_ID12(3,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4), PCMCIA_DEVICE_MANF_CARD(0x0279, 0x950b), /* too generic */ /* PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0160, 0x0002), */ /* PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0160, 0x0002), */ PCMCIA_DEVICE_FUNC_ID(2), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, serial_ids); MODULE_FIRMWARE("cis/PCMLM28.cis"); MODULE_FIRMWARE("cis/DP83903.cis"); MODULE_FIRMWARE("cis/3CCFEM556.cis"); MODULE_FIRMWARE("cis/3CXEM556.cis"); MODULE_FIRMWARE("cis/SW_8xx_SER.cis"); MODULE_FIRMWARE("cis/SW_7xx_SER.cis"); MODULE_FIRMWARE("cis/SW_555_SER.cis"); MODULE_FIRMWARE("cis/MT5634ZLX.cis"); MODULE_FIRMWARE("cis/COMpad2.cis"); MODULE_FIRMWARE("cis/COMpad4.cis"); MODULE_FIRMWARE("cis/RS-COM-2P.cis"); static struct pcmcia_driver serial_cs_driver = { .owner = THIS_MODULE, .name = "serial_cs", .probe = serial_probe, .remove = serial_detach, .id_table = serial_ids, .suspend = serial_suspend, .resume = serial_resume, }; static int __init init_serial_cs(void) { return pcmcia_register_driver(&serial_cs_driver); } static void __exit exit_serial_cs(void) { pcmcia_unregister_driver(&serial_cs_driver); } module_init(init_serial_cs); module_exit(exit_serial_cs); MODULE_LICENSE("GPL");
gpl-2.0
YUPlayGodDev/platform_kernel_cyanogen_msm8916
drivers/watchdog/iTCO_vendor_support.c
4932
11089
/* * intel TCO vendor specific watchdog driver support * * (c) Copyright 2006-2009 Wim Van Sebroeck <wim@iguana.be>. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Neither Wim Van Sebroeck nor Iguana vzw. admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. */ /* * Includes, defines, variables, module parameters, ... */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* Module and version information */ #define DRV_NAME "iTCO_vendor_support" #define DRV_VERSION "1.04" /* Includes */ #include <linux/module.h> /* For module specific items */ #include <linux/moduleparam.h> /* For new moduleparam's */ #include <linux/types.h> /* For standard types (like size_t) */ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/panic/... */ #include <linux/init.h> /* For __init/__exit/... */ #include <linux/ioport.h> /* For io-port access */ #include <linux/io.h> /* For inb/outb/... */ #include "iTCO_vendor.h" /* List of vendor support modes */ /* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */ #define SUPERMICRO_OLD_BOARD 1 /* SuperMicro Pentium 4 / Xeon 4 / EMT64T Era Systems */ #define SUPERMICRO_NEW_BOARD 2 /* Broken BIOS */ #define BROKEN_BIOS 911 static int vendorsupport; module_param(vendorsupport, int, 0); MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default=" "0 (none), 1=SuperMicro Pent3, 2=SuperMicro Pent4+, " "911=Broken SMI BIOS"); /* * Vendor Specific Support */ /* * Vendor Support: 1 * Board: Super Micro Computer Inc. 370SSE+-OEM1/P3TSSE * iTCO chipset: ICH2 * * Code contributed by: R. Seretny <lkpatches@paypc.com> * Documentation obtained by R. 
Seretny from SuperMicro Technical Support * * To enable Watchdog function: * BIOS setup -> Power -> TCO Logic SMI Enable -> Within5Minutes * This setting enables SMI to clear the watchdog expired flag. * If BIOS or CPU fail which may cause SMI hang, then system will * reboot. When application starts to use watchdog function, * application has to take over the control from SMI. * * For P3TSSE, J36 jumper needs to be removed to enable the Watchdog * function. * * Note: The system will reboot when Expire Flag is set TWICE. * So, if the watchdog timer is 20 seconds, then the maximum hang * time is about 40 seconds, and the minimum hang time is about * 20.6 seconds. */ static void supermicro_old_pre_start(struct resource *smires) { unsigned long val32; /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */ val32 = inl(smires->start); val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */ outl(val32, smires->start); /* Needed to activate watchdog */ } static void supermicro_old_pre_stop(struct resource *smires) { unsigned long val32; /* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */ val32 = inl(smires->start); val32 |= 0x00002000; /* Turn on SMI clearing watchdog */ outl(val32, smires->start); /* Needed to deactivate watchdog */ } /* * Vendor Support: 2 * Board: Super Micro Computer Inc. P4SBx, P4DPx * iTCO chipset: ICH4 * * Code contributed by: R. Seretny <lkpatches@paypc.com> * Documentation obtained by R. Seretny from SuperMicro Technical Support * * To enable Watchdog function: * 1. BIOS * For P4SBx: * BIOS setup -> Advanced -> Integrated Peripherals -> Watch Dog Feature * For P4DPx: * BIOS setup -> Advanced -> I/O Device Configuration -> Watch Dog * This setting enables or disables Watchdog function. When enabled, the * default watchdog timer is set to be 5 minutes (about 4m35s). It is * enough to load and run the OS. The application (service or driver) has * to take over the control once OS is running up and before watchdog * expires. 
* * 2. JUMPER * For P4SBx: JP39 * For P4DPx: JP37 * This jumper is used for safety. Closed is enabled. This jumper * prevents user enables watchdog in BIOS by accident. * * To enable Watch Dog function, both BIOS and JUMPER must be enabled. * * The documentation lists motherboards P4SBx and P4DPx series as of * 20-March-2002. However, this code works flawlessly with much newer * motherboards, such as my X6DHR-8G2 (SuperServer 6014H-82). * * The original iTCO driver as written does not actually reset the * watchdog timer on these machines, as a result they reboot after five * minutes. * * NOTE: You may leave the Watchdog function disabled in the SuperMicro * BIOS to avoid a "boot-race"... This driver will enable watchdog * functionality even if it's disabled in the BIOS once the /dev/watchdog * file is opened. */ /* I/O Port's */ #define SM_REGINDEX 0x2e /* SuperMicro ICH4+ Register Index */ #define SM_DATAIO 0x2f /* SuperMicro ICH4+ Register Data I/O */ /* Control Register's */ #define SM_CTLPAGESW 0x07 /* SuperMicro ICH4+ Control Page Switch */ #define SM_CTLPAGE 0x08 /* SuperMicro ICH4+ Control Page Num */ #define SM_WATCHENABLE 0x30 /* Watchdog enable: Bit 0: 0=off, 1=on */ #define SM_WATCHPAGE 0x87 /* Watchdog unlock control page */ #define SM_ENDWATCH 0xAA /* Watchdog lock control page */ #define SM_COUNTMODE 0xf5 /* Watchdog count mode select */ /* (Bit 3: 0 = seconds, 1 = minutes */ #define SM_WATCHTIMER 0xf6 /* 8-bits, Watchdog timer counter (RW) */ #define SM_RESETCONTROL 0xf7 /* Watchdog reset control */ /* Bit 6: timer is reset by kbd interrupt */ /* Bit 7: timer is reset by mouse interrupt */ static void supermicro_new_unlock_watchdog(void) { /* Write 0x87 to port 0x2e twice */ outb(SM_WATCHPAGE, SM_REGINDEX); outb(SM_WATCHPAGE, SM_REGINDEX); /* Switch to watchdog control page */ outb(SM_CTLPAGESW, SM_REGINDEX); outb(SM_CTLPAGE, SM_DATAIO); } static void supermicro_new_lock_watchdog(void) { outb(SM_ENDWATCH, SM_REGINDEX); } static void 
supermicro_new_pre_start(unsigned int heartbeat) { unsigned int val; supermicro_new_unlock_watchdog(); /* Watchdog timer setting needs to be in seconds*/ outb(SM_COUNTMODE, SM_REGINDEX); val = inb(SM_DATAIO); val &= 0xF7; outb(val, SM_DATAIO); /* Write heartbeat interval to WDOG */ outb(SM_WATCHTIMER, SM_REGINDEX); outb((heartbeat & 255), SM_DATAIO); /* Make sure keyboard/mouse interrupts don't interfere */ outb(SM_RESETCONTROL, SM_REGINDEX); val = inb(SM_DATAIO); val &= 0x3f; outb(val, SM_DATAIO); /* enable watchdog by setting bit 0 of Watchdog Enable to 1 */ outb(SM_WATCHENABLE, SM_REGINDEX); val = inb(SM_DATAIO); val |= 0x01; outb(val, SM_DATAIO); supermicro_new_lock_watchdog(); } static void supermicro_new_pre_stop(void) { unsigned int val; supermicro_new_unlock_watchdog(); /* disable watchdog by setting bit 0 of Watchdog Enable to 0 */ outb(SM_WATCHENABLE, SM_REGINDEX); val = inb(SM_DATAIO); val &= 0xFE; outb(val, SM_DATAIO); supermicro_new_lock_watchdog(); } static void supermicro_new_pre_set_heartbeat(unsigned int heartbeat) { supermicro_new_unlock_watchdog(); /* reset watchdog timeout to heartveat value */ outb(SM_WATCHTIMER, SM_REGINDEX); outb((heartbeat & 255), SM_DATAIO); supermicro_new_lock_watchdog(); } /* * Vendor Support: 911 * Board: Some Intel ICHx based motherboards * iTCO chipset: ICH7+ * * Some Intel motherboards have a broken BIOS implementation: i.e. * the SMI handler clear's the TIMEOUT bit in the TC01_STS register * and does not reload the time. Thus the TCO watchdog does not reboot * the system. * * These are the conclusions of Andriy Gapon <avg@icyb.net.ua> after * debugging: the SMI handler is quite simple - it tests value in * TCO1_CNT against 0x800, i.e. checks TCO_TMR_HLT. If the bit is set * the handler goes into an infinite loop, apparently to allow the * second timeout and reboot. Otherwise it simply clears TIMEOUT bit * in TCO1_STS and that's it. 
* So the logic seems to be reversed, because it is hard to see how * TIMEOUT can get set to 1 and SMI generated when TCO_TMR_HLT is set * (other than a transitional effect). * * The only fix found to get the motherboard(s) to reboot is to put * the glb_smi_en bit to 0. This is a dirty hack that bypasses the * broken code by disabling Global SMI. * * WARNING: globally disabling SMI could possibly lead to dramatic * problems, especially on laptops! I.e. various ACPI things where * SMI is used for communication between OS and firmware. * * Don't use this fix if you don't need to!!! */ static void broken_bios_start(struct resource *smires) { unsigned long val32; val32 = inl(smires->start); /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# Bit 0: GBL_SMI_EN -> 0 = No SMI# will be generated by ICH. */ val32 &= 0xffffdffe; outl(val32, smires->start); } static void broken_bios_stop(struct resource *smires) { unsigned long val32; val32 = inl(smires->start); /* Bit 13: TCO_EN -> 1 = Enables TCO logic generating an SMI# Bit 0: GBL_SMI_EN -> 1 = Turn global SMI on again. 
*/ val32 |= 0x00002001; outl(val32, smires->start); } /* * Generic Support Functions */ void iTCO_vendor_pre_start(struct resource *smires, unsigned int heartbeat) { switch (vendorsupport) { case SUPERMICRO_OLD_BOARD: supermicro_old_pre_start(smires); break; case SUPERMICRO_NEW_BOARD: supermicro_new_pre_start(heartbeat); break; case BROKEN_BIOS: broken_bios_start(smires); break; } } EXPORT_SYMBOL(iTCO_vendor_pre_start); void iTCO_vendor_pre_stop(struct resource *smires) { switch (vendorsupport) { case SUPERMICRO_OLD_BOARD: supermicro_old_pre_stop(smires); break; case SUPERMICRO_NEW_BOARD: supermicro_new_pre_stop(); break; case BROKEN_BIOS: broken_bios_stop(smires); break; } } EXPORT_SYMBOL(iTCO_vendor_pre_stop); void iTCO_vendor_pre_keepalive(struct resource *smires, unsigned int heartbeat) { if (vendorsupport == SUPERMICRO_NEW_BOARD) supermicro_new_pre_set_heartbeat(heartbeat); } EXPORT_SYMBOL(iTCO_vendor_pre_keepalive); void iTCO_vendor_pre_set_heartbeat(unsigned int heartbeat) { if (vendorsupport == SUPERMICRO_NEW_BOARD) supermicro_new_pre_set_heartbeat(heartbeat); } EXPORT_SYMBOL(iTCO_vendor_pre_set_heartbeat); int iTCO_vendor_check_noreboot_on(void) { switch (vendorsupport) { case SUPERMICRO_OLD_BOARD: return 0; default: return 1; } } EXPORT_SYMBOL(iTCO_vendor_check_noreboot_on); static int __init iTCO_vendor_init_module(void) { pr_info("vendor-support=%d\n", vendorsupport); return 0; } static void __exit iTCO_vendor_exit_module(void) { pr_info("Module Unloaded\n"); } module_init(iTCO_vendor_init_module); module_exit(iTCO_vendor_exit_module); MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>, " "R. Seretny <lkpatches@paypc.com>"); MODULE_DESCRIPTION("Intel TCO Vendor Specific WatchDog Timer Driver Support"); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL");
gpl-2.0
Split-Screen/android_kernel_xiaomi_cancro
drivers/usb/gadget/serial.c
4932
7760
/* * serial.c -- USB gadget serial driver * * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) * Copyright (C) 2008 by David Brownell * Copyright (C) 2008 by Nokia Corporation * * This software is distributed under the terms of the GNU General * Public License ("GPL") as published by the Free Software Foundation, * either version 2 of that License or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/utsname.h> #include <linux/device.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include "u_serial.h" #include "gadget_chips.h" /* Defines */ #define GS_VERSION_STR "v2.4" #define GS_VERSION_NUM 0x2400 #define GS_LONG_NAME "Gadget Serial" #define GS_VERSION_NAME GS_LONG_NAME " " GS_VERSION_STR /*-------------------------------------------------------------------------*/ /* * Kbuild is not very cooperative with respect to linking separately * compiled library objects into one module. So for now we won't use * separate compilation ... ensuring init/exit sections work to shrink * the runtime footprint, and giving us at least some parts of what * a "gcc --combine ... part1.c part2.c part3.c ... " build would. */ #include "composite.c" #include "usbstring.c" #include "config.c" #include "epautoconf.c" #include "f_acm.c" #include "f_obex.c" #include "f_serial.c" #include "u_serial.c" /*-------------------------------------------------------------------------*/ /* Thanks to NetChip Technologies for donating this product ID. * * DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!! * Instead: allocate your own, using normal USB-IF procedures. */ #define GS_VENDOR_ID 0x0525 /* NetChip */ #define GS_PRODUCT_ID 0xa4a6 /* Linux-USB Serial Gadget */ #define GS_CDC_PRODUCT_ID 0xa4a7 /* ... as CDC-ACM */ #define GS_CDC_OBEX_PRODUCT_ID 0xa4a9 /* ... 
as CDC-OBEX */ /* string IDs are assigned dynamically */ #define STRING_MANUFACTURER_IDX 0 #define STRING_PRODUCT_IDX 1 #define STRING_DESCRIPTION_IDX 2 static char manufacturer[50]; static struct usb_string strings_dev[] = { [STRING_MANUFACTURER_IDX].s = manufacturer, [STRING_PRODUCT_IDX].s = GS_VERSION_NAME, [STRING_DESCRIPTION_IDX].s = NULL /* updated; f(use_acm) */, { } /* end of list */ }; static struct usb_gadget_strings stringtab_dev = { .language = 0x0409, /* en-us */ .strings = strings_dev, }; static struct usb_gadget_strings *dev_strings[] = { &stringtab_dev, NULL, }; static struct usb_device_descriptor device_desc = { .bLength = USB_DT_DEVICE_SIZE, .bDescriptorType = USB_DT_DEVICE, .bcdUSB = cpu_to_le16(0x0200), /* .bDeviceClass = f(use_acm) */ .bDeviceSubClass = 0, .bDeviceProtocol = 0, /* .bMaxPacketSize0 = f(hardware) */ .idVendor = cpu_to_le16(GS_VENDOR_ID), /* .idProduct = f(use_acm) */ /* .bcdDevice = f(hardware) */ /* .iManufacturer = DYNAMIC */ /* .iProduct = DYNAMIC */ .bNumConfigurations = 1, }; static struct usb_otg_descriptor otg_descriptor = { .bLength = sizeof otg_descriptor, .bDescriptorType = USB_DT_OTG, /* REVISIT SRP-only hardware is possible, although * it would not be called "OTG" ... 
*/ .bmAttributes = USB_OTG_SRP | USB_OTG_HNP, }; static const struct usb_descriptor_header *otg_desc[] = { (struct usb_descriptor_header *) &otg_descriptor, NULL, }; /*-------------------------------------------------------------------------*/ /* Module */ MODULE_DESCRIPTION(GS_VERSION_NAME); MODULE_AUTHOR("Al Borchers"); MODULE_AUTHOR("David Brownell"); MODULE_LICENSE("GPL"); static bool use_acm = true; module_param(use_acm, bool, 0); MODULE_PARM_DESC(use_acm, "Use CDC ACM, default=yes"); static bool use_obex = false; module_param(use_obex, bool, 0); MODULE_PARM_DESC(use_obex, "Use CDC OBEX, default=no"); static unsigned n_ports = 1; module_param(n_ports, uint, 0); MODULE_PARM_DESC(n_ports, "number of ports to create, default=1"); /*-------------------------------------------------------------------------*/ static int __init serial_bind_config(struct usb_configuration *c) { unsigned i; int status = 0; for (i = 0; i < n_ports && status == 0; i++) { if (use_acm) status = acm_bind_config(c, i); else if (use_obex) status = obex_bind_config(c, i); else status = gser_bind_config(c, i); } return status; } static struct usb_configuration serial_config_driver = { /* .label = f(use_acm) */ /* .bConfigurationValue = f(use_acm) */ /* .iConfiguration = DYNAMIC */ .bmAttributes = USB_CONFIG_ATT_SELFPOWER, }; static int __init gs_bind(struct usb_composite_dev *cdev) { int gcnum; struct usb_gadget *gadget = cdev->gadget; int status; status = gserial_setup(cdev->gadget, n_ports); if (status < 0) return status; /* Allocate string descriptor numbers ... note that string * contents can be overridden by the composite_dev glue. 
*/ /* device description: manufacturer, product */ snprintf(manufacturer, sizeof manufacturer, "%s %s with %s", init_utsname()->sysname, init_utsname()->release, gadget->name); status = usb_string_id(cdev); if (status < 0) goto fail; strings_dev[STRING_MANUFACTURER_IDX].id = status; device_desc.iManufacturer = status; status = usb_string_id(cdev); if (status < 0) goto fail; strings_dev[STRING_PRODUCT_IDX].id = status; device_desc.iProduct = status; /* config description */ status = usb_string_id(cdev); if (status < 0) goto fail; strings_dev[STRING_DESCRIPTION_IDX].id = status; serial_config_driver.iConfiguration = status; /* set up other descriptors */ gcnum = usb_gadget_controller_number(gadget); if (gcnum >= 0) device_desc.bcdDevice = cpu_to_le16(GS_VERSION_NUM | gcnum); else { /* this is so simple (for now, no altsettings) that it * SHOULD NOT have problems with bulk-capable hardware. * so warn about unrcognized controllers -- don't panic. * * things like configuration and altsetting numbering * can need hardware-specific attention though. */ pr_warning("gs_bind: controller '%s' not recognized\n", gadget->name); device_desc.bcdDevice = cpu_to_le16(GS_VERSION_NUM | 0x0099); } if (gadget_is_otg(cdev->gadget)) { serial_config_driver.descriptors = otg_desc; serial_config_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP; } /* register our configuration */ status = usb_add_config(cdev, &serial_config_driver, serial_bind_config); if (status < 0) goto fail; INFO(cdev, "%s\n", GS_VERSION_NAME); return 0; fail: gserial_cleanup(); return status; } static struct usb_composite_driver gserial_driver = { .name = "g_serial", .dev = &device_desc, .strings = dev_strings, .max_speed = USB_SPEED_SUPER, }; static int __init init(void) { /* We *could* export two configs; that'd be much cleaner... * but neither of these product IDs was defined that way. 
*/ if (use_acm) { serial_config_driver.label = "CDC ACM config"; serial_config_driver.bConfigurationValue = 2; device_desc.bDeviceClass = USB_CLASS_COMM; device_desc.idProduct = cpu_to_le16(GS_CDC_PRODUCT_ID); } else if (use_obex) { serial_config_driver.label = "CDC OBEX config"; serial_config_driver.bConfigurationValue = 3; device_desc.bDeviceClass = USB_CLASS_COMM; device_desc.idProduct = cpu_to_le16(GS_CDC_OBEX_PRODUCT_ID); } else { serial_config_driver.label = "Generic Serial config"; serial_config_driver.bConfigurationValue = 1; device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC; device_desc.idProduct = cpu_to_le16(GS_PRODUCT_ID); } strings_dev[STRING_DESCRIPTION_IDX].s = serial_config_driver.label; return usb_composite_probe(&gserial_driver, gs_bind); } module_init(init); static void __exit cleanup(void) { usb_composite_unregister(&gserial_driver); gserial_cleanup(); } module_exit(cleanup);
gpl-2.0
DooMLoRD/android_kernel_sony_msm8974
drivers/net/wireless/prism54/isl_ioctl.c
5188
77305
/* * Copyright (C) 2002 Intersil Americas Inc. * (C) 2003,2004 Aurelien Alleaume <slts@free.fr> * (C) 2003 Herbert Valerio Riedel <hvr@gnu.org> * (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/capability.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/if_arp.h> #include <linux/slab.h> #include <linux/pci.h> #include <asm/uaccess.h> #include "prismcompat.h" #include "isl_ioctl.h" #include "islpci_mgt.h" #include "isl_oid.h" /* additional types and defs for isl38xx fw */ #include "oid_mgt.h" #include <net/iw_handler.h> /* New driver API */ #define KEY_SIZE_WEP104 13 /* 104/128-bit WEP keys */ #define KEY_SIZE_WEP40 5 /* 40/64-bit WEP keys */ /* KEY_SIZE_TKIP should match isl_oid.h, struct obj_key.key[] size */ #define KEY_SIZE_TKIP 32 /* TKIP keys */ static void prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid, u8 *wpa_ie, size_t wpa_ie_len); static size_t prism54_wpa_bss_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie); static int prism54_set_wpa(struct net_device *, struct iw_request_info *, __u32 *, char *); /* In 500 kbps */ static const unsigned char scan_rate_list[] = { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 }; /** * prism54_mib_mode_helper - MIB change mode helper function * @mib: the &struct islpci_mib object to modify * @iw_mode: new mode 
(%IW_MODE_*) * * This is a helper function, hence it does not lock. Make sure * caller deals with locking *if* necessary. This function sets the * mode-dependent mib values and does the mapping of the Linux * Wireless API modes to Device firmware modes. It also checks for * correct valid Linux wireless modes. */ static int prism54_mib_mode_helper(islpci_private *priv, u32 iw_mode) { u32 config = INL_CONFIG_MANUALRUN; u32 mode, bsstype; /* For now, just catch early the Repeater and Secondary modes here */ if (iw_mode == IW_MODE_REPEAT || iw_mode == IW_MODE_SECOND) { printk(KERN_DEBUG "%s(): Sorry, Repeater mode and Secondary mode " "are not yet supported by this driver.\n", __func__); return -EINVAL; } priv->iw_mode = iw_mode; switch (iw_mode) { case IW_MODE_AUTO: mode = INL_MODE_CLIENT; bsstype = DOT11_BSSTYPE_ANY; break; case IW_MODE_ADHOC: mode = INL_MODE_CLIENT; bsstype = DOT11_BSSTYPE_IBSS; break; case IW_MODE_INFRA: mode = INL_MODE_CLIENT; bsstype = DOT11_BSSTYPE_INFRA; break; case IW_MODE_MASTER: mode = INL_MODE_AP; bsstype = DOT11_BSSTYPE_INFRA; break; case IW_MODE_MONITOR: mode = INL_MODE_PROMISCUOUS; bsstype = DOT11_BSSTYPE_ANY; config |= INL_CONFIG_RXANNEX; break; default: return -EINVAL; } if (init_wds) config |= INL_CONFIG_WDS; mgt_set(priv, DOT11_OID_BSSTYPE, &bsstype); mgt_set(priv, OID_INL_CONFIG, &config); mgt_set(priv, OID_INL_MODE, &mode); return 0; } /** * prism54_mib_init - fill MIB cache with defaults * * this function initializes the struct given as @mib with defaults, * of which many are retrieved from the global module parameter * variables. 
*/ void prism54_mib_init(islpci_private *priv) { u32 channel, authen, wep, filter, dot1x, mlme, conformance, power, mode; struct obj_buffer psm_buffer = { .size = PSM_BUFFER_SIZE, .addr = priv->device_psm_buffer }; channel = CARD_DEFAULT_CHANNEL; authen = CARD_DEFAULT_AUTHEN; wep = CARD_DEFAULT_WEP; filter = CARD_DEFAULT_FILTER; /* (0) Do not filter un-encrypted data */ dot1x = CARD_DEFAULT_DOT1X; mlme = CARD_DEFAULT_MLME_MODE; conformance = CARD_DEFAULT_CONFORMANCE; power = 127; mode = CARD_DEFAULT_IW_MODE; mgt_set(priv, DOT11_OID_CHANNEL, &channel); mgt_set(priv, DOT11_OID_AUTHENABLE, &authen); mgt_set(priv, DOT11_OID_PRIVACYINVOKED, &wep); mgt_set(priv, DOT11_OID_PSMBUFFER, &psm_buffer); mgt_set(priv, DOT11_OID_EXUNENCRYPTED, &filter); mgt_set(priv, DOT11_OID_DOT1XENABLE, &dot1x); mgt_set(priv, DOT11_OID_MLMEAUTOLEVEL, &mlme); mgt_set(priv, OID_INL_DOT11D_CONFORMANCE, &conformance); mgt_set(priv, OID_INL_OUTPUTPOWER, &power); /* This sets all of the mode-dependent values */ prism54_mib_mode_helper(priv, mode); } /* this will be executed outside of atomic context thanks to * schedule_work(), thus we can as well use sleeping semaphore * locking */ void prism54_update_stats(struct work_struct *work) { islpci_private *priv = container_of(work, islpci_private, stats_work); char *data; int j; struct obj_bss bss, *bss2; union oid_res_t r; mutex_lock(&priv->stats_lock); /* Noise floor. * I'm not sure if the unit is dBm. * Note : If we are not connected, this value seems to be irrelevant. */ mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r); priv->local_iwstatistics.qual.noise = r.u; /* Get the rssi of the link. To do this we need to retrieve a bss. */ /* First get the MAC address of the AP we are associated with. 
*/ mgt_get_request(priv, DOT11_OID_BSSID, 0, NULL, &r); data = r.ptr; /* copy this MAC to the bss */ memcpy(bss.address, data, 6); kfree(data); /* now ask for the corresponding bss */ j = mgt_get_request(priv, DOT11_OID_BSSFIND, 0, (void *) &bss, &r); bss2 = r.ptr; /* report the rssi and use it to calculate * link quality through a signal-noise * ratio */ priv->local_iwstatistics.qual.level = bss2->rssi; priv->local_iwstatistics.qual.qual = bss2->rssi - priv->iwstatistics.qual.noise; kfree(bss2); /* report that the stats are new */ priv->local_iwstatistics.qual.updated = 0x7; /* Rx : unable to decrypt the MPDU */ mgt_get_request(priv, DOT11_OID_PRIVRXFAILED, 0, NULL, &r); priv->local_iwstatistics.discard.code = r.u; /* Tx : Max MAC retries num reached */ mgt_get_request(priv, DOT11_OID_MPDUTXFAILED, 0, NULL, &r); priv->local_iwstatistics.discard.retries = r.u; mutex_unlock(&priv->stats_lock); } struct iw_statistics * prism54_get_wireless_stats(struct net_device *ndev) { islpci_private *priv = netdev_priv(ndev); /* If the stats are being updated return old data */ if (mutex_trylock(&priv->stats_lock)) { memcpy(&priv->iwstatistics, &priv->local_iwstatistics, sizeof (struct iw_statistics)); /* They won't be marked updated for the next time */ priv->local_iwstatistics.qual.updated = 0; mutex_unlock(&priv->stats_lock); } else priv->iwstatistics.qual.updated = 0; /* Update our wireless stats, but do not schedule to often * (max 1 HZ) */ if ((priv->stats_timestamp == 0) || time_after(jiffies, priv->stats_timestamp + 1 * HZ)) { schedule_work(&priv->stats_work); priv->stats_timestamp = jiffies; } return &priv->iwstatistics; } static int prism54_commit(struct net_device *ndev, struct iw_request_info *info, char *cwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); /* simply re-set the last set SSID, this should commit most stuff */ /* Commit in Monitor mode is not necessary, also setting essid * in Monitor mode does not make sense and isn't allowed for this * 
device's firmware */ if (priv->iw_mode != IW_MODE_MONITOR) return mgt_set_request(priv, DOT11_OID_SSID, 0, NULL); return 0; } static int prism54_get_name(struct net_device *ndev, struct iw_request_info *info, char *cwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); char *capabilities; union oid_res_t r; int rvalue; if (islpci_get_state(priv) < PRV_STATE_INIT) { strncpy(cwrq, "NOT READY!", IFNAMSIZ); return 0; } rvalue = mgt_get_request(priv, OID_INL_PHYCAPABILITIES, 0, NULL, &r); switch (r.u) { case INL_PHYCAP_5000MHZ: capabilities = "IEEE 802.11a/b/g"; break; case INL_PHYCAP_FAA: capabilities = "IEEE 802.11b/g - FAA Support"; break; case INL_PHYCAP_2400MHZ: default: capabilities = "IEEE 802.11b/g"; /* Default */ break; } strncpy(cwrq, capabilities, IFNAMSIZ); return rvalue; } static int prism54_set_freq(struct net_device *ndev, struct iw_request_info *info, struct iw_freq *fwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); int rvalue; u32 c; if (fwrq->m < 1000) /* we have a channel number */ c = fwrq->m; else c = (fwrq->e == 1) ? channel_of_freq(fwrq->m / 100000) : 0; rvalue = c ? mgt_set_request(priv, DOT11_OID_CHANNEL, 0, &c) : -EINVAL; /* Call commit handler */ return (rvalue ? 
rvalue : -EINPROGRESS); } static int prism54_get_freq(struct net_device *ndev, struct iw_request_info *info, struct iw_freq *fwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); union oid_res_t r; int rvalue; rvalue = mgt_get_request(priv, DOT11_OID_CHANNEL, 0, NULL, &r); fwrq->i = r.u; rvalue |= mgt_get_request(priv, DOT11_OID_FREQUENCY, 0, NULL, &r); fwrq->m = r.u; fwrq->e = 3; return rvalue; } static int prism54_set_mode(struct net_device *ndev, struct iw_request_info *info, __u32 * uwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); u32 mlmeautolevel = CARD_DEFAULT_MLME_MODE; /* Let's see if the user passed a valid Linux Wireless mode */ if (*uwrq > IW_MODE_MONITOR || *uwrq < IW_MODE_AUTO) { printk(KERN_DEBUG "%s: %s() You passed a non-valid init_mode.\n", priv->ndev->name, __func__); return -EINVAL; } down_write(&priv->mib_sem); if (prism54_mib_mode_helper(priv, *uwrq)) { up_write(&priv->mib_sem); return -EOPNOTSUPP; } /* the ACL code needs an intermediate mlmeautolevel. The wpa stuff an * extended one. */ if ((*uwrq == IW_MODE_MASTER) && (priv->acl.policy != MAC_POLICY_OPEN)) mlmeautolevel = DOT11_MLME_INTERMEDIATE; if (priv->wpa) mlmeautolevel = DOT11_MLME_EXTENDED; mgt_set(priv, DOT11_OID_MLMEAUTOLEVEL, &mlmeautolevel); if (mgt_commit(priv)) { up_write(&priv->mib_sem); return -EIO; } priv->ndev->type = (priv->iw_mode == IW_MODE_MONITOR) ? priv->monitor_type : ARPHRD_ETHER; up_write(&priv->mib_sem); return 0; } /* Use mib cache */ static int prism54_get_mode(struct net_device *ndev, struct iw_request_info *info, __u32 * uwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); BUG_ON((priv->iw_mode < IW_MODE_AUTO) || (priv->iw_mode > IW_MODE_MONITOR)); *uwrq = priv->iw_mode; return 0; } /* we use DOT11_OID_EDTHRESHOLD. From what I guess the card will not try to * emit data if (sensitivity > rssi - noise) (in dBm). * prism54_set_sens does not seem to work. 
 */

/* SIOCSIWSENS handler: program the energy-detect threshold (dBm-like
 * units per the comment above); "disabled" restores the card default. */
static int
prism54_set_sens(struct net_device *ndev, struct iw_request_info *info,
		 struct iw_param *vwrq, char *extra)
{
	islpci_private *priv = netdev_priv(ndev);
	u32 sens;

	/* by default the card sets this to 20. */
	sens = vwrq->disabled ? 20 : vwrq->value;

	return mgt_set_request(priv, DOT11_OID_EDTHRESHOLD, 0, &sens);
}

/* SIOCGIWSENS handler: read back the energy-detect threshold.
 * A value of 0 is reported as "disabled". */
static int
prism54_get_sens(struct net_device *ndev, struct iw_request_info *info,
		 struct iw_param *vwrq, char *extra)
{
	islpci_private *priv = netdev_priv(ndev);
	union oid_res_t r;
	int rvalue;

	rvalue = mgt_get_request(priv, DOT11_OID_EDTHRESHOLD, 0, NULL, &r);

	/* NOTE(review): r.u is used even if the request failed; callers get
	 * the error via rvalue, so the stale value is harmless but unchecked. */
	vwrq->value = r.u;
	vwrq->disabled = (vwrq->value == 0);
	vwrq->fixed = 1;

	return rvalue;
}

/* SIOCGIWRANGE handler: describe driver/hardware capabilities (WE version,
 * key sizes, retry/txpower ranges, supported frequencies and bitrates). */
static int
prism54_get_range(struct net_device *ndev, struct iw_request_info *info,
		  struct iw_point *dwrq, char *extra)
{
	struct iw_range *range = (struct iw_range *) extra;
	islpci_private *priv = netdev_priv(ndev);
	u8 *data;
	int i, m, rvalue;
	struct obj_frequencies *freq;
	union oid_res_t r;

	memset(range, 0, sizeof (struct iw_range));
	dwrq->length = sizeof (struct iw_range);

	/* set the wireless extension version number */
	range->we_version_source = SUPPORTED_WIRELESS_EXT;
	range->we_version_compiled = WIRELESS_EXT;

	/* Now the encoding capabilities */
	range->num_encoding_sizes = 3;
	/* 64(40) bits WEP */
	range->encoding_size[0] = 5;
	/* 128(104) bits WEP */
	range->encoding_size[1] = 13;
	/* 256 bits for WPA-PSK */
	range->encoding_size[2] = 32;
	/* 4 keys are allowed */
	range->max_encoding_tokens = 4;

	/* we don't know the quality range... */
	range->max_qual.level = 0;
	range->max_qual.noise = 0;
	range->max_qual.qual = 0;
	/* these value describe an average quality. Needs more tweaking...
*/ range->avg_qual.level = -80; /* -80 dBm */ range->avg_qual.noise = 0; /* don't know what to put here */ range->avg_qual.qual = 0; range->sensitivity = 200; /* retry limit capabilities */ range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME; range->retry_flags = IW_RETRY_LIMIT; range->r_time_flags = IW_RETRY_LIFETIME; /* I don't know the range. Put stupid things here */ range->min_retry = 1; range->max_retry = 65535; range->min_r_time = 1024; range->max_r_time = 65535 * 1024; /* txpower is supported in dBm's */ range->txpower_capa = IW_TXPOW_DBM; /* Event capability (kernel + driver) */ range->event_capa[0] = (IW_EVENT_CAPA_K_0 | IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) | IW_EVENT_CAPA_MASK(SIOCGIWAP)); range->event_capa[1] = IW_EVENT_CAPA_K_1; range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVCUSTOM); range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | IW_ENC_CAPA_CIPHER_TKIP; if (islpci_get_state(priv) < PRV_STATE_INIT) return 0; /* Request the device for the supported frequencies * not really relevant since some devices will report the 5 GHz band * frequencies even if they don't support them. */ rvalue = mgt_get_request(priv, DOT11_OID_SUPPORTEDFREQUENCIES, 0, NULL, &r); freq = r.ptr; range->num_channels = freq->nr; range->num_frequency = freq->nr; m = min(IW_MAX_FREQUENCIES, (int) freq->nr); for (i = 0; i < m; i++) { range->freq[i].m = freq->mhz[i]; range->freq[i].e = 6; range->freq[i].i = channel_of_freq(freq->mhz[i]); } kfree(freq); rvalue |= mgt_get_request(priv, DOT11_OID_SUPPORTEDRATES, 0, NULL, &r); data = r.ptr; /* We got an array of char. It is NULL terminated. */ i = 0; while ((i < IW_MAX_BITRATES) && (*data != 0)) { /* the result must be in bps. 
The card gives us 500Kbps */ range->bitrate[i] = *data * 500000; i++; data++; } range->num_bitrates = i; kfree(r.ptr); return rvalue; } /* Set AP address*/ static int prism54_set_wap(struct net_device *ndev, struct iw_request_info *info, struct sockaddr *awrq, char *extra) { islpci_private *priv = netdev_priv(ndev); char bssid[6]; int rvalue; if (awrq->sa_family != ARPHRD_ETHER) return -EINVAL; /* prepare the structure for the set object */ memcpy(&bssid[0], awrq->sa_data, 6); /* set the bssid -- does this make sense when in AP mode? */ rvalue = mgt_set_request(priv, DOT11_OID_BSSID, 0, &bssid); return (rvalue ? rvalue : -EINPROGRESS); /* Call commit handler */ } /* get AP address*/ static int prism54_get_wap(struct net_device *ndev, struct iw_request_info *info, struct sockaddr *awrq, char *extra) { islpci_private *priv = netdev_priv(ndev); union oid_res_t r; int rvalue; rvalue = mgt_get_request(priv, DOT11_OID_BSSID, 0, NULL, &r); memcpy(awrq->sa_data, r.ptr, 6); awrq->sa_family = ARPHRD_ETHER; kfree(r.ptr); return rvalue; } static int prism54_set_scan(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { /* hehe the device does this automagicaly */ return 0; } /* a little helper that will translate our data into a card independent * format that the Wireless Tools will understand. 
This was inspired by * the "Aironet driver for 4500 and 4800 series cards" (GPL) */ static char * prism54_translate_bss(struct net_device *ndev, struct iw_request_info *info, char *current_ev, char *end_buf, struct obj_bss *bss, char noise) { struct iw_event iwe; /* Temporary buffer */ short cap; islpci_private *priv = netdev_priv(ndev); u8 wpa_ie[MAX_WPA_IE_LEN]; size_t wpa_ie_len; /* The first entry must be the MAC address */ memcpy(iwe.u.ap_addr.sa_data, bss->address, 6); iwe.u.ap_addr.sa_family = ARPHRD_ETHER; iwe.cmd = SIOCGIWAP; current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_ADDR_LEN); /* The following entries will be displayed in the same order we give them */ /* The ESSID. */ iwe.u.data.length = bss->ssid.length; iwe.u.data.flags = 1; iwe.cmd = SIOCGIWESSID; current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, bss->ssid.octets); /* Capabilities */ #define CAP_ESS 0x01 #define CAP_IBSS 0x02 #define CAP_CRYPT 0x10 /* Mode */ cap = bss->capinfo; iwe.u.mode = 0; if (cap & CAP_ESS) iwe.u.mode = IW_MODE_MASTER; else if (cap & CAP_IBSS) iwe.u.mode = IW_MODE_ADHOC; iwe.cmd = SIOCGIWMODE; if (iwe.u.mode) current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_UINT_LEN); /* Encryption capability */ if (cap & CAP_CRYPT) iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; else iwe.u.data.flags = IW_ENCODE_DISABLED; iwe.u.data.length = 0; iwe.cmd = SIOCGIWENCODE; current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, NULL); /* Add frequency. 
(short) bss->channel is the frequency in MHz */ iwe.u.freq.m = bss->channel; iwe.u.freq.e = 6; iwe.cmd = SIOCGIWFREQ; current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_FREQ_LEN); /* Add quality statistics */ iwe.u.qual.level = bss->rssi; iwe.u.qual.noise = noise; /* do a simple SNR for quality */ iwe.u.qual.qual = bss->rssi - noise; iwe.cmd = IWEVQUAL; current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_QUAL_LEN); /* Add WPA/RSN Information Element, if any */ wpa_ie_len = prism54_wpa_bss_ie_get(priv, bss->address, wpa_ie); if (wpa_ie_len > 0) { iwe.cmd = IWEVGENIE; iwe.u.data.length = min(wpa_ie_len, (size_t)MAX_WPA_IE_LEN); current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, wpa_ie); } /* Do the bitrates */ { char *current_val = current_ev + iwe_stream_lcp_len(info); int i; int mask; iwe.cmd = SIOCGIWRATE; /* Those two flags are ignored... */ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; /* Parse the bitmask */ mask = 0x1; for(i = 0; i < sizeof(scan_rate_list); i++) { if(bss->rates & mask) { iwe.u.bitrate.value = (scan_rate_list[i] * 500000); current_val = iwe_stream_add_value( info, current_ev, current_val, end_buf, &iwe, IW_EV_PARAM_LEN); } mask <<= 1; } /* Check if we added any event */ if ((current_val - current_ev) > iwe_stream_lcp_len(info)) current_ev = current_val; } return current_ev; } static int prism54_get_scan(struct net_device *ndev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); int i, rvalue; struct obj_bsslist *bsslist; u32 noise = 0; char *current_ev = extra; union oid_res_t r; if (islpci_get_state(priv) < PRV_STATE_INIT) { /* device is not ready, fail gently */ dwrq->length = 0; return 0; } /* first get the noise value. We will use it to report the link quality */ rvalue = mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r); noise = r.u; /* Ask the device for a list of known bss. 
* The old API, using SIOCGIWAPLIST, had a hard limit of IW_MAX_AP=64. * The new API, using SIOCGIWSCAN, is only limited by the buffer size. * WE-14->WE-16, the buffer is limited to IW_SCAN_MAX_DATA bytes. * Starting with WE-17, the buffer can be as big as needed. * But the device won't repport anything if you change the value * of IWMAX_BSS=24. */ rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r); bsslist = r.ptr; /* ok now, scan the list and translate its info */ for (i = 0; i < (int) bsslist->nr; i++) { current_ev = prism54_translate_bss(ndev, info, current_ev, extra + dwrq->length, &(bsslist->bsslist[i]), noise); /* Check if there is space for one more entry */ if((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) { /* Ask user space to try again with a bigger buffer */ rvalue = -E2BIG; break; } } kfree(bsslist); dwrq->length = (current_ev - extra); dwrq->flags = 0; /* todo */ return rvalue; } static int prism54_set_essid(struct net_device *ndev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); struct obj_ssid essid; memset(essid.octets, 0, 33); /* Check if we were asked for `any' */ if (dwrq->flags && dwrq->length) { if (dwrq->length > 32) return -E2BIG; essid.length = dwrq->length; memcpy(essid.octets, extra, dwrq->length); } else essid.length = 0; if (priv->iw_mode != IW_MODE_MONITOR) return mgt_set_request(priv, DOT11_OID_SSID, 0, &essid); /* If in monitor mode, just save to mib */ mgt_set(priv, DOT11_OID_SSID, &essid); return 0; } static int prism54_get_essid(struct net_device *ndev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); struct obj_ssid *essid; union oid_res_t r; int rvalue; rvalue = mgt_get_request(priv, DOT11_OID_SSID, 0, NULL, &r); essid = r.ptr; if (essid->length) { dwrq->flags = 1; /* set ESSID to ON for Wireless Extensions */ /* if it is too big, trunk it */ dwrq->length = min((u8)IW_ESSID_MAX_SIZE, 
essid->length); } else { dwrq->flags = 0; dwrq->length = 0; } essid->octets[dwrq->length] = '\0'; memcpy(extra, essid->octets, dwrq->length); kfree(essid); return rvalue; } /* Provides no functionality, just completes the ioctl. In essence this is a * just a cosmetic ioctl. */ static int prism54_set_nick(struct net_device *ndev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); if (dwrq->length > IW_ESSID_MAX_SIZE) return -E2BIG; down_write(&priv->mib_sem); memset(priv->nickname, 0, sizeof (priv->nickname)); memcpy(priv->nickname, extra, dwrq->length); up_write(&priv->mib_sem); return 0; } static int prism54_get_nick(struct net_device *ndev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); dwrq->length = 0; down_read(&priv->mib_sem); dwrq->length = strlen(priv->nickname); memcpy(extra, priv->nickname, dwrq->length); up_read(&priv->mib_sem); return 0; } /* Set the allowed Bitrates */ static int prism54_set_rate(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); u32 rate, profile; char *data; int ret, i; union oid_res_t r; if (vwrq->value == -1) { /* auto mode. No limit. 
*/ profile = 1; return mgt_set_request(priv, DOT11_OID_PROFILES, 0, &profile); } ret = mgt_get_request(priv, DOT11_OID_SUPPORTEDRATES, 0, NULL, &r); if (ret) { kfree(r.ptr); return ret; } rate = (u32) (vwrq->value / 500000); data = r.ptr; i = 0; while (data[i]) { if (rate && (data[i] == rate)) { break; } if (vwrq->value == i) { break; } data[i] |= 0x80; i++; } if (!data[i]) { kfree(r.ptr); return -EINVAL; } data[i] |= 0x80; data[i + 1] = 0; /* Now, check if we want a fixed or auto value */ if (vwrq->fixed) { data[0] = data[i]; data[1] = 0; } /* i = 0; printk("prism54 rate: "); while(data[i]) { printk("%u ", data[i]); i++; } printk("0\n"); */ profile = -1; ret = mgt_set_request(priv, DOT11_OID_PROFILES, 0, &profile); ret |= mgt_set_request(priv, DOT11_OID_EXTENDEDRATES, 0, data); ret |= mgt_set_request(priv, DOT11_OID_RATES, 0, data); kfree(r.ptr); return ret; } /* Get the current bit rate */ static int prism54_get_rate(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); int rvalue; char *data; union oid_res_t r; /* Get the current bit rate */ if ((rvalue = mgt_get_request(priv, GEN_OID_LINKSTATE, 0, NULL, &r))) return rvalue; vwrq->value = r.u * 500000; /* request the device for the enabled rates */ rvalue = mgt_get_request(priv, DOT11_OID_RATES, 0, NULL, &r); if (rvalue) { kfree(r.ptr); return rvalue; } data = r.ptr; vwrq->fixed = (data[0] != 0) && (data[1] == 0); kfree(r.ptr); return 0; } static int prism54_set_rts(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); return mgt_set_request(priv, DOT11_OID_RTSTHRESH, 0, &vwrq->value); } static int prism54_get_rts(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); union oid_res_t r; int rvalue; /* get the rts threshold */ rvalue = mgt_get_request(priv, 
DOT11_OID_RTSTHRESH, 0, NULL, &r); vwrq->value = r.u; return rvalue; } static int prism54_set_frag(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); return mgt_set_request(priv, DOT11_OID_FRAGTHRESH, 0, &vwrq->value); } static int prism54_get_frag(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); union oid_res_t r; int rvalue; rvalue = mgt_get_request(priv, DOT11_OID_FRAGTHRESH, 0, NULL, &r); vwrq->value = r.u; return rvalue; } /* Here we have (min,max) = max retries for (small frames, big frames). Where * big frame <=> bigger than the rts threshold * small frame <=> smaller than the rts threshold * This is not really the behavior expected by the wireless tool but it seems * to be a common behavior in other drivers. */ static int prism54_set_retry(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); u32 slimit = 0, llimit = 0; /* short and long limit */ u32 lifetime = 0; int rvalue = 0; if (vwrq->disabled) /* we cannot disable this feature */ return -EINVAL; if (vwrq->flags & IW_RETRY_LIMIT) { if (vwrq->flags & IW_RETRY_SHORT) slimit = vwrq->value; else if (vwrq->flags & IW_RETRY_LONG) llimit = vwrq->value; else { /* we are asked to set both */ slimit = vwrq->value; llimit = vwrq->value; } } if (vwrq->flags & IW_RETRY_LIFETIME) /* Wireless tools use us unit while the device uses 1024 us unit */ lifetime = vwrq->value / 1024; /* now set what is requested */ if (slimit) rvalue = mgt_set_request(priv, DOT11_OID_SHORTRETRIES, 0, &slimit); if (llimit) rvalue |= mgt_set_request(priv, DOT11_OID_LONGRETRIES, 0, &llimit); if (lifetime) rvalue |= mgt_set_request(priv, DOT11_OID_MAXTXLIFETIME, 0, &lifetime); return rvalue; } static int prism54_get_retry(struct net_device *ndev, struct iw_request_info *info, struct iw_param 
*vwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); union oid_res_t r; int rvalue = 0; vwrq->disabled = 0; /* It cannot be disabled */ if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) { /* we are asked for the life time */ rvalue = mgt_get_request(priv, DOT11_OID_MAXTXLIFETIME, 0, NULL, &r); vwrq->value = r.u * 1024; vwrq->flags = IW_RETRY_LIFETIME; } else if ((vwrq->flags & IW_RETRY_LONG)) { /* we are asked for the long retry limit */ rvalue |= mgt_get_request(priv, DOT11_OID_LONGRETRIES, 0, NULL, &r); vwrq->value = r.u; vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG; } else { /* default. get the short retry limit */ rvalue |= mgt_get_request(priv, DOT11_OID_SHORTRETRIES, 0, NULL, &r); vwrq->value = r.u; vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_SHORT; } return rvalue; } static int prism54_set_encode(struct net_device *ndev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); int rvalue = 0, force = 0; int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0; union oid_res_t r; /* with the new API, it's impossible to get a NULL pointer. * New version of iwconfig set the IW_ENCODE_NOKEY flag * when no key is given, but older versions don't. 
*/ if (dwrq->length > 0) { /* we have a key to set */ int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; int current_index; struct obj_key key = { DOT11_PRIV_WEP, 0, "" }; /* get the current key index */ rvalue = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r); current_index = r.u; /* Verify that the key is not marked as invalid */ if (!(dwrq->flags & IW_ENCODE_NOKEY)) { if (dwrq->length > KEY_SIZE_TKIP) { /* User-provided key data too big */ return -EINVAL; } if (dwrq->length > KEY_SIZE_WEP104) { /* WPA-PSK TKIP */ key.type = DOT11_PRIV_TKIP; key.length = KEY_SIZE_TKIP; } else if (dwrq->length > KEY_SIZE_WEP40) { /* WEP 104/128 */ key.length = KEY_SIZE_WEP104; } else { /* WEP 40/64 */ key.length = KEY_SIZE_WEP40; } memset(key.key, 0, sizeof (key.key)); memcpy(key.key, extra, dwrq->length); if ((index < 0) || (index > 3)) /* no index provided use the current one */ index = current_index; /* now send the key to the card */ rvalue |= mgt_set_request(priv, DOT11_OID_DEFKEYX, index, &key); } /* * If a valid key is set, encryption should be enabled * (user may turn it off later). * This is also how "iwconfig ethX key on" works */ if ((index == current_index) && (key.length > 0)) force = 1; } else { int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; if ((index >= 0) && (index <= 3)) { /* we want to set the key index */ rvalue |= mgt_set_request(priv, DOT11_OID_DEFKEYID, 0, &index); } else { if (!(dwrq->flags & IW_ENCODE_MODE)) { /* we cannot do anything. Complain. */ return -EINVAL; } } } /* now read the flags */ if (dwrq->flags & IW_ENCODE_DISABLED) { /* Encoding disabled, * authen = DOT11_AUTH_OS; * invoke = 0; * exunencrypt = 0; */ } if (dwrq->flags & IW_ENCODE_OPEN) /* Encode but accept non-encoded packets. No auth */ invoke = 1; if ((dwrq->flags & IW_ENCODE_RESTRICTED) || force) { /* Refuse non-encoded packets. 
Auth */ authen = DOT11_AUTH_BOTH; invoke = 1; exunencrypt = 1; } /* do the change if requested */ if ((dwrq->flags & IW_ENCODE_MODE) || force) { rvalue |= mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen); rvalue |= mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &invoke); rvalue |= mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0, &exunencrypt); } return rvalue; } static int prism54_get_encode(struct net_device *ndev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); struct obj_key *key; u32 devindex, index = (dwrq->flags & IW_ENCODE_INDEX) - 1; u32 authen = 0, invoke = 0, exunencrypt = 0; int rvalue; union oid_res_t r; /* first get the flags */ rvalue = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r); authen = r.u; rvalue |= mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r); invoke = r.u; rvalue |= mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r); exunencrypt = r.u; if (invoke && (authen == DOT11_AUTH_BOTH) && exunencrypt) dwrq->flags = IW_ENCODE_RESTRICTED; else if ((authen == DOT11_AUTH_OS) && !exunencrypt) { if (invoke) dwrq->flags = IW_ENCODE_OPEN; else dwrq->flags = IW_ENCODE_DISABLED; } else /* The card should not work in this state */ dwrq->flags = 0; /* get the current device key index */ rvalue |= mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r); devindex = r.u; /* Now get the key, return it */ if (index == -1 || index > 3) /* no index provided, use the current one */ index = devindex; rvalue |= mgt_get_request(priv, DOT11_OID_DEFKEYX, index, NULL, &r); key = r.ptr; dwrq->length = key->length; memcpy(extra, key->key, dwrq->length); kfree(key); /* return the used key index */ dwrq->flags |= devindex + 1; return rvalue; } static int prism54_get_txpower(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); union oid_res_t r; int rvalue; rvalue = mgt_get_request(priv, 
OID_INL_OUTPUTPOWER, 0, NULL, &r); /* intersil firmware operates in 0.25 dBm (1/4 dBm) */ vwrq->value = (s32) r.u / 4; vwrq->fixed = 1; /* radio is not turned of * btw: how is possible to turn off only the radio */ vwrq->disabled = 0; return rvalue; } static int prism54_set_txpower(struct net_device *ndev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); s32 u = vwrq->value; /* intersil firmware operates in 0.25 dBm (1/4) */ u *= 4; if (vwrq->disabled) { /* don't know how to disable radio */ printk(KERN_DEBUG "%s: %s() disabling radio is not yet supported.\n", priv->ndev->name, __func__); return -ENOTSUPP; } else if (vwrq->fixed) /* currently only fixed value is supported */ return mgt_set_request(priv, OID_INL_OUTPUTPOWER, 0, &u); else { printk(KERN_DEBUG "%s: %s() auto power will be implemented later.\n", priv->ndev->name, __func__); return -ENOTSUPP; } } static int prism54_set_genie(struct net_device *ndev, struct iw_request_info *info, struct iw_point *data, char *extra) { islpci_private *priv = netdev_priv(ndev); int alen, ret = 0; struct obj_attachment *attach; if (data->length > MAX_WPA_IE_LEN || (data->length && extra == NULL)) return -EINVAL; memcpy(priv->wpa_ie, extra, data->length); priv->wpa_ie_len = data->length; alen = sizeof(*attach) + priv->wpa_ie_len; attach = kzalloc(alen, GFP_KERNEL); if (attach == NULL) return -ENOMEM; #define WLAN_FC_TYPE_MGMT 0 #define WLAN_FC_STYPE_ASSOC_REQ 0 #define WLAN_FC_STYPE_REASSOC_REQ 2 /* Note: endianness is covered by mgt_set_varlen */ attach->type = (WLAN_FC_TYPE_MGMT << 2) | (WLAN_FC_STYPE_ASSOC_REQ << 4); attach->id = -1; attach->size = priv->wpa_ie_len; memcpy(attach->data, extra, priv->wpa_ie_len); ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, priv->wpa_ie_len); if (ret == 0) { attach->type = (WLAN_FC_TYPE_MGMT << 2) | (WLAN_FC_STYPE_REASSOC_REQ << 4); ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, priv->wpa_ie_len); if (ret == 
0) printk(KERN_DEBUG "%s: WPA IE Attachment was set\n", ndev->name); } kfree(attach); return ret; } static int prism54_get_genie(struct net_device *ndev, struct iw_request_info *info, struct iw_point *data, char *extra) { islpci_private *priv = netdev_priv(ndev); int len = priv->wpa_ie_len; if (len <= 0) { data->length = 0; return 0; } if (data->length < len) return -E2BIG; data->length = len; memcpy(extra, priv->wpa_ie, len); return 0; } static int prism54_set_auth(struct net_device *ndev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { islpci_private *priv = netdev_priv(ndev); struct iw_param *param = &wrqu->param; u32 mlmelevel = 0, authen = 0, dot1x = 0; u32 exunencrypt = 0, privinvoked = 0, wpa = 0; u32 old_wpa; int ret = 0; union oid_res_t r; if (islpci_get_state(priv) < PRV_STATE_INIT) return 0; /* first get the flags */ down_write(&priv->mib_sem); wpa = old_wpa = priv->wpa; up_write(&priv->mib_sem); ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r); authen = r.u; ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r); privinvoked = r.u; ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r); exunencrypt = r.u; ret = mgt_get_request(priv, DOT11_OID_DOT1XENABLE, 0, NULL, &r); dot1x = r.u; ret = mgt_get_request(priv, DOT11_OID_MLMEAUTOLEVEL, 0, NULL, &r); mlmelevel = r.u; if (ret < 0) goto out; switch (param->flags & IW_AUTH_INDEX) { case IW_AUTH_CIPHER_PAIRWISE: case IW_AUTH_CIPHER_GROUP: case IW_AUTH_KEY_MGMT: break; case IW_AUTH_WPA_ENABLED: /* Do the same thing as IW_AUTH_WPA_VERSION */ if (param->value) { wpa = 1; privinvoked = 1; /* For privacy invoked */ exunencrypt = 1; /* Filter out all unencrypted frames */ dot1x = 0x01; /* To enable eap filter */ mlmelevel = DOT11_MLME_EXTENDED; authen = DOT11_AUTH_OS; /* Only WEP uses _SK and _BOTH */ } else { wpa = 0; privinvoked = 0; exunencrypt = 0; /* Do not filter un-encrypted data */ dot1x = 0; mlmelevel = DOT11_MLME_AUTO; } break; case 
IW_AUTH_WPA_VERSION: if (param->value & IW_AUTH_WPA_VERSION_DISABLED) { wpa = 0; privinvoked = 0; exunencrypt = 0; /* Do not filter un-encrypted data */ dot1x = 0; mlmelevel = DOT11_MLME_AUTO; } else { if (param->value & IW_AUTH_WPA_VERSION_WPA) wpa = 1; else if (param->value & IW_AUTH_WPA_VERSION_WPA2) wpa = 2; privinvoked = 1; /* For privacy invoked */ exunencrypt = 1; /* Filter out all unencrypted frames */ dot1x = 0x01; /* To enable eap filter */ mlmelevel = DOT11_MLME_EXTENDED; authen = DOT11_AUTH_OS; /* Only WEP uses _SK and _BOTH */ } break; case IW_AUTH_RX_UNENCRYPTED_EAPOL: /* dot1x should be the opposite of RX_UNENCRYPTED_EAPOL; * turn off dot1x when allowing receipt of unencrypted EAPOL * frames, turn on dot1x when receipt should be disallowed */ dot1x = param->value ? 0 : 0x01; break; case IW_AUTH_PRIVACY_INVOKED: privinvoked = param->value ? 1 : 0; break; case IW_AUTH_DROP_UNENCRYPTED: exunencrypt = param->value ? 1 : 0; break; case IW_AUTH_80211_AUTH_ALG: if (param->value & IW_AUTH_ALG_SHARED_KEY) { /* Only WEP uses _SK and _BOTH */ if (wpa > 0) { ret = -EINVAL; goto out; } authen = DOT11_AUTH_SK; } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) { authen = DOT11_AUTH_OS; } else { ret = -EINVAL; goto out; } break; default: return -EOPNOTSUPP; } /* Set all the values */ down_write(&priv->mib_sem); priv->wpa = wpa; up_write(&priv->mib_sem); mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen); mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &privinvoked); mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0, &exunencrypt); mgt_set_request(priv, DOT11_OID_DOT1XENABLE, 0, &dot1x); mgt_set_request(priv, DOT11_OID_MLMEAUTOLEVEL, 0, &mlmelevel); out: return ret; } static int prism54_get_auth(struct net_device *ndev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { islpci_private *priv = netdev_priv(ndev); struct iw_param *param = &wrqu->param; u32 wpa = 0; int ret = 0; union oid_res_t r; if (islpci_get_state(priv) < PRV_STATE_INIT) 
return 0; /* first get the flags */ down_write(&priv->mib_sem); wpa = priv->wpa; up_write(&priv->mib_sem); switch (param->flags & IW_AUTH_INDEX) { case IW_AUTH_CIPHER_PAIRWISE: case IW_AUTH_CIPHER_GROUP: case IW_AUTH_KEY_MGMT: /* * wpa_supplicant will control these internally */ ret = -EOPNOTSUPP; break; case IW_AUTH_WPA_VERSION: switch (wpa) { case 1: param->value = IW_AUTH_WPA_VERSION_WPA; break; case 2: param->value = IW_AUTH_WPA_VERSION_WPA2; break; case 0: default: param->value = IW_AUTH_WPA_VERSION_DISABLED; break; } break; case IW_AUTH_DROP_UNENCRYPTED: ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r); if (ret >= 0) param->value = r.u > 0 ? 1 : 0; break; case IW_AUTH_80211_AUTH_ALG: ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r); if (ret >= 0) { switch (r.u) { case DOT11_AUTH_OS: param->value = IW_AUTH_ALG_OPEN_SYSTEM; break; case DOT11_AUTH_BOTH: case DOT11_AUTH_SK: param->value = IW_AUTH_ALG_SHARED_KEY; case DOT11_AUTH_NONE: default: param->value = 0; break; } } break; case IW_AUTH_WPA_ENABLED: param->value = wpa > 0 ? 1 : 0; break; case IW_AUTH_RX_UNENCRYPTED_EAPOL: ret = mgt_get_request(priv, DOT11_OID_DOT1XENABLE, 0, NULL, &r); if (ret >= 0) param->value = r.u > 0 ? 1 : 0; break; case IW_AUTH_PRIVACY_INVOKED: ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r); if (ret >= 0) param->value = r.u > 0 ? 
1 : 0; break; default: return -EOPNOTSUPP; } return ret; } static int prism54_set_encodeext(struct net_device *ndev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { islpci_private *priv = netdev_priv(ndev); struct iw_point *encoding = &wrqu->encoding; struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; int idx, alg = ext->alg, set_key = 1; union oid_res_t r; int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0; int ret = 0; if (islpci_get_state(priv) < PRV_STATE_INIT) return 0; /* Determine and validate the key index */ idx = (encoding->flags & IW_ENCODE_INDEX) - 1; if (idx) { if (idx < 0 || idx > 3) return -EINVAL; } else { ret = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r); if (ret < 0) goto out; idx = r.u; } if (encoding->flags & IW_ENCODE_DISABLED) alg = IW_ENCODE_ALG_NONE; if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { /* Only set transmit key index here, actual * key is set below if needed. */ ret = mgt_set_request(priv, DOT11_OID_DEFKEYID, 0, &idx); set_key = ext->key_len > 0 ? 1 : 0; } if (set_key) { struct obj_key key = { DOT11_PRIV_WEP, 0, "" }; switch (alg) { case IW_ENCODE_ALG_NONE: break; case IW_ENCODE_ALG_WEP: if (ext->key_len > KEY_SIZE_WEP104) { ret = -EINVAL; goto out; } if (ext->key_len > KEY_SIZE_WEP40) key.length = KEY_SIZE_WEP104; else key.length = KEY_SIZE_WEP40; break; case IW_ENCODE_ALG_TKIP: if (ext->key_len > KEY_SIZE_TKIP) { ret = -EINVAL; goto out; } key.type = DOT11_PRIV_TKIP; key.length = KEY_SIZE_TKIP; break; default: return -EINVAL; } if (key.length) { memset(key.key, 0, sizeof(key.key)); memcpy(key.key, ext->key, ext->key_len); ret = mgt_set_request(priv, DOT11_OID_DEFKEYX, idx, &key); if (ret < 0) goto out; } } /* Read the flags */ if (encoding->flags & IW_ENCODE_DISABLED) { /* Encoding disabled, * authen = DOT11_AUTH_OS; * invoke = 0; * exunencrypt = 0; */ } if (encoding->flags & IW_ENCODE_OPEN) { /* Encode but accept non-encoded packets. 
No auth */ invoke = 1; } if (encoding->flags & IW_ENCODE_RESTRICTED) { /* Refuse non-encoded packets. Auth */ authen = DOT11_AUTH_BOTH; invoke = 1; exunencrypt = 1; } /* do the change if requested */ if (encoding->flags & IW_ENCODE_MODE) { ret = mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen); ret = mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &invoke); ret = mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0, &exunencrypt); } out: return ret; } static int prism54_get_encodeext(struct net_device *ndev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { islpci_private *priv = netdev_priv(ndev); struct iw_point *encoding = &wrqu->encoding; struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; int idx, max_key_len; union oid_res_t r; int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0, wpa = 0; int ret = 0; if (islpci_get_state(priv) < PRV_STATE_INIT) return 0; /* first get the flags */ ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r); authen = r.u; ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r); invoke = r.u; ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r); exunencrypt = r.u; if (ret < 0) goto out; max_key_len = encoding->length - sizeof(*ext); if (max_key_len < 0) return -EINVAL; idx = (encoding->flags & IW_ENCODE_INDEX) - 1; if (idx) { if (idx < 0 || idx > 3) return -EINVAL; } else { ret = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r); if (ret < 0) goto out; idx = r.u; } encoding->flags = idx + 1; memset(ext, 0, sizeof(*ext)); switch (authen) { case DOT11_AUTH_BOTH: case DOT11_AUTH_SK: wrqu->encoding.flags |= IW_ENCODE_RESTRICTED; case DOT11_AUTH_OS: default: wrqu->encoding.flags |= IW_ENCODE_OPEN; break; } down_write(&priv->mib_sem); wpa = priv->wpa; up_write(&priv->mib_sem); if (authen == DOT11_AUTH_OS && !exunencrypt && !invoke && !wpa) { /* No encryption */ ext->alg = IW_ENCODE_ALG_NONE; ext->key_len = 0; wrqu->encoding.flags |= IW_ENCODE_DISABLED; } else { 
		struct obj_key *key;

		ret = mgt_get_request(priv, DOT11_OID_DEFKEYX, idx, NULL, &r);
		if (ret < 0)
			goto out;
		key = r.ptr;
		/* caller-supplied buffer must hold the whole key */
		if (max_key_len < key->length) {
			ret = -E2BIG;
			goto out;
		}
		memcpy(ext->key, key->key, key->length);
		ext->key_len = key->length;
		switch (key->type) {
		case DOT11_PRIV_TKIP:
			ext->alg = IW_ENCODE_ALG_TKIP;
			break;
		default:
		case DOT11_PRIV_WEP:
			/* unknown firmware key types are reported as WEP */
			ext->alg = IW_ENCODE_ALG_WEP;
			break;
		}
		wrqu->encoding.flags |= IW_ENCODE_ENABLED;
	}

 out:
	return ret;
}

/* Private ioctl: soft-reset the card without touching the configuration. */
static int
prism54_reset(struct net_device *ndev, struct iw_request_info *info,
	      __u32 * uwrq, char *extra)
{
	islpci_reset(netdev_priv(ndev), 0);
	return 0;
}

/* Sub-ioctl GET helper: query the OID given in dwrq->flags and render the
 * response as a string into 'extra'. Non-u32 responses are heap-allocated
 * by mgt_get_request and must be freed here. */
static int
prism54_get_oid(struct net_device *ndev, struct iw_request_info *info,
		struct iw_point *dwrq, char *extra)
{
	union oid_res_t r;
	int rvalue;
	enum oid_num_t n = dwrq->flags;

	rvalue = mgt_get_request(netdev_priv(ndev), n, 0, NULL, &r);
	dwrq->length = mgt_response_to_str(n, &r, extra);
	if ((isl_oid[n].flags & OID_FLAG_TYPE) != OID_TYPE_U32)
		kfree(r.ptr);
	return rvalue;
}

/* Sub-ioctl SET helper for u32 OIDs: uwrq[0] is the OID number,
 * uwrq[1] the value to set. */
static int
prism54_set_u32(struct net_device *ndev, struct iw_request_info *info,
		__u32 * uwrq, char *extra)
{
	u32 oid = uwrq[0], u = uwrq[1];

	return mgt_set_request(netdev_priv(ndev), oid, 0, &u);
}

/* Sub-ioctl SET helper for string/address OIDs: the OID number comes in
 * dwrq->flags, the raw payload in 'extra'. */
static int
prism54_set_raw(struct net_device *ndev, struct iw_request_info *info,
		struct iw_point *dwrq, char *extra)
{
	u32 oid = dwrq->flags;

	return mgt_set_request(netdev_priv(ndev), oid, 0, extra);
}

/* Initialize the MAC access-control list: empty list, open policy. */
void
prism54_acl_init(struct islpci_acl *acl)
{
	mutex_init(&acl->lock);
	INIT_LIST_HEAD(&acl->mac_list);
	acl->size = 0;
	acl->policy = MAC_POLICY_OPEN;
}

/* Free every entry of the ACL under acl->lock and reset its size. */
static void
prism54_clear_mac(struct islpci_acl *acl)
{
	struct list_head *ptr, *next;
	struct mac_entry *entry;

	mutex_lock(&acl->lock);

	if (acl->size == 0) {
		mutex_unlock(&acl->lock);
		return;
	}

	for (ptr = acl->mac_list.next, next = ptr->next;
	     ptr != &acl->mac_list; ptr = next, next = ptr->next) {
		entry = list_entry(ptr, struct mac_entry, _list);
		list_del(ptr);
		kfree(entry);
	}
	acl->size = 0;
	mutex_unlock(&acl->lock);
}

/* Teardown counterpart of prism54_acl_init. */
void
prism54_acl_clean(struct islpci_acl *acl)
{
	prism54_clear_mac(acl);
}

/* Private ioctl: append one Ethernet address to the ACL.
 * NOTE(review): no duplicate check is performed — the same address may be
 * added more than once. */
static int
prism54_add_mac(struct net_device *ndev, struct iw_request_info *info,
		struct sockaddr *awrq, char *extra)
{
	islpci_private *priv = netdev_priv(ndev);
	struct islpci_acl *acl = &priv->acl;
	struct mac_entry *entry;
	struct sockaddr *addr = (struct sockaddr *) extra;

	if (addr->sa_family != ARPHRD_ETHER)
		return -EOPNOTSUPP;

	entry = kmalloc(sizeof (struct mac_entry), GFP_KERNEL);
	if (entry == NULL)
		return -ENOMEM;

	memcpy(entry->addr, addr->sa_data, ETH_ALEN);

	if (mutex_lock_interruptible(&acl->lock)) {
		/* interrupted: entry was never linked, free it here */
		kfree(entry);
		return -ERESTARTSYS;
	}
	list_add_tail(&entry->_list, &acl->mac_list);
	acl->size++;
	mutex_unlock(&acl->lock);

	return 0;
}

/* Private ioctl: remove the first ACL entry matching the given address.
 * Returns -EINVAL when the address is not in the list. */
static int
prism54_del_mac(struct net_device *ndev, struct iw_request_info *info,
		struct sockaddr *awrq, char *extra)
{
	islpci_private *priv = netdev_priv(ndev);
	struct islpci_acl *acl = &priv->acl;
	struct mac_entry *entry;
	struct sockaddr *addr = (struct sockaddr *) extra;

	if (addr->sa_family != ARPHRD_ETHER)
		return -EOPNOTSUPP;

	if (mutex_lock_interruptible(&acl->lock))
		return -ERESTARTSYS;
	list_for_each_entry(entry, &acl->mac_list, _list) {
		if (memcmp(entry->addr, addr->sa_data, ETH_ALEN) == 0) {
			list_del(&entry->_list);
			acl->size--;
			kfree(entry);
			mutex_unlock(&acl->lock);
			return 0;
		}
	}
	mutex_unlock(&acl->lock);
	return -EINVAL;
}

/* Private ioctl: copy every ACL entry into the user buffer as sockaddrs.
 * NOTE(review): the output is not bounded against dwrq here; the iw_priv
 * declaration (IW_PRIV_TYPE_ADDR | 64) appears to cap the transfer —
 * confirm the wireless-extensions core enforces that limit. */
static int
prism54_get_mac(struct net_device *ndev, struct iw_request_info *info,
		struct iw_point *dwrq, char *extra)
{
	islpci_private *priv = netdev_priv(ndev);
	struct islpci_acl *acl = &priv->acl;
	struct mac_entry *entry;
	struct sockaddr *dst = (struct sockaddr *) extra;

	dwrq->length = 0;

	if (mutex_lock_interruptible(&acl->lock))
		return -ERESTARTSYS;

	list_for_each_entry(entry, &acl->mac_list, _list) {
		memcpy(dst->sa_data, entry->addr, ETH_ALEN);
		dst->sa_family = ARPHRD_ETHER;
		dwrq->length++;
		dst++;
	}
	mutex_unlock(&acl->lock);
	return 0;
}

/* Setting policy also clears the MAC acl, even if we don't change the default
 * policy
 */
/* Private ioctl: select the ACL policy (open/accept/reject). Clears the
 * current MAC list, adjusts the MLME auto-level the ACL code relies on,
 * and commits the configuration to the card (-EIO on commit failure). */
static int
prism54_set_policy(struct net_device *ndev, struct iw_request_info *info,
		   __u32 * uwrq, char *extra)
{
	islpci_private *priv = netdev_priv(ndev);
	struct islpci_acl *acl = &priv->acl;
	u32 mlmeautolevel;

	prism54_clear_mac(acl);

	if ((*uwrq < MAC_POLICY_OPEN) || (*uwrq > MAC_POLICY_REJECT))
		return -EINVAL;

	down_write(&priv->mib_sem);

	acl->policy = *uwrq;

	/* the ACL code needs an intermediate mlmeautolevel */
	if ((priv->iw_mode == IW_MODE_MASTER) &&
	    (acl->policy != MAC_POLICY_OPEN))
		mlmeautolevel = DOT11_MLME_INTERMEDIATE;
	else
		mlmeautolevel = CARD_DEFAULT_MLME_MODE;
	if (priv->wpa)
		mlmeautolevel = DOT11_MLME_EXTENDED;
	mgt_set(priv, DOT11_OID_MLMEAUTOLEVEL, &mlmeautolevel);
	/* restart the card with our new policy */
	if (mgt_commit(priv)) {
		up_write(&priv->mib_sem);
		return -EIO;
	}
	up_write(&priv->mib_sem);

	return 0;
}

/* Private ioctl: report the current ACL policy. */
static int
prism54_get_policy(struct net_device *ndev, struct iw_request_info *info,
		   __u32 * uwrq, char *extra)
{
	islpci_private *priv = netdev_priv(ndev);
	struct islpci_acl *acl = &priv->acl;

	*uwrq = acl->policy;

	return 0;
}

/* Return 1 only if client should be accepted. */
static int
prism54_mac_accept(struct islpci_acl *acl, char *mac)
{
	struct mac_entry *entry;
	int res = 0;

	if (mutex_lock_interruptible(&acl->lock))
		return -ERESTARTSYS;

	if (acl->policy == MAC_POLICY_OPEN) {
		mutex_unlock(&acl->lock);
		return 1;
	}

	list_for_each_entry(entry, &acl->mac_list, _list) {
		if (memcmp(entry->addr, mac, ETH_ALEN) == 0) {
			res = 1;
			break;
		}
	}
	/* Under MAC_POLICY_ACCEPT a listed address is rejected, i.e. the
	 * list acts as a deny-list; otherwise (MAC_POLICY_REJECT) the list
	 * acts as an allow-list. NOTE(review): naming suggests the opposite
	 * reading — verify against the policy definitions in the header. */
	res = (acl->policy == MAC_POLICY_ACCEPT) ? !res : res;
	mutex_unlock(&acl->lock);

	return res;
}

/* Private ioctl: disassociate every client (mlme->id == 0 addresses all). */
static int
prism54_kick_all(struct net_device *ndev, struct iw_request_info *info,
		 struct iw_point *dwrq, char *extra)
{
	struct obj_mlme *mlme;
	int rvalue;

	mlme = kmalloc(sizeof (struct obj_mlme), GFP_KERNEL);
	if (mlme == NULL)
		return -ENOMEM;

	/* Tell the card to kick every client */
	mlme->id = 0;
	rvalue =
	    mgt_set_request(netdev_priv(ndev), DOT11_OID_DISASSOCIATE, 0,
			    mlme);
	kfree(mlme);

	return rvalue;
}

/* Private ioctl: disassociate one client, selected by Ethernet address. */
static int
prism54_kick_mac(struct net_device *ndev, struct iw_request_info *info,
		 struct sockaddr *awrq, char *extra)
{
	struct obj_mlme *mlme;
	struct sockaddr *addr = (struct sockaddr *) extra;
	int rvalue;

	if (addr->sa_family != ARPHRD_ETHER)
		return -EOPNOTSUPP;

	mlme = kmalloc(sizeof (struct obj_mlme), GFP_KERNEL);
	if (mlme == NULL)
		return -ENOMEM;

	/* Tell the card to only kick the corresponding bastard */
	memcpy(mlme->address, addr->sa_data, ETH_ALEN);
	mlme->id = -1;
	rvalue =
	    mgt_set_request(netdev_priv(ndev), DOT11_OID_DISASSOCIATE, 0,
			    mlme);

	kfree(mlme);

	return rvalue;
}

/* Translate a TRAP oid into a wireless event. Called in islpci_mgt_receive. */
static void
format_event(islpci_private *priv, char *dest, const char *str,
	     const struct obj_mlme *mlme, u16 *length, int error)
{
	/* dest must be at least IW_CUSTOM_MAX bytes (see send_formatted_event) */
	int n = snprintf(dest, IW_CUSTOM_MAX,
			 "%s %s %pM %s (%2.2X)",
			 str,
			 ((priv->iw_mode == IW_MODE_MASTER) ? "from" : "to"),
			 mlme->address,
			 (error ? (mlme->code ?
" : REJECTED " : " : ACCEPTED ") : ""), mlme->code); BUG_ON(n > IW_CUSTOM_MAX); *length = n; } static void send_formatted_event(islpci_private *priv, const char *str, const struct obj_mlme *mlme, int error) { union iwreq_data wrqu; char *memptr; memptr = kmalloc(IW_CUSTOM_MAX, GFP_KERNEL); if (!memptr) return; wrqu.data.pointer = memptr; wrqu.data.length = 0; format_event(priv, memptr, str, mlme, &wrqu.data.length, error); wireless_send_event(priv->ndev, IWEVCUSTOM, &wrqu, memptr); kfree(memptr); } static void send_simple_event(islpci_private *priv, const char *str) { union iwreq_data wrqu; char *memptr; int n = strlen(str); memptr = kmalloc(IW_CUSTOM_MAX, GFP_KERNEL); if (!memptr) return; BUG_ON(n >= IW_CUSTOM_MAX); wrqu.data.pointer = memptr; wrqu.data.length = n; strcpy(memptr, str); wireless_send_event(priv->ndev, IWEVCUSTOM, &wrqu, memptr); kfree(memptr); } static void link_changed(struct net_device *ndev, u32 bitrate) { islpci_private *priv = netdev_priv(ndev); if (bitrate) { netif_carrier_on(ndev); if (priv->iw_mode == IW_MODE_INFRA) { union iwreq_data uwrq; prism54_get_wap(ndev, NULL, (struct sockaddr *) &uwrq, NULL); wireless_send_event(ndev, SIOCGIWAP, &uwrq, NULL); } else send_simple_event(netdev_priv(ndev), "Link established"); } else { netif_carrier_off(ndev); send_simple_event(netdev_priv(ndev), "Link lost"); } } /* Beacon/ProbeResp payload header */ struct ieee80211_beacon_phdr { u8 timestamp[8]; u16 beacon_int; u16 capab_info; } __packed; #define WLAN_EID_GENERIC 0xdd static u8 wpa_oid[4] = { 0x00, 0x50, 0xf2, 1 }; static void prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid, u8 *wpa_ie, size_t wpa_ie_len) { struct list_head *ptr; struct islpci_bss_wpa_ie *bss = NULL; if (wpa_ie_len > MAX_WPA_IE_LEN) wpa_ie_len = MAX_WPA_IE_LEN; mutex_lock(&priv->wpa_lock); /* try to use existing entry */ list_for_each(ptr, &priv->bss_wpa_list) { bss = list_entry(ptr, struct islpci_bss_wpa_ie, list); if (memcmp(bss->bssid, bssid, ETH_ALEN) == 0) { 
list_move(&bss->list, &priv->bss_wpa_list); break; } bss = NULL; } if (bss == NULL) { /* add a new BSS entry; if max number of entries is already * reached, replace the least recently updated */ if (priv->num_bss_wpa >= MAX_BSS_WPA_IE_COUNT) { bss = list_entry(priv->bss_wpa_list.prev, struct islpci_bss_wpa_ie, list); list_del(&bss->list); } else { bss = kzalloc(sizeof (*bss), GFP_ATOMIC); if (bss != NULL) priv->num_bss_wpa++; } if (bss != NULL) { memcpy(bss->bssid, bssid, ETH_ALEN); list_add(&bss->list, &priv->bss_wpa_list); } } if (bss != NULL) { memcpy(bss->wpa_ie, wpa_ie, wpa_ie_len); bss->wpa_ie_len = wpa_ie_len; bss->last_update = jiffies; } else { printk(KERN_DEBUG "Failed to add BSS WPA entry for " "%pM\n", bssid); } /* expire old entries from WPA list */ while (priv->num_bss_wpa > 0) { bss = list_entry(priv->bss_wpa_list.prev, struct islpci_bss_wpa_ie, list); if (!time_after(jiffies, bss->last_update + 60 * HZ)) break; list_del(&bss->list); priv->num_bss_wpa--; kfree(bss); } mutex_unlock(&priv->wpa_lock); } static size_t prism54_wpa_bss_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie) { struct list_head *ptr; struct islpci_bss_wpa_ie *bss = NULL; size_t len = 0; mutex_lock(&priv->wpa_lock); list_for_each(ptr, &priv->bss_wpa_list) { bss = list_entry(ptr, struct islpci_bss_wpa_ie, list); if (memcmp(bss->bssid, bssid, ETH_ALEN) == 0) break; bss = NULL; } if (bss) { len = bss->wpa_ie_len; memcpy(wpa_ie, bss->wpa_ie, len); } mutex_unlock(&priv->wpa_lock); return len; } void prism54_wpa_bss_ie_init(islpci_private *priv) { INIT_LIST_HEAD(&priv->bss_wpa_list); mutex_init(&priv->wpa_lock); } void prism54_wpa_bss_ie_clean(islpci_private *priv) { struct islpci_bss_wpa_ie *bss, *n; list_for_each_entry_safe(bss, n, &priv->bss_wpa_list, list) { kfree(bss); } } static void prism54_process_bss_data(islpci_private *priv, u32 oid, u8 *addr, u8 *payload, size_t len) { struct ieee80211_beacon_phdr *hdr; u8 *pos, *end; if (!priv->wpa) return; hdr = (struct 
ieee80211_beacon_phdr *) payload; pos = (u8 *) (hdr + 1); end = payload + len; while (pos < end) { if (pos + 2 + pos[1] > end) { printk(KERN_DEBUG "Parsing Beacon/ProbeResp failed " "for %pM\n", addr); return; } if (pos[0] == WLAN_EID_GENERIC && pos[1] >= 4 && memcmp(pos + 2, wpa_oid, 4) == 0) { prism54_wpa_bss_ie_add(priv, addr, pos, pos[1] + 2); return; } pos += 2 + pos[1]; } } static void handle_request(islpci_private *priv, struct obj_mlme *mlme, enum oid_num_t oid) { if (((mlme->state == DOT11_STATE_AUTHING) || (mlme->state == DOT11_STATE_ASSOCING)) && mgt_mlme_answer(priv)) { /* Someone is requesting auth and we must respond. Just send back * the trap with error code set accordingly. */ mlme->code = prism54_mac_accept(&priv->acl, mlme->address) ? 0 : 1; mgt_set_request(priv, oid, 0, mlme); } } static int prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, char *data) { struct obj_mlme *mlme = (struct obj_mlme *) data; struct obj_mlmeex *mlmeex = (struct obj_mlmeex *) data; struct obj_mlmeex *confirm; u8 wpa_ie[MAX_WPA_IE_LEN]; int wpa_ie_len; size_t len = 0; /* u16, better? */ u8 *payload = NULL, *pos = NULL; int ret; /* I think all trapable objects are listed here. * Some oids have a EX version. The difference is that they are emitted * in DOT11_MLME_EXTENDED mode (set with DOT11_OID_MLMEAUTOLEVEL) * with more info. * The few events already defined by the wireless tools are not really * suited. We use the more flexible custom event facility. 
*/ if (oid >= DOT11_OID_BEACON) { len = mlmeex->size; payload = pos = mlmeex->data; } /* I fear prism54_process_bss_data won't work with big endian data */ if ((oid == DOT11_OID_BEACON) || (oid == DOT11_OID_PROBE)) prism54_process_bss_data(priv, oid, mlmeex->address, payload, len); mgt_le_to_cpu(isl_oid[oid].flags & OID_FLAG_TYPE, (void *) mlme); switch (oid) { case GEN_OID_LINKSTATE: link_changed(priv->ndev, (u32) *data); break; case DOT11_OID_MICFAILURE: send_simple_event(priv, "Mic failure"); break; case DOT11_OID_DEAUTHENTICATE: send_formatted_event(priv, "DeAuthenticate request", mlme, 0); break; case DOT11_OID_AUTHENTICATE: handle_request(priv, mlme, oid); send_formatted_event(priv, "Authenticate request", mlme, 1); break; case DOT11_OID_DISASSOCIATE: send_formatted_event(priv, "Disassociate request", mlme, 0); break; case DOT11_OID_ASSOCIATE: handle_request(priv, mlme, oid); send_formatted_event(priv, "Associate request", mlme, 1); break; case DOT11_OID_REASSOCIATE: handle_request(priv, mlme, oid); send_formatted_event(priv, "ReAssociate request", mlme, 1); break; case DOT11_OID_BEACON: send_formatted_event(priv, "Received a beacon from an unknown AP", mlme, 0); break; case DOT11_OID_PROBE: /* we received a probe from a client. */ send_formatted_event(priv, "Received a probe from client", mlme, 0); break; /* Note : "mlme" is actually a "struct obj_mlmeex *" here, but this * is backward compatible layout-wise with "struct obj_mlme". 
*/ case DOT11_OID_DEAUTHENTICATEEX: send_formatted_event(priv, "DeAuthenticate request", mlme, 0); break; case DOT11_OID_AUTHENTICATEEX: handle_request(priv, mlme, oid); send_formatted_event(priv, "Authenticate request (ex)", mlme, 1); if (priv->iw_mode != IW_MODE_MASTER && mlmeex->state != DOT11_STATE_AUTHING) break; confirm = kmalloc(sizeof(struct obj_mlmeex) + 6, GFP_ATOMIC); if (!confirm) break; memcpy(&confirm->address, mlmeex->address, ETH_ALEN); printk(KERN_DEBUG "Authenticate from: address:\t%pM\n", mlmeex->address); confirm->id = -1; /* or mlmeex->id ? */ confirm->state = 0; /* not used */ confirm->code = 0; confirm->size = 6; confirm->data[0] = 0x00; confirm->data[1] = 0x00; confirm->data[2] = 0x02; confirm->data[3] = 0x00; confirm->data[4] = 0x00; confirm->data[5] = 0x00; ret = mgt_set_varlen(priv, DOT11_OID_ASSOCIATEEX, confirm, 6); kfree(confirm); if (ret) return ret; break; case DOT11_OID_DISASSOCIATEEX: send_formatted_event(priv, "Disassociate request (ex)", mlme, 0); break; case DOT11_OID_ASSOCIATEEX: handle_request(priv, mlme, oid); send_formatted_event(priv, "Associate request (ex)", mlme, 1); if (priv->iw_mode != IW_MODE_MASTER && mlmeex->state != DOT11_STATE_ASSOCING) break; confirm = kmalloc(sizeof(struct obj_mlmeex), GFP_ATOMIC); if (!confirm) break; memcpy(&confirm->address, mlmeex->address, ETH_ALEN); confirm->id = ((struct obj_mlmeex *)mlme)->id; confirm->state = 0; /* not used */ confirm->code = 0; wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie); if (!wpa_ie_len) { printk(KERN_DEBUG "No WPA IE found from address:\t%pM\n", mlmeex->address); kfree(confirm); break; } confirm->size = wpa_ie_len; memcpy(&confirm->data, wpa_ie, wpa_ie_len); mgt_set_varlen(priv, oid, confirm, wpa_ie_len); kfree(confirm); break; case DOT11_OID_REASSOCIATEEX: handle_request(priv, mlme, oid); send_formatted_event(priv, "Reassociate request (ex)", mlme, 1); if (priv->iw_mode != IW_MODE_MASTER && mlmeex->state != DOT11_STATE_ASSOCING) break; confirm 
= kmalloc(sizeof(struct obj_mlmeex), GFP_ATOMIC); if (!confirm) break; memcpy(&confirm->address, mlmeex->address, ETH_ALEN); confirm->id = mlmeex->id; confirm->state = 0; /* not used */ confirm->code = 0; wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie); if (!wpa_ie_len) { printk(KERN_DEBUG "No WPA IE found from address:\t%pM\n", mlmeex->address); kfree(confirm); break; } confirm->size = wpa_ie_len; memcpy(&confirm->data, wpa_ie, wpa_ie_len); mgt_set_varlen(priv, oid, confirm, wpa_ie_len); kfree(confirm); break; default: return -EINVAL; } return 0; } /* * Process a device trap. This is called via schedule_work(), outside of * interrupt context, no locks held. */ void prism54_process_trap(struct work_struct *work) { struct islpci_mgmtframe *frame = container_of(work, struct islpci_mgmtframe, ws); struct net_device *ndev = frame->ndev; enum oid_num_t n = mgt_oidtonum(frame->header->oid); if (n != OID_NUM_LAST) prism54_process_trap_helper(netdev_priv(ndev), n, frame->data); islpci_mgt_release(frame); } int prism54_set_mac_address(struct net_device *ndev, void *addr) { islpci_private *priv = netdev_priv(ndev); int ret; if (ndev->addr_len != 6) return -EINVAL; ret = mgt_set_request(priv, GEN_OID_MACADDRESS, 0, &((struct sockaddr *) addr)->sa_data); if (!ret) memcpy(priv->ndev->dev_addr, &((struct sockaddr *) addr)->sa_data, 6); return ret; } #define PRISM54_SET_WPA SIOCIWFIRSTPRIV+12 static int prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info, __u32 * uwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); u32 mlme, authen, dot1x, filter, wep; if (islpci_get_state(priv) < PRV_STATE_INIT) return 0; wep = 1; /* For privacy invoked */ filter = 1; /* Filter out all unencrypted frames */ dot1x = 0x01; /* To enable eap filter */ mlme = DOT11_MLME_EXTENDED; authen = DOT11_AUTH_OS; /* Only WEP uses _SK and _BOTH */ down_write(&priv->mib_sem); priv->wpa = *uwrq; switch (priv->wpa) { default: case 0: /* Clears/disables WPA and friends 
*/ wep = 0; filter = 0; /* Do not filter un-encrypted data */ dot1x = 0; mlme = DOT11_MLME_AUTO; printk("%s: Disabling WPA\n", ndev->name); break; case 2: case 1: /* WPA */ printk("%s: Enabling WPA\n", ndev->name); break; } up_write(&priv->mib_sem); mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen); mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &wep); mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0, &filter); mgt_set_request(priv, DOT11_OID_DOT1XENABLE, 0, &dot1x); mgt_set_request(priv, DOT11_OID_MLMEAUTOLEVEL, 0, &mlme); return 0; } static int prism54_get_wpa(struct net_device *ndev, struct iw_request_info *info, __u32 * uwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); *uwrq = priv->wpa; return 0; } static int prism54_set_prismhdr(struct net_device *ndev, struct iw_request_info *info, __u32 * uwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); priv->monitor_type = (*uwrq ? ARPHRD_IEEE80211_PRISM : ARPHRD_IEEE80211); if (priv->iw_mode == IW_MODE_MONITOR) priv->ndev->type = priv->monitor_type; return 0; } static int prism54_get_prismhdr(struct net_device *ndev, struct iw_request_info *info, __u32 * uwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); *uwrq = (priv->monitor_type == ARPHRD_IEEE80211_PRISM); return 0; } static int prism54_debug_oid(struct net_device *ndev, struct iw_request_info *info, __u32 * uwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); priv->priv_oid = *uwrq; printk("%s: oid 0x%08X\n", ndev->name, *uwrq); return 0; } static int prism54_debug_get_oid(struct net_device *ndev, struct iw_request_info *info, struct iw_point *data, char *extra) { islpci_private *priv = netdev_priv(ndev); struct islpci_mgmtframe *response; int ret = -EIO; printk("%s: get_oid 0x%08X\n", ndev->name, priv->priv_oid); data->length = 0; if (islpci_get_state(priv) >= PRV_STATE_INIT) { ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET, priv->priv_oid, extra, 256, &response); printk("%s: ret: %i\n", ndev->name, 
ret); if (ret || !response || response->header->operation == PIMFOR_OP_ERROR) { if (response) { islpci_mgt_release(response); } printk("%s: EIO\n", ndev->name); ret = -EIO; } if (!ret) { data->length = response->header->length; memcpy(extra, response->data, data->length); islpci_mgt_release(response); printk("%s: len: %i\n", ndev->name, data->length); } } return ret; } static int prism54_debug_set_oid(struct net_device *ndev, struct iw_request_info *info, struct iw_point *data, char *extra) { islpci_private *priv = netdev_priv(ndev); struct islpci_mgmtframe *response; int ret = 0, response_op = PIMFOR_OP_ERROR; printk("%s: set_oid 0x%08X\tlen: %d\n", ndev->name, priv->priv_oid, data->length); if (islpci_get_state(priv) >= PRV_STATE_INIT) { ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_SET, priv->priv_oid, extra, data->length, &response); printk("%s: ret: %i\n", ndev->name, ret); if (ret || !response || response->header->operation == PIMFOR_OP_ERROR) { if (response) { islpci_mgt_release(response); } printk("%s: EIO\n", ndev->name); ret = -EIO; } if (!ret) { response_op = response->header->operation; printk("%s: response_op: %i\n", ndev->name, response_op); islpci_mgt_release(response); } } return (ret ? 
ret : -EINPROGRESS); } static int prism54_set_spy(struct net_device *ndev, struct iw_request_info *info, union iwreq_data *uwrq, char *extra) { islpci_private *priv = netdev_priv(ndev); u32 u; enum oid_num_t oid = OID_INL_CONFIG; down_write(&priv->mib_sem); mgt_get(priv, OID_INL_CONFIG, &u); if ((uwrq->data.length == 0) && (priv->spy_data.spy_number > 0)) /* disable spy */ u &= ~INL_CONFIG_RXANNEX; else if ((uwrq->data.length > 0) && (priv->spy_data.spy_number == 0)) /* enable spy */ u |= INL_CONFIG_RXANNEX; mgt_set(priv, OID_INL_CONFIG, &u); mgt_commit_list(priv, &oid, 1); up_write(&priv->mib_sem); return iw_handler_set_spy(ndev, info, uwrq, extra); } static const iw_handler prism54_handler[] = { (iw_handler) prism54_commit, /* SIOCSIWCOMMIT */ (iw_handler) prism54_get_name, /* SIOCGIWNAME */ (iw_handler) NULL, /* SIOCSIWNWID */ (iw_handler) NULL, /* SIOCGIWNWID */ (iw_handler) prism54_set_freq, /* SIOCSIWFREQ */ (iw_handler) prism54_get_freq, /* SIOCGIWFREQ */ (iw_handler) prism54_set_mode, /* SIOCSIWMODE */ (iw_handler) prism54_get_mode, /* SIOCGIWMODE */ (iw_handler) prism54_set_sens, /* SIOCSIWSENS */ (iw_handler) prism54_get_sens, /* SIOCGIWSENS */ (iw_handler) NULL, /* SIOCSIWRANGE */ (iw_handler) prism54_get_range, /* SIOCGIWRANGE */ (iw_handler) NULL, /* SIOCSIWPRIV */ (iw_handler) NULL, /* SIOCGIWPRIV */ (iw_handler) NULL, /* SIOCSIWSTATS */ (iw_handler) NULL, /* SIOCGIWSTATS */ prism54_set_spy, /* SIOCSIWSPY */ iw_handler_get_spy, /* SIOCGIWSPY */ iw_handler_set_thrspy, /* SIOCSIWTHRSPY */ iw_handler_get_thrspy, /* SIOCGIWTHRSPY */ (iw_handler) prism54_set_wap, /* SIOCSIWAP */ (iw_handler) prism54_get_wap, /* SIOCGIWAP */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) NULL, /* SIOCGIWAPLIST deprecated */ (iw_handler) prism54_set_scan, /* SIOCSIWSCAN */ (iw_handler) prism54_get_scan, /* SIOCGIWSCAN */ (iw_handler) prism54_set_essid, /* SIOCSIWESSID */ (iw_handler) prism54_get_essid, /* SIOCGIWESSID */ (iw_handler) prism54_set_nick, /* SIOCSIWNICKN */ 
(iw_handler) prism54_get_nick, /* SIOCGIWNICKN */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) prism54_set_rate, /* SIOCSIWRATE */ (iw_handler) prism54_get_rate, /* SIOCGIWRATE */ (iw_handler) prism54_set_rts, /* SIOCSIWRTS */ (iw_handler) prism54_get_rts, /* SIOCGIWRTS */ (iw_handler) prism54_set_frag, /* SIOCSIWFRAG */ (iw_handler) prism54_get_frag, /* SIOCGIWFRAG */ (iw_handler) prism54_set_txpower, /* SIOCSIWTXPOW */ (iw_handler) prism54_get_txpower, /* SIOCGIWTXPOW */ (iw_handler) prism54_set_retry, /* SIOCSIWRETRY */ (iw_handler) prism54_get_retry, /* SIOCGIWRETRY */ (iw_handler) prism54_set_encode, /* SIOCSIWENCODE */ (iw_handler) prism54_get_encode, /* SIOCGIWENCODE */ (iw_handler) NULL, /* SIOCSIWPOWER */ (iw_handler) NULL, /* SIOCGIWPOWER */ NULL, /* -- hole -- */ NULL, /* -- hole -- */ (iw_handler) prism54_set_genie, /* SIOCSIWGENIE */ (iw_handler) prism54_get_genie, /* SIOCGIWGENIE */ (iw_handler) prism54_set_auth, /* SIOCSIWAUTH */ (iw_handler) prism54_get_auth, /* SIOCGIWAUTH */ (iw_handler) prism54_set_encodeext, /* SIOCSIWENCODEEXT */ (iw_handler) prism54_get_encodeext, /* SIOCGIWENCODEEXT */ NULL, /* SIOCSIWPMKSA */ }; /* The low order bit identify a SET (0) or a GET (1) ioctl. 
*/ #define PRISM54_RESET SIOCIWFIRSTPRIV #define PRISM54_GET_POLICY SIOCIWFIRSTPRIV+1 #define PRISM54_SET_POLICY SIOCIWFIRSTPRIV+2 #define PRISM54_GET_MAC SIOCIWFIRSTPRIV+3 #define PRISM54_ADD_MAC SIOCIWFIRSTPRIV+4 #define PRISM54_DEL_MAC SIOCIWFIRSTPRIV+6 #define PRISM54_KICK_MAC SIOCIWFIRSTPRIV+8 #define PRISM54_KICK_ALL SIOCIWFIRSTPRIV+10 #define PRISM54_GET_WPA SIOCIWFIRSTPRIV+11 #define PRISM54_SET_WPA SIOCIWFIRSTPRIV+12 #define PRISM54_DBG_OID SIOCIWFIRSTPRIV+14 #define PRISM54_DBG_GET_OID SIOCIWFIRSTPRIV+15 #define PRISM54_DBG_SET_OID SIOCIWFIRSTPRIV+16 #define PRISM54_GET_OID SIOCIWFIRSTPRIV+17 #define PRISM54_SET_OID_U32 SIOCIWFIRSTPRIV+18 #define PRISM54_SET_OID_STR SIOCIWFIRSTPRIV+20 #define PRISM54_SET_OID_ADDR SIOCIWFIRSTPRIV+22 #define PRISM54_GET_PRISMHDR SIOCIWFIRSTPRIV+23 #define PRISM54_SET_PRISMHDR SIOCIWFIRSTPRIV+24 #define IWPRIV_SET_U32(n,x) { n, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "s_"x } #define IWPRIV_SET_SSID(n,x) { n, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 1, 0, "s_"x } #define IWPRIV_SET_ADDR(n,x) { n, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "s_"x } #define IWPRIV_GET(n,x) { n, 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | PRIV_STR_SIZE, "g_"x } #define IWPRIV_U32(n,x) IWPRIV_SET_U32(n,x), IWPRIV_GET(n,x) #define IWPRIV_SSID(n,x) IWPRIV_SET_SSID(n,x), IWPRIV_GET(n,x) #define IWPRIV_ADDR(n,x) IWPRIV_SET_ADDR(n,x), IWPRIV_GET(n,x) /* Note : limited to 128 private ioctls (wireless tools 26) */ static const struct iw_priv_args prism54_private_args[] = { /*{ cmd, set_args, get_args, name } */ {PRISM54_RESET, 0, 0, "reset"}, {PRISM54_GET_PRISMHDR, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_prismhdr"}, {PRISM54_SET_PRISMHDR, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_prismhdr"}, {PRISM54_GET_POLICY, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getPolicy"}, {PRISM54_SET_POLICY, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "setPolicy"}, {PRISM54_GET_MAC, 0, IW_PRIV_TYPE_ADDR | 64, "getMac"}, {PRISM54_ADD_MAC, 
IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "addMac"}, {PRISM54_DEL_MAC, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "delMac"}, {PRISM54_KICK_MAC, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "kickMac"}, {PRISM54_KICK_ALL, 0, 0, "kickAll"}, {PRISM54_GET_WPA, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_wpa"}, {PRISM54_SET_WPA, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_wpa"}, {PRISM54_DBG_OID, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dbg_oid"}, {PRISM54_DBG_GET_OID, 0, IW_PRIV_TYPE_BYTE | 256, "dbg_get_oid"}, {PRISM54_DBG_SET_OID, IW_PRIV_TYPE_BYTE | 256, 0, "dbg_set_oid"}, /* --- sub-ioctls handlers --- */ {PRISM54_GET_OID, 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | PRIV_STR_SIZE, ""}, {PRISM54_SET_OID_U32, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, ""}, {PRISM54_SET_OID_STR, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 1, 0, ""}, {PRISM54_SET_OID_ADDR, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, ""}, /* --- sub-ioctls definitions --- */ IWPRIV_ADDR(GEN_OID_MACADDRESS, "addr"), IWPRIV_GET(GEN_OID_LINKSTATE, "linkstate"), IWPRIV_U32(DOT11_OID_BSSTYPE, "bsstype"), IWPRIV_ADDR(DOT11_OID_BSSID, "bssid"), IWPRIV_U32(DOT11_OID_STATE, "state"), IWPRIV_U32(DOT11_OID_AID, "aid"), IWPRIV_SSID(DOT11_OID_SSIDOVERRIDE, "ssidoverride"), IWPRIV_U32(DOT11_OID_MEDIUMLIMIT, "medlimit"), IWPRIV_U32(DOT11_OID_BEACONPERIOD, "beacon"), IWPRIV_U32(DOT11_OID_DTIMPERIOD, "dtimperiod"), IWPRIV_U32(DOT11_OID_AUTHENABLE, "authenable"), IWPRIV_U32(DOT11_OID_PRIVACYINVOKED, "privinvok"), IWPRIV_U32(DOT11_OID_EXUNENCRYPTED, "exunencrypt"), IWPRIV_U32(DOT11_OID_REKEYTHRESHOLD, "rekeythresh"), IWPRIV_U32(DOT11_OID_MAXTXLIFETIME, "maxtxlife"), IWPRIV_U32(DOT11_OID_MAXRXLIFETIME, "maxrxlife"), IWPRIV_U32(DOT11_OID_ALOFT_FIXEDRATE, "fixedrate"), IWPRIV_U32(DOT11_OID_MAXFRAMEBURST, "frameburst"), IWPRIV_U32(DOT11_OID_PSM, "psm"), IWPRIV_U32(DOT11_OID_BRIDGELOCAL, "bridge"), IWPRIV_U32(DOT11_OID_CLIENTS, "clients"), IWPRIV_U32(DOT11_OID_CLIENTSASSOCIATED, "clientassoc"), 
IWPRIV_U32(DOT11_OID_DOT1XENABLE, "dot1xenable"), IWPRIV_U32(DOT11_OID_ANTENNARX, "rxant"), IWPRIV_U32(DOT11_OID_ANTENNATX, "txant"), IWPRIV_U32(DOT11_OID_ANTENNADIVERSITY, "antdivers"), IWPRIV_U32(DOT11_OID_EDTHRESHOLD, "edthresh"), IWPRIV_U32(DOT11_OID_PREAMBLESETTINGS, "preamble"), IWPRIV_GET(DOT11_OID_RATES, "rates"), IWPRIV_U32(DOT11_OID_OUTPUTPOWER, ".11outpower"), IWPRIV_GET(DOT11_OID_SUPPORTEDRATES, "supprates"), IWPRIV_GET(DOT11_OID_SUPPORTEDFREQUENCIES, "suppfreq"), IWPRIV_U32(DOT11_OID_NOISEFLOOR, "noisefloor"), IWPRIV_GET(DOT11_OID_FREQUENCYACTIVITY, "freqactivity"), IWPRIV_U32(DOT11_OID_NONERPPROTECTION, "nonerpprotec"), IWPRIV_U32(DOT11_OID_PROFILES, "profile"), IWPRIV_GET(DOT11_OID_EXTENDEDRATES, "extrates"), IWPRIV_U32(DOT11_OID_MLMEAUTOLEVEL, "mlmelevel"), IWPRIV_GET(DOT11_OID_BSSS, "bsss"), IWPRIV_GET(DOT11_OID_BSSLIST, "bsslist"), IWPRIV_U32(OID_INL_MODE, "mode"), IWPRIV_U32(OID_INL_CONFIG, "config"), IWPRIV_U32(OID_INL_DOT11D_CONFORMANCE, ".11dconform"), IWPRIV_GET(OID_INL_PHYCAPABILITIES, "phycapa"), IWPRIV_U32(OID_INL_OUTPUTPOWER, "outpower"), }; static const iw_handler prism54_private_handler[] = { (iw_handler) prism54_reset, (iw_handler) prism54_get_policy, (iw_handler) prism54_set_policy, (iw_handler) prism54_get_mac, (iw_handler) prism54_add_mac, (iw_handler) NULL, (iw_handler) prism54_del_mac, (iw_handler) NULL, (iw_handler) prism54_kick_mac, (iw_handler) NULL, (iw_handler) prism54_kick_all, (iw_handler) prism54_get_wpa, (iw_handler) prism54_set_wpa, (iw_handler) NULL, (iw_handler) prism54_debug_oid, (iw_handler) prism54_debug_get_oid, (iw_handler) prism54_debug_set_oid, (iw_handler) prism54_get_oid, (iw_handler) prism54_set_u32, (iw_handler) NULL, (iw_handler) prism54_set_raw, (iw_handler) NULL, (iw_handler) prism54_set_raw, (iw_handler) prism54_get_prismhdr, (iw_handler) prism54_set_prismhdr, }; const struct iw_handler_def prism54_handler_def = { .num_standard = ARRAY_SIZE(prism54_handler), .num_private = 
ARRAY_SIZE(prism54_private_handler), .num_private_args = ARRAY_SIZE(prism54_private_args), .standard = (iw_handler *) prism54_handler, .private = (iw_handler *) prism54_private_handler, .private_args = (struct iw_priv_args *) prism54_private_args, .get_wireless_stats = prism54_get_wireless_stats, };
gpl-2.0
qltsar/g620s
drivers/media/i2c/sr030pc30.c
7236
22130
/* * Driver for SiliconFile SR030PC30 VGA (1/10-Inch) Image Sensor with ISP * * Copyright (C) 2010 Samsung Electronics Co., Ltd * Author: Sylwester Nawrocki, s.nawrocki@samsung.com * * Based on original driver authored by Dongsoo Nathaniel Kim * and HeungJun Kim <riverful.kim@samsung.com>. * * Based on mt9v011 Micron Digital Image Sensor driver * Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/i2c.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/module.h> #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> #include <media/v4l2-mediabus.h> #include <media/sr030pc30.h> static int debug; module_param(debug, int, 0644); #define MODULE_NAME "SR030PC30" /* * Register offsets within a page * b15..b8 - page id, b7..b0 - register address */ #define POWER_CTRL_REG 0x0001 #define PAGEMODE_REG 0x03 #define DEVICE_ID_REG 0x0004 #define NOON010PC30_ID 0x86 #define SR030PC30_ID 0x8C #define VDO_CTL1_REG 0x0010 #define SUBSAMPL_NONE_VGA 0 #define SUBSAMPL_QVGA 0x10 #define SUBSAMPL_QQVGA 0x20 #define VDO_CTL2_REG 0x0011 #define SYNC_CTL_REG 0x0012 #define WIN_ROWH_REG 0x0020 #define WIN_ROWL_REG 0x0021 #define WIN_COLH_REG 0x0022 #define WIN_COLL_REG 0x0023 #define WIN_HEIGHTH_REG 0x0024 #define WIN_HEIGHTL_REG 0x0025 #define WIN_WIDTHH_REG 0x0026 #define WIN_WIDTHL_REG 0x0027 #define HBLANKH_REG 0x0040 #define HBLANKL_REG 0x0041 #define VSYNCH_REG 0x0042 #define VSYNCL_REG 0x0043 /* page 10 */ #define ISP_CTL_REG(n) (0x1010 + (n)) #define YOFS_REG 0x1040 #define DARK_YOFS_REG 0x1041 #define AG_ABRTH_REG 0x1050 #define SAT_CTL_REG 0x1060 #define BSAT_REG 0x1061 #define RSAT_REG 0x1062 #define AG_SAT_TH_REG 0x1063 /* page 11 */ #define ZLPF_CTRL_REG 0x1110 #define 
ZLPF_CTRL2_REG 0x1112 #define ZLPF_AGH_THR_REG 0x1121 #define ZLPF_THR_REG 0x1160 #define ZLPF_DYN_THR_REG 0x1160 /* page 12 */ #define YCLPF_CTL1_REG 0x1240 #define YCLPF_CTL2_REG 0x1241 #define YCLPF_THR_REG 0x1250 #define BLPF_CTL_REG 0x1270 #define BLPF_THR1_REG 0x1274 #define BLPF_THR2_REG 0x1275 /* page 14 - Lens Shading Compensation */ #define LENS_CTRL_REG 0x1410 #define LENS_XCEN_REG 0x1420 #define LENS_YCEN_REG 0x1421 #define LENS_R_COMP_REG 0x1422 #define LENS_G_COMP_REG 0x1423 #define LENS_B_COMP_REG 0x1424 /* page 15 - Color correction */ #define CMC_CTL_REG 0x1510 #define CMC_OFSGH_REG 0x1514 #define CMC_OFSGL_REG 0x1516 #define CMC_SIGN_REG 0x1517 /* Color correction coefficients */ #define CMC_COEF_REG(n) (0x1530 + (n)) /* Color correction offset coefficients */ #define CMC_OFS_REG(n) (0x1540 + (n)) /* page 16 - Gamma correction */ #define GMA_CTL_REG 0x1610 /* Gamma correction coefficients 0.14 */ #define GMA_COEF_REG(n) (0x1630 + (n)) /* page 20 - Auto Exposure */ #define AE_CTL1_REG 0x2010 #define AE_CTL2_REG 0x2011 #define AE_FRM_CTL_REG 0x2020 #define AE_FINE_CTL_REG(n) (0x2028 + (n)) #define EXP_TIMEH_REG 0x2083 #define EXP_TIMEM_REG 0x2084 #define EXP_TIMEL_REG 0x2085 #define EXP_MMINH_REG 0x2086 #define EXP_MMINL_REG 0x2087 #define EXP_MMAXH_REG 0x2088 #define EXP_MMAXM_REG 0x2089 #define EXP_MMAXL_REG 0x208A /* page 22 - Auto White Balance */ #define AWB_CTL1_REG 0x2210 #define AWB_ENABLE 0x80 #define AWB_CTL2_REG 0x2211 #define MWB_ENABLE 0x01 /* RGB gain control (manual WB) when AWB_CTL1[7]=0 */ #define AWB_RGAIN_REG 0x2280 #define AWB_GGAIN_REG 0x2281 #define AWB_BGAIN_REG 0x2282 #define AWB_RMAX_REG 0x2283 #define AWB_RMIN_REG 0x2284 #define AWB_BMAX_REG 0x2285 #define AWB_BMIN_REG 0x2286 /* R, B gain range in bright light conditions */ #define AWB_RMAXB_REG 0x2287 #define AWB_RMINB_REG 0x2288 #define AWB_BMAXB_REG 0x2289 #define AWB_BMINB_REG 0x228A /* manual white balance, when AWB_CTL2[0]=1 */ #define MWB_RGAIN_REG 0x22B2 #define 
MWB_BGAIN_REG 0x22B3 /* the token to mark an array end */ #define REG_TERM 0xFFFF /* Minimum and maximum exposure time in ms */ #define EXPOS_MIN_MS 1 #define EXPOS_MAX_MS 125 struct sr030pc30_info { struct v4l2_subdev sd; const struct sr030pc30_platform_data *pdata; const struct sr030pc30_format *curr_fmt; const struct sr030pc30_frmsize *curr_win; unsigned int auto_wb:1; unsigned int auto_exp:1; unsigned int hflip:1; unsigned int vflip:1; unsigned int sleep:1; unsigned int exposure; u8 blue_balance; u8 red_balance; u8 i2c_reg_page; }; struct sr030pc30_format { enum v4l2_mbus_pixelcode code; enum v4l2_colorspace colorspace; u16 ispctl1_reg; }; struct sr030pc30_frmsize { u16 width; u16 height; int vid_ctl1; }; struct i2c_regval { u16 addr; u16 val; }; static const struct v4l2_queryctrl sr030pc30_ctrl[] = { { .id = V4L2_CID_AUTO_WHITE_BALANCE, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Auto White Balance", .minimum = 0, .maximum = 1, .step = 1, .default_value = 1, }, { .id = V4L2_CID_RED_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Red Balance", .minimum = 0, .maximum = 127, .step = 1, .default_value = 64, .flags = 0, }, { .id = V4L2_CID_BLUE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Blue Balance", .minimum = 0, .maximum = 127, .step = 1, .default_value = 64, }, { .id = V4L2_CID_EXPOSURE_AUTO, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Auto Exposure", .minimum = 0, .maximum = 1, .step = 1, .default_value = 1, }, { .id = V4L2_CID_EXPOSURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Exposure", .minimum = EXPOS_MIN_MS, .maximum = EXPOS_MAX_MS, .step = 1, .default_value = 1, }, { } }; /* supported resolutions */ static const struct sr030pc30_frmsize sr030pc30_sizes[] = { { .width = 640, .height = 480, .vid_ctl1 = SUBSAMPL_NONE_VGA, }, { .width = 320, .height = 240, .vid_ctl1 = SUBSAMPL_QVGA, }, { .width = 160, .height = 120, .vid_ctl1 = SUBSAMPL_QQVGA, }, }; /* supported pixel formats */ static const struct sr030pc30_format sr030pc30_formats[] = { { .code = 
V4L2_MBUS_FMT_YUYV8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .ispctl1_reg = 0x03, }, { .code = V4L2_MBUS_FMT_YVYU8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .ispctl1_reg = 0x02, }, { .code = V4L2_MBUS_FMT_VYUY8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .ispctl1_reg = 0, }, { .code = V4L2_MBUS_FMT_UYVY8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .ispctl1_reg = 0x01, }, { .code = V4L2_MBUS_FMT_RGB565_2X8_BE, .colorspace = V4L2_COLORSPACE_JPEG, .ispctl1_reg = 0x40, }, }; static const struct i2c_regval sr030pc30_base_regs[] = { /* Window size and position within pixel matrix */ { WIN_ROWH_REG, 0x00 }, { WIN_ROWL_REG, 0x06 }, { WIN_COLH_REG, 0x00 }, { WIN_COLL_REG, 0x06 }, { WIN_HEIGHTH_REG, 0x01 }, { WIN_HEIGHTL_REG, 0xE0 }, { WIN_WIDTHH_REG, 0x02 }, { WIN_WIDTHL_REG, 0x80 }, { HBLANKH_REG, 0x01 }, { HBLANKL_REG, 0x50 }, { VSYNCH_REG, 0x00 }, { VSYNCL_REG, 0x14 }, { SYNC_CTL_REG, 0 }, /* Color corection and saturation */ { ISP_CTL_REG(0), 0x30 }, { YOFS_REG, 0x80 }, { DARK_YOFS_REG, 0x04 }, { AG_ABRTH_REG, 0x78 }, { SAT_CTL_REG, 0x1F }, { BSAT_REG, 0x90 }, { AG_SAT_TH_REG, 0xF0 }, { 0x1064, 0x80 }, { CMC_CTL_REG, 0x03 }, { CMC_OFSGH_REG, 0x3C }, { CMC_OFSGL_REG, 0x2C }, { CMC_SIGN_REG, 0x2F }, { CMC_COEF_REG(0), 0xCB }, { CMC_OFS_REG(0), 0x87 }, { CMC_COEF_REG(1), 0x61 }, { CMC_OFS_REG(1), 0x18 }, { CMC_COEF_REG(2), 0x16 }, { CMC_OFS_REG(2), 0x91 }, { CMC_COEF_REG(3), 0x23 }, { CMC_OFS_REG(3), 0x94 }, { CMC_COEF_REG(4), 0xCE }, { CMC_OFS_REG(4), 0x9f }, { CMC_COEF_REG(5), 0x2B }, { CMC_OFS_REG(5), 0x33 }, { CMC_COEF_REG(6), 0x01 }, { CMC_OFS_REG(6), 0x00 }, { CMC_COEF_REG(7), 0x34 }, { CMC_OFS_REG(7), 0x94 }, { CMC_COEF_REG(8), 0x75 }, { CMC_OFS_REG(8), 0x14 }, /* Color corection coefficients */ { GMA_CTL_REG, 0x03 }, { GMA_COEF_REG(0), 0x00 }, { GMA_COEF_REG(1), 0x19 }, { GMA_COEF_REG(2), 0x26 }, { GMA_COEF_REG(3), 0x3B }, { GMA_COEF_REG(4), 0x5D }, { GMA_COEF_REG(5), 0x79 }, { GMA_COEF_REG(6), 0x8E }, { GMA_COEF_REG(7), 0x9F }, { GMA_COEF_REG(8), 0xAF }, { 
GMA_COEF_REG(9), 0xBD }, { GMA_COEF_REG(10), 0xCA }, { GMA_COEF_REG(11), 0xDD }, { GMA_COEF_REG(12), 0xEC }, { GMA_COEF_REG(13), 0xF7 }, { GMA_COEF_REG(14), 0xFF }, /* Noise reduction, Z-LPF, YC-LPF and BLPF filters setup */ { ZLPF_CTRL_REG, 0x99 }, { ZLPF_CTRL2_REG, 0x0E }, { ZLPF_AGH_THR_REG, 0x29 }, { ZLPF_THR_REG, 0x0F }, { ZLPF_DYN_THR_REG, 0x63 }, { YCLPF_CTL1_REG, 0x23 }, { YCLPF_CTL2_REG, 0x3B }, { YCLPF_THR_REG, 0x05 }, { BLPF_CTL_REG, 0x1D }, { BLPF_THR1_REG, 0x05 }, { BLPF_THR2_REG, 0x04 }, /* Automatic white balance */ { AWB_CTL1_REG, 0xFB }, { AWB_CTL2_REG, 0x26 }, { AWB_RMAX_REG, 0x54 }, { AWB_RMIN_REG, 0x2B }, { AWB_BMAX_REG, 0x57 }, { AWB_BMIN_REG, 0x29 }, { AWB_RMAXB_REG, 0x50 }, { AWB_RMINB_REG, 0x43 }, { AWB_BMAXB_REG, 0x30 }, { AWB_BMINB_REG, 0x22 }, /* Auto exposure */ { AE_CTL1_REG, 0x8C }, { AE_CTL2_REG, 0x04 }, { AE_FRM_CTL_REG, 0x01 }, { AE_FINE_CTL_REG(0), 0x3F }, { AE_FINE_CTL_REG(1), 0xA3 }, { AE_FINE_CTL_REG(3), 0x34 }, /* Lens shading compensation */ { LENS_CTRL_REG, 0x01 }, { LENS_XCEN_REG, 0x80 }, { LENS_YCEN_REG, 0x70 }, { LENS_R_COMP_REG, 0x53 }, { LENS_G_COMP_REG, 0x40 }, { LENS_B_COMP_REG, 0x3e }, { REG_TERM, 0 }, }; static inline struct sr030pc30_info *to_sr030pc30(struct v4l2_subdev *sd) { return container_of(sd, struct sr030pc30_info, sd); } static inline int set_i2c_page(struct sr030pc30_info *info, struct i2c_client *client, unsigned int reg) { int ret = 0; u32 page = reg >> 8 & 0xFF; if (info->i2c_reg_page != page && (reg & 0xFF) != 0x03) { ret = i2c_smbus_write_byte_data(client, PAGEMODE_REG, page); if (!ret) info->i2c_reg_page = page; } return ret; } static int cam_i2c_read(struct v4l2_subdev *sd, u32 reg_addr) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct sr030pc30_info *info = to_sr030pc30(sd); int ret = set_i2c_page(info, client, reg_addr); if (!ret) ret = i2c_smbus_read_byte_data(client, reg_addr & 0xFF); return ret; } static int cam_i2c_write(struct v4l2_subdev *sd, u32 reg_addr, u32 val) { struct 
i2c_client *client = v4l2_get_subdevdata(sd); struct sr030pc30_info *info = to_sr030pc30(sd); int ret = set_i2c_page(info, client, reg_addr); if (!ret) ret = i2c_smbus_write_byte_data( client, reg_addr & 0xFF, val); return ret; } static inline int sr030pc30_bulk_write_reg(struct v4l2_subdev *sd, const struct i2c_regval *msg) { while (msg->addr != REG_TERM) { int ret = cam_i2c_write(sd, msg->addr, msg->val); if (ret) return ret; msg++; } return 0; } /* Device reset and sleep mode control */ static int sr030pc30_pwr_ctrl(struct v4l2_subdev *sd, bool reset, bool sleep) { struct sr030pc30_info *info = to_sr030pc30(sd); u8 reg = sleep ? 0xF1 : 0xF0; int ret = 0; if (reset) ret = cam_i2c_write(sd, POWER_CTRL_REG, reg | 0x02); if (!ret) { ret = cam_i2c_write(sd, POWER_CTRL_REG, reg); if (!ret) { info->sleep = sleep; if (reset) info->i2c_reg_page = -1; } } return ret; } static inline int sr030pc30_enable_autoexposure(struct v4l2_subdev *sd, int on) { struct sr030pc30_info *info = to_sr030pc30(sd); /* auto anti-flicker is also enabled here */ int ret = cam_i2c_write(sd, AE_CTL1_REG, on ? 0xDC : 0x0C); if (!ret) info->auto_exp = on; return ret; } static int sr030pc30_set_exposure(struct v4l2_subdev *sd, int value) { struct sr030pc30_info *info = to_sr030pc30(sd); unsigned long expos = value * info->pdata->clk_rate / (8 * 1000); int ret = cam_i2c_write(sd, EXP_TIMEH_REG, expos >> 16 & 0xFF); if (!ret) ret = cam_i2c_write(sd, EXP_TIMEM_REG, expos >> 8 & 0xFF); if (!ret) ret = cam_i2c_write(sd, EXP_TIMEL_REG, expos & 0xFF); if (!ret) { /* Turn off AE */ info->exposure = value; ret = sr030pc30_enable_autoexposure(sd, 0); } return ret; } /* Automatic white balance control */ static int sr030pc30_enable_autowhitebalance(struct v4l2_subdev *sd, int on) { struct sr030pc30_info *info = to_sr030pc30(sd); int ret = cam_i2c_write(sd, AWB_CTL2_REG, on ? 0x2E : 0x2F); if (!ret) ret = cam_i2c_write(sd, AWB_CTL1_REG, on ? 
0xFB : 0x7B); if (!ret) info->auto_wb = on; return ret; } static int sr030pc30_set_flip(struct v4l2_subdev *sd) { struct sr030pc30_info *info = to_sr030pc30(sd); s32 reg = cam_i2c_read(sd, VDO_CTL2_REG); if (reg < 0) return reg; reg &= 0x7C; if (info->hflip) reg |= 0x01; if (info->vflip) reg |= 0x02; return cam_i2c_write(sd, VDO_CTL2_REG, reg | 0x80); } /* Configure resolution, color format and image flip */ static int sr030pc30_set_params(struct v4l2_subdev *sd) { struct sr030pc30_info *info = to_sr030pc30(sd); int ret; if (!info->curr_win) return -EINVAL; /* Configure the resolution through subsampling */ ret = cam_i2c_write(sd, VDO_CTL1_REG, info->curr_win->vid_ctl1); if (!ret && info->curr_fmt) ret = cam_i2c_write(sd, ISP_CTL_REG(0), info->curr_fmt->ispctl1_reg); if (!ret) ret = sr030pc30_set_flip(sd); return ret; } /* Find nearest matching image pixel size. */ static int sr030pc30_try_frame_size(struct v4l2_mbus_framefmt *mf) { unsigned int min_err = ~0; int i = ARRAY_SIZE(sr030pc30_sizes); const struct sr030pc30_frmsize *fsize = &sr030pc30_sizes[0], *match = NULL; while (i--) { int err = abs(fsize->width - mf->width) + abs(fsize->height - mf->height); if (err < min_err) { min_err = err; match = fsize; } fsize++; } if (match) { mf->width = match->width; mf->height = match->height; return 0; } return -EINVAL; } static int sr030pc30_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc) { int i; for (i = 0; i < ARRAY_SIZE(sr030pc30_ctrl); i++) if (qc->id == sr030pc30_ctrl[i].id) { *qc = sr030pc30_ctrl[i]; v4l2_dbg(1, debug, sd, "%s id: %d\n", __func__, qc->id); return 0; } return -EINVAL; } static inline int sr030pc30_set_bluebalance(struct v4l2_subdev *sd, int value) { int ret = cam_i2c_write(sd, MWB_BGAIN_REG, value); if (!ret) to_sr030pc30(sd)->blue_balance = value; return ret; } static inline int sr030pc30_set_redbalance(struct v4l2_subdev *sd, int value) { int ret = cam_i2c_write(sd, MWB_RGAIN_REG, value); if (!ret) to_sr030pc30(sd)->red_balance = 
value; return ret; } static int sr030pc30_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { int i, ret = 0; for (i = 0; i < ARRAY_SIZE(sr030pc30_ctrl); i++) if (ctrl->id == sr030pc30_ctrl[i].id) break; if (i == ARRAY_SIZE(sr030pc30_ctrl)) return -EINVAL; if (ctrl->value < sr030pc30_ctrl[i].minimum || ctrl->value > sr030pc30_ctrl[i].maximum) return -ERANGE; v4l2_dbg(1, debug, sd, "%s: ctrl_id: %d, value: %d\n", __func__, ctrl->id, ctrl->value); switch (ctrl->id) { case V4L2_CID_AUTO_WHITE_BALANCE: sr030pc30_enable_autowhitebalance(sd, ctrl->value); break; case V4L2_CID_BLUE_BALANCE: ret = sr030pc30_set_bluebalance(sd, ctrl->value); break; case V4L2_CID_RED_BALANCE: ret = sr030pc30_set_redbalance(sd, ctrl->value); break; case V4L2_CID_EXPOSURE_AUTO: sr030pc30_enable_autoexposure(sd, ctrl->value == V4L2_EXPOSURE_AUTO); break; case V4L2_CID_EXPOSURE: ret = sr030pc30_set_exposure(sd, ctrl->value); break; default: return -EINVAL; } return ret; } static int sr030pc30_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct sr030pc30_info *info = to_sr030pc30(sd); v4l2_dbg(1, debug, sd, "%s: id: %d\n", __func__, ctrl->id); switch (ctrl->id) { case V4L2_CID_AUTO_WHITE_BALANCE: ctrl->value = info->auto_wb; break; case V4L2_CID_BLUE_BALANCE: ctrl->value = info->blue_balance; break; case V4L2_CID_RED_BALANCE: ctrl->value = info->red_balance; break; case V4L2_CID_EXPOSURE_AUTO: ctrl->value = info->auto_exp; break; case V4L2_CID_EXPOSURE: ctrl->value = info->exposure; break; default: return -EINVAL; } return 0; } static int sr030pc30_enum_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { if (!code || index >= ARRAY_SIZE(sr030pc30_formats)) return -EINVAL; *code = sr030pc30_formats[index].code; return 0; } static int sr030pc30_g_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct sr030pc30_info *info = to_sr030pc30(sd); int ret; if (!mf) return -EINVAL; if (!info->curr_win || !info->curr_fmt) { ret = 
sr030pc30_set_params(sd); if (ret) return ret; } mf->width = info->curr_win->width; mf->height = info->curr_win->height; mf->code = info->curr_fmt->code; mf->colorspace = info->curr_fmt->colorspace; mf->field = V4L2_FIELD_NONE; return 0; } /* Return nearest media bus frame format. */ static const struct sr030pc30_format *try_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { int i = ARRAY_SIZE(sr030pc30_formats); sr030pc30_try_frame_size(mf); while (i--) if (mf->code == sr030pc30_formats[i].code) break; mf->code = sr030pc30_formats[i].code; return &sr030pc30_formats[i]; } /* Return nearest media bus frame format. */ static int sr030pc30_try_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { if (!sd || !mf) return -EINVAL; try_fmt(sd, mf); return 0; } static int sr030pc30_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct sr030pc30_info *info = to_sr030pc30(sd); if (!sd || !mf) return -EINVAL; info->curr_fmt = try_fmt(sd, mf); return sr030pc30_set_params(sd); } static int sr030pc30_base_config(struct v4l2_subdev *sd) { struct sr030pc30_info *info = to_sr030pc30(sd); int ret; unsigned long expmin, expmax; ret = sr030pc30_bulk_write_reg(sd, sr030pc30_base_regs); if (!ret) { info->curr_fmt = &sr030pc30_formats[0]; info->curr_win = &sr030pc30_sizes[0]; ret = sr030pc30_set_params(sd); } if (!ret) ret = sr030pc30_pwr_ctrl(sd, false, false); if (!ret && !info->pdata) return ret; expmin = EXPOS_MIN_MS * info->pdata->clk_rate / (8 * 1000); expmax = EXPOS_MAX_MS * info->pdata->clk_rate / (8 * 1000); v4l2_dbg(1, debug, sd, "%s: expmin= %lx, expmax= %lx", __func__, expmin, expmax); /* Setting up manual exposure time range */ ret = cam_i2c_write(sd, EXP_MMINH_REG, expmin >> 8 & 0xFF); if (!ret) ret = cam_i2c_write(sd, EXP_MMINL_REG, expmin & 0xFF); if (!ret) ret = cam_i2c_write(sd, EXP_MMAXH_REG, expmax >> 16 & 0xFF); if (!ret) ret = cam_i2c_write(sd, EXP_MMAXM_REG, expmax >> 8 & 0xFF); if (!ret) ret = cam_i2c_write(sd, EXP_MMAXL_REG, 
expmax & 0xFF); return ret; } static int sr030pc30_s_power(struct v4l2_subdev *sd, int on) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct sr030pc30_info *info = to_sr030pc30(sd); const struct sr030pc30_platform_data *pdata = info->pdata; int ret; if (pdata == NULL) { WARN(1, "No platform data!\n"); return -EINVAL; } /* * Put sensor into power sleep mode before switching off * power and disabling MCLK. */ if (!on) sr030pc30_pwr_ctrl(sd, false, true); /* set_power controls sensor's power and clock */ if (pdata->set_power) { ret = pdata->set_power(&client->dev, on); if (ret) return ret; } if (on) { ret = sr030pc30_base_config(sd); } else { ret = 0; info->curr_win = NULL; info->curr_fmt = NULL; } return ret; } static const struct v4l2_subdev_core_ops sr030pc30_core_ops = { .s_power = sr030pc30_s_power, .queryctrl = sr030pc30_queryctrl, .s_ctrl = sr030pc30_s_ctrl, .g_ctrl = sr030pc30_g_ctrl, }; static const struct v4l2_subdev_video_ops sr030pc30_video_ops = { .g_mbus_fmt = sr030pc30_g_fmt, .s_mbus_fmt = sr030pc30_s_fmt, .try_mbus_fmt = sr030pc30_try_fmt, .enum_mbus_fmt = sr030pc30_enum_fmt, }; static const struct v4l2_subdev_ops sr030pc30_ops = { .core = &sr030pc30_core_ops, .video = &sr030pc30_video_ops, }; /* * Detect sensor type. Return 0 if SR030PC30 was detected * or -ENODEV otherwise. */ static int sr030pc30_detect(struct i2c_client *client) { const struct sr030pc30_platform_data *pdata = client->dev.platform_data; int ret; /* Enable sensor's power and clock */ if (pdata->set_power) { ret = pdata->set_power(&client->dev, 1); if (ret) return ret; } ret = i2c_smbus_read_byte_data(client, DEVICE_ID_REG); if (pdata->set_power) pdata->set_power(&client->dev, 0); if (ret < 0) { dev_err(&client->dev, "%s: I2C read failed\n", __func__); return ret; } return ret == SR030PC30_ID ? 
0 : -ENODEV; } static int sr030pc30_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct sr030pc30_info *info; struct v4l2_subdev *sd; const struct sr030pc30_platform_data *pdata = client->dev.platform_data; int ret; if (!pdata) { dev_err(&client->dev, "No platform data!"); return -EIO; } ret = sr030pc30_detect(client); if (ret) return ret; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; sd = &info->sd; strcpy(sd->name, MODULE_NAME); info->pdata = client->dev.platform_data; v4l2_i2c_subdev_init(sd, client, &sr030pc30_ops); info->i2c_reg_page = -1; info->hflip = 1; info->auto_exp = 1; info->exposure = 30; return 0; } static int sr030pc30_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct sr030pc30_info *info = to_sr030pc30(sd); v4l2_device_unregister_subdev(sd); kfree(info); return 0; } static const struct i2c_device_id sr030pc30_id[] = { { MODULE_NAME, 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, sr030pc30_id); static struct i2c_driver sr030pc30_i2c_driver = { .driver = { .name = MODULE_NAME }, .probe = sr030pc30_probe, .remove = sr030pc30_remove, .id_table = sr030pc30_id, }; module_i2c_driver(sr030pc30_i2c_driver); MODULE_DESCRIPTION("Siliconfile SR030PC30 camera driver"); MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>"); MODULE_LICENSE("GPL");
gpl-2.0
tvall43/linux-stable
drivers/staging/crystalhd/crystalhd_hw.c
7236
64667
/*************************************************************************** * Copyright (c) 2005-2009, Broadcom Corporation. * * Name: crystalhd_hw . c * * Description: * BCM70010 Linux driver HW layer. * ********************************************************************** * This file is part of the crystalhd device driver. * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 2 of the License. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this driver. If not, see <http://www.gnu.org/licenses/>. **********************************************************************/ #include "crystalhd.h" #include <linux/pci.h> #include <linux/slab.h> #include <linux/delay.h> /* Functions internal to this file */ static void crystalhd_enable_uarts(struct crystalhd_adp *adp) { bc_dec_reg_wr(adp, UartSelectA, BSVS_UART_STREAM); bc_dec_reg_wr(adp, UartSelectB, BSVS_UART_DEC_OUTER); } static void crystalhd_start_dram(struct crystalhd_adp *adp) { bc_dec_reg_wr(adp, SDRAM_PARAM, ((40 / 5 - 1) << 0) | /* tras (40ns tras)/(5ns period) -1 ((15/5 - 1) << 4) | // trcd */ ((15 / 5 - 1) << 7) | /* trp */ ((10 / 5 - 1) << 10) | /* trrd */ ((15 / 5 + 1) << 12) | /* twr */ ((2 + 1) << 16) | /* twtr */ ((70 / 5 - 2) << 19) | /* trfc */ (0 << 23)); bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0); bc_dec_reg_wr(adp, SDRAM_EXT_MODE, 2); bc_dec_reg_wr(adp, SDRAM_MODE, 0x132); bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0); bc_dec_reg_wr(adp, SDRAM_REFRESH, 0); bc_dec_reg_wr(adp, SDRAM_REFRESH, 0); bc_dec_reg_wr(adp, SDRAM_MODE, 0x32); /* setting the refresh rate here */ bc_dec_reg_wr(adp, SDRAM_REF_PARAM, ((1 << 12) 
| 96)); } static bool crystalhd_bring_out_of_rst(struct crystalhd_adp *adp) { union link_misc_perst_deco_ctrl rst_deco_cntrl; union link_misc_perst_clk_ctrl rst_clk_cntrl; uint32_t temp; /* * Link clocks: MISC_PERST_CLOCK_CTRL Clear PLL power down bit, * delay to allow PLL to lock Clear alternate clock, stop clock bits */ rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL); rst_clk_cntrl.pll_pwr_dn = 0; crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg); msleep_interruptible(50); rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL); rst_clk_cntrl.stop_core_clk = 0; rst_clk_cntrl.sel_alt_clk = 0; crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg); msleep_interruptible(50); /* * Bus Arbiter Timeout: GISB_ARBITER_TIMER * Set internal bus arbiter timeout to 40us based on core clock speed * (63MHz * 40us = 0x9D8) */ crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x9D8); /* * Decoder clocks: MISC_PERST_DECODER_CTRL * Enable clocks while 7412 reset is asserted, delay * De-assert 7412 reset */ rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL); rst_deco_cntrl.stop_bcm_7412_clk = 0; rst_deco_cntrl.bcm7412_rst = 1; crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg); msleep_interruptible(10); rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL); rst_deco_cntrl.bcm7412_rst = 0; crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg); msleep_interruptible(50); /* Disable OTP_CONTENT_MISC to 0 to disable all secure modes */ crystalhd_reg_wr(adp, OTP_CONTENT_MISC, 0); /* Clear bit 29 of 0x404 */ temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION); temp &= ~BC_BIT(29); crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp); /* 2.5V regulator must be set to 2.6 volts (+6%) */ /* FIXME: jarod: what's the point of this reg read? 
*/ temp = crystalhd_reg_rd(adp, MISC_PERST_VREG_CTRL); crystalhd_reg_wr(adp, MISC_PERST_VREG_CTRL, 0xF3); return true; } static bool crystalhd_put_in_reset(struct crystalhd_adp *adp) { union link_misc_perst_deco_ctrl rst_deco_cntrl; union link_misc_perst_clk_ctrl rst_clk_cntrl; uint32_t temp; /* * Decoder clocks: MISC_PERST_DECODER_CTRL * Assert 7412 reset, delay * Assert 7412 stop clock */ rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL); rst_deco_cntrl.stop_bcm_7412_clk = 1; crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg); msleep_interruptible(50); /* Bus Arbiter Timeout: GISB_ARBITER_TIMER * Set internal bus arbiter timeout to 40us based on core clock speed * (6.75MHZ * 40us = 0x10E) */ crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x10E); /* Link clocks: MISC_PERST_CLOCK_CTRL * Stop core clk, delay * Set alternate clk, delay, set PLL power down */ rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL); rst_clk_cntrl.stop_core_clk = 1; rst_clk_cntrl.sel_alt_clk = 1; crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg); msleep_interruptible(50); rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL); rst_clk_cntrl.pll_pwr_dn = 1; crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg); /* * Read and restore the Transaction Configuration Register * after core reset */ temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION); /* * Link core soft reset: MISC3_RESET_CTRL * - Write BIT[0]=1 and read it back for core reset to take place */ crystalhd_reg_wr(adp, MISC3_RESET_CTRL, 1); rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC3_RESET_CTRL); msleep_interruptible(50); /* restore the transaction configuration register */ crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp); return true; } static void crystalhd_disable_interrupts(struct crystalhd_adp *adp) { union intr_mask_reg intr_mask; intr_mask.whole_reg = crystalhd_reg_rd(adp, 
INTR_INTR_MSK_STS_REG); intr_mask.mask_pcie_err = 1; intr_mask.mask_pcie_rbusmast_err = 1; intr_mask.mask_pcie_rgr_bridge = 1; intr_mask.mask_rx_done = 1; intr_mask.mask_rx_err = 1; intr_mask.mask_tx_done = 1; intr_mask.mask_tx_err = 1; crystalhd_reg_wr(adp, INTR_INTR_MSK_SET_REG, intr_mask.whole_reg); return; } static void crystalhd_enable_interrupts(struct crystalhd_adp *adp) { union intr_mask_reg intr_mask; intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG); intr_mask.mask_pcie_err = 1; intr_mask.mask_pcie_rbusmast_err = 1; intr_mask.mask_pcie_rgr_bridge = 1; intr_mask.mask_rx_done = 1; intr_mask.mask_rx_err = 1; intr_mask.mask_tx_done = 1; intr_mask.mask_tx_err = 1; crystalhd_reg_wr(adp, INTR_INTR_MSK_CLR_REG, intr_mask.whole_reg); return; } static void crystalhd_clear_errors(struct crystalhd_adp *adp) { uint32_t reg; /* FIXME: jarod: wouldn't we want to write a 0 to the reg? Or does the write clear the bits specified? */ reg = crystalhd_reg_rd(adp, MISC1_Y_RX_ERROR_STATUS); if (reg) crystalhd_reg_wr(adp, MISC1_Y_RX_ERROR_STATUS, reg); reg = crystalhd_reg_rd(adp, MISC1_UV_RX_ERROR_STATUS); if (reg) crystalhd_reg_wr(adp, MISC1_UV_RX_ERROR_STATUS, reg); reg = crystalhd_reg_rd(adp, MISC1_TX_DMA_ERROR_STATUS); if (reg) crystalhd_reg_wr(adp, MISC1_TX_DMA_ERROR_STATUS, reg); } static void crystalhd_clear_interrupts(struct crystalhd_adp *adp) { uint32_t intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS); if (intr_sts) { crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts); /* Write End Of Interrupt for PCIE */ crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1); } } static void crystalhd_soft_rst(struct crystalhd_adp *adp) { uint32_t val; /* Assert c011 soft reset*/ bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000001); msleep_interruptible(50); /* Release c011 soft reset*/ bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000000); /* Disable Stuffing..*/ val = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL); val |= BC_BIT(8); crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, val); } static 
/*
 * crystalhd_load_firmware_config - kick the AES engine to load the firmware
 * configuration located at BC_DRAM_FW_CFG_ADDR and poll (up to ~1s) for the
 * done bit in AES_STATUS.  Returns true on completion, false on timeout.
 */
static bool crystalhd_load_firmware_config(struct crystalhd_adp *adp)
{
    uint32_t i = 0, reg;

    crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (BC_DRAM_FW_CFG_ADDR >> 19));

    crystalhd_reg_wr(adp, AES_CMD, 0);
    crystalhd_reg_wr(adp, AES_CONFIG_INFO, (BC_DRAM_FW_CFG_ADDR & 0x7FFFF));
    crystalhd_reg_wr(adp, AES_CMD, 0x1);

    /* FIXME: jarod: I've seen this fail, and introducing extra delays helps... */
    for (i = 0; i < 100; ++i) {
        reg = crystalhd_reg_rd(adp, AES_STATUS);
        if (reg & 0x1)
            return true;
        msleep_interruptible(10);
    }

    return false;
}

/*
 * crystalhd_start_device - full device bring-up sequence.
 * Disables ASPM L1, brings the link out of reset, cycles the interrupt
 * masks, enables the RXDMA DWORD-count debug option and PCI global control
 * bits, then soft-resets the decoder and starts DRAM and UARTs.
 * Returns false only if the link fails to come out of reset.
 */
static bool crystalhd_start_device(struct crystalhd_adp *adp)
{
    uint32_t dbg_options, glb_cntrl = 0, reg_pwrmgmt = 0;

    BCMLOG(BCMLOG_INFO, "Starting BCM70012 Device\n");

    reg_pwrmgmt = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
    reg_pwrmgmt &= ~ASPM_L1_ENABLE;

    crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg_pwrmgmt);

    if (!crystalhd_bring_out_of_rst(adp)) {
        BCMLOG_ERR("Failed To Bring Link Out Of Reset\n");
        return false;
    }

    crystalhd_disable_interrupts(adp);

    crystalhd_clear_errors(adp);
    crystalhd_clear_interrupts(adp);

    crystalhd_enable_interrupts(adp);

    /* Enable the option for getting the total no. of DWORDS
     * that have been transferred by the RXDMA engine
     */
    dbg_options = crystalhd_reg_rd(adp, MISC1_DMA_DEBUG_OPTIONS_REG);
    dbg_options |= 0x10;
    crystalhd_reg_wr(adp, MISC1_DMA_DEBUG_OPTIONS_REG, dbg_options);

    /* Enable PCI Global Control options */
    glb_cntrl = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
    glb_cntrl |= 0x100;
    glb_cntrl |= 0x8000;
    crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, glb_cntrl);

    crystalhd_enable_interrupts(adp);

    crystalhd_soft_rst(adp);
    crystalhd_start_dram(adp);
    crystalhd_enable_uarts(adp);

    return true;
}

/*
 * crystalhd_stop_device - quiesce the device for power-down.
 * Masks and acks interrupts, puts the link in reset, then re-enables
 * ASPM L1 and the PCI clock request.  Always returns true (a failed
 * put-in-reset is only logged).
 */
static bool crystalhd_stop_device(struct crystalhd_adp *adp)
{
    uint32_t reg;

    BCMLOG(BCMLOG_INFO, "Stopping BCM70012 Device\n");
    /* Clear and disable interrupts */
    crystalhd_disable_interrupts(adp);
    crystalhd_clear_errors(adp);
    crystalhd_clear_interrupts(adp);

    if (!crystalhd_put_in_reset(adp))
        BCMLOG_ERR("Failed to Put Link To Reset State\n");

    reg = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
    reg |= ASPM_L1_ENABLE;
    crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg);

    /* Set PCI Clk Req */
    reg = crystalhd_reg_rd(adp, PCIE_CLK_REQ_REG);
    reg |= PCI_CLK_REQ_ENABLE;
    crystalhd_reg_wr(adp, PCIE_CLK_REQ_REG, reg);

    return true;
}

/*
 * crystalhd_hw_alloc_rx_pkt - pop one RX DMA packet from the free pool.
 * Pool is a singly-linked list guarded by hw->lock; the returned packet's
 * dio_req/pkt_tag/flags are reset.  Returns NULL if hw is NULL or the
 * pool is empty.
 */
static struct crystalhd_rx_dma_pkt *crystalhd_hw_alloc_rx_pkt(struct crystalhd_hw *hw)
{
    unsigned long flags = 0;
    struct crystalhd_rx_dma_pkt *temp = NULL;

    if (!hw)
        return NULL;

    spin_lock_irqsave(&hw->lock, flags);
    temp = hw->rx_pkt_pool_head;
    if (temp) {
        hw->rx_pkt_pool_head = hw->rx_pkt_pool_head->next;
        temp->dio_req = NULL;
        temp->pkt_tag = 0;
        temp->flags = 0;
    }
    spin_unlock_irqrestore(&hw->lock, flags);

    return temp;
}

/*
 * crystalhd_hw_free_rx_pkt - push an RX DMA packet back onto the free pool
 * (head insertion under hw->lock).  No-op on NULL arguments.
 */
static void crystalhd_hw_free_rx_pkt(struct crystalhd_hw *hw,
                     struct crystalhd_rx_dma_pkt *pkt)
{
    unsigned long flags = 0;

    if (!hw || !pkt)
        return;

    spin_lock_irqsave(&hw->lock, flags);
    pkt->next = hw->rx_pkt_pool_head;
    hw->rx_pkt_pool_head = pkt;
    spin_unlock_irqrestore(&hw->lock, flags);
}
/*
 * Call back from TX - IOQ deletion.
 *
 * This routine will release the TX DMA rings allocated
 * druing setup_dma rings interface.
 *
 * Memory is allocated per DMA ring basis. This is just
 * a place holder to be able to create the dio queues.
 */
static void crystalhd_tx_desc_rel_call_back(void *context, void *data)
{
}

/*
 * Rx Packet release callback..
 *
 * Release All user mapped capture buffers and Our DMA packets
 * back to our free pool. The actual cleanup of the DMA
 * ring descriptors happen during dma ring release.
 */
static void crystalhd_rx_pkt_rel_call_back(void *context, void *data)
{
    struct crystalhd_hw *hw = (struct crystalhd_hw *)context;
    struct crystalhd_rx_dma_pkt *pkt = (struct crystalhd_rx_dma_pkt *)data;

    if (!pkt || !hw) {
        BCMLOG_ERR("Invalid arg - %p %p\n", hw, pkt);
        return;
    }

    if (pkt->dio_req)
        crystalhd_unmap_dio(hw->adp, pkt->dio_req);
    else
        BCMLOG_ERR("Missing dio_req: 0x%x\n", pkt->pkt_tag);

    crystalhd_hw_free_rx_pkt(hw, pkt);
}

/* Delete a dioq and NULL its caller-held pointer (q is an lvalue). */
#define crystalhd_hw_delete_ioq(adp, q)        \
    if (q) {                \
        crystalhd_delete_dioq(adp, q);    \
        q = NULL;            \
    }

/*
 * crystalhd_hw_delete_ioqs - tear down all five TX/RX dio queues.
 */
static void crystalhd_hw_delete_ioqs(struct crystalhd_hw *hw)
{
    if (!hw)
        return;

    BCMLOG(BCMLOG_DBG, "Deleting IOQs\n");
    crystalhd_hw_delete_ioq(hw->adp, hw->tx_actq);
    crystalhd_hw_delete_ioq(hw->adp, hw->tx_freeq);
    crystalhd_hw_delete_ioq(hw->adp, hw->rx_actq);
    crystalhd_hw_delete_ioq(hw->adp, hw->rx_freeq);
    crystalhd_hw_delete_ioq(hw->adp, hw->rx_rdyq);
}

/* Create one dioq; jumps to the caller's hw_create_ioq_err label on failure. */
#define crystalhd_hw_create_ioq(sts, hw, q, cb)        \
do {                        \
    sts = crystalhd_create_dioq(hw->adp, &q, cb, hw);    \
    if (sts != BC_STS_SUCCESS)            \
        goto hw_create_ioq_err;            \
} while (0)

/*
 * Create IOQs..
 *
 * TX - Active & Free
 * RX - Active, Ready and Free.
 *
 * On any creation failure, all queues created so far are deleted and the
 * failing status is returned.
 */
static enum BC_STATUS crystalhd_hw_create_ioqs(struct crystalhd_hw *hw)
{
    enum BC_STATUS sts = BC_STS_SUCCESS;

    if (!hw) {
        BCMLOG_ERR("Invalid Arg!!\n");
        return BC_STS_INV_ARG;
    }

    crystalhd_hw_create_ioq(sts, hw, hw->tx_freeq,
                crystalhd_tx_desc_rel_call_back);
    crystalhd_hw_create_ioq(sts, hw, hw->tx_actq,
                crystalhd_tx_desc_rel_call_back);

    crystalhd_hw_create_ioq(sts, hw, hw->rx_freeq,
                crystalhd_rx_pkt_rel_call_back);
    crystalhd_hw_create_ioq(sts, hw, hw->rx_rdyq,
                crystalhd_rx_pkt_rel_call_back);
    crystalhd_hw_create_ioq(sts, hw, hw->rx_actq,
                crystalhd_rx_pkt_rel_call_back);

    return sts;

hw_create_ioq_err:
    crystalhd_hw_delete_ioqs(hw);

    return sts;
}

/*
 * crystalhd_code_in_full - check whether the decoder input FIFO lacks room
 * for needed_sz bytes.  Selects the CDB2 (ASF flag 0x02), User0 (188-byte
 * encrypted packets) or Cin register set, computes current fullness from the
 * read/write pointers (wrap-aware), and returns true if free space minus the
 * BC_INFIFO_THRESHOLD reserve cannot hold the request.
 */
static bool crystalhd_code_in_full(struct crystalhd_adp *adp, uint32_t needed_sz,
                   bool b_188_byte_pkts, uint8_t flags)
{
    uint32_t base, end, writep, readp;
    uint32_t cpbSize, cpbFullness, fifoSize;

    if (flags & 0x02) { /* ASF Bit is set */
        base = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Base);
        end = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2End);
        writep = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Wrptr);
        readp = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Rdptr);
    } else if (b_188_byte_pkts) { /*Encrypted 188 byte packets*/
        base = bc_dec_reg_rd(adp, REG_Dec_TsUser0Base);
        end = bc_dec_reg_rd(adp, REG_Dec_TsUser0End);
        writep = bc_dec_reg_rd(adp, REG_Dec_TsUser0Wrptr);
        readp = bc_dec_reg_rd(adp, REG_Dec_TsUser0Rdptr);
    } else {
        base = bc_dec_reg_rd(adp, REG_DecCA_RegCinBase);
        end = bc_dec_reg_rd(adp, REG_DecCA_RegCinEnd);
        writep = bc_dec_reg_rd(adp, REG_DecCA_RegCinWrPtr);
        readp = bc_dec_reg_rd(adp, REG_DecCA_RegCinRdPtr);
    }

    cpbSize = end - base;
    if (writep >= readp)
        cpbFullness = writep - readp;
    else
        cpbFullness = (end - base) - (readp - writep);

    fifoSize = cpbSize - cpbFullness;

    if (fifoSize < BC_INFIFO_THRESHOLD)
        return true;

    if (needed_sz > (fifoSize - BC_INFIFO_THRESHOLD))
        return true;

    return false;
}
/*
 * crystalhd_hw_tx_req_complete - finish one TX request identified by list_id.
 * Drops the power-lock refcount, pulls the request from the active queue,
 * invokes its completion callback (if any) with status cs, and returns the
 * request element to the TX free queue.
 */
static enum BC_STATUS crystalhd_hw_tx_req_complete(struct crystalhd_hw *hw,
                           uint32_t list_id, enum BC_STATUS cs)
{
    struct tx_dma_pkt *tx_req;

    if (!hw || !list_id) {
        BCMLOG_ERR("Invalid Arg..\n");
        return BC_STS_INV_ARG;
    }

    hw->pwr_lock--;

    tx_req = (struct tx_dma_pkt *)crystalhd_dioq_find_and_fetch(hw->tx_actq, list_id);
    if (!tx_req) {
        if (cs != BC_STS_IO_USER_ABORT)
            BCMLOG_ERR("Find and Fetch Did not find req\n");
        return BC_STS_NO_DATA;
    }

    if (tx_req->call_back) {
        tx_req->call_back(tx_req->dio_req, tx_req->cb_event, cs);
        tx_req->dio_req   = NULL;
        tx_req->cb_event  = NULL;
        tx_req->call_back = NULL;
    } else {
        BCMLOG(BCMLOG_DBG, "Missing Tx Callback - %X\n",
               tx_req->list_tag);
    }

    /* Now put back the tx_list back in FreeQ */
    tx_req->list_tag = 0;

    return crystalhd_dioq_add(hw->tx_freeq, tx_req, false, 0);
}

/*
 * crystalhd_tx_list0_handler - service TX list-0 error bits.
 * Returns true if any list-0 error was present.  A FIFO-full error is acked
 * without resetting the posting index; any other error resets
 * tx_list_post_index under hw->lock.  The handled bits are written back to
 * the error-status register to acknowledge them.
 */
static bool crystalhd_tx_list0_handler(struct crystalhd_hw *hw, uint32_t err_sts)
{
    uint32_t err_mask, tmp;
    unsigned long flags = 0;

    err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_MASK |
        MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_MASK |
        MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;

    if (!(err_sts & err_mask))
        return false;

    BCMLOG_ERR("Error on Tx-L0 %x\n", err_sts);

    tmp = err_mask;

    if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK)
        tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;

    if (tmp) {
        spin_lock_irqsave(&hw->lock, flags);
        /* reset list index.*/
        hw->tx_list_post_index = 0;
        spin_unlock_irqrestore(&hw->lock, flags);
    }

    tmp = err_sts & err_mask;
    crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);

    return true;
}

/*
 * crystalhd_tx_list1_handler - identical to the list-0 handler but for the
 * TX list-1 error bits.
 */
static bool crystalhd_tx_list1_handler(struct crystalhd_hw *hw, uint32_t err_sts)
{
    uint32_t err_mask, tmp;
    unsigned long flags = 0;

    err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_MASK |
        MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_MASK |
        MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;

    if (!(err_sts & err_mask))
        return false;

    BCMLOG_ERR("Error on Tx-L1 %x\n", err_sts);

    tmp = err_mask;

    if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK)
        tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;

    if (tmp) {
        spin_lock_irqsave(&hw->lock, flags);
        /* reset list index.*/
        hw->tx_list_post_index = 0;
        spin_unlock_irqrestore(&hw->lock, flags);
    }

    tmp = err_sts & err_mask;
    crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);

    return true;
}

/*
 * crystalhd_tx_isr - TX portion of the interrupt service routine.
 * Completes list 0/1 requests on their done bits, then, if either TX error
 * bit is set, reads the TX error status, lets the list handlers ack it, and
 * completes the affected requests with BC_STS_ERROR.
 */
static void crystalhd_tx_isr(struct crystalhd_hw *hw, uint32_t int_sts)
{
    uint32_t err_sts;

    if (int_sts & INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_MASK)
        crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
                         BC_STS_SUCCESS);

    if (int_sts & INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_MASK)
        crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
                         BC_STS_SUCCESS);

    if (!(int_sts & (INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_MASK |
            INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_MASK))) {
            /* No error mask set.. */
            return;
    }

    /* Handle Tx errors. */
    err_sts = crystalhd_reg_rd(hw->adp, MISC1_TX_DMA_ERROR_STATUS);

    if (crystalhd_tx_list0_handler(hw, err_sts))
        crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
                         BC_STS_ERROR);

    if (crystalhd_tx_list1_handler(hw, err_sts))
        crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
                         BC_STS_ERROR);

    hw->stats.tx_errors++;
}
"TDesc" : "RDesc"), ul_desc_index, p_dma_desc[ul_desc_index].buff_addr_high, p_dma_desc[ul_desc_index].buff_addr_low, p_dma_desc[ul_desc_index].next_desc_addr_high, p_dma_desc[ul_desc_index].next_desc_addr_low, p_dma_desc[ul_desc_index].xfer_size, p_dma_desc[ul_desc_index].intr_enable, p_dma_desc[ul_desc_index].last_rec_indicator); } } static enum BC_STATUS crystalhd_hw_fill_desc(struct crystalhd_dio_req *ioreq, struct dma_descriptor *desc, dma_addr_t desc_paddr_base, uint32_t sg_cnt, uint32_t sg_st_ix, uint32_t sg_st_off, uint32_t xfr_sz) { uint32_t count = 0, ix = 0, sg_ix = 0, len = 0, last_desc_ix = 0; dma_addr_t desc_phy_addr = desc_paddr_base; union addr_64 addr_temp; if (!ioreq || !desc || !desc_paddr_base || !xfr_sz || (!sg_cnt && !ioreq->uinfo.dir_tx)) { BCMLOG_ERR("Invalid Args\n"); return BC_STS_INV_ARG; } for (ix = 0; ix < sg_cnt; ix++) { /* Setup SGLE index. */ sg_ix = ix + sg_st_ix; /* Get SGLE length */ len = crystalhd_get_sgle_len(ioreq, sg_ix); if (len % 4) { BCMLOG_ERR(" len in sg %d %d %d\n", len, sg_ix, sg_cnt); return BC_STS_NOT_IMPL; } /* Setup DMA desc with Phy addr & Length at current index. */ addr_temp.full_addr = crystalhd_get_sgle_paddr(ioreq, sg_ix); if (sg_ix == sg_st_ix) { addr_temp.full_addr += sg_st_off; len -= sg_st_off; } memset(&desc[ix], 0, sizeof(desc[ix])); desc[ix].buff_addr_low = addr_temp.low_part; desc[ix].buff_addr_high = addr_temp.high_part; desc[ix].dma_dir = ioreq->uinfo.dir_tx; /* Chain DMA descriptor. */ addr_temp.full_addr = desc_phy_addr + sizeof(struct dma_descriptor); desc[ix].next_desc_addr_low = addr_temp.low_part; desc[ix].next_desc_addr_high = addr_temp.high_part; if ((count + len) > xfr_sz) len = xfr_sz - count; /* Debug.. 
*/ if ((!len) || (len > crystalhd_get_sgle_len(ioreq, sg_ix))) { BCMLOG_ERR("inv-len(%x) Ix(%d) count:%x xfr_sz:%x sg_cnt:%d\n", len, ix, count, xfr_sz, sg_cnt); return BC_STS_ERROR; } /* Length expects Multiple of 4 */ desc[ix].xfer_size = (len / 4); crystalhd_hw_dump_desc(desc, ix, 1); count += len; desc_phy_addr += sizeof(struct dma_descriptor); } last_desc_ix = ix - 1; if (ioreq->fb_size) { memset(&desc[ix], 0, sizeof(desc[ix])); addr_temp.full_addr = ioreq->fb_pa; desc[ix].buff_addr_low = addr_temp.low_part; desc[ix].buff_addr_high = addr_temp.high_part; desc[ix].dma_dir = ioreq->uinfo.dir_tx; desc[ix].xfer_size = 1; desc[ix].fill_bytes = 4 - ioreq->fb_size; count += ioreq->fb_size; last_desc_ix++; } /* setup last descriptor..*/ desc[last_desc_ix].last_rec_indicator = 1; desc[last_desc_ix].next_desc_addr_low = 0; desc[last_desc_ix].next_desc_addr_high = 0; desc[last_desc_ix].intr_enable = 1; crystalhd_hw_dump_desc(desc, last_desc_ix, 1); if (count != xfr_sz) { BCMLOG_ERR("internal error sz curr:%x exp:%x\n", count, xfr_sz); return BC_STS_ERROR; } return BC_STS_SUCCESS; } static enum BC_STATUS crystalhd_xlat_sgl_to_dma_desc(struct crystalhd_dio_req *ioreq, struct dma_desc_mem *pdesc_mem, uint32_t *uv_desc_index) { struct dma_descriptor *desc = NULL; dma_addr_t desc_paddr_base = 0; uint32_t sg_cnt = 0, sg_st_ix = 0, sg_st_off = 0; uint32_t xfr_sz = 0; enum BC_STATUS sts = BC_STS_SUCCESS; /* Check params.. 
/*
 * crystalhd_xlat_sgl_to_dma_desc - translate a request's scatter-gather list
 * into device DMA descriptors.  For RX requests with a UV plane
 * (uv_offset != 0) the list is built in two runs: the Y run first, then the
 * UV run starting at uv_sg_ix/uv_sg_off; *uv_desc_index receives the
 * descriptor index where the UV run begins.  TX requests must not carry a
 * UV offset.
 */
static enum BC_STATUS crystalhd_xlat_sgl_to_dma_desc(struct crystalhd_dio_req *ioreq,
                         struct dma_desc_mem *pdesc_mem,
                         uint32_t *uv_desc_index)
{
    struct dma_descriptor *desc = NULL;
    dma_addr_t desc_paddr_base = 0;
    uint32_t sg_cnt = 0, sg_st_ix = 0, sg_st_off = 0;
    uint32_t xfr_sz = 0;
    enum BC_STATUS sts = BC_STS_SUCCESS;

    /* Check params.. */
    if (!ioreq || !pdesc_mem || !uv_desc_index) {
        BCMLOG_ERR("Invalid Args\n");
        return BC_STS_INV_ARG;
    }

    if (!pdesc_mem->sz || !pdesc_mem->pdma_desc_start ||
        !ioreq->sg || (!ioreq->sg_cnt && !ioreq->uinfo.dir_tx)) {
        BCMLOG_ERR("Invalid Args\n");
        return BC_STS_INV_ARG;
    }

    if ((ioreq->uinfo.dir_tx) && (ioreq->uinfo.uv_offset)) {
        BCMLOG_ERR("UV offset for TX??\n");
        return BC_STS_INV_ARG;
    }

    desc = pdesc_mem->pdma_desc_start;
    desc_paddr_base = pdesc_mem->phy_addr;

    if (ioreq->uinfo.dir_tx || (ioreq->uinfo.uv_offset == 0)) {
        sg_cnt = ioreq->sg_cnt;
        xfr_sz = ioreq->uinfo.xfr_len;
    } else {
        sg_cnt = ioreq->uinfo.uv_sg_ix + 1;
        xfr_sz = ioreq->uinfo.uv_offset;
    }

    sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
                     sg_st_ix, sg_st_off, xfr_sz);

    if ((sts != BC_STS_SUCCESS) || !ioreq->uinfo.uv_offset)
        return sts;

    /* Prepare for UV mapping.. */
    desc = &pdesc_mem->pdma_desc_start[sg_cnt];
    desc_paddr_base = pdesc_mem->phy_addr +
              (sg_cnt * sizeof(struct dma_descriptor));

    /* Done with desc addr.. now update sg stuff.*/
    sg_cnt    = ioreq->sg_cnt - ioreq->uinfo.uv_sg_ix;
    xfr_sz    = ioreq->uinfo.xfr_len - ioreq->uinfo.uv_offset;
    sg_st_ix  = ioreq->uinfo.uv_sg_ix;
    sg_st_off = ioreq->uinfo.uv_sg_off;

    sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
                     sg_st_ix, sg_st_off, xfr_sz);
    if (sts != BC_STS_SUCCESS)
        return sts;

    *uv_desc_index = sg_st_ix;

    return sts;
}

/*
 * crystalhd_start_tx_dma_engine - set the DMA start bit on the TX engine
 * if it is not already running.
 */
static void crystalhd_start_tx_dma_engine(struct crystalhd_hw *hw)
{
    uint32_t dma_cntrl;

    dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);
    if (!(dma_cntrl & DMA_START_BIT)) {
        dma_cntrl |= DMA_START_BIT;
        crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS,
                 dma_cntrl);
    }

    return;
}

/* _CHECK_THIS_
 *
 * Verify if the Stop generates a completion interrupt or not.
 * if it does not generate an interrupt, then add polling here.
 */
static enum BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw)
{
    uint32_t dma_cntrl, cnt = 30;
    uint32_t l1 = 1, l2 = 1;
    unsigned long flags = 0;

    dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);

    BCMLOG(BCMLOG_DBG, "Stopping TX DMA Engine..\n");

    if (!(dma_cntrl & DMA_START_BIT)) {
        BCMLOG(BCMLOG_DBG, "Already Stopped\n");
        return BC_STS_SUCCESS;
    }

    crystalhd_disable_interrupts(hw->adp);

    /* Issue stop to HW */
    /* This bit when set gave problems. Please check*/
    dma_cntrl &= ~DMA_START_BIT;
    crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS, dma_cntrl);

    BCMLOG(BCMLOG_DBG, "Cleared the DMA Start bit\n");

    /* Poll for 3seconds (30 * 100ms) on both the lists..*/
    while ((l1 || l2) && cnt) {

        if (l1) {
            l1 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST0);
            l1 &= DMA_START_BIT;
        }

        if (l2) {
            l2 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST1);
            l2 &= DMA_START_BIT;
        }

        msleep_interruptible(100);

        cnt--;
    }

    if (!cnt) {
        BCMLOG_ERR("Failed to stop TX DMA.. l1 %d, l2 %d\n", l1, l2);
        crystalhd_enable_interrupts(hw->adp);
        return BC_STS_ERROR;
    }

    spin_lock_irqsave(&hw->lock, flags);
    hw->tx_list_post_index = 0;
    spin_unlock_irqrestore(&hw->lock, flags);
    BCMLOG(BCMLOG_DBG, "stopped TX DMA..\n");
    crystalhd_enable_interrupts(hw->adp);

    return BC_STS_SUCCESS;
}
/*
 * crystalhd_get_pib_avail_cnt - count picture-info blocks waiting in the
 * PIB delivery queue in device DRAM.
 *
 * The queue lives at hw->pib_del_Q_addr; the first two DWORDs are the read
 * and write offsets of a circular list.  Returns 0 when empty or when the
 * computed count is out of range.
 */
static uint32_t crystalhd_get_pib_avail_cnt(struct crystalhd_hw *hw)
{
    /*
     * Position of the PIB Entries can be found at
     * 0th and the 1st location of the Circular list.
     */
    uint32_t Q_addr;
    uint32_t pib_cnt, r_offset, w_offset;

    Q_addr = hw->pib_del_Q_addr;

    /* Get the Read Pointer */
    crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);

    /* Get the Write Pointer */
    crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);

    if (r_offset == w_offset)
        return 0;    /* Queue is empty */

    if (w_offset > r_offset)
        pib_cnt = w_offset - r_offset;
    else
        /* wrapped: entries from r_offset to MAX plus MIN to w_offset */
        pib_cnt = (w_offset + MAX_PIB_Q_DEPTH) -
              (r_offset + MIN_PIB_Q_DEPTH);

    if (pib_cnt > MAX_PIB_Q_DEPTH) {
        BCMLOG_ERR("Invalid PIB Count (%u)\n", pib_cnt);
        return 0;
    }

    return pib_cnt;
}

/*
 * crystalhd_get_addr_from_pib_Q - pop one PIB DRAM address from the delivery
 * queue and advance (and write back) the read pointer, wrapping from
 * MAX_PIB_Q_DEPTH to MIN_PIB_Q_DEPTH.  Returns 0 on empty queue or an
 * out-of-range read offset.
 */
static uint32_t crystalhd_get_addr_from_pib_Q(struct crystalhd_hw *hw)
{
    uint32_t Q_addr;
    uint32_t addr_entry, r_offset, w_offset;

    Q_addr = hw->pib_del_Q_addr;

    /* Get the Read Pointer 0Th Location is Read Pointer */
    crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);

    /* Get the Write Pointer 1st Location is Write pointer */
    crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);

    /* Queue is empty */
    if (r_offset == w_offset)
        return 0;

    if ((r_offset < MIN_PIB_Q_DEPTH) || (r_offset >= MAX_PIB_Q_DEPTH))
        return 0;

    /* Get the Actual Address of the PIB */
    crystalhd_mem_rd(hw->adp, Q_addr + (r_offset * sizeof(uint32_t)),
             1, &addr_entry);

    /* Increment the Read Pointer */
    r_offset++;

    if (MAX_PIB_Q_DEPTH == r_offset)
        r_offset = MIN_PIB_Q_DEPTH;

    /* Write back the read pointer to It's Location */
    crystalhd_mem_wr(hw->adp, Q_addr, 1, &r_offset);

    return addr_entry;
}

/*
 * crystalhd_rel_addr_to_pib_Q - push a PIB DRAM address onto the release
 * queue at hw->pib_rel_Q_addr and advance the write pointer (same wrap rule
 * as the delivery queue).  Returns false if the pointers are out of range
 * or the queue would overflow.
 */
static bool crystalhd_rel_addr_to_pib_Q(struct crystalhd_hw *hw, uint32_t addr_to_rel)
{
    uint32_t Q_addr;
    uint32_t r_offset, w_offset, n_offset;

    Q_addr = hw->pib_rel_Q_addr;

    /* Get the Read Pointer */
    crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);

    /* Get the Write Pointer */
    crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);

    if ((r_offset < MIN_PIB_Q_DEPTH) || (r_offset >= MAX_PIB_Q_DEPTH))
        return false;

    n_offset = w_offset + 1;

    if (MAX_PIB_Q_DEPTH == n_offset)
        n_offset = MIN_PIB_Q_DEPTH;

    if (r_offset == n_offset)
        return false; /* should never happen */

    /* Write the DRAM ADDR to the Queue at Next Offset */
    crystalhd_mem_wr(hw->adp, Q_addr + (w_offset * sizeof(uint32_t)),
             1, &addr_to_rel);

    /* Put the New value of the write pointer in Queue */
    crystalhd_mem_wr(hw->adp, Q_addr + sizeof(uint32_t), 1, &n_offset);

    return true;
}

/*
 * cpy_pib_to_app - copy a firmware (c011) picture info block into the
 * user-visible BC_PIC_INFO_BLOCK layout.
 * NOTE(review): two mappings look intentional but non-obvious --
 * sess_num is filled from ptsStcOffset and frame_rate from src_pib->resolution;
 * confirm against the firmware interface spec before "fixing".
 */
static void cpy_pib_to_app(struct c011_pib *src_pib, struct BC_PIC_INFO_BLOCK *dst_pib)
{
    if (!src_pib || !dst_pib) {
        BCMLOG_ERR("Invalid Arguments\n");
        return;
    }

    dst_pib->timeStamp           = 0;
    dst_pib->picture_number      = src_pib->ppb.picture_number;
    dst_pib->width               = src_pib->ppb.width;
    dst_pib->height              = src_pib->ppb.height;
    dst_pib->chroma_format       = src_pib->ppb.chroma_format;
    dst_pib->pulldown            = src_pib->ppb.pulldown;
    dst_pib->flags               = src_pib->ppb.flags;
    dst_pib->sess_num            = src_pib->ptsStcOffset;
    dst_pib->aspect_ratio        = src_pib->ppb.aspect_ratio;
    dst_pib->colour_primaries     = src_pib->ppb.colour_primaries;
    dst_pib->picture_meta_payload = src_pib->ppb.picture_meta_payload;
    dst_pib->frame_rate          = src_pib->resolution ;
    return;
}
/*
 * crystalhd_hw_proc_pib - drain the PIB delivery queue.
 * For each available PIB, reads it from device DRAM; on a format change
 * (bFormatChange) it grabs a free RX packet, copies the PIB into it and
 * queues it on rx_rdyq flagged COMP_FLAG_PIB_VALID|COMP_FLAG_FMT_CHANGE.
 * The PIB address is always released back to the firmware's release queue.
 */
static void crystalhd_hw_proc_pib(struct crystalhd_hw *hw)
{
    unsigned int cnt;
    struct c011_pib src_pib;
    uint32_t pib_addr, pib_cnt;
    struct BC_PIC_INFO_BLOCK *AppPib;
    struct crystalhd_rx_dma_pkt *rx_pkt = NULL;

    pib_cnt = crystalhd_get_pib_avail_cnt(hw);

    if (!pib_cnt)
        return;

    for (cnt = 0; cnt < pib_cnt; cnt++) {

        pib_addr = crystalhd_get_addr_from_pib_Q(hw);
        crystalhd_mem_rd(hw->adp, pib_addr, sizeof(struct c011_pib) / 4,
                 (uint32_t *)&src_pib);

        if (src_pib.bFormatChange) {
            rx_pkt = (struct crystalhd_rx_dma_pkt *)crystalhd_dioq_fetch(hw->rx_freeq);
            if (!rx_pkt)
                return;
            rx_pkt->flags = 0;
            rx_pkt->flags |= COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE;
            AppPib = &rx_pkt->pib;
            cpy_pib_to_app(&src_pib, AppPib);

            BCMLOG(BCMLOG_DBG,
                   "App PIB:%x %x %x %x %x %x %x %x %x %x\n",
                   rx_pkt->pib.picture_number,
                   rx_pkt->pib.aspect_ratio,
                   rx_pkt->pib.chroma_format,
                   rx_pkt->pib.colour_primaries,
                   rx_pkt->pib.frame_rate,
                   rx_pkt->pib.height,
                   rx_pkt->pib.height,
                   rx_pkt->pib.n_drop,
                   rx_pkt->pib.pulldown,
                   rx_pkt->pib.ycom);

            crystalhd_dioq_add(hw->rx_rdyq, (void *)rx_pkt, true, rx_pkt->pkt_tag);

        }

        crystalhd_rel_addr_to_pib_Q(hw, pib_addr);
    }
}

/*
 * crystalhd_start_rx_dma_engine - set the DMA start bit on both the Y and
 * UV RX engines if not already set.
 */
static void crystalhd_start_rx_dma_engine(struct crystalhd_hw *hw)
{
    uint32_t        dma_cntrl;

    dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
    if (!(dma_cntrl & DMA_START_BIT)) {
        dma_cntrl |= DMA_START_BIT;
        crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
    }

    dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
    if (!(dma_cntrl & DMA_START_BIT)) {
        dma_cntrl |= DMA_START_BIT;
        crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
    }

    return;
}

/*
 * crystalhd_stop_rx_dma_engine - clear the DMA start bit on both RX engines
 * and poll up to 3s (30 * 100ms) for all four list/plane combinations
 * (L0/L1 x Y/UV) to go idle, clearing the corresponding rx_waiting_*_intr
 * status bits as each stops.  Resets rx_list_post_index when done.
 */
static void crystalhd_stop_rx_dma_engine(struct crystalhd_hw *hw)
{
    uint32_t dma_cntrl = 0, count = 30;
    uint32_t l0y = 1, l0uv = 1, l1y = 1, l1uv = 1;

    dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
    if ((dma_cntrl & DMA_START_BIT)) {
        dma_cntrl &= ~DMA_START_BIT;
        crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
    }

    dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
    if ((dma_cntrl & DMA_START_BIT)) {
        dma_cntrl &= ~DMA_START_BIT;
        crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
    }

    /* Poll for 3seconds (30 * 100ms) on both the lists..*/
    while ((l0y || l0uv || l1y || l1uv) && count) {

        if (l0y) {
            l0y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0);
            l0y &= DMA_START_BIT;
            if (!l0y)
                hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
        }

        if (l1y) {
            l1y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1);
            l1y &= DMA_START_BIT;
            if (!l1y)
                hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
        }

        if (l0uv) {
            l0uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0);
            l0uv &= DMA_START_BIT;
            if (!l0uv)
                hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
        }

        if (l1uv) {
            l1uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1);
            l1uv &= DMA_START_BIT;
            if (!l1uv)
                hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
        }
        msleep_interruptible(100);
        count--;
    }

    hw->rx_list_post_index = 0;

    BCMLOG(BCMLOG_SSTEP, "Capture Stop: %d List0:Sts:%x List1:Sts:%x\n",
           count, hw->rx_list_sts[0], hw->rx_list_sts[1]);
}
/*
 * crystalhd_hw_prog_rxdma - post one RX capture packet to the hardware.
 * Picks the next free DMA list (0 or 1) under hw->rx_lock, tags the packet,
 * marks the list waiting for Y (and UV if present) completion, queues the
 * packet on rx_actq, starts the RX engines, and programs the Y/UV first
 * descriptor address registers (low address ORed with 0x01 -- hardware
 * "valid" convention, per the register writes here).  Returns BC_STS_BUSY
 * if the chosen list is not free.
 */
static enum BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw, struct crystalhd_rx_dma_pkt *rx_pkt)
{
    uint32_t y_low_addr_reg, y_high_addr_reg;
    uint32_t uv_low_addr_reg, uv_high_addr_reg;
    union addr_64 desc_addr;
    unsigned long flags;

    if (!hw || !rx_pkt) {
        BCMLOG_ERR("Invalid Arguments\n");
        return BC_STS_INV_ARG;
    }

    if (hw->rx_list_post_index >= DMA_ENGINE_CNT) {
        BCMLOG_ERR("List Out Of bounds %x\n", hw->rx_list_post_index);
        return BC_STS_INV_ARG;
    }

    spin_lock_irqsave(&hw->rx_lock, flags);
    /* FIXME: jarod: sts_free is an enum for 0, in crystalhd_hw.h... yuk... */
    if (sts_free != hw->rx_list_sts[hw->rx_list_post_index]) {
        spin_unlock_irqrestore(&hw->rx_lock, flags);
        return BC_STS_BUSY;
    }

    if (!hw->rx_list_post_index) {
        y_low_addr_reg   = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0;
        y_high_addr_reg  = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST0;
        uv_low_addr_reg  = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0;
        uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST0;
    } else {
        y_low_addr_reg   = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1;
        y_high_addr_reg  = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST1;
        uv_low_addr_reg  = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1;
        uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST1;
    }
    rx_pkt->pkt_tag = hw->rx_pkt_tag_seed + hw->rx_list_post_index;
    hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_y_intr;
    if (rx_pkt->uv_phy_addr)
        hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_uv_intr;
    hw->rx_list_post_index = (hw->rx_list_post_index + 1) % DMA_ENGINE_CNT;
    spin_unlock_irqrestore(&hw->rx_lock, flags);

    crystalhd_dioq_add(hw->rx_actq, (void *)rx_pkt, false, rx_pkt->pkt_tag);

    crystalhd_start_rx_dma_engine(hw);
    /* Program the Y descriptor */
    desc_addr.full_addr = rx_pkt->desc_mem.phy_addr;
    crystalhd_reg_wr(hw->adp, y_high_addr_reg, desc_addr.high_part);
    crystalhd_reg_wr(hw->adp, y_low_addr_reg, desc_addr.low_part | 0x01);

    if (rx_pkt->uv_phy_addr) {
        /* Program the UV descriptor */
        desc_addr.full_addr = rx_pkt->uv_phy_addr;
        crystalhd_reg_wr(hw->adp, uv_high_addr_reg, desc_addr.high_part);
        crystalhd_reg_wr(hw->adp, uv_low_addr_reg, desc_addr.low_part | 0x01);
    }

    return BC_STS_SUCCESS;
}

/*
 * crystalhd_hw_post_cap_buff - try to program a capture buffer; if both DMA
 * lists are busy, park the packet back on the free queue for a later retry.
 */
static enum BC_STATUS crystalhd_hw_post_cap_buff(struct crystalhd_hw *hw,
                         struct crystalhd_rx_dma_pkt *rx_pkt)
{
    enum BC_STATUS sts = crystalhd_hw_prog_rxdma(hw, rx_pkt);

    if (sts == BC_STS_BUSY)
        crystalhd_dioq_add(hw->rx_freeq, (void *)rx_pkt,
                   false, rx_pkt->pkt_tag);

    return sts;
}

/*
 * crystalhd_get_dnsz - read the Y and UV transferred byte counts for the
 * given DMA list (0 or 1).
 */
static void crystalhd_get_dnsz(struct crystalhd_hw *hw, uint32_t list_index,
                   uint32_t *y_dw_dnsz, uint32_t *uv_dw_dnsz)
{
    uint32_t y_dn_sz_reg, uv_dn_sz_reg;

    if (!list_index) {
        y_dn_sz_reg  = MISC1_Y_RX_LIST0_CUR_BYTE_CNT;
        uv_dn_sz_reg = MISC1_UV_RX_LIST0_CUR_BYTE_CNT;
    } else {
        y_dn_sz_reg  = MISC1_Y_RX_LIST1_CUR_BYTE_CNT;
        uv_dn_sz_reg = MISC1_UV_RX_LIST1_CUR_BYTE_CNT;
    }

    *y_dw_dnsz  = crystalhd_reg_rd(hw->adp, y_dn_sz_reg);
    *uv_dw_dnsz = crystalhd_reg_rd(hw->adp, uv_dn_sz_reg);
}

/*
 * This function should be called only after making sure that the two DMA
 * lists are free. This function does not check if DMA's are active, before
 * turning off the DMA.
 *
 * Clears any pending stop, force-stops both RX engines, resets the posting
 * index and re-enables ASPM L1 for power saving while paused.
 */
static void crystalhd_hw_finalize_pause(struct crystalhd_hw *hw)
{
    uint32_t dma_cntrl, aspm;

    hw->stop_pending = 0;

    dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
    if (dma_cntrl & DMA_START_BIT) {
        dma_cntrl &= ~DMA_START_BIT;
        crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
    }

    dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
    if (dma_cntrl & DMA_START_BIT) {
        dma_cntrl &= ~DMA_START_BIT;
        crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
    }
    hw->rx_list_post_index = 0;

    aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
    aspm |= ASPM_L1_ENABLE;
    /* NAREN BCMLOG(BCMLOG_INFO, "aspm on\n"); */
    crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
}
/*
 * crystalhd_rx_pkt_done - complete the RX packet posted on list_index.
 * On success, records the Y/UV done sizes in the request, flags the packet
 * COMP_FLAG_DATA_VALID and moves it to the ready queue; on error the DIO is
 * re-posted via crystalhd_hw_post_cap_buff().
 */
static enum BC_STATUS crystalhd_rx_pkt_done(struct crystalhd_hw *hw, uint32_t list_index,
                    enum BC_STATUS comp_sts)
{
    struct crystalhd_rx_dma_pkt *rx_pkt = NULL;
    uint32_t y_dw_dnsz, uv_dw_dnsz;
    enum BC_STATUS sts = BC_STS_SUCCESS;

    if (!hw || list_index >= DMA_ENGINE_CNT) {
        BCMLOG_ERR("Invalid Arguments\n");
        return BC_STS_INV_ARG;
    }

    rx_pkt = crystalhd_dioq_find_and_fetch(hw->rx_actq,
                           hw->rx_pkt_tag_seed + list_index);
    if (!rx_pkt) {
        BCMLOG_ERR("Act-Q:PostIx:%x L0Sts:%x L1Sts:%x current L:%x tag:%x comp:%x\n",
               hw->rx_list_post_index, hw->rx_list_sts[0],
               hw->rx_list_sts[1], list_index,
               hw->rx_pkt_tag_seed + list_index, comp_sts);
        return BC_STS_INV_ARG;
    }

    if (comp_sts == BC_STS_SUCCESS) {
        crystalhd_get_dnsz(hw, list_index, &y_dw_dnsz, &uv_dw_dnsz);
        rx_pkt->dio_req->uinfo.y_done_sz = y_dw_dnsz;
        rx_pkt->flags = COMP_FLAG_DATA_VALID;
        if (rx_pkt->uv_phy_addr)
            rx_pkt->dio_req->uinfo.uv_done_sz = uv_dw_dnsz;
        crystalhd_dioq_add(hw->rx_rdyq, rx_pkt,
                true, hw->rx_pkt_tag_seed + list_index);
        return sts;
    }

    /* Check if we can post this DIO again. */
    return crystalhd_hw_post_cap_buff(hw, rx_pkt);
}

/*
 * crystalhd_rx_list0_handler - update list-0 Y/UV state from interrupt and
 * error status.  Done interrupts and underrun errors clear the waiting
 * flags; FIFO-full or any remaining error sets the rx_*_error state (most
 * paths also reset rx_list_post_index).  Handled error bits are written
 * back to their status registers to ack them.  Returns true if the list-0
 * state changed.
 */
static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw, uint32_t int_sts,
                       uint32_t y_err_sts, uint32_t uv_err_sts)
{
    uint32_t tmp;
    enum list_sts tmp_lsts;

    if (!(y_err_sts & GET_Y0_ERR_MSK) && !(uv_err_sts & GET_UV0_ERR_MSK))
        return false;

    tmp_lsts = hw->rx_list_sts[0];

    /* Y0 - DMA */
    tmp = y_err_sts & GET_Y0_ERR_MSK;
    if (int_sts & INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK)
        hw->rx_list_sts[0] &= ~rx_waiting_y_intr;

    if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
        hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
        tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
    }

    if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
        hw->rx_list_sts[0] &= ~rx_y_mask;
        hw->rx_list_sts[0] |= rx_y_error;
        tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
    }

    if (tmp) {
        hw->rx_list_sts[0] &= ~rx_y_mask;
        hw->rx_list_sts[0] |= rx_y_error;
        hw->rx_list_post_index = 0;
    }

    /* UV0 - DMA */
    tmp = uv_err_sts & GET_UV0_ERR_MSK;
    if (int_sts & INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK)
        hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;

    if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
        hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
        tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
    }

    if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
        hw->rx_list_sts[0] &= ~rx_uv_mask;
        hw->rx_list_sts[0] |= rx_uv_error;
        tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
    }

    if (tmp) {
        hw->rx_list_sts[0] &= ~rx_uv_mask;
        hw->rx_list_sts[0] |= rx_uv_error;
        hw->rx_list_post_index = 0;
    }

    if (y_err_sts & GET_Y0_ERR_MSK) {
        tmp = y_err_sts & GET_Y0_ERR_MSK;
        crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
    }

    if (uv_err_sts & GET_UV0_ERR_MSK) {
        tmp = uv_err_sts & GET_UV0_ERR_MSK;
        crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
    }

    return (tmp_lsts != hw->rx_list_sts[0]);
}

/*
 * crystalhd_rx_list1_handler - list-1 counterpart of the list-0 handler.
 */
static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw, uint32_t int_sts,
                       uint32_t y_err_sts, uint32_t uv_err_sts)
{
    uint32_t tmp;
    enum list_sts tmp_lsts;

    if (!(y_err_sts & GET_Y1_ERR_MSK) && !(uv_err_sts & GET_UV1_ERR_MSK))
        return false;

    tmp_lsts = hw->rx_list_sts[1];

    /* Y1 - DMA */
    tmp = y_err_sts & GET_Y1_ERR_MSK;
    if (int_sts & INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK)
        hw->rx_list_sts[1] &= ~rx_waiting_y_intr;

    if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
        hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
        tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
    }

    if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
        /* Add retry-support..*/
        hw->rx_list_sts[1] &= ~rx_y_mask;
        hw->rx_list_sts[1] |= rx_y_error;
        tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
    }

    if (tmp) {
        hw->rx_list_sts[1] &= ~rx_y_mask;
        hw->rx_list_sts[1] |= rx_y_error;
        hw->rx_list_post_index = 0;
    }

    /* UV1 - DMA */
    tmp = uv_err_sts & GET_UV1_ERR_MSK;
    if (int_sts & INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK)
        hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;

    if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
        hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
        tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
    }

    if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
        /* Add retry-support*/
        hw->rx_list_sts[1] &= ~rx_uv_mask;
        hw->rx_list_sts[1] |= rx_uv_error;
        tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
    }

    if (tmp) {
        hw->rx_list_sts[1] &= ~rx_uv_mask;
        hw->rx_list_sts[1] |= rx_uv_error;
        hw->rx_list_post_index = 0;
    }

    if (y_err_sts & GET_Y1_ERR_MSK) {
        tmp = y_err_sts & GET_Y1_ERR_MSK;
        crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
    }

    if (uv_err_sts & GET_UV1_ERR_MSK) {
        tmp = uv_err_sts & GET_UV1_ERR_MSK;
        crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
    }

    return (tmp_lsts != hw->rx_list_sts[1]);
}
/*
 * crystalhd_rx_isr - RX portion of the interrupt service routine.
 * For each DMA list, runs its handler under hw->rx_lock, maps the resulting
 * list state to a completion status (free -> success, error states ->
 * BC_STS_ERROR, otherwise keep waiting), then completes packets outside the
 * lock.  When a list freed up: either finalize a pending pause (if both
 * lists are free) or restart capture.
 */
static void crystalhd_rx_isr(struct crystalhd_hw *hw, uint32_t intr_sts)
{
    unsigned long flags;
    uint32_t i, list_avail = 0;
    enum BC_STATUS comp_sts = BC_STS_NO_DATA;
    uint32_t y_err_sts, uv_err_sts, y_dn_sz = 0, uv_dn_sz = 0;
    bool ret = 0;

    if (!hw) {
        BCMLOG_ERR("Invalid Arguments\n");
        return;
    }

    if (!(intr_sts & GET_RX_INTR_MASK))
        return;

    y_err_sts = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_ERROR_STATUS);
    uv_err_sts = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_ERROR_STATUS);

    for (i = 0; i < DMA_ENGINE_CNT; i++) {
        /* Update States..*/
        spin_lock_irqsave(&hw->rx_lock, flags);
        if (i == 0)
            ret = crystalhd_rx_list0_handler(hw, intr_sts, y_err_sts, uv_err_sts);
        else
            ret = crystalhd_rx_list1_handler(hw, intr_sts, y_err_sts, uv_err_sts);
        if (ret) {
            switch (hw->rx_list_sts[i]) {
            case sts_free:
                comp_sts = BC_STS_SUCCESS;
                list_avail = 1;
                break;
            case rx_y_error:
            case rx_uv_error:
            case rx_sts_error:
                /* We got error on both or Y or uv. */
                hw->stats.rx_errors++;
                crystalhd_get_dnsz(hw, i, &y_dn_sz, &uv_dn_sz);
                /* FIXME: jarod: this is where my mini pci-e card is tripping up */
                BCMLOG(BCMLOG_DBG, "list_index:%x rx[%d] Y:%x "
                       "UV:%x Int:%x YDnSz:%x UVDnSz:%x\n",
                       i, hw->stats.rx_errors, y_err_sts,
                       uv_err_sts, intr_sts, y_dn_sz, uv_dn_sz);
                hw->rx_list_sts[i] = sts_free;
                comp_sts = BC_STS_ERROR;
                break;
            default:
                /* Wait for completion..*/
                comp_sts = BC_STS_NO_DATA;
                break;
            }
        }
        spin_unlock_irqrestore(&hw->rx_lock, flags);

        /* handle completion...*/
        if (comp_sts != BC_STS_NO_DATA) {
            crystalhd_rx_pkt_done(hw, i, comp_sts);
            comp_sts = BC_STS_NO_DATA;
        }
    }

    if (list_avail) {
        if (hw->stop_pending) {
            if ((hw->rx_list_sts[0] == sts_free) &&
                (hw->rx_list_sts[1] == sts_free))
                crystalhd_hw_finalize_pause(hw);
        } else {
            crystalhd_hw_start_capture(hw);
        }
    }
}

/*
 * crystalhd_fw_cmd_post_proc - side effects after a firmware command
 * succeeds.  START_VIDEO caches the PIB delivery/release queue addresses
 * from the response; INIT triggers the firmware-config load (failure maps
 * to BC_STS_FW_AUTH_FAILED).
 */
static enum BC_STATUS crystalhd_fw_cmd_post_proc(struct crystalhd_hw *hw,
                         struct BC_FW_CMD *fw_cmd)
{
    enum BC_STATUS sts = BC_STS_SUCCESS;
    struct dec_rsp_channel_start_video *st_rsp = NULL;

    switch (fw_cmd->cmd[0]) {
    case eCMD_C011_DEC_CHAN_START_VIDEO:
        st_rsp = (struct dec_rsp_channel_start_video *)fw_cmd->rsp;
        hw->pib_del_Q_addr = st_rsp->picInfoDeliveryQ;
        hw->pib_rel_Q_addr = st_rsp->picInfoReleaseQ;
        BCMLOG(BCMLOG_DBG, "DelQAddr:%x RelQAddr:%x\n",
               hw->pib_del_Q_addr, hw->pib_rel_Q_addr);
        break;
    case eCMD_C011_INIT:
        if (!(crystalhd_load_firmware_config(hw->adp))) {
            BCMLOG_ERR("Invalid Params.\n");
            sts = BC_STS_FW_AUTH_FAILED;
        }
        break;
    default:
        break;
    }
    return sts;
}

/*
 * crystalhd_put_ddr2sleep - power down the decoder's DDR and clocks.
 * Pulses the 7412 reset, precharges/idles the DDR (dropping CKE), resets
 * the audio block, and powers down every PLL (bit 15 of each PLL control
 * register).  Always returns BC_STS_SUCCESS.
 */
static enum BC_STATUS crystalhd_put_ddr2sleep(struct crystalhd_hw *hw)
{
    uint32_t reg;
    union link_misc_perst_decoder_ctrl rst_cntrl_reg;

    /* Pulse reset pin of 7412 (MISC_PERST_DECODER_CTRL) */
    rst_cntrl_reg.whole_reg = crystalhd_reg_rd(hw->adp, MISC_PERST_DECODER_CTRL);
    rst_cntrl_reg.bcm_7412_rst = 1;
    crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);
    msleep_interruptible(50);

    rst_cntrl_reg.bcm_7412_rst = 0;
    crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);

    /* Close all banks, put DDR in idle */
    bc_dec_reg_wr(hw->adp, SDRAM_PRECHARGE, 0);

    /* Set bit 25 (drop CKE pin of DDR) */
    reg = bc_dec_reg_rd(hw->adp, SDRAM_PARAM);
    reg |= 0x02000000;
    bc_dec_reg_wr(hw->adp, SDRAM_PARAM, reg);

    /* Reset the audio block */
    bc_dec_reg_wr(hw->adp, AUD_DSP_MISC_SOFT_RESET, 0x1);

    /* Power down Raptor PLL */
    reg = bc_dec_reg_rd(hw->adp, DecHt_PllCCtl);
    reg |= 0x00008000;
    bc_dec_reg_wr(hw->adp, DecHt_PllCCtl, reg);

    /* Power down all Audio PLL */
    bc_dec_reg_wr(hw->adp, AIO_MISC_PLL_RESET, 0x1);

    /* Power down video clock (75MHz) */
    reg = bc_dec_reg_rd(hw->adp, DecHt_PllECtl);
    reg |= 0x00008000;
    bc_dec_reg_wr(hw->adp, DecHt_PllECtl, reg);

    /* Power down video clock (75MHz) */
    reg = bc_dec_reg_rd(hw->adp, DecHt_PllDCtl);
    reg |= 0x00008000;
    bc_dec_reg_wr(hw->adp, DecHt_PllDCtl, reg);

    /* Power down core clock (200MHz) */
    reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
    reg |= 0x00008000;
    bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);

    /* Power down core clock (200MHz) */
    reg = bc_dec_reg_rd(hw->adp, DecHt_PllBCtl);
    reg |= 0x00008000;
    bc_dec_reg_wr(hw->adp, DecHt_PllBCtl, reg);

    return BC_STS_SUCCESS;
}

/************************************************
**
*************************************************/
enum BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer, uint32_t sz) { uint32_t reg_data, cnt, *temp_buff; uint32_t fw_sig_len = 36; uint32_t dram_offset = BC_FWIMG_ST_ADDR, sig_reg; if (!adp || !buffer || !sz) { BCMLOG_ERR("Invalid Params.\n"); return BC_STS_INV_ARG; } reg_data = crystalhd_reg_rd(adp, OTP_CMD); if (!(reg_data & 0x02)) { BCMLOG_ERR("Invalid hw config.. otp not programmed\n"); return BC_STS_ERROR; } reg_data = 0; crystalhd_reg_wr(adp, DCI_CMD, 0); reg_data |= BC_BIT(0); crystalhd_reg_wr(adp, DCI_CMD, reg_data); reg_data = 0; cnt = 1000; msleep_interruptible(10); while (reg_data != BC_BIT(4)) { reg_data = crystalhd_reg_rd(adp, DCI_STATUS); reg_data &= BC_BIT(4); if (--cnt == 0) { BCMLOG_ERR("Firmware Download RDY Timeout.\n"); return BC_STS_TIMEOUT; } } msleep_interruptible(10); /* Load the FW to the FW_ADDR field in the DCI_FIRMWARE_ADDR */ crystalhd_reg_wr(adp, DCI_FIRMWARE_ADDR, dram_offset); temp_buff = (uint32_t *)buffer; for (cnt = 0; cnt < (sz - fw_sig_len); cnt += 4) { crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (dram_offset >> 19)); crystalhd_reg_wr(adp, DCI_FIRMWARE_DATA, *temp_buff); dram_offset += 4; temp_buff++; } msleep_interruptible(10); temp_buff++; sig_reg = (uint32_t)DCI_SIGNATURE_DATA_7; for (cnt = 0; cnt < 8; cnt++) { uint32_t swapped_data = *temp_buff; swapped_data = bswap_32_1(swapped_data); crystalhd_reg_wr(adp, sig_reg, swapped_data); sig_reg -= 4; temp_buff++; } msleep_interruptible(10); reg_data = 0; reg_data |= BC_BIT(1); crystalhd_reg_wr(adp, DCI_CMD, reg_data); msleep_interruptible(10); reg_data = 0; reg_data = crystalhd_reg_rd(adp, DCI_STATUS); if ((reg_data & BC_BIT(9)) == BC_BIT(9)) { cnt = 1000; while ((reg_data & BC_BIT(0)) != BC_BIT(0)) { reg_data = crystalhd_reg_rd(adp, DCI_STATUS); reg_data &= BC_BIT(0); if (!(--cnt)) break; msleep_interruptible(10); } reg_data = 0; reg_data = crystalhd_reg_rd(adp, DCI_CMD); reg_data |= BC_BIT(4); crystalhd_reg_wr(adp, DCI_CMD, reg_data); } else { 
BCMLOG_ERR("F/w Signature mismatch\n"); return BC_STS_FW_AUTH_FAILED; } BCMLOG(BCMLOG_INFO, "Firmware Downloaded Successfully\n"); return BC_STS_SUCCESS; } enum BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw, struct BC_FW_CMD *fw_cmd) { uint32_t cnt = 0, cmd_res_addr; uint32_t *cmd_buff, *res_buff; wait_queue_head_t fw_cmd_event; int rc = 0; enum BC_STATUS sts; crystalhd_create_event(&fw_cmd_event); if (!hw || !fw_cmd) { BCMLOG_ERR("Invalid Arguments\n"); return BC_STS_INV_ARG; } cmd_buff = fw_cmd->cmd; res_buff = fw_cmd->rsp; if (!cmd_buff || !res_buff) { BCMLOG_ERR("Invalid Parameters for F/W Command\n"); return BC_STS_INV_ARG; } hw->pwr_lock++; hw->fwcmd_evt_sts = 0; hw->pfw_cmd_event = &fw_cmd_event; /*Write the command to the memory*/ crystalhd_mem_wr(hw->adp, TS_Host2CpuSnd, FW_CMD_BUFF_SZ, cmd_buff); /*Memory Read for memory arbitrator flush*/ crystalhd_mem_rd(hw->adp, TS_Host2CpuSnd, 1, &cnt); /* Write the command address to mailbox */ bc_dec_reg_wr(hw->adp, Hst2CpuMbx1, TS_Host2CpuSnd); msleep_interruptible(50); crystalhd_wait_on_event(&fw_cmd_event, hw->fwcmd_evt_sts, 20000, rc, 0); if (!rc) { sts = BC_STS_SUCCESS; } else if (rc == -EBUSY) { BCMLOG_ERR("Firmware command T/O\n"); sts = BC_STS_TIMEOUT; } else if (rc == -EINTR) { BCMLOG(BCMLOG_DBG, "FwCmd Wait Signal int.\n"); sts = BC_STS_IO_USER_ABORT; } else { BCMLOG_ERR("FwCmd IO Error.\n"); sts = BC_STS_IO_ERROR; } if (sts != BC_STS_SUCCESS) { BCMLOG_ERR("FwCmd Failed.\n"); hw->pwr_lock--; return sts; } /*Get the Response Address*/ cmd_res_addr = bc_dec_reg_rd(hw->adp, Cpu2HstMbx1); /*Read the Response*/ crystalhd_mem_rd(hw->adp, cmd_res_addr, FW_CMD_BUFF_SZ, res_buff); hw->pwr_lock--; if (res_buff[2] != C011_RET_SUCCESS) { BCMLOG_ERR("res_buff[2] != C011_RET_SUCCESS\n"); return BC_STS_FW_CMD_ERR; } sts = crystalhd_fw_cmd_post_proc(hw, fw_cmd); if (sts != BC_STS_SUCCESS) BCMLOG_ERR("crystalhd_fw_cmd_post_proc Failed.\n"); return sts; } bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct 
crystalhd_hw *hw) { uint32_t intr_sts = 0; uint32_t deco_intr = 0; bool rc = 0; if (!adp || !hw->dev_started) return rc; hw->stats.num_interrupts++; hw->pwr_lock++; deco_intr = bc_dec_reg_rd(adp, Stream2Host_Intr_Sts); intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS); if (intr_sts) { /* let system know we processed interrupt..*/ rc = 1; hw->stats.dev_interrupts++; } if (deco_intr && (deco_intr != 0xdeaddead)) { if (deco_intr & 0x80000000) { /*Set the Event and the status flag*/ if (hw->pfw_cmd_event) { hw->fwcmd_evt_sts = 1; crystalhd_set_event(hw->pfw_cmd_event); } } if (deco_intr & BC_BIT(1)) crystalhd_hw_proc_pib(hw); bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, deco_intr); /* FIXME: jarod: No udelay? might this be the real reason mini pci-e cards were stalling out? */ bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, 0); rc = 1; } /* Rx interrupts */ crystalhd_rx_isr(hw, intr_sts); /* Tx interrupts*/ crystalhd_tx_isr(hw, intr_sts); /* Clear interrupts */ if (rc) { if (intr_sts) crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts); crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1); } hw->pwr_lock--; return rc; } enum BC_STATUS crystalhd_hw_open(struct crystalhd_hw *hw, struct crystalhd_adp *adp) { if (!hw || !adp) { BCMLOG_ERR("Invalid Arguments\n"); return BC_STS_INV_ARG; } if (hw->dev_started) return BC_STS_SUCCESS; memset(hw, 0, sizeof(struct crystalhd_hw)); hw->adp = adp; spin_lock_init(&hw->lock); spin_lock_init(&hw->rx_lock); /* FIXME: jarod: what are these magic numbers?!? 
*/ hw->tx_ioq_tag_seed = 0x70023070; hw->rx_pkt_tag_seed = 0x70029070; hw->stop_pending = 0; crystalhd_start_device(hw->adp); hw->dev_started = true; /* set initial core clock */ hw->core_clock_mhz = CLOCK_PRESET; hw->prev_n = 0; hw->pwr_lock = 0; crystalhd_hw_set_core_clock(hw); return BC_STS_SUCCESS; } enum BC_STATUS crystalhd_hw_close(struct crystalhd_hw *hw) { if (!hw) { BCMLOG_ERR("Invalid Arguments\n"); return BC_STS_INV_ARG; } if (!hw->dev_started) return BC_STS_SUCCESS; /* Stop and DDR sleep will happen in here */ crystalhd_hw_suspend(hw); hw->dev_started = false; return BC_STS_SUCCESS; } enum BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw) { unsigned int i; void *mem; size_t mem_len; dma_addr_t phy_addr; enum BC_STATUS sts = BC_STS_SUCCESS; struct crystalhd_rx_dma_pkt *rpkt; if (!hw || !hw->adp) { BCMLOG_ERR("Invalid Arguments\n"); return BC_STS_INV_ARG; } sts = crystalhd_hw_create_ioqs(hw); if (sts != BC_STS_SUCCESS) { BCMLOG_ERR("Failed to create IOQs..\n"); return sts; } mem_len = BC_LINK_MAX_SGLS * sizeof(struct dma_descriptor); for (i = 0; i < BC_TX_LIST_CNT; i++) { mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr); if (mem) { memset(mem, 0, mem_len); } else { BCMLOG_ERR("Insufficient Memory For TX\n"); crystalhd_hw_free_dma_rings(hw); return BC_STS_INSUFF_RES; } /* rx_pkt_pool -- static memory allocation */ hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = mem; hw->tx_pkt_pool[i].desc_mem.phy_addr = phy_addr; hw->tx_pkt_pool[i].desc_mem.sz = BC_LINK_MAX_SGLS * sizeof(struct dma_descriptor); hw->tx_pkt_pool[i].list_tag = 0; /* Add TX dma requests to Free Queue..*/ sts = crystalhd_dioq_add(hw->tx_freeq, &hw->tx_pkt_pool[i], false, 0); if (sts != BC_STS_SUCCESS) { crystalhd_hw_free_dma_rings(hw); return sts; } } for (i = 0; i < BC_RX_LIST_CNT; i++) { rpkt = kzalloc(sizeof(*rpkt), GFP_KERNEL); if (!rpkt) { BCMLOG_ERR("Insufficient Memory For RX\n"); crystalhd_hw_free_dma_rings(hw); return BC_STS_INSUFF_RES; } mem = 
bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr); if (mem) { memset(mem, 0, mem_len); } else { BCMLOG_ERR("Insufficient Memory For RX\n"); crystalhd_hw_free_dma_rings(hw); kfree(rpkt); return BC_STS_INSUFF_RES; } rpkt->desc_mem.pdma_desc_start = mem; rpkt->desc_mem.phy_addr = phy_addr; rpkt->desc_mem.sz = BC_LINK_MAX_SGLS * sizeof(struct dma_descriptor); rpkt->pkt_tag = hw->rx_pkt_tag_seed + i; crystalhd_hw_free_rx_pkt(hw, rpkt); } return BC_STS_SUCCESS; } enum BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *hw) { unsigned int i; struct crystalhd_rx_dma_pkt *rpkt = NULL; if (!hw || !hw->adp) { BCMLOG_ERR("Invalid Arguments\n"); return BC_STS_INV_ARG; } /* Delete all IOQs.. */ crystalhd_hw_delete_ioqs(hw); for (i = 0; i < BC_TX_LIST_CNT; i++) { if (hw->tx_pkt_pool[i].desc_mem.pdma_desc_start) { bc_kern_dma_free(hw->adp, hw->tx_pkt_pool[i].desc_mem.sz, hw->tx_pkt_pool[i].desc_mem.pdma_desc_start, hw->tx_pkt_pool[i].desc_mem.phy_addr); hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = NULL; } } BCMLOG(BCMLOG_DBG, "Releasing RX Pkt pool\n"); do { rpkt = crystalhd_hw_alloc_rx_pkt(hw); if (!rpkt) break; bc_kern_dma_free(hw->adp, rpkt->desc_mem.sz, rpkt->desc_mem.pdma_desc_start, rpkt->desc_mem.phy_addr); kfree(rpkt); } while (rpkt); return BC_STS_SUCCESS; } enum BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, struct crystalhd_dio_req *ioreq, hw_comp_callback call_back, wait_queue_head_t *cb_event, uint32_t *list_id, uint8_t data_flags) { struct tx_dma_pkt *tx_dma_packet = NULL; uint32_t first_desc_u_addr, first_desc_l_addr; uint32_t low_addr, high_addr; union addr_64 desc_addr; enum BC_STATUS sts, add_sts; uint32_t dummy_index = 0; unsigned long flags; bool rc; if (!hw || !ioreq || !call_back || !cb_event || !list_id) { BCMLOG_ERR("Invalid Arguments\n"); return BC_STS_INV_ARG; } /* * Since we hit code in busy condition very frequently, * we will check the code in status first before * checking the availability of free elem. 
* * This will avoid the Q fetch/add in normal condition. */ rc = crystalhd_code_in_full(hw->adp, ioreq->uinfo.xfr_len, false, data_flags); if (rc) { hw->stats.cin_busy++; return BC_STS_BUSY; } /* Get a list from TxFreeQ */ tx_dma_packet = (struct tx_dma_pkt *)crystalhd_dioq_fetch(hw->tx_freeq); if (!tx_dma_packet) { BCMLOG_ERR("No empty elements..\n"); return BC_STS_ERR_USAGE; } sts = crystalhd_xlat_sgl_to_dma_desc(ioreq, &tx_dma_packet->desc_mem, &dummy_index); if (sts != BC_STS_SUCCESS) { add_sts = crystalhd_dioq_add(hw->tx_freeq, tx_dma_packet, false, 0); if (add_sts != BC_STS_SUCCESS) BCMLOG_ERR("double fault..\n"); return sts; } hw->pwr_lock++; desc_addr.full_addr = tx_dma_packet->desc_mem.phy_addr; low_addr = desc_addr.low_part; high_addr = desc_addr.high_part; tx_dma_packet->call_back = call_back; tx_dma_packet->cb_event = cb_event; tx_dma_packet->dio_req = ioreq; spin_lock_irqsave(&hw->lock, flags); if (hw->tx_list_post_index == 0) { first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST0; first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST0; } else { first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST1; first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST1; } *list_id = tx_dma_packet->list_tag = hw->tx_ioq_tag_seed + hw->tx_list_post_index; hw->tx_list_post_index = (hw->tx_list_post_index + 1) % DMA_ENGINE_CNT; spin_unlock_irqrestore(&hw->lock, flags); /* Insert in Active Q..*/ crystalhd_dioq_add(hw->tx_actq, tx_dma_packet, false, tx_dma_packet->list_tag); /* * Interrupt will come as soon as you write * the valid bit. So be ready for that. All * the initialization should happen before that. */ crystalhd_start_tx_dma_engine(hw); crystalhd_reg_wr(hw->adp, first_desc_u_addr, desc_addr.high_part); crystalhd_reg_wr(hw->adp, first_desc_l_addr, desc_addr.low_part | 0x01); /* Be sure we set the valid bit ^^^^ */ return BC_STS_SUCCESS; } /* * This is a force cancel and we are racing with ISR. * * Will try to remove the req from ActQ before ISR gets it. 
* If ISR gets it first then the completion happens in the * normal path and we will return _STS_NO_DATA from here. * * FIX_ME: Not Tested the actual condition.. */ enum BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id) { if (!hw || !list_id) { BCMLOG_ERR("Invalid Arguments\n"); return BC_STS_INV_ARG; } crystalhd_stop_tx_dma_engine(hw); crystalhd_hw_tx_req_complete(hw, list_id, BC_STS_IO_USER_ABORT); return BC_STS_SUCCESS; } enum BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw, struct crystalhd_dio_req *ioreq, bool en_post) { struct crystalhd_rx_dma_pkt *rpkt; uint32_t tag, uv_desc_ix = 0; enum BC_STATUS sts; if (!hw || !ioreq) { BCMLOG_ERR("Invalid Arguments\n"); return BC_STS_INV_ARG; } rpkt = crystalhd_hw_alloc_rx_pkt(hw); if (!rpkt) { BCMLOG_ERR("Insufficient resources\n"); return BC_STS_INSUFF_RES; } rpkt->dio_req = ioreq; tag = rpkt->pkt_tag; sts = crystalhd_xlat_sgl_to_dma_desc(ioreq, &rpkt->desc_mem, &uv_desc_ix); if (sts != BC_STS_SUCCESS) return sts; rpkt->uv_phy_addr = 0; /* Store the address of UV in the rx packet for post*/ if (uv_desc_ix) rpkt->uv_phy_addr = rpkt->desc_mem.phy_addr + (sizeof(struct dma_descriptor) * (uv_desc_ix + 1)); if (en_post) sts = crystalhd_hw_post_cap_buff(hw, rpkt); else sts = crystalhd_dioq_add(hw->rx_freeq, rpkt, false, tag); return sts; } enum BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw, struct BC_PIC_INFO_BLOCK *pib, struct crystalhd_dio_req **ioreq) { struct crystalhd_rx_dma_pkt *rpkt; uint32_t timeout = BC_PROC_OUTPUT_TIMEOUT / 1000; uint32_t sig_pending = 0; if (!hw || !ioreq || !pib) { BCMLOG_ERR("Invalid Arguments\n"); return BC_STS_INV_ARG; } rpkt = crystalhd_dioq_fetch_wait(hw->rx_rdyq, timeout, &sig_pending); if (!rpkt) { if (sig_pending) { BCMLOG(BCMLOG_INFO, "wait on frame time out %d\n", sig_pending); return BC_STS_IO_USER_ABORT; } else { return BC_STS_TIMEOUT; } } rpkt->dio_req->uinfo.comp_flags = rpkt->flags; if (rpkt->flags & COMP_FLAG_PIB_VALID) 
memcpy(pib, &rpkt->pib, sizeof(*pib)); *ioreq = rpkt->dio_req; crystalhd_hw_free_rx_pkt(hw, rpkt); return BC_STS_SUCCESS; } enum BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw) { struct crystalhd_rx_dma_pkt *rx_pkt; enum BC_STATUS sts; uint32_t i; if (!hw) { BCMLOG_ERR("Invalid Arguments\n"); return BC_STS_INV_ARG; } /* This is start of capture.. Post to both the lists.. */ for (i = 0; i < DMA_ENGINE_CNT; i++) { rx_pkt = crystalhd_dioq_fetch(hw->rx_freeq); if (!rx_pkt) return BC_STS_NO_DATA; sts = crystalhd_hw_post_cap_buff(hw, rx_pkt); if (BC_STS_SUCCESS != sts) break; } return BC_STS_SUCCESS; } enum BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw) { void *temp = NULL; if (!hw) { BCMLOG_ERR("Invalid Arguments\n"); return BC_STS_INV_ARG; } crystalhd_stop_rx_dma_engine(hw); do { temp = crystalhd_dioq_fetch(hw->rx_freeq); if (temp) crystalhd_rx_pkt_rel_call_back(hw, temp); } while (temp); return BC_STS_SUCCESS; } enum BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw) { hw->stats.pause_cnt++; hw->stop_pending = 1; if ((hw->rx_list_sts[0] == sts_free) && (hw->rx_list_sts[1] == sts_free)) crystalhd_hw_finalize_pause(hw); return BC_STS_SUCCESS; } enum BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw) { enum BC_STATUS sts; uint32_t aspm; hw->stop_pending = 0; aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL); aspm &= ~ASPM_L1_ENABLE; /* NAREN BCMLOG(BCMLOG_INFO, "aspm off\n"); */ crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm); sts = crystalhd_hw_start_capture(hw); return sts; } enum BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw) { enum BC_STATUS sts; if (!hw) { BCMLOG_ERR("Invalid Arguments\n"); return BC_STS_INV_ARG; } sts = crystalhd_put_ddr2sleep(hw); if (sts != BC_STS_SUCCESS) { BCMLOG_ERR("Failed to Put DDR To Sleep!!\n"); return BC_STS_ERROR; } if (!crystalhd_stop_device(hw->adp)) { BCMLOG_ERR("Failed to Stop Device!!\n"); return BC_STS_ERROR; } return BC_STS_SUCCESS; } void 
crystalhd_hw_stats(struct crystalhd_hw *hw, struct crystalhd_hw_stats *stats) { if (!hw) { BCMLOG_ERR("Invalid Arguments\n"); return; } /* if called w/NULL stats, its a req to zero out the stats */ if (!stats) { memset(&hw->stats, 0, sizeof(hw->stats)); return; } hw->stats.freeq_count = crystalhd_dioq_count(hw->rx_freeq); hw->stats.rdyq_count = crystalhd_dioq_count(hw->rx_rdyq); memcpy(stats, &hw->stats, sizeof(*stats)); } enum BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *hw) { uint32_t reg, n, i; uint32_t vco_mg, refresh_reg; if (!hw) { BCMLOG_ERR("Invalid Arguments\n"); return BC_STS_INV_ARG; } /* FIXME: jarod: wha? */ /*n = (hw->core_clock_mhz * 3) / 20 + 1; */ n = hw->core_clock_mhz/5; if (n == hw->prev_n) return BC_STS_CLK_NOCHG; if (hw->pwr_lock > 0) { /* BCMLOG(BCMLOG_INFO,"pwr_lock is %u\n", hw->pwr_lock) */ return BC_STS_CLK_NOCHG; } i = n * 27; if (i < 560) vco_mg = 0; else if (i < 900) vco_mg = 1; else if (i < 1030) vco_mg = 2; else vco_mg = 3; reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl); reg &= 0xFFFFCFC0; reg |= n; reg |= vco_mg << 12; BCMLOG(BCMLOG_INFO, "clock is moving to %d with n %d with vco_mg %d\n", hw->core_clock_mhz, n, vco_mg); /* Change the DRAM refresh rate to accommodate the new frequency */ /* refresh reg = ((refresh_rate * clock_rate)/16) - 1; rounding up*/ refresh_reg = (7 * hw->core_clock_mhz / 16); bc_dec_reg_wr(hw->adp, SDRAM_REF_PARAM, ((1 << 12) | refresh_reg)); bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg); i = 0; for (i = 0; i < 10; i++) { reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl); if (reg & 0x00020000) { hw->prev_n = n; /* FIXME: jarod: outputting a random "C" is... confusing... */ BCMLOG(BCMLOG_INFO, "C"); return BC_STS_SUCCESS; } else { msleep_interruptible(10); } } BCMLOG(BCMLOG_INFO, "clk change failed\n"); return BC_STS_CLK_NOCHG; }
gpl-2.0
Elite-Kernels/elite_shamu
drivers/media/pci/bt8xx/bttv-gpio.c
7748
4924
/* bttv-gpio.c -- gpio sub drivers sysfs-based sub driver interface for bttv mainly intended for gpio access Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de) & Marcus Metzler (mocm@thp.uni-koeln.de) (c) 1999-2003 Gerd Knorr <kraxel@bytesex.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/slab.h> #include <asm/io.h> #include "bttvp.h" /* ----------------------------------------------------------------------- */ /* internal: the bttv "bus" */ static int bttv_sub_bus_match(struct device *dev, struct device_driver *drv) { struct bttv_sub_driver *sub = to_bttv_sub_drv(drv); int len = strlen(sub->wanted); if (0 == strncmp(dev_name(dev), sub->wanted, len)) return 1; return 0; } static int bttv_sub_probe(struct device *dev) { struct bttv_sub_device *sdev = to_bttv_sub_dev(dev); struct bttv_sub_driver *sub = to_bttv_sub_drv(dev->driver); return sub->probe ? 
sub->probe(sdev) : -ENODEV; } static int bttv_sub_remove(struct device *dev) { struct bttv_sub_device *sdev = to_bttv_sub_dev(dev); struct bttv_sub_driver *sub = to_bttv_sub_drv(dev->driver); if (sub->remove) sub->remove(sdev); return 0; } struct bus_type bttv_sub_bus_type = { .name = "bttv-sub", .match = &bttv_sub_bus_match, .probe = bttv_sub_probe, .remove = bttv_sub_remove, }; static void release_sub_device(struct device *dev) { struct bttv_sub_device *sub = to_bttv_sub_dev(dev); kfree(sub); } int bttv_sub_add_device(struct bttv_core *core, char *name) { struct bttv_sub_device *sub; int err; sub = kzalloc(sizeof(*sub),GFP_KERNEL); if (NULL == sub) return -ENOMEM; sub->core = core; sub->dev.parent = &core->pci->dev; sub->dev.bus = &bttv_sub_bus_type; sub->dev.release = release_sub_device; dev_set_name(&sub->dev, "%s%d", name, core->nr); err = device_register(&sub->dev); if (0 != err) { kfree(sub); return err; } pr_info("%d: add subdevice \"%s\"\n", core->nr, dev_name(&sub->dev)); list_add_tail(&sub->list,&core->subs); return 0; } int bttv_sub_del_devices(struct bttv_core *core) { struct bttv_sub_device *sub, *save; list_for_each_entry_safe(sub, save, &core->subs, list) { list_del(&sub->list); device_unregister(&sub->dev); } return 0; } /* ----------------------------------------------------------------------- */ /* external: sub-driver register/unregister */ int bttv_sub_register(struct bttv_sub_driver *sub, char *wanted) { sub->drv.bus = &bttv_sub_bus_type; snprintf(sub->wanted,sizeof(sub->wanted),"%s",wanted); return driver_register(&sub->drv); } EXPORT_SYMBOL(bttv_sub_register); int bttv_sub_unregister(struct bttv_sub_driver *sub) { driver_unregister(&sub->drv); return 0; } EXPORT_SYMBOL(bttv_sub_unregister); /* ----------------------------------------------------------------------- */ /* external: gpio access functions */ void bttv_gpio_inout(struct bttv_core *core, u32 mask, u32 outbits) { struct bttv *btv = container_of(core, struct bttv, c); unsigned long 
flags; u32 data; spin_lock_irqsave(&btv->gpio_lock,flags); data = btread(BT848_GPIO_OUT_EN); data = data & ~mask; data = data | (mask & outbits); btwrite(data,BT848_GPIO_OUT_EN); spin_unlock_irqrestore(&btv->gpio_lock,flags); } u32 bttv_gpio_read(struct bttv_core *core) { struct bttv *btv = container_of(core, struct bttv, c); u32 value; value = btread(BT848_GPIO_DATA); return value; } void bttv_gpio_write(struct bttv_core *core, u32 value) { struct bttv *btv = container_of(core, struct bttv, c); btwrite(value,BT848_GPIO_DATA); } void bttv_gpio_bits(struct bttv_core *core, u32 mask, u32 bits) { struct bttv *btv = container_of(core, struct bttv, c); unsigned long flags; u32 data; spin_lock_irqsave(&btv->gpio_lock,flags); data = btread(BT848_GPIO_DATA); data = data & ~mask; data = data | (mask & bits); btwrite(data,BT848_GPIO_DATA); spin_unlock_irqrestore(&btv->gpio_lock,flags); } /* * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
sduzz/android_kernel_lge_hammerhead
drivers/media/video/saa7164/saa7164-cards.c
8772
21639
/* * Driver for the NXP SAA7164 PCIe bridge * * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/delay.h> #include "saa7164.h" /* The Bridge API needs to understand register widths (in bytes) for the * attached I2C devices, so we can simplify the virtual i2c mechansms * and keep the -i2c.c implementation clean. */ #define REGLEN_8bit 1 #define REGLEN_16bit 2 struct saa7164_board saa7164_boards[] = { [SAA7164_BOARD_UNKNOWN] = { /* Bridge will not load any firmware, without knowing * the rev this would be fatal. 
*/ .name = "Unknown", }, [SAA7164_BOARD_UNKNOWN_REV2] = { /* Bridge will load the v2 f/w and dump descriptors */ /* Required during new board bringup */ .name = "Generic Rev2", .chiprev = SAA7164_CHIP_REV2, }, [SAA7164_BOARD_UNKNOWN_REV3] = { /* Bridge will load the v2 f/w and dump descriptors */ /* Required during new board bringup */ .name = "Generic Rev3", .chiprev = SAA7164_CHIP_REV3, }, [SAA7164_BOARD_HAUPPAUGE_HVR2200] = { .name = "Hauppauge WinTV-HVR2200", .porta = SAA7164_MPEG_DVB, .portb = SAA7164_MPEG_DVB, .portc = SAA7164_MPEG_ENCODER, .portd = SAA7164_MPEG_ENCODER, .porte = SAA7164_MPEG_VBI, .portf = SAA7164_MPEG_VBI, .chiprev = SAA7164_CHIP_REV3, .unit = {{ .id = 0x1d, .type = SAA7164_UNIT_EEPROM, .name = "4K EEPROM", .i2c_bus_nr = SAA7164_I2C_BUS_0, .i2c_bus_addr = 0xa0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x04, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1b, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1e, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x10 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1f, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x12 >> 1, .i2c_reg_len = REGLEN_8bit, } }, }, [SAA7164_BOARD_HAUPPAUGE_HVR2200_2] = { .name = "Hauppauge WinTV-HVR2200", .porta = SAA7164_MPEG_DVB, .portb = SAA7164_MPEG_DVB, .portc = SAA7164_MPEG_ENCODER, .portd = SAA7164_MPEG_ENCODER, .porte = SAA7164_MPEG_VBI, .portf = SAA7164_MPEG_VBI, .chiprev = SAA7164_CHIP_REV2, .unit = {{ .id = 0x06, .type = SAA7164_UNIT_EEPROM, .name = "4K EEPROM", .i2c_bus_nr = SAA7164_I2C_BUS_0, .i2c_bus_addr = 0xa0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x04, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-1", .i2c_bus_nr 
= SAA7164_I2C_BUS_1, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x05, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x10 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1e, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1f, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x12 >> 1, .i2c_reg_len = REGLEN_8bit, } }, }, [SAA7164_BOARD_HAUPPAUGE_HVR2200_3] = { .name = "Hauppauge WinTV-HVR2200", .porta = SAA7164_MPEG_DVB, .portb = SAA7164_MPEG_DVB, .portc = SAA7164_MPEG_ENCODER, .portd = SAA7164_MPEG_ENCODER, .porte = SAA7164_MPEG_VBI, .portf = SAA7164_MPEG_VBI, .chiprev = SAA7164_CHIP_REV2, .unit = {{ .id = 0x1d, .type = SAA7164_UNIT_EEPROM, .name = "4K EEPROM", .i2c_bus_nr = SAA7164_I2C_BUS_0, .i2c_bus_addr = 0xa0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x04, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x05, .type = SAA7164_UNIT_ANALOG_DEMODULATOR, .name = "TDA8290-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x84 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1b, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1c, .type = SAA7164_UNIT_ANALOG_DEMODULATOR, .name = "TDA8290-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x84 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1e, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x10 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1f, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x12 >> 1, .i2c_reg_len = REGLEN_8bit, } }, 
}, [SAA7164_BOARD_HAUPPAUGE_HVR2200_4] = { .name = "Hauppauge WinTV-HVR2200", .porta = SAA7164_MPEG_DVB, .portb = SAA7164_MPEG_DVB, .portc = SAA7164_MPEG_ENCODER, .portd = SAA7164_MPEG_ENCODER, .porte = SAA7164_MPEG_VBI, .portf = SAA7164_MPEG_VBI, .chiprev = SAA7164_CHIP_REV3, .unit = {{ .id = 0x1d, .type = SAA7164_UNIT_EEPROM, .name = "4K EEPROM", .i2c_bus_nr = SAA7164_I2C_BUS_0, .i2c_bus_addr = 0xa0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x04, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x05, .type = SAA7164_UNIT_ANALOG_DEMODULATOR, .name = "TDA8290-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x84 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1b, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1c, .type = SAA7164_UNIT_ANALOG_DEMODULATOR, .name = "TDA8290-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x84 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1e, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x10 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1f, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x12 >> 1, .i2c_reg_len = REGLEN_8bit, } }, }, [SAA7164_BOARD_HAUPPAUGE_HVR2250] = { .name = "Hauppauge WinTV-HVR2250", .porta = SAA7164_MPEG_DVB, .portb = SAA7164_MPEG_DVB, .portc = SAA7164_MPEG_ENCODER, .portd = SAA7164_MPEG_ENCODER, .porte = SAA7164_MPEG_VBI, .portf = SAA7164_MPEG_VBI, .chiprev = SAA7164_CHIP_REV3, .unit = {{ .id = 0x22, .type = SAA7164_UNIT_EEPROM, .name = "4K EEPROM", .i2c_bus_nr = SAA7164_I2C_BUS_0, .i2c_bus_addr = 0xa0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x04, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0xc0 >> 1, 
.i2c_reg_len = REGLEN_8bit, }, { .id = 0x07, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-1 (TOP)", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x32 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x08, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-1 (QAM)", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x34 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x1e, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x20, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-2 (TOP)", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x32 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x23, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-2 (QAM)", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x34 >> 1, .i2c_reg_len = REGLEN_8bit, } }, }, [SAA7164_BOARD_HAUPPAUGE_HVR2250_2] = { .name = "Hauppauge WinTV-HVR2250", .porta = SAA7164_MPEG_DVB, .portb = SAA7164_MPEG_DVB, .portc = SAA7164_MPEG_ENCODER, .portd = SAA7164_MPEG_ENCODER, .porte = SAA7164_MPEG_VBI, .portf = SAA7164_MPEG_VBI, .chiprev = SAA7164_CHIP_REV3, .unit = {{ .id = 0x28, .type = SAA7164_UNIT_EEPROM, .name = "4K EEPROM", .i2c_bus_nr = SAA7164_I2C_BUS_0, .i2c_bus_addr = 0xa0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x04, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x07, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-1 (TOP)", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x32 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x08, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-1 (QAM)", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x34 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x24, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0xc0 >> 
1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x26, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-2 (TOP)", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x32 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x29, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-2 (QAM)", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x34 >> 1, .i2c_reg_len = REGLEN_8bit, } }, }, [SAA7164_BOARD_HAUPPAUGE_HVR2250_3] = { .name = "Hauppauge WinTV-HVR2250", .porta = SAA7164_MPEG_DVB, .portb = SAA7164_MPEG_DVB, .portc = SAA7164_MPEG_ENCODER, .portd = SAA7164_MPEG_ENCODER, .porte = SAA7164_MPEG_VBI, .portf = SAA7164_MPEG_VBI, .chiprev = SAA7164_CHIP_REV3, .unit = {{ .id = 0x26, .type = SAA7164_UNIT_EEPROM, .name = "4K EEPROM", .i2c_bus_nr = SAA7164_I2C_BUS_0, .i2c_bus_addr = 0xa0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x04, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x07, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-1 (TOP)", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x32 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x08, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-1 (QAM)", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x34 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x22, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x24, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-2 (TOP)", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x32 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x27, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "CX24228/S5H1411-2 (QAM)", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x34 >> 1, .i2c_reg_len = REGLEN_8bit, } }, }, [SAA7164_BOARD_HAUPPAUGE_HVR2200_5] = { .name = "Hauppauge WinTV-HVR2200", .porta = SAA7164_MPEG_DVB, .portb = 
SAA7164_MPEG_DVB, .chiprev = SAA7164_CHIP_REV3, .unit = {{ .id = 0x23, .type = SAA7164_UNIT_EEPROM, .name = "4K EEPROM", .i2c_bus_nr = SAA7164_I2C_BUS_0, .i2c_bus_addr = 0xa0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x04, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x05, .type = SAA7164_UNIT_ANALOG_DEMODULATOR, .name = "TDA8290-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x84 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x21, .type = SAA7164_UNIT_TUNER, .name = "TDA18271-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0xc0 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x22, .type = SAA7164_UNIT_ANALOG_DEMODULATOR, .name = "TDA8290-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x84 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x24, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-1", .i2c_bus_nr = SAA7164_I2C_BUS_1, .i2c_bus_addr = 0x10 >> 1, .i2c_reg_len = REGLEN_8bit, }, { .id = 0x25, .type = SAA7164_UNIT_DIGITAL_DEMODULATOR, .name = "TDA10048-2", .i2c_bus_nr = SAA7164_I2C_BUS_2, .i2c_bus_addr = 0x12 >> 1, .i2c_reg_len = REGLEN_8bit, } }, }, }; const unsigned int saa7164_bcount = ARRAY_SIZE(saa7164_boards); /* ------------------------------------------------------------------ */ /* PCI subsystem IDs */ struct saa7164_subid saa7164_subids[] = { { .subvendor = 0x0070, .subdevice = 0x8880, .card = SAA7164_BOARD_HAUPPAUGE_HVR2250, }, { .subvendor = 0x0070, .subdevice = 0x8810, .card = SAA7164_BOARD_HAUPPAUGE_HVR2250, }, { .subvendor = 0x0070, .subdevice = 0x8980, .card = SAA7164_BOARD_HAUPPAUGE_HVR2200, }, { .subvendor = 0x0070, .subdevice = 0x8900, .card = SAA7164_BOARD_HAUPPAUGE_HVR2200_2, }, { .subvendor = 0x0070, .subdevice = 0x8901, .card = SAA7164_BOARD_HAUPPAUGE_HVR2200_3, }, { .subvendor = 0x0070, .subdevice = 0x88A1, .card = SAA7164_BOARD_HAUPPAUGE_HVR2250_3, }, { .subvendor = 0x0070, .subdevice = 0x8891, .card = 
SAA7164_BOARD_HAUPPAUGE_HVR2250_2, }, { .subvendor = 0x0070, .subdevice = 0x8851, .card = SAA7164_BOARD_HAUPPAUGE_HVR2250_2, }, { .subvendor = 0x0070, .subdevice = 0x8940, .card = SAA7164_BOARD_HAUPPAUGE_HVR2200_4, }, { .subvendor = 0x0070, .subdevice = 0x8953, .card = SAA7164_BOARD_HAUPPAUGE_HVR2200_5, }, }; const unsigned int saa7164_idcount = ARRAY_SIZE(saa7164_subids); void saa7164_card_list(struct saa7164_dev *dev) { int i; if (0 == dev->pci->subsystem_vendor && 0 == dev->pci->subsystem_device) { printk(KERN_ERR "%s: Board has no valid PCIe Subsystem ID and can't\n" "%s: be autodetected. Pass card=<n> insmod option to\n" "%s: workaround that. Send complaints to the vendor\n" "%s: of the TV card. Best regards,\n" "%s: -- tux\n", dev->name, dev->name, dev->name, dev->name, dev->name); } else { printk(KERN_ERR "%s: Your board isn't known (yet) to the driver.\n" "%s: Try to pick one of the existing card configs via\n" "%s: card=<n> insmod option. Updating to the latest\n" "%s: version might help as well.\n", dev->name, dev->name, dev->name, dev->name); } printk(KERN_ERR "%s: Here are valid choices for the card=<n> insmod " "option:\n", dev->name); for (i = 0; i < saa7164_bcount; i++) printk(KERN_ERR "%s: card=%d -> %s\n", dev->name, i, saa7164_boards[i].name); } /* TODO: clean this define up into the -cards.c structs */ #define PCIEBRIDGE_UNITID 2 void saa7164_gpio_setup(struct saa7164_dev *dev) { switch (dev->board) { case SAA7164_BOARD_HAUPPAUGE_HVR2200: case SAA7164_BOARD_HAUPPAUGE_HVR2200_2: case SAA7164_BOARD_HAUPPAUGE_HVR2200_3: case SAA7164_BOARD_HAUPPAUGE_HVR2200_4: case SAA7164_BOARD_HAUPPAUGE_HVR2200_5: case SAA7164_BOARD_HAUPPAUGE_HVR2250: case SAA7164_BOARD_HAUPPAUGE_HVR2250_2: case SAA7164_BOARD_HAUPPAUGE_HVR2250_3: /* GPIO 2: s5h1411 / tda10048-1 demod reset GPIO 3: s5h1411 / tda10048-2 demod reset GPIO 7: IRBlaster Zilog reset */ /* Reset parts by going in and out of reset */ saa7164_api_clear_gpiobit(dev, PCIEBRIDGE_UNITID, 2); 
saa7164_api_clear_gpiobit(dev, PCIEBRIDGE_UNITID, 3); msleep(20); saa7164_api_set_gpiobit(dev, PCIEBRIDGE_UNITID, 2); saa7164_api_set_gpiobit(dev, PCIEBRIDGE_UNITID, 3); break; } } static void hauppauge_eeprom(struct saa7164_dev *dev, u8 *eeprom_data) { struct tveeprom tv; /* TODO: Assumption: eeprom on bus 0 */ tveeprom_hauppauge_analog(&dev->i2c_bus[0].i2c_client, &tv, eeprom_data); /* Make sure we support the board model */ switch (tv.model) { case 88001: /* Development board - Limit circulation */ /* WinTV-HVR2250 (PCIe, Retail, full-height bracket) * ATSC/QAM (TDA18271/S5H1411) and basic analog, no IR, FM */ case 88021: /* WinTV-HVR2250 (PCIe, Retail, full-height bracket) * ATSC/QAM (TDA18271/S5H1411) and basic analog, MCE CIR, FM */ break; case 88041: /* WinTV-HVR2250 (PCIe, Retail, full-height bracket) * ATSC/QAM (TDA18271/S5H1411) and basic analog, no IR, FM */ break; case 88061: /* WinTV-HVR2250 (PCIe, Retail, full-height bracket) * ATSC/QAM (TDA18271/S5H1411) and basic analog, FM */ break; case 89519: case 89609: /* WinTV-HVR2200 (PCIe, Retail, full-height) * DVB-T (TDA18271/TDA10048) and basic analog, no IR */ break; case 89619: /* WinTV-HVR2200 (PCIe, Retail, half-height) * DVB-T (TDA18271/TDA10048) and basic analog, no IR */ break; default: printk(KERN_ERR "%s: Warning: Unknown Hauppauge model #%d\n", dev->name, tv.model); break; } printk(KERN_INFO "%s: Hauppauge eeprom: model=%d\n", dev->name, tv.model); } void saa7164_card_setup(struct saa7164_dev *dev) { static u8 eeprom[256]; if (dev->i2c_bus[0].i2c_rc == 0) { if (saa7164_api_read_eeprom(dev, &eeprom[0], sizeof(eeprom)) < 0) return; } switch (dev->board) { case SAA7164_BOARD_HAUPPAUGE_HVR2200: case SAA7164_BOARD_HAUPPAUGE_HVR2200_2: case SAA7164_BOARD_HAUPPAUGE_HVR2200_3: case SAA7164_BOARD_HAUPPAUGE_HVR2200_4: case SAA7164_BOARD_HAUPPAUGE_HVR2200_5: case SAA7164_BOARD_HAUPPAUGE_HVR2250: case SAA7164_BOARD_HAUPPAUGE_HVR2250_2: case SAA7164_BOARD_HAUPPAUGE_HVR2250_3: hauppauge_eeprom(dev, 
&eeprom[0]); break; } } /* With most other drivers, the kernel expects to communicate with subdrivers * through i2c. This bridge does not allow that, it does not expose any direct * access to I2C. Instead we have to communicate through the device f/w for * register access to 'processing units'. Each unit has a unique * id, regardless of how the physical implementation occurs across * the three physical i2c busses. The being said if we want leverge of * the existing kernel drivers for tuners and demods we have to 'speak i2c', * to this bridge implements 3 virtual i2c buses. This is a helper function * for those. * * Description: Translate the kernels notion of an i2c address and bus into * the appropriate unitid. */ int saa7164_i2caddr_to_unitid(struct saa7164_i2c *bus, int addr) { /* For a given bus and i2c device address, return the saa7164 unique * unitid. < 0 on error */ struct saa7164_dev *dev = bus->dev; struct saa7164_unit *unit; int i; for (i = 0; i < SAA7164_MAX_UNITS; i++) { unit = &saa7164_boards[dev->board].unit[i]; if (unit->type == SAA7164_UNIT_UNDEFINED) continue; if ((bus->nr == unit->i2c_bus_nr) && (addr == unit->i2c_bus_addr)) return unit->id; } return -1; } /* The 7164 API needs to know the i2c register length in advance. * this is a helper function. Based on a specific chip addr and bus return the * reg length. */ int saa7164_i2caddr_to_reglen(struct saa7164_i2c *bus, int addr) { /* For a given bus and i2c device address, return the * saa7164 registry address width. < 0 on error */ struct saa7164_dev *dev = bus->dev; struct saa7164_unit *unit; int i; for (i = 0; i < SAA7164_MAX_UNITS; i++) { unit = &saa7164_boards[dev->board].unit[i]; if (unit->type == SAA7164_UNIT_UNDEFINED) continue; if ((bus->nr == unit->i2c_bus_nr) && (addr == unit->i2c_bus_addr)) return unit->i2c_reg_len; } return -1; } /* TODO: implement a 'findeeprom' functio like the above and fix any other * eeprom related todo's in -api.c. 
*/ /* Translate a unitid into a x readable device name, for display purposes. */ char *saa7164_unitid_name(struct saa7164_dev *dev, u8 unitid) { char *undefed = "UNDEFINED"; char *bridge = "BRIDGE"; struct saa7164_unit *unit; int i; if (unitid == 0) return bridge; for (i = 0; i < SAA7164_MAX_UNITS; i++) { unit = &saa7164_boards[dev->board].unit[i]; if (unit->type == SAA7164_UNIT_UNDEFINED) continue; if (unitid == unit->id) return unit->name; } return undefed; }
gpl-2.0
CyanogenMod/android_kernel_motorola_msm8610
arch/m32r/platforms/mappi3/setup.c
9028
5437
/*
 * linux/arch/m32r/platforms/mappi3/setup.c
 *
 * Setup routines for Renesas MAPPI-III(M3A-2170) Board
 *
 * Copyright (c) 2001-2005  Hiroyuki Kondo, Hirokazu Takata,
 *                          Hitoshi Yamamoto, Mamoru Sakugawa
 */

#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>

#include <asm/m32r.h>
#include <asm/io.h>

/*
 * Map an IRQ number to its ICU control-register port.  Register ports
 * start at M32R_ICU_CR1_PORTL for IRQ 1, hence the "x - 1"; IRQ 0 is
 * invalid and is rejected by the enable/disable helpers below.
 */
#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))

/*
 * Cached ICUCR mode bits per IRQ, indexed by IRQ number.  init_IRQ()
 * records each source's trigger/mode configuration here and the
 * mask/unmask helpers OR in the level bits when programming the ICU.
 */
icu_data_t icu_data[NR_IRQS];

/*
 * Mask @irq at the ICU by programming interrupt level 7 (= disabled)
 * together with the cached mode bits for that source.
 *
 * NOTE(review): printk() lacks a KERN_<level> prefix here and in
 * enable_mappi3_irq() - KERN_WARNING would be appropriate.
 */
static void disable_mappi3_irq(unsigned int irq)
{
	unsigned long port, data;

	if ((irq == 0) || (irq >= NR_IRQS)) {
		printk("bad irq 0x%08x\n", irq);
		return;
	}
	port = irq2port(irq);
	data = icu_data[irq].icucr|M32R_ICUCR_ILEVEL7;
	outl(data, port);
}

/*
 * Unmask @irq at the ICU: set the enable bit (IEN) at interrupt
 * level 6, merged with the cached mode bits for that source.
 */
static void enable_mappi3_irq(unsigned int irq)
{
	unsigned long port, data;

	if ((irq == 0) || (irq >= NR_IRQS)) {
		printk("bad irq 0x%08x\n", irq);
		return;
	}
	port = irq2port(irq);
	data = icu_data[irq].icucr|M32R_ICUCR_IEN|M32R_ICUCR_ILEVEL6;
	outl(data, port);
}

/* irq_chip .irq_mask callback: thin wrapper around disable_mappi3_irq(). */
static void mask_mappi3(struct irq_data *data)
{
	disable_mappi3_irq(data->irq);
}

/* irq_chip .irq_unmask callback: thin wrapper around enable_mappi3_irq(). */
static void unmask_mappi3(struct irq_data *data)
{
	enable_mappi3_irq(data->irq);
}

/*
 * irq_chip .irq_shutdown callback: force level 7 (disabled) directly,
 * dropping the cached mode bits.  Unlike disable_mappi3_irq() this
 * performs no range check on the IRQ number (the genirq core only
 * calls it for IRQs registered below).
 */
static void shutdown_mappi3(struct irq_data *data)
{
	unsigned long port;

	port = irq2port(data->irq);
	outl(M32R_ICUCR_ILEVEL7, port);
}

/* ICU irq_chip shared by every on-board interrupt source. */
static struct irq_chip mappi3_irq_type = {
	.name		= "MAPPI3-IRQ",
	.irq_shutdown	= shutdown_mappi3,
	.irq_mask	= mask_mappi3,
	.irq_unmask	= unmask_mappi3,
};

/*
 * Register the mappi3 irq_chip for each board interrupt source,
 * record the source's ICUCR mode bits in icu_data[], and leave every
 * source masked until its driver unmasks it.
 */
void __init init_IRQ(void)
{
#if defined(CONFIG_SMC91X)
	/* INT0 : LAN controller (SMC91111) */
	irq_set_chip_and_handler(M32R_IRQ_INT0, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
	disable_mappi3_irq(M32R_IRQ_INT0);
#endif  /* CONFIG_SMC91X */

	/* MFT2 : system timer */
	irq_set_chip_and_handler(M32R_IRQ_MFT2, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
	disable_mappi3_irq(M32R_IRQ_MFT2);

#ifdef CONFIG_SERIAL_M32R_SIO
	/* SIO0_R : uart receive data */
	irq_set_chip_and_handler(M32R_IRQ_SIO0_R, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[M32R_IRQ_SIO0_R].icucr = 0;
	disable_mappi3_irq(M32R_IRQ_SIO0_R);

	/* SIO0_S : uart send data */
	irq_set_chip_and_handler(M32R_IRQ_SIO0_S, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[M32R_IRQ_SIO0_S].icucr = 0;
	disable_mappi3_irq(M32R_IRQ_SIO0_S);

	/* SIO1_R : uart receive data */
	irq_set_chip_and_handler(M32R_IRQ_SIO1_R, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[M32R_IRQ_SIO1_R].icucr = 0;
	disable_mappi3_irq(M32R_IRQ_SIO1_R);

	/* SIO1_S : uart send data */
	irq_set_chip_and_handler(M32R_IRQ_SIO1_S, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[M32R_IRQ_SIO1_S].icucr = 0;
	disable_mappi3_irq(M32R_IRQ_SIO1_S);
#endif  /* CONFIG_SERIAL_M32R_SIO */

#if defined(CONFIG_USB)
	/* INT1 : USB Host controller interrupt */
	irq_set_chip_and_handler(M32R_IRQ_INT1, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_ISMOD01;
	disable_mappi3_irq(M32R_IRQ_INT1);
#endif /* CONFIG_USB */

	/* CFC IREQ */
	irq_set_chip_and_handler(PLD_IRQ_CFIREQ, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[PLD_IRQ_CFIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD01;
	disable_mappi3_irq(PLD_IRQ_CFIREQ);

#if defined(CONFIG_M32R_CFC)
	/* ICUCR41: CFC Insert & eject */
	irq_set_chip_and_handler(PLD_IRQ_CFC_INSERT, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[PLD_IRQ_CFC_INSERT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD00;
	disable_mappi3_irq(PLD_IRQ_CFC_INSERT);
#endif /* CONFIG_M32R_CFC */

	/* IDE IREQ */
	irq_set_chip_and_handler(PLD_IRQ_IDEIREQ, &mappi3_irq_type,
				 handle_level_irq);
	icu_data[PLD_IRQ_IDEIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
	disable_mappi3_irq(PLD_IRQ_IDEIREQ);
}

#if defined(CONFIG_SMC91X)

/* I/O window of the SMC91111 LAN controller. */
#define LAN_IOSTART	0x300
#define LAN_IOEND	0x320

static struct resource smc91x_resources[] = {
	[0] = {
		.start	= (LAN_IOSTART),
		.end	= (LAN_IOEND),
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= M32R_IRQ_INT0,
		.end	= M32R_IRQ_INT0,
		.flags	= IORESOURCE_IRQ,
	}
};

static struct platform_device smc91x_device = {
	.name		= "smc91x",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(smc91x_resources),
	.resource	= smc91x_resources,
};
#endif

#if defined(CONFIG_FB_S1D13XXX)

#include <video/s1d13xxxfb.h>
#include <asm/s1d13806.h>

/* Epson S1D13806 framebuffer: init register table comes from
 * <asm/s1d13806.h>; no board-specific init/suspend/resume hooks. */
static struct s1d13xxxfb_pdata s1d13xxxfb_data = {
	.initregs		= s1d13xxxfb_initregs,
	.initregssize		= ARRAY_SIZE(s1d13xxxfb_initregs),
	.platform_init_video	= NULL,
#ifdef CONFIG_PM
	.platform_suspend_video	= NULL,
	.platform_resume_video	= NULL,
#endif
};

/* [0] = video memory window, [1] = control register window. */
static struct resource s1d13xxxfb_resources[] = {
	[0] = {
		.start	= 0x1d600000UL,
		.end	= 0x1d73FFFFUL,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 0x1d400000UL,
		.end	= 0x1d4001FFUL,
		.flags	= IORESOURCE_MEM,
	}
};

static struct platform_device s1d13xxxfb_device = {
	.name		= S1D_DEVICENAME,
	.id		= 0,
	.dev	= {
		.platform_data	= &s1d13xxxfb_data,
	},
	.num_resources	= ARRAY_SIZE(s1d13xxxfb_resources),
	.resource	= s1d13xxxfb_resources,
};
#endif

/* Register the board's platform devices at arch_initcall time. */
static int __init platform_init(void)
{
#if defined(CONFIG_SMC91X)
	platform_device_register(&smc91x_device);
#endif
#if defined(CONFIG_FB_S1D13XXX)
	platform_device_register(&s1d13xxxfb_device);
#endif
	return 0;
}
arch_initcall(platform_init);
gpl-2.0
sfumato77/rk3x_kernel_3.0.36
arch/parisc/math-emu/dfmpy.c
14148
11736
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * Floating-point emulation code
 *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * BEGIN_DESC
 *
 *  File:
 *	@(#)	pa/spmath/dfmpy.c		$Revision: 1.1 $
 *
 *  Purpose:
 *	Double Precision Floating-point Multiply
 *
 *  External Interfaces:
 *	dbl_fmpy(srcptr1,srcptr2,dstptr,status)
 *
 *  Internal Interfaces:
 *
 *  Theory:
 *	Software emulation of an IEEE-754 double-precision multiply:
 *	special-case (NaN/infinity/zero) screening, a 4-bits-per-step
 *	shift-and-add mantissa multiply, then rounding with full
 *	overflow/underflow trap support.
 *
 * END_DESC
 */

#include "float.h"
#include "dbl_float.h"

/*
 * Double Precision Floating-point Multiply
 *
 * Multiplies *srcptr1 by *srcptr2 and stores the rounded product in
 * *dstptr.  Returns NOEXCEPTION on success, or an exception code
 * (INVALIDEXCEPTION, OVERFLOWEXCEPTION, UNDERFLOWEXCEPTION,
 * INEXACTEXCEPTION, possibly OR-ed with INEXACTEXCEPTION) when the
 * corresponding trap is enabled in the FP status word.  Each operand
 * is handled as a pair of 32-bit words (p1 = sign/exponent/high
 * mantissa, p2 = low mantissa) via the Dbl_* accessor macros.
 */
int
dbl_fmpy(
	    dbl_floating_point *srcptr1,
	    dbl_floating_point *srcptr2,
	    dbl_floating_point *dstptr,
	    unsigned int *status)
{
	register unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2;
	register unsigned int opnd3p1, opnd3p2, resultp1, resultp2;
	register int dest_exponent, count;
	register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
	boolean is_tiny;

	Dbl_copyfromptr(srcptr1,opnd1p1,opnd1p2);
	Dbl_copyfromptr(srcptr2,opnd2p1,opnd2p2);

	/*
	 * set sign bit of result: negative iff the operand signs differ
	 */
	if (Dbl_sign(opnd1p1) ^ Dbl_sign(opnd2p1))
		Dbl_setnegativezerop1(resultp1);
	else Dbl_setzerop1(resultp1);

	/*
	 * check first operand for NaN's or infinity
	 */
	if (Dbl_isinfinity_exponent(opnd1p1)) {
		if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
			/* operand 1 is infinity */
			if (Dbl_isnotnan(opnd2p1,opnd2p2)) {
				if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
					/*
					 * invalid since operands are infinity
					 * and zero
					 */
					if (Is_invalidtrap_enabled())
						return(INVALIDEXCEPTION);
					Set_invalidflag();
					Dbl_makequietnan(resultp1,resultp2);
					Dbl_copytoptr(resultp1,resultp2,dstptr);
					return(NOEXCEPTION);
				}
				/*
				 * return infinity
				 */
				Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
				Dbl_copytoptr(resultp1,resultp2,dstptr);
				return(NOEXCEPTION);
			}
		}
		else {
			/*
			 * is NaN; signaling or quiet?
			 */
			if (Dbl_isone_signaling(opnd1p1)) {
				/* trap if INVALIDTRAP enabled */
				if (Is_invalidtrap_enabled())
					return(INVALIDEXCEPTION);
				/* make NaN quiet */
				Set_invalidflag();
				Dbl_set_quiet(opnd1p1);
			}
			/*
			 * is second operand a signaling NaN?
			 * (it takes precedence as the returned payload)
			 */
			else if (Dbl_is_signalingnan(opnd2p1)) {
				/* trap if INVALIDTRAP enabled */
				if (Is_invalidtrap_enabled())
					return(INVALIDEXCEPTION);
				/* make NaN quiet */
				Set_invalidflag();
				Dbl_set_quiet(opnd2p1);
				Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
				return(NOEXCEPTION);
			}
			/*
			 * return quiet NaN
			 */
			Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
			return(NOEXCEPTION);
		}
	}

	/*
	 * check second operand for NaN's or infinity
	 */
	if (Dbl_isinfinity_exponent(opnd2p1)) {
		if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
			if (Dbl_iszero_exponentmantissa(opnd1p1,opnd1p2)) {
				/* invalid since operands are zero & infinity */
				if (Is_invalidtrap_enabled())
					return(INVALIDEXCEPTION);
				Set_invalidflag();
				Dbl_makequietnan(opnd2p1,opnd2p2);
				Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
				return(NOEXCEPTION);
			}
			/*
			 * return infinity
			 */
			Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
			Dbl_copytoptr(resultp1,resultp2,dstptr);
			return(NOEXCEPTION);
		}
		/*
		 * is NaN; signaling or quiet?
		 */
		if (Dbl_isone_signaling(opnd2p1)) {
			/* trap if INVALIDTRAP enabled */
			if (Is_invalidtrap_enabled())
				return(INVALIDEXCEPTION);
			/* make NaN quiet */
			Set_invalidflag();
			Dbl_set_quiet(opnd2p1);
		}
		/*
		 * return quiet NaN
		 */
		Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
		return(NOEXCEPTION);
	}

	/*
	 * Generate exponent: sum of biased exponents minus one bias
	 * (may be adjusted below for denormalized operands/results)
	 */
	dest_exponent = Dbl_exponent(opnd1p1) + Dbl_exponent(opnd2p1) -DBL_BIAS;

	/*
	 * Generate mantissa
	 */
	if (Dbl_isnotzero_exponent(opnd1p1)) {
		/* set hidden bit */
		Dbl_clear_signexponent_set_hidden(opnd1p1);
	}
	else {
		/* check for zero */
		if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
			Dbl_setzero_exponentmantissa(resultp1,resultp2);
			Dbl_copytoptr(resultp1,resultp2,dstptr);
			return(NOEXCEPTION);
		}
		/* is denormalized, adjust exponent */
		Dbl_clear_signexponent(opnd1p1);
		Dbl_leftshiftby1(opnd1p1,opnd1p2);
		Dbl_normalize(opnd1p1,opnd1p2,dest_exponent);
	}
	/* opnd2 needs to have hidden bit set with msb in hidden bit */
	if (Dbl_isnotzero_exponent(opnd2p1)) {
		Dbl_clear_signexponent_set_hidden(opnd2p1);
	}
	else {
		/* check for zero */
		if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
			Dbl_setzero_exponentmantissa(resultp1,resultp2);
			Dbl_copytoptr(resultp1,resultp2,dstptr);
			return(NOEXCEPTION);
		}
		/* is denormalized; want to normalize */
		Dbl_clear_signexponent(opnd2p1);
		Dbl_leftshiftby1(opnd2p1,opnd2p2);
		Dbl_normalize(opnd2p1,opnd2p2,dest_exponent);
	}

	/* Multiply two source mantissas together */

	/* make room for guard bits */
	Dbl_leftshiftby7(opnd2p1,opnd2p2);
	Dbl_setzero(opnd3p1,opnd3p2);
	/*
	 * Four bits at a time are inspected in each loop, and a
	 * simple shift and add multiply algorithm is used.  Bits
	 * shifted out of the partial product fold into stickybit.
	 */
	for (count=1;count<=DBL_P;count+=4) {
		stickybit |= Dlow4p2(opnd3p2);
		Dbl_rightshiftby4(opnd3p1,opnd3p2);
		if (Dbit28p2(opnd1p2)) {
			/* Twoword_add should be an ADDC followed by an ADD. */
			Twoword_add(opnd3p1, opnd3p2, opnd2p1<<3 | opnd2p2>>29,
				    opnd2p2<<3);
		}
		if (Dbit29p2(opnd1p2)) {
			Twoword_add(opnd3p1, opnd3p2, opnd2p1<<2 | opnd2p2>>30,
				    opnd2p2<<2);
		}
		if (Dbit30p2(opnd1p2)) {
			Twoword_add(opnd3p1, opnd3p2, opnd2p1<<1 | opnd2p2>>31,
				    opnd2p2<<1);
		}
		if (Dbit31p2(opnd1p2)) {
			Twoword_add(opnd3p1, opnd3p2, opnd2p1, opnd2p2);
		}
		Dbl_rightshiftby4(opnd1p1,opnd1p2);
	}
	if (Dbit3p1(opnd3p1)==0) {
		Dbl_leftshiftby1(opnd3p1,opnd3p2);
	}
	else {
		/* result mantissa >= 2. */
		dest_exponent++;
	}
	/* check for denormalized result */
	while (Dbit3p1(opnd3p1)==0) {
		Dbl_leftshiftby1(opnd3p1,opnd3p2);
		dest_exponent--;
	}
	/*
	 * check for guard, sticky and inexact bits
	 */
	stickybit |= Dallp2(opnd3p2) << 25;
	guardbit = (Dallp2(opnd3p2) << 24) >> 31;
	inexact = guardbit | stickybit;

	/* align result mantissa */
	Dbl_rightshiftby8(opnd3p1,opnd3p2);

	/*
	 * round result (skipped when the result will be denormalized
	 * without an underflow trap; rounding then happens after
	 * denormalization below)
	 */
	if (inexact && (dest_exponent>0 || Is_underflowtrap_enabled())) {
		Dbl_clear_signexponent(opnd3p1);
		switch (Rounding_mode()) {
			case ROUNDPLUS:
				if (Dbl_iszero_sign(resultp1))
					Dbl_increment(opnd3p1,opnd3p2);
				break;
			case ROUNDMINUS:
				if (Dbl_isone_sign(resultp1))
					Dbl_increment(opnd3p1,opnd3p2);
				break;
			case ROUNDNEAREST:
				if (guardbit) {
			   	if (stickybit || Dbl_isone_lowmantissap2(opnd3p2))
			      		Dbl_increment(opnd3p1,opnd3p2);
				}
		}
		/* rounding carried all the way up: mantissa doubled */
		if (Dbl_isone_hidden(opnd3p1)) dest_exponent++;
	}
	Dbl_set_mantissa(resultp1,resultp2,opnd3p1,opnd3p2);

	/*
	 * Test for overflow
	 */
	if (dest_exponent >= DBL_INFINITY_EXPONENT) {
		/* trap if OVERFLOWTRAP enabled */
		if (Is_overflowtrap_enabled()) {
			/*
			 * Adjust bias of result
			 */
			Dbl_setwrapped_exponent(resultp1,dest_exponent,ovfl);
			Dbl_copytoptr(resultp1,resultp2,dstptr);
			if (inexact)
			    if (Is_inexacttrap_enabled())
				return (OVERFLOWEXCEPTION | INEXACTEXCEPTION);
			    else Set_inexactflag();
			return (OVERFLOWEXCEPTION);
		}
		inexact = TRUE;
		Set_overflowflag();
		/* set result to infinity or largest number */
		Dbl_setoverflow(resultp1,resultp2);
	}
	/*
	 * Test for underflow
	 */
	else if (dest_exponent <= 0) {
		/* trap if UNDERFLOWTRAP enabled */
		if (Is_underflowtrap_enabled()) {
			/*
			 * Adjust bias of result
			 */
			Dbl_setwrapped_exponent(resultp1,dest_exponent,unfl);
			Dbl_copytoptr(resultp1,resultp2,dstptr);
			if (inexact)
			    if (Is_inexacttrap_enabled())
				return (UNDERFLOWEXCEPTION | INEXACTEXCEPTION);
			    else Set_inexactflag();
			return (UNDERFLOWEXCEPTION);
		}

		/*
		 * Determine if should set underflow flag: the result is
		 * "tiny" unless rounding would have carried into the
		 * hidden bit (tentative increment/decrement probe).
		 */
		is_tiny = TRUE;
		if (dest_exponent == 0 && inexact) {
			switch (Rounding_mode()) {
			case ROUNDPLUS:
				if (Dbl_iszero_sign(resultp1)) {
					Dbl_increment(opnd3p1,opnd3p2);
					if (Dbl_isone_hiddenoverflow(opnd3p1))
				    		is_tiny = FALSE;
					Dbl_decrement(opnd3p1,opnd3p2);
				}
				break;
			case ROUNDMINUS:
				if (Dbl_isone_sign(resultp1)) {
					Dbl_increment(opnd3p1,opnd3p2);
					if (Dbl_isone_hiddenoverflow(opnd3p1))
				    		is_tiny = FALSE;
					Dbl_decrement(opnd3p1,opnd3p2);
				}
				break;
			case ROUNDNEAREST:
				if (guardbit && (stickybit ||
				    Dbl_isone_lowmantissap2(opnd3p2))) {
					Dbl_increment(opnd3p1,opnd3p2);
					if (Dbl_isone_hiddenoverflow(opnd3p1))
				    		is_tiny = FALSE;
					Dbl_decrement(opnd3p1,opnd3p2);
				}
				break;
			}
		}

		/*
		 * denormalize result or set to signed zero
		 */
		stickybit = inexact;
		Dbl_denormalize(opnd3p1,opnd3p2,dest_exponent,guardbit,
				stickybit,inexact);

		/* return zero or smallest number: round the denormalized
		 * mantissa now that guard/sticky have been recomputed */
		if (inexact) {
			switch (Rounding_mode()) {
			case ROUNDPLUS:
				if (Dbl_iszero_sign(resultp1)) {
					Dbl_increment(opnd3p1,opnd3p2);
				}
				break;
			case ROUNDMINUS:
				if (Dbl_isone_sign(resultp1)) {
					Dbl_increment(opnd3p1,opnd3p2);
				}
				break;
			case ROUNDNEAREST:
				if (guardbit && (stickybit ||
				    Dbl_isone_lowmantissap2(opnd3p2))) {
					Dbl_increment(opnd3p1,opnd3p2);
				}
				break;
			}
			if (is_tiny) Set_underflowflag();
		}
		Dbl_set_exponentmantissa(resultp1,resultp2,opnd3p1,opnd3p2);
	}
	else Dbl_set_exponent(resultp1,dest_exponent);
	Dbl_copytoptr(resultp1,resultp2,dstptr);

	/* check for inexact */
	if (inexact) {
		if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
		else Set_inexactflag();
	}
	return(NOEXCEPTION);
}
gpl-2.0
shane87/android_kernel_lge_g3
arch/arm/mach-msm/subsystem_restart.c
69
34378
/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "subsys-restart: %s(): " fmt, __func__ #include <linux/kernel.h> #include <linux/module.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/delay.h> #include <linux/list.h> #include <linux/io.h> #include <linux/kthread.h> #include <linux/time.h> #include <linux/wakelock.h> #include <linux/suspend.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/idr.h> #include <linux/debugfs.h> #include <linux/miscdevice.h> #include <linux/interrupt.h> #include <linux/of_gpio.h> #include <asm/current.h> #include <mach/socinfo.h> #include <mach/subsystem_notif.h> #include <mach/subsystem_restart.h> #ifdef CONFIG_LGE_HANDLE_PANIC #include <mach/lge_handle_panic.h> #endif #include "smd_private.h" static int enable_debug; module_param(enable_debug, int, S_IRUGO | S_IWUSR); /** * enum p_subsys_state - state of a subsystem (private) * @SUBSYS_NORMAL: subsystem is operating normally * @SUBSYS_CRASHED: subsystem has crashed and hasn't been shutdown * @SUBSYS_RESTARTING: subsystem has been shutdown and is now restarting * * The 'private' side of the subsytem state used to determine where in the * restart process the subsystem is. 
*/ enum p_subsys_state { SUBSYS_NORMAL, SUBSYS_CRASHED, SUBSYS_RESTARTING, }; /** * enum subsys_state - state of a subsystem (public) * @SUBSYS_OFFLINE: subsystem is offline * @SUBSYS_ONLINE: subsystem is online * * The 'public' side of the subsytem state, exposed to userspace. */ enum subsys_state { SUBSYS_OFFLINE, SUBSYS_ONLINE, }; static const char * const subsys_states[] = { [SUBSYS_OFFLINE] = "OFFLINE", [SUBSYS_ONLINE] = "ONLINE", }; static const char * const restart_levels[] = { [RESET_SOC] = "SYSTEM", [RESET_SUBSYS_COUPLED] = "RELATED", }; /** * struct subsys_tracking - track state of a subsystem or restart order * @p_state: private state of subsystem/order * @state: public state of subsystem/order * @s_lock: protects p_state * @lock: protects subsystem/order callbacks and state * * Tracks the state of a subsystem or a set of subsystems (restart order). * Doing this avoids the need to grab each subsystem's lock and update * each subsystems state when restarting an order. */ struct subsys_tracking { enum p_subsys_state p_state; spinlock_t s_lock; enum subsys_state state; struct mutex lock; }; /** * struct subsys_soc_restart_order - subsystem restart order * @subsystem_list: names of subsystems in this restart order * @count: number of subsystems in order * @track: state tracking and locking * @subsys_ptrs: pointers to subsystems in this restart order */ struct subsys_soc_restart_order { const char * const *subsystem_list; int count; struct subsys_tracking track; struct subsys_device *subsys_ptrs[]; }; struct restart_log { struct timeval time; struct subsys_device *dev; struct list_head list; }; /** * struct subsys_device - subsystem device * @desc: subsystem descriptor * @wake_lock: prevents suspend during subsystem_restart() * @wlname: name of @wake_lock * @work: context for subsystem_restart_wq_func() for this device * @track: state tracking and locking * @notify: subsys notify handle * @dev: device * @owner: module that provides @desc * @count: reference 
count of subsystem_get()/subsystem_put() * @id: ida * @restart_level: restart level (0 - panic, 1 - related, 2 - independent, etc.) * @restart_order: order of other devices this devices restarts with * @dentry: debugfs directory for this device * @do_ramdump_on_put: ramdump on subsystem_put() if true * @err_ready: completion variable to record error ready from subsystem * @crashed: indicates if subsystem has crashed */ struct subsys_device { struct subsys_desc *desc; struct wake_lock wake_lock; char wlname[64]; struct work_struct work; struct subsys_tracking track; void *notify; struct device dev; struct module *owner; int count; int id; int restart_level; struct subsys_soc_restart_order *restart_order; #ifdef CONFIG_DEBUG_FS struct dentry *dentry; #endif bool do_ramdump_on_put; struct miscdevice misc_dev; char miscdevice_name[32]; struct completion err_ready; bool crashed; }; #ifdef CONFIG_MACH_LGE static int modem_reboot_cnt; module_param(modem_reboot_cnt, int, S_IRUGO | S_IWUSR); #endif static struct subsys_device *to_subsys(struct device *d) { return container_of(d, struct subsys_device, dev); } static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%s\n", to_subsys(dev)->desc->name); } static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf) { enum subsys_state state = to_subsys(dev)->track.state; return snprintf(buf, PAGE_SIZE, "%s\n", subsys_states[state]); } static ssize_t restart_level_show(struct device *dev, struct device_attribute *attr, char *buf) { int level = to_subsys(dev)->restart_level; return snprintf(buf, PAGE_SIZE, "%s\n", restart_levels[level]); } static ssize_t restart_level_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct subsys_device *subsys = to_subsys(dev); int i; const char *p; p = memchr(buf, '\n', count); if (p) count = p - buf; for (i = 0; i < ARRAY_SIZE(restart_levels); i++) if 
(!strncasecmp(buf, restart_levels[i], count)) { subsys->restart_level = i; return count; } return -EPERM; } int subsys_get_restart_level(struct subsys_device *dev) { return dev->restart_level; } EXPORT_SYMBOL(subsys_get_restart_level); static void subsys_set_state(struct subsys_device *subsys, enum subsys_state state) { unsigned long flags; spin_lock_irqsave(&subsys->track.s_lock, flags); if (subsys->track.state != state) { subsys->track.state = state; spin_unlock_irqrestore(&subsys->track.s_lock, flags); sysfs_notify(&subsys->dev.kobj, NULL, "state"); return; } spin_unlock_irqrestore(&subsys->track.s_lock, flags); } /** * subsytem_default_online() - Mark a subsystem as online by default * @dev: subsystem to mark as online * * Marks a subsystem as "online" without increasing the reference count * on the subsystem. This is typically used by subsystems that are already * online when the kernel boots up. */ void subsys_default_online(struct subsys_device *dev) { subsys_set_state(dev, SUBSYS_ONLINE); } EXPORT_SYMBOL(subsys_default_online); static struct device_attribute subsys_attrs[] = { __ATTR_RO(name), __ATTR_RO(state), __ATTR(restart_level, 0644, restart_level_show, restart_level_store), __ATTR_NULL, }; static struct bus_type subsys_bus_type = { .name = "msm_subsys", .dev_attrs = subsys_attrs, }; static DEFINE_IDA(subsys_ida); static int enable_ramdumps; module_param(enable_ramdumps, int, S_IRUGO | S_IWUSR); struct workqueue_struct *ssr_wq; static LIST_HEAD(restart_log_list); static DEFINE_MUTEX(soc_order_reg_lock); static DEFINE_MUTEX(restart_log_mutex); /* SOC specific restart orders go here */ #define DEFINE_SINGLE_RESTART_ORDER(name, order) \ static struct subsys_soc_restart_order __##name = { \ .subsystem_list = order, \ .count = ARRAY_SIZE(order), \ .subsys_ptrs = {[ARRAY_SIZE(order)] = NULL} \ }; \ static struct subsys_soc_restart_order *name[] = { \ &__##name, \ } /* MSM 8x60 restart ordering info */ static const char * const _order_8x60_all[] = { 
"external_modem", "modem", "adsp" }; DEFINE_SINGLE_RESTART_ORDER(orders_8x60_all, _order_8x60_all); static const char * const _order_8x60_modems[] = {"external_modem", "modem"}; DEFINE_SINGLE_RESTART_ORDER(orders_8x60_modems, _order_8x60_modems); /*SGLTE restart ordering info*/ static const char * const order_8960_sglte[] = {"external_modem", "modem"}; static struct subsys_soc_restart_order restart_orders_8960_fusion_sglte = { .subsystem_list = order_8960_sglte, .count = ARRAY_SIZE(order_8960_sglte), .subsys_ptrs = {[ARRAY_SIZE(order_8960_sglte)] = NULL} }; static struct subsys_soc_restart_order *restart_orders_8960_sglte[] = { &restart_orders_8960_fusion_sglte, }; /* These will be assigned to one of the sets above after * runtime SoC identification. */ static struct subsys_soc_restart_order **restart_orders; static int n_restart_orders; static struct subsys_soc_restart_order * update_restart_order(struct subsys_device *dev) { int i, j; struct subsys_soc_restart_order *order; const char *name = dev->desc->name; int len = SUBSYS_NAME_MAX_LENGTH; mutex_lock(&soc_order_reg_lock); for (j = 0; j < n_restart_orders; j++) { order = restart_orders[j]; for (i = 0; i < order->count; i++) { if (!strncmp(order->subsystem_list[i], name, len)) { order->subsys_ptrs[i] = dev; goto found; } } } order = NULL; found: mutex_unlock(&soc_order_reg_lock); return order; } static int max_restarts; module_param(max_restarts, int, 0644); static long max_history_time = 3600; module_param(max_history_time, long, 0644); static void do_epoch_check(struct subsys_device *dev) { int n = 0; struct timeval *time_first = NULL, *curr_time; struct restart_log *r_log, *temp; static int max_restarts_check; static long max_history_time_check; mutex_lock(&restart_log_mutex); max_restarts_check = max_restarts; max_history_time_check = max_history_time; /* Check if epoch checking is enabled */ if (!max_restarts_check) goto out; r_log = kmalloc(sizeof(struct restart_log), GFP_KERNEL); if (!r_log) goto out; 
r_log->dev = dev; do_gettimeofday(&r_log->time); curr_time = &r_log->time; INIT_LIST_HEAD(&r_log->list); list_add_tail(&r_log->list, &restart_log_list); list_for_each_entry_safe(r_log, temp, &restart_log_list, list) { if ((curr_time->tv_sec - r_log->time.tv_sec) > max_history_time_check) { pr_debug("Deleted node with restart_time = %ld\n", r_log->time.tv_sec); list_del(&r_log->list); kfree(r_log); continue; } if (!n) { time_first = &r_log->time; pr_debug("Time_first: %ld\n", time_first->tv_sec); } n++; pr_debug("Restart_time: %ld\n", r_log->time.tv_sec); } if (time_first && n >= max_restarts_check) { if ((curr_time->tv_sec - time_first->tv_sec) < max_history_time_check) panic("Subsystems have crashed %d times in less than " "%ld seconds!", max_restarts_check, max_history_time_check); } out: mutex_unlock(&restart_log_mutex); } static void for_each_subsys_device(struct subsys_device **list, unsigned count, void *data, void (*fn)(struct subsys_device *, void *)) { while (count--) { struct subsys_device *dev = *list++; if (!dev) continue; fn(dev, data); } } static void notify_each_subsys_device(struct subsys_device **list, unsigned count, enum subsys_notif_type notif, void *data) { while (count--) { enum subsys_notif_type type = (enum subsys_notif_type)type; struct subsys_device *dev = *list++; if (!dev) continue; subsys_notif_queue_notification(dev->notify, notif, data); } } static int wait_for_err_ready(struct subsys_device *subsys) { int ret; if (!subsys->desc->err_ready_irq || enable_debug == 1) return 0; ret = wait_for_completion_timeout(&subsys->err_ready, msecs_to_jiffies(10000)); if (!ret) { pr_err("[%s]: Error ready timed out\n", subsys->desc->name); return -ETIMEDOUT; } return 0; } static void subsystem_shutdown(struct subsys_device *dev, void *data) { const char *name = dev->desc->name; pr_info("[%p]: Shutting down %s\n", current, name); if (dev->desc->shutdown(dev->desc) < 0) { #ifdef CONFIG_LGE_HANDLE_PANIC lge_set_magic_subsystem(name, LGE_ERR_SUB_SD); 
#endif panic("subsys-restart: [%p]: Failed to shutdown %s!", current, name); } subsys_set_state(dev, SUBSYS_OFFLINE); } static void subsystem_ramdump(struct subsys_device *dev, void *data) { const char *name = dev->desc->name; if (dev->desc->ramdump) if (dev->desc->ramdump(enable_ramdumps, dev->desc) < 0) pr_warn("%s[%p]: Ramdump failed.\n", name, current); dev->do_ramdump_on_put = false; } static void subsystem_powerup(struct subsys_device *dev, void *data) { const char *name = dev->desc->name; int ret; pr_info("[%p]: Powering up %s\n", current, name); init_completion(&dev->err_ready); if (dev->desc->powerup(dev->desc) < 0) { #ifdef CONFIG_LGE_HANDLE_PANIC lge_set_magic_subsystem(name, LGE_ERR_SUB_PWR); #endif notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE, NULL); panic("[%p]: Powerup error: %s!", current, name); } ret = wait_for_err_ready(dev); if (ret) { notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE, NULL); panic("[%p]: Timed out waiting for error ready: %s!", current, name); } subsys_set_state(dev, SUBSYS_ONLINE); } static int __find_subsys(struct device *dev, void *data) { struct subsys_device *subsys = to_subsys(dev); return !strcmp(subsys->desc->name, data); } static struct subsys_device *find_subsys(const char *str) { struct device *dev; if (!str) return NULL; dev = bus_find_device(&subsys_bus_type, NULL, (void *)str, __find_subsys); return dev ? to_subsys(dev) : NULL; } static int subsys_start(struct subsys_device *subsys) { int ret; init_completion(&subsys->err_ready); ret = subsys->desc->start(subsys->desc); if (ret){ notify_each_subsys_device(&subsys, 1, SUBSYS_POWERUP_FAILURE, NULL); return ret; } if (subsys->desc->is_not_loadable) { subsys_set_state(subsys, SUBSYS_ONLINE); return 0; } ret = wait_for_err_ready(subsys); if (ret) { /* pil-boot succeeded but we need to shutdown * the device because error ready timed out. 
*/ notify_each_subsys_device(&subsys, 1, SUBSYS_POWERUP_FAILURE, NULL); subsys->desc->stop(subsys->desc); } else subsys_set_state(subsys, SUBSYS_ONLINE); return ret; } static void subsys_stop(struct subsys_device *subsys) { subsys->desc->stop(subsys->desc); subsys_set_state(subsys, SUBSYS_OFFLINE); } static struct subsys_tracking *subsys_get_track(struct subsys_device *subsys) { struct subsys_soc_restart_order *order = subsys->restart_order; if (order) return &order->track; else return &subsys->track; } /** * subsytem_get() - Boot a subsystem * @name: pointer to a string containing the name of the subsystem to boot * * This function returns a pointer if it succeeds. If an error occurs an * ERR_PTR is returned. * * If this feature is disable, the value %NULL will be returned. */ void *subsystem_get(const char *name) { struct subsys_device *subsys; struct subsys_device *subsys_d; int ret; void *retval; struct subsys_tracking *track; if (!name) return NULL; subsys = retval = find_subsys(name); if (!subsys) return ERR_PTR(-ENODEV); if (!try_module_get(subsys->owner)) { retval = ERR_PTR(-ENODEV); goto err_module; } subsys_d = subsystem_get(subsys->desc->depends_on); if (IS_ERR(subsys_d)) { retval = subsys_d; goto err_depends; } track = subsys_get_track(subsys); mutex_lock(&track->lock); if (!subsys->count) { ret = subsys_start(subsys); if (ret) { retval = ERR_PTR(ret); goto err_start; } } subsys->count++; mutex_unlock(&track->lock); return retval; err_start: mutex_unlock(&track->lock); subsystem_put(subsys_d); err_depends: module_put(subsys->owner); err_module: put_device(&subsys->dev); return retval; } EXPORT_SYMBOL(subsystem_get); /** * subsystem_put() - Shutdown a subsystem * @peripheral_handle: pointer from a previous call to subsystem_get() * * This doesn't imply that a subsystem is shutdown until all callers of * subsystem_get() have called subsystem_put(). 
*/ void subsystem_put(void *subsystem) { struct subsys_device *subsys_d, *subsys = subsystem; struct subsys_tracking *track; if (IS_ERR_OR_NULL(subsys)) return; track = subsys_get_track(subsys); mutex_lock(&track->lock); if (WARN(!subsys->count, "%s: %s: Reference count mismatch\n", subsys->desc->name, __func__)) goto err_out; if (!--subsys->count) { subsys_stop(subsys); if (subsys->do_ramdump_on_put) subsystem_ramdump(subsys, NULL); } mutex_unlock(&track->lock); subsys_d = find_subsys(subsys->desc->depends_on); if (subsys_d) { subsystem_put(subsys_d); put_device(&subsys_d->dev); } module_put(subsys->owner); put_device(&subsys->dev); return; err_out: mutex_unlock(&track->lock); } EXPORT_SYMBOL(subsystem_put); static void subsystem_restart_wq_func(struct work_struct *work) { struct subsys_device *dev = container_of(work, struct subsys_device, work); struct subsys_device **list; struct subsys_desc *desc = dev->desc; struct subsys_soc_restart_order *order = dev->restart_order; struct subsys_tracking *track; unsigned count; unsigned long flags; /* * It's OK to not take the registration lock at this point. * This is because the subsystem list inside the relevant * restart order is not being traversed. */ if (order) { list = order->subsys_ptrs; count = order->count; track = &order->track; } else { list = &dev; count = 1; track = &dev->track; } mutex_lock(&track->lock); do_epoch_check(dev); /* * It's necessary to take the registration lock because the subsystem * list in the SoC restart order will be traversed and it shouldn't be * changed until _this_ restart sequence completes. 
*/ mutex_lock(&soc_order_reg_lock); pr_debug("[%p]: Starting restart sequence for %s\n", current, desc->name); notify_each_subsys_device(list, count, SUBSYS_BEFORE_SHUTDOWN, NULL); for_each_subsys_device(list, count, NULL, subsystem_shutdown); notify_each_subsys_device(list, count, SUBSYS_AFTER_SHUTDOWN, NULL); notify_each_subsys_device(list, count, SUBSYS_RAMDUMP_NOTIFICATION, &enable_ramdumps); spin_lock_irqsave(&track->s_lock, flags); track->p_state = SUBSYS_RESTARTING; spin_unlock_irqrestore(&track->s_lock, flags); /* Collect ram dumps for all subsystems in order here */ for_each_subsys_device(list, count, NULL, subsystem_ramdump); notify_each_subsys_device(list, count, SUBSYS_BEFORE_POWERUP, NULL); for_each_subsys_device(list, count, NULL, subsystem_powerup); notify_each_subsys_device(list, count, SUBSYS_AFTER_POWERUP, NULL); pr_info("[%p]: Restart sequence for %s completed.\n", current, desc->name); mutex_unlock(&soc_order_reg_lock); mutex_unlock(&track->lock); spin_lock_irqsave(&track->s_lock, flags); track->p_state = SUBSYS_NORMAL; wake_unlock(&dev->wake_lock); spin_unlock_irqrestore(&track->s_lock, flags); } static void __subsystem_restart_dev(struct subsys_device *dev) { struct subsys_desc *desc = dev->desc; const char *name = dev->desc->name; struct subsys_tracking *track; unsigned long flags; pr_debug("Restarting %s [level=%s]!\n", desc->name, restart_levels[dev->restart_level]); track = subsys_get_track(dev); /* * Allow drivers to call subsystem_restart{_dev}() as many times as * they want up until the point where the subsystem is shutdown. 
*/ spin_lock_irqsave(&track->s_lock, flags); if (track->p_state != SUBSYS_CRASHED) { if (dev->track.state == SUBSYS_ONLINE && track->p_state != SUBSYS_RESTARTING) { track->p_state = SUBSYS_CRASHED; wake_lock(&dev->wake_lock); queue_work(ssr_wq, &dev->work); } else { panic("Subsystem %s crashed during SSR!", name); } } spin_unlock_irqrestore(&track->s_lock, flags); } int subsystem_restart_dev(struct subsys_device *dev) { const char *name; if (!get_device(&dev->dev)) return -ENODEV; if (!try_module_get(dev->owner)) { put_device(&dev->dev); return -ENODEV; } name = dev->desc->name; /* * If a system reboot/shutdown is underway, ignore subsystem errors. * However, print a message so that we know that a subsystem behaved * unexpectedly here. */ if (system_state == SYSTEM_RESTART || system_state == SYSTEM_POWER_OFF) { pr_err("%s crashed during a system poweroff/shutdown.\n", name); return -EBUSY; } pr_info("Restart sequence requested for %s, restart_level = %s.\n", name, restart_levels[dev->restart_level]); #ifdef CONFIG_MACH_LGE if (!strcmp(name, "modem")) { modem_reboot_cnt++; if (modem_reboot_cnt <= 0) modem_reboot_cnt = 1; } #endif switch (dev->restart_level) { case RESET_SUBSYS_COUPLED: __subsystem_restart_dev(dev); break; case RESET_SOC: #ifdef CONFIG_LGE_HANDLE_PANIC lge_set_magic_subsystem(name, LGE_ERR_SUB_RST); #endif panic("subsys-restart: Resetting the SoC - %s crashed.", name); break; default: #ifdef CONFIG_LGE_HANDLE_PANIC lge_set_magic_subsystem(name, LGE_ERR_SUB_UNK); #endif panic("subsys-restart: Unknown restart level!\n"); break; } module_put(dev->owner); put_device(&dev->dev); return 0; } EXPORT_SYMBOL(subsystem_restart_dev); int subsystem_restart(const char *name) { int ret; struct subsys_device *dev = find_subsys(name); if (!dev) return -ENODEV; ret = subsystem_restart_dev(dev); put_device(&dev->dev); return ret; } EXPORT_SYMBOL(subsystem_restart); /** * subsys_modem_restart() - modem restart silently * * modem restart silently */ int 
subsys_modem_restart() { const char *name; struct subsys_device *dev = find_subsys("modem"); if (!get_device(&dev->dev)) return -ENODEV; if (!try_module_get(dev->owner)) { put_device(&dev->dev); return -ENODEV; } name = dev->desc->name; /* * If a system reboot/shutdown is underway, ignore subsystem errors. * However, print a message so that we know that a subsystem behaved * unexpectedly here. */ if (system_state == SYSTEM_RESTART || system_state == SYSTEM_POWER_OFF) { pr_err("%s crashed during a system poweroff/shutdown.\n", name); return -EBUSY; } pr_info("Restart sequence requested for %s, restart_level = %s.\n", name, restart_levels[dev->restart_level]); #ifdef CONFIG_MACH_LGE if (!strcmp(name, "modem")) { modem_reboot_cnt++; if (modem_reboot_cnt <= 0) modem_reboot_cnt = 1; } #endif __subsystem_restart_dev(dev); module_put(dev->owner); put_device(&dev->dev); return 0; } EXPORT_SYMBOL(subsys_modem_restart); int subsystem_crashed(const char *name) { struct subsys_device *dev = find_subsys(name); struct subsys_tracking *track; if (!dev) return -ENODEV; if (!get_device(&dev->dev)) return -ENODEV; track = subsys_get_track(dev); mutex_lock(&track->lock); dev->do_ramdump_on_put = true; /* * TODO: Make this work with multiple consumers where one is calling * subsystem_restart() and another is calling this function. To do * so would require updating private state, etc. 
*/ mutex_unlock(&track->lock); put_device(&dev->dev); return 0; } EXPORT_SYMBOL(subsystem_crashed); void subsys_set_crash_status(struct subsys_device *dev, bool crashed) { dev->crashed = true; } bool subsys_get_crash_status(struct subsys_device *dev) { return dev->crashed; } #ifdef CONFIG_DEBUG_FS static ssize_t subsys_debugfs_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { int r; char buf[40]; struct subsys_device *subsys = filp->private_data; r = snprintf(buf, sizeof(buf), "%d\n", subsys->count); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t subsys_debugfs_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct subsys_device *subsys = filp->private_data; char buf[10]; char *cmp; cnt = min(cnt, sizeof(buf) - 1); if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; buf[cnt] = '\0'; cmp = strstrip(buf); if (!strcmp(cmp, "restart")) { if (subsystem_restart_dev(subsys)) return -EIO; } else if (!strcmp(cmp, "get")) { if (subsystem_get(subsys->desc->name)) return -EIO; } else if (!strcmp(cmp, "put")) { subsystem_put(subsys); } else { return -EINVAL; } return cnt; } static const struct file_operations subsys_debugfs_fops = { .open = simple_open, .read = subsys_debugfs_read, .write = subsys_debugfs_write, }; static struct dentry *subsys_base_dir; static int __init subsys_debugfs_init(void) { subsys_base_dir = debugfs_create_dir("msm_subsys", NULL); return !subsys_base_dir ? -ENOMEM : 0; } static void subsys_debugfs_exit(void) { debugfs_remove_recursive(subsys_base_dir); } static int subsys_debugfs_add(struct subsys_device *subsys) { if (!subsys_base_dir) return -ENOMEM; subsys->dentry = debugfs_create_file(subsys->desc->name, S_IRUGO | S_IWUSR, subsys_base_dir, subsys, &subsys_debugfs_fops); return !subsys->dentry ? 
-ENOMEM : 0; } static void subsys_debugfs_remove(struct subsys_device *subsys) { debugfs_remove(subsys->dentry); } #else static int __init subsys_debugfs_init(void) { return 0; }; static void subsys_debugfs_exit(void) { } static int subsys_debugfs_add(struct subsys_device *subsys) { return 0; } static void subsys_debugfs_remove(struct subsys_device *subsys) { } #endif static int subsys_device_open(struct inode *inode, struct file *file) { void *retval; struct subsys_device *subsys_dev = container_of(file->private_data, struct subsys_device, misc_dev); if (!file->private_data) return -EINVAL; retval = subsystem_get(subsys_dev->desc->name); if (IS_ERR(retval)) return PTR_ERR(retval); return 0; } static int subsys_device_close(struct inode *inode, struct file *file) { struct subsys_device *subsys_dev = container_of(file->private_data, struct subsys_device, misc_dev); if (!file->private_data) return -EINVAL; subsystem_put(subsys_dev); return 0; } static const struct file_operations subsys_device_fops = { .owner = THIS_MODULE, .open = subsys_device_open, .release = subsys_device_close, }; static void subsys_device_release(struct device *dev) { struct subsys_device *subsys = to_subsys(dev); wake_lock_destroy(&subsys->wake_lock); mutex_destroy(&subsys->track.lock); ida_simple_remove(&subsys_ida, subsys->id); kfree(subsys); } static irqreturn_t subsys_err_ready_intr_handler(int irq, void *subsys) { struct subsys_device *subsys_dev = subsys; dev_info(subsys_dev->desc->dev, "Subsystem error monitoring/handling services are up\n"); if (subsys_dev->desc->is_not_loadable) return IRQ_HANDLED; complete(&subsys_dev->err_ready); return IRQ_HANDLED; } static int subsys_misc_device_add(struct subsys_device *subsys_dev) { int ret; memset(subsys_dev->miscdevice_name, 0, ARRAY_SIZE(subsys_dev->miscdevice_name)); snprintf(subsys_dev->miscdevice_name, ARRAY_SIZE(subsys_dev->miscdevice_name), "subsys_%s", subsys_dev->desc->name); subsys_dev->misc_dev.minor = MISC_DYNAMIC_MINOR; 
subsys_dev->misc_dev.name = subsys_dev->miscdevice_name; subsys_dev->misc_dev.fops = &subsys_device_fops; subsys_dev->misc_dev.parent = &subsys_dev->dev; ret = misc_register(&subsys_dev->misc_dev); if (ret) { pr_err("%s: misc_register() failed for %s (%d)", __func__, subsys_dev->miscdevice_name, ret); } return ret; } static void subsys_misc_device_remove(struct subsys_device *subsys_dev) { misc_deregister(&subsys_dev->misc_dev); } static int __get_gpio(struct subsys_desc *desc, const char *prop, int *gpio) { struct device_node *dnode = desc->dev->of_node; int ret = -ENOENT; if (of_find_property(dnode, prop, NULL)) { *gpio = of_get_named_gpio(dnode, prop, 0); ret = *gpio < 0 ? *gpio : 0; } return ret; } static int __get_irq(struct subsys_desc *desc, const char *prop, unsigned int *irq) { int ret, gpio, irql; ret = __get_gpio(desc, prop, &gpio); if (ret) return ret; irql = gpio_to_irq(gpio); if (irql == -ENOENT) irql = -ENXIO; if (irql < 0) { pr_err("[%s]: Error getting IRQ \"%s\"\n", desc->name, prop); return irql; } else { *irq = irql; } return 0; } static int subsys_parse_devicetree(struct subsys_desc *desc) { int ret; struct platform_device *pdev = container_of(desc->dev, struct platform_device, dev); ret = __get_irq(desc, "qcom,gpio-err-fatal", &desc->err_fatal_irq); if (ret && ret != -ENOENT) return ret; ret = __get_irq(desc, "qcom,gpio-err-ready", &desc->err_ready_irq); if (ret && ret != -ENOENT) return ret; ret = __get_irq(desc, "qcom,gpio-stop-ack", &desc->stop_ack_irq); if (ret && ret != -ENOENT) return ret; ret = __get_gpio(desc, "qcom,gpio-force-stop", &desc->force_stop_gpio); if (ret && ret != -ENOENT) return ret; desc->wdog_bite_irq = platform_get_irq(pdev, 0); if (desc->wdog_bite_irq < 0) return desc->wdog_bite_irq; return 0; } static int subsys_setup_irqs(struct subsys_device *subsys) { struct subsys_desc *desc = subsys->desc; int ret; if (desc->err_fatal_irq && desc->err_fatal_handler) { ret = devm_request_irq(desc->dev, desc->err_fatal_irq, 
desc->err_fatal_handler, IRQF_TRIGGER_RISING, desc->name, desc); if (ret < 0) { dev_err(desc->dev, "[%s]: Unable to register error fatal IRQ handler!: %d\n", desc->name, ret); return ret; } } if (desc->stop_ack_irq && desc->stop_ack_handler) { ret = devm_request_irq(desc->dev, desc->stop_ack_irq, desc->stop_ack_handler, IRQF_TRIGGER_RISING, desc->name, desc); if (ret < 0) { dev_err(desc->dev, "[%s]: Unable to register stop ack handler!: %d\n", desc->name, ret); return ret; } } if (desc->wdog_bite_irq && desc->wdog_bite_handler) { ret = devm_request_irq(desc->dev, desc->wdog_bite_irq, desc->wdog_bite_handler, IRQF_TRIGGER_RISING, desc->name, desc); if (ret < 0) { dev_err(desc->dev, "[%s]: Unable to register wdog bite handler!: %d\n", desc->name, ret); return ret; } } if (desc->err_ready_irq) { ret = devm_request_irq(desc->dev, desc->err_ready_irq, subsys_err_ready_intr_handler, IRQF_TRIGGER_RISING, "error_ready_interrupt", subsys); if (ret < 0) { dev_err(desc->dev, "[%s]: Unable to register err ready handler\n", desc->name); return ret; } } return 0; } struct subsys_device *subsys_register(struct subsys_desc *desc) { struct subsys_device *subsys; int ret; subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); if (!subsys) return ERR_PTR(-ENOMEM); subsys->desc = desc; subsys->owner = desc->owner; subsys->dev.parent = desc->dev; subsys->dev.bus = &subsys_bus_type; subsys->dev.release = subsys_device_release; subsys->notify = subsys_notif_add_subsys(desc->name); subsys->restart_order = update_restart_order(subsys); ret = subsys_parse_devicetree(desc); if (ret) goto err_dtree; snprintf(subsys->wlname, sizeof(subsys->wlname), "ssr(%s)", desc->name); wake_lock_init(&subsys->wake_lock, WAKE_LOCK_SUSPEND, subsys->wlname); INIT_WORK(&subsys->work, subsystem_restart_wq_func); spin_lock_init(&subsys->track.s_lock); subsys->id = ida_simple_get(&subsys_ida, 0, 0, GFP_KERNEL); if (subsys->id < 0) { ret = subsys->id; goto err_ida; } dev_set_name(&subsys->dev, "subsys%d", subsys->id); 
mutex_init(&subsys->track.lock); ret = subsys_debugfs_add(subsys); if (ret) goto err_debugfs; ret = device_register(&subsys->dev); if (ret) { device_unregister(&subsys->dev); goto err_register; } ret = subsys_misc_device_add(subsys); if (ret) { put_device(&subsys->dev); goto err_register; } ret = subsys_setup_irqs(subsys); if (ret < 0) goto err_misc_device; return subsys; err_misc_device: subsys_misc_device_remove(subsys); err_register: subsys_debugfs_remove(subsys); err_debugfs: mutex_destroy(&subsys->track.lock); ida_simple_remove(&subsys_ida, subsys->id); err_ida: wake_lock_destroy(&subsys->wake_lock); err_dtree: kfree(subsys); return ERR_PTR(ret); } EXPORT_SYMBOL(subsys_register); void subsys_unregister(struct subsys_device *subsys) { if (IS_ERR_OR_NULL(subsys)) return; if (get_device(&subsys->dev)) { mutex_lock(&subsys->track.lock); WARN_ON(subsys->count); device_unregister(&subsys->dev); mutex_unlock(&subsys->track.lock); subsys_debugfs_remove(subsys); subsys_misc_device_remove(subsys); put_device(&subsys->dev); } } EXPORT_SYMBOL(subsys_unregister); static int subsys_panic(struct device *dev, void *data) { struct subsys_device *subsys = to_subsys(dev); if (subsys->desc->crash_shutdown) subsys->desc->crash_shutdown(subsys->desc); return 0; } static int ssr_panic_handler(struct notifier_block *this, unsigned long event, void *ptr) { bus_for_each_dev(&subsys_bus_type, NULL, NULL, subsys_panic); return NOTIFY_DONE; } static struct notifier_block panic_nb = { .notifier_call = ssr_panic_handler, }; static int __init ssr_init_soc_restart_orders(void) { int i; atomic_notifier_chain_register(&panic_notifier_list, &panic_nb); if (cpu_is_msm8x60()) { for (i = 0; i < ARRAY_SIZE(orders_8x60_all); i++) { mutex_init(&orders_8x60_all[i]->track.lock); spin_lock_init(&orders_8x60_all[i]->track.s_lock); } for (i = 0; i < ARRAY_SIZE(orders_8x60_modems); i++) { mutex_init(&orders_8x60_modems[i]->track.lock); spin_lock_init(&orders_8x60_modems[i]->track.s_lock); } restart_orders = 
orders_8x60_all; n_restart_orders = ARRAY_SIZE(orders_8x60_all); } if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE) { restart_orders = restart_orders_8960_sglte; n_restart_orders = ARRAY_SIZE(restart_orders_8960_sglte); } for (i = 0; i < n_restart_orders; i++) { mutex_init(&restart_orders[i]->track.lock); spin_lock_init(&restart_orders[i]->track.s_lock); } return 0; } static int __init subsys_restart_init(void) { int ret; ssr_wq = alloc_workqueue("ssr_wq", WQ_CPU_INTENSIVE, 0); BUG_ON(!ssr_wq); ret = bus_register(&subsys_bus_type); if (ret) goto err_bus; ret = subsys_debugfs_init(); if (ret) goto err_debugfs; ret = ssr_init_soc_restart_orders(); if (ret) goto err_soc; return 0; err_soc: subsys_debugfs_exit(); err_debugfs: bus_unregister(&subsys_bus_type); err_bus: destroy_workqueue(ssr_wq); return ret; } arch_initcall(subsys_restart_init); MODULE_DESCRIPTION("Subsystem Restart Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
val2k/linux
drivers/staging/comedi/drivers/adl_pci9111.c
581
20438
/* * adl_pci9111.c * Hardware driver for PCI9111 ADLink cards: PCI-9111HR * Copyright (C) 2002-2005 Emmanuel Pacaud <emmanuel.pacaud@univ-poitiers.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * Driver: adl_pci9111 * Description: Adlink PCI-9111HR * Devices: [ADLink] PCI-9111HR (adl_pci9111) * Author: Emmanuel Pacaud <emmanuel.pacaud@univ-poitiers.fr> * Status: experimental * * Configuration options: not applicable, uses PCI auto config * * Supports: * - ai_insn read * - ao_insn read/write * - di_insn read * - do_insn read/write * - ai_do_cmd mode with the following sources: * - start_src TRIG_NOW * - scan_begin_src TRIG_FOLLOW TRIG_TIMER TRIG_EXT * - convert_src TRIG_TIMER TRIG_EXT * - scan_end_src TRIG_COUNT * - stop_src TRIG_COUNT TRIG_NONE * * The scanned channels must be consecutive and start from 0. They must * all have the same range and aref. */ /* * TODO: * - Really test implemented functionality. * - Add support for the PCI-9111DG with a probe routine to identify * the card type (perhaps with the help of the channel number readback * of the A/D Data register). * - Add external multiplexer support. 
*/ #include <linux/module.h> #include <linux/delay.h> #include <linux/interrupt.h> #include "../comedi_pci.h" #include "plx9052.h" #include "comedi_8254.h" #define PCI9111_FIFO_HALF_SIZE 512 #define PCI9111_AI_ACQUISITION_PERIOD_MIN_NS 10000 #define PCI9111_RANGE_SETTING_DELAY 10 #define PCI9111_AI_INSTANT_READ_UDELAY_US 2 /* * IO address map and bit defines */ #define PCI9111_AI_FIFO_REG 0x00 #define PCI9111_AO_REG 0x00 #define PCI9111_DIO_REG 0x02 #define PCI9111_EDIO_REG 0x04 #define PCI9111_AI_CHANNEL_REG 0x06 #define PCI9111_AI_RANGE_STAT_REG 0x08 #define PCI9111_AI_STAT_AD_BUSY BIT(7) #define PCI9111_AI_STAT_FF_FF BIT(6) #define PCI9111_AI_STAT_FF_HF BIT(5) #define PCI9111_AI_STAT_FF_EF BIT(4) #define PCI9111_AI_RANGE(x) (((x) & 0x7) << 0) #define PCI9111_AI_RANGE_MASK PCI9111_AI_RANGE(7) #define PCI9111_AI_TRIG_CTRL_REG 0x0a #define PCI9111_AI_TRIG_CTRL_TRGEVENT BIT(5) #define PCI9111_AI_TRIG_CTRL_POTRG BIT(4) #define PCI9111_AI_TRIG_CTRL_PTRG BIT(3) #define PCI9111_AI_TRIG_CTRL_ETIS BIT(2) #define PCI9111_AI_TRIG_CTRL_TPST BIT(1) #define PCI9111_AI_TRIG_CTRL_ASCAN BIT(0) #define PCI9111_INT_CTRL_REG 0x0c #define PCI9111_INT_CTRL_ISC2 BIT(3) #define PCI9111_INT_CTRL_FFEN BIT(2) #define PCI9111_INT_CTRL_ISC1 BIT(1) #define PCI9111_INT_CTRL_ISC0 BIT(0) #define PCI9111_SOFT_TRIG_REG 0x0e #define PCI9111_8254_BASE_REG 0x40 #define PCI9111_INT_CLR_REG 0x48 /* PLX 9052 Local Interrupt 1 enabled and active */ #define PCI9111_LI1_ACTIVE (PLX9052_INTCSR_LI1ENAB | \ PLX9052_INTCSR_LI1STAT) /* PLX 9052 Local Interrupt 2 enabled and active */ #define PCI9111_LI2_ACTIVE (PLX9052_INTCSR_LI2ENAB | \ PLX9052_INTCSR_LI2STAT) static const struct comedi_lrange pci9111_ai_range = { 5, { BIP_RANGE(10), BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1.25), BIP_RANGE(0.625) } }; struct pci9111_private_data { unsigned long lcr_io_base; unsigned int scan_delay; unsigned int chunk_counter; unsigned int chunk_num_samples; unsigned short ai_bounce_buffer[2 * PCI9111_FIFO_HALF_SIZE]; }; static 
void plx9050_interrupt_control(unsigned long io_base, bool int1_enable, bool int1_active_high, bool int2_enable, bool int2_active_high, bool interrupt_enable) { int flags = 0; if (int1_enable) flags |= PLX9052_INTCSR_LI1ENAB; if (int1_active_high) flags |= PLX9052_INTCSR_LI1POL; if (int2_enable) flags |= PLX9052_INTCSR_LI2ENAB; if (int2_active_high) flags |= PLX9052_INTCSR_LI2POL; if (interrupt_enable) flags |= PLX9052_INTCSR_PCIENAB; outb(flags, io_base + PLX9052_INTCSR); } enum pci9111_ISC0_sources { irq_on_eoc, irq_on_fifo_half_full }; enum pci9111_ISC1_sources { irq_on_timer_tick, irq_on_external_trigger }; static void pci9111_interrupt_source_set(struct comedi_device *dev, enum pci9111_ISC0_sources irq_0_source, enum pci9111_ISC1_sources irq_1_source) { int flags; /* Read the current interrupt control bits */ flags = inb(dev->iobase + PCI9111_AI_TRIG_CTRL_REG); /* Shift the bits so they are compatible with the write register */ flags >>= 4; /* Mask off the ISCx bits */ flags &= 0xc0; /* Now set the new ISCx bits */ if (irq_0_source == irq_on_fifo_half_full) flags |= PCI9111_INT_CTRL_ISC0; if (irq_1_source == irq_on_external_trigger) flags |= PCI9111_INT_CTRL_ISC1; outb(flags, dev->iobase + PCI9111_INT_CTRL_REG); } static void pci9111_fifo_reset(struct comedi_device *dev) { unsigned long int_ctrl_reg = dev->iobase + PCI9111_INT_CTRL_REG; /* To reset the FIFO, set FFEN sequence as 0 -> 1 -> 0 */ outb(0, int_ctrl_reg); outb(PCI9111_INT_CTRL_FFEN, int_ctrl_reg); outb(0, int_ctrl_reg); } static int pci9111_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { struct pci9111_private_data *dev_private = dev->private; /* Disable interrupts */ plx9050_interrupt_control(dev_private->lcr_io_base, true, true, true, true, false); /* disable A/D triggers (software trigger mode) and auto scan off */ outb(0, dev->iobase + PCI9111_AI_TRIG_CTRL_REG); pci9111_fifo_reset(dev); return 0; } static int pci9111_ai_check_chanlist(struct comedi_device *dev, struct 
comedi_subdevice *s, struct comedi_cmd *cmd) { unsigned int range0 = CR_RANGE(cmd->chanlist[0]); unsigned int aref0 = CR_AREF(cmd->chanlist[0]); int i; for (i = 1; i < cmd->chanlist_len; i++) { unsigned int chan = CR_CHAN(cmd->chanlist[i]); unsigned int range = CR_RANGE(cmd->chanlist[i]); unsigned int aref = CR_AREF(cmd->chanlist[i]); if (chan != i) { dev_dbg(dev->class_dev, "entries in chanlist must be consecutive channels,counting upwards from 0\n"); return -EINVAL; } if (range != range0) { dev_dbg(dev->class_dev, "entries in chanlist must all have the same gain\n"); return -EINVAL; } if (aref != aref0) { dev_dbg(dev->class_dev, "entries in chanlist must all have the same reference\n"); return -EINVAL; } } return 0; } static int pci9111_ai_do_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; unsigned int arg; /* Step 1 : check if triggers are trivially valid */ err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW); err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER | TRIG_FOLLOW | TRIG_EXT); err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_TIMER | TRIG_EXT); err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT); err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE); if (err) return 1; /* Step 2a : make sure trigger sources are unique */ err |= comedi_check_trigger_is_unique(cmd->scan_begin_src); err |= comedi_check_trigger_is_unique(cmd->convert_src); err |= comedi_check_trigger_is_unique(cmd->stop_src); /* Step 2b : and mutually compatible */ if (cmd->scan_begin_src != TRIG_FOLLOW) { if (cmd->scan_begin_src != cmd->convert_src) err |= -EINVAL; } if (err) return 2; /* Step 3: check if arguments are trivially valid */ err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0); if (cmd->convert_src == TRIG_TIMER) { err |= comedi_check_trigger_arg_min(&cmd->convert_arg, PCI9111_AI_ACQUISITION_PERIOD_MIN_NS); } else { /* TRIG_EXT */ err |= 
comedi_check_trigger_arg_is(&cmd->convert_arg, 0); } if (cmd->scan_begin_src == TRIG_TIMER) { err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, PCI9111_AI_ACQUISITION_PERIOD_MIN_NS); } else { /* TRIG_FOLLOW || TRIG_EXT */ err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0); } err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len); if (cmd->stop_src == TRIG_COUNT) err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1); else /* TRIG_NONE */ err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0); if (err) return 3; /* Step 4: fix up any arguments */ if (cmd->convert_src == TRIG_TIMER) { arg = cmd->convert_arg; comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags); err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg); } /* * There's only one timer on this card, so the scan_begin timer * must be a multiple of chanlist_len*convert_arg */ if (cmd->scan_begin_src == TRIG_TIMER) { arg = cmd->chanlist_len * cmd->convert_arg; if (arg < cmd->scan_begin_arg) arg *= (cmd->scan_begin_arg / arg); err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg); } if (err) return 4; /* Step 5: check channel list if it exists */ if (cmd->chanlist && cmd->chanlist_len > 0) err |= pci9111_ai_check_chanlist(dev, s, cmd); if (err) return 5; return 0; } static int pci9111_ai_do_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct pci9111_private_data *dev_private = dev->private; struct comedi_cmd *cmd = &s->async->cmd; unsigned int last_chan = CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1]); unsigned int range0 = CR_RANGE(cmd->chanlist[0]); unsigned int trig = 0; /* Set channel scan limit */ /* PCI9111 allows only scanning from channel 0 to channel n */ /* TODO: handle the case of an external multiplexer */ if (cmd->chanlist_len > 1) trig |= PCI9111_AI_TRIG_CTRL_ASCAN; outb(last_chan, dev->iobase + PCI9111_AI_CHANNEL_REG); /* Set gain - all channels use the same range */ outb(PCI9111_AI_RANGE(range0), dev->iobase + 
PCI9111_AI_RANGE_STAT_REG); /* Set timer pacer */ dev_private->scan_delay = 0; if (cmd->convert_src == TRIG_TIMER) { trig |= PCI9111_AI_TRIG_CTRL_TPST; comedi_8254_update_divisors(dev->pacer); comedi_8254_pacer_enable(dev->pacer, 1, 2, true); pci9111_fifo_reset(dev); pci9111_interrupt_source_set(dev, irq_on_fifo_half_full, irq_on_timer_tick); plx9050_interrupt_control(dev_private->lcr_io_base, true, true, false, true, true); if (cmd->scan_begin_src == TRIG_TIMER) { dev_private->scan_delay = (cmd->scan_begin_arg / (cmd->convert_arg * cmd->chanlist_len)) - 1; } } else { /* TRIG_EXT */ trig |= PCI9111_AI_TRIG_CTRL_ETIS; pci9111_fifo_reset(dev); pci9111_interrupt_source_set(dev, irq_on_fifo_half_full, irq_on_timer_tick); plx9050_interrupt_control(dev_private->lcr_io_base, true, true, false, true, true); } outb(trig, dev->iobase + PCI9111_AI_TRIG_CTRL_REG); dev_private->chunk_counter = 0; dev_private->chunk_num_samples = cmd->chanlist_len * (1 + dev_private->scan_delay); return 0; } static void pci9111_ai_munge(struct comedi_device *dev, struct comedi_subdevice *s, void *data, unsigned int num_bytes, unsigned int start_chan_index) { unsigned short *array = data; unsigned int maxdata = s->maxdata; unsigned int invert = (maxdata + 1) >> 1; unsigned int shift = (maxdata == 0xffff) ? 
0 : 4; unsigned int num_samples = comedi_bytes_to_samples(s, num_bytes); unsigned int i; for (i = 0; i < num_samples; i++) array[i] = ((array[i] >> shift) & maxdata) ^ invert; } static void pci9111_handle_fifo_half_full(struct comedi_device *dev, struct comedi_subdevice *s) { struct pci9111_private_data *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; unsigned short *buf = devpriv->ai_bounce_buffer; unsigned int samples; samples = comedi_nsamples_left(s, PCI9111_FIFO_HALF_SIZE); insw(dev->iobase + PCI9111_AI_FIFO_REG, buf, samples); if (devpriv->scan_delay < 1) { comedi_buf_write_samples(s, buf, samples); } else { unsigned int pos = 0; unsigned int to_read; while (pos < samples) { if (devpriv->chunk_counter < cmd->chanlist_len) { to_read = cmd->chanlist_len - devpriv->chunk_counter; if (to_read > samples - pos) to_read = samples - pos; comedi_buf_write_samples(s, buf + pos, to_read); } else { to_read = devpriv->chunk_num_samples - devpriv->chunk_counter; if (to_read > samples - pos) to_read = samples - pos; } pos += to_read; devpriv->chunk_counter += to_read; if (devpriv->chunk_counter >= devpriv->chunk_num_samples) devpriv->chunk_counter = 0; } } } static irqreturn_t pci9111_interrupt(int irq, void *p_device) { struct comedi_device *dev = p_device; struct pci9111_private_data *dev_private = dev->private; struct comedi_subdevice *s = dev->read_subdev; struct comedi_async *async; struct comedi_cmd *cmd; unsigned int status; unsigned long irq_flags; unsigned char intcsr; if (!dev->attached) { /* Ignore interrupt before device fully attached. */ /* Might not even have allocated subdevices yet! 
*/ return IRQ_NONE; } async = s->async; cmd = &async->cmd; spin_lock_irqsave(&dev->spinlock, irq_flags); /* Check if we are source of interrupt */ intcsr = inb(dev_private->lcr_io_base + PLX9052_INTCSR); if (!(((intcsr & PLX9052_INTCSR_PCIENAB) != 0) && (((intcsr & PCI9111_LI1_ACTIVE) == PCI9111_LI1_ACTIVE) || ((intcsr & PCI9111_LI2_ACTIVE) == PCI9111_LI2_ACTIVE)))) { /* Not the source of the interrupt. */ /* (N.B. not using PLX9052_INTCSR_SOFTINT) */ spin_unlock_irqrestore(&dev->spinlock, irq_flags); return IRQ_NONE; } if ((intcsr & PCI9111_LI1_ACTIVE) == PCI9111_LI1_ACTIVE) { /* Interrupt comes from fifo_half-full signal */ status = inb(dev->iobase + PCI9111_AI_RANGE_STAT_REG); /* '0' means FIFO is full, data may have been lost */ if (!(status & PCI9111_AI_STAT_FF_FF)) { spin_unlock_irqrestore(&dev->spinlock, irq_flags); dev_dbg(dev->class_dev, "fifo overflow\n"); outb(0, dev->iobase + PCI9111_INT_CLR_REG); async->events |= COMEDI_CB_ERROR; comedi_handle_events(dev, s); return IRQ_HANDLED; } /* '0' means FIFO is half-full */ if (!(status & PCI9111_AI_STAT_FF_HF)) pci9111_handle_fifo_half_full(dev, s); } if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg) async->events |= COMEDI_CB_EOA; outb(0, dev->iobase + PCI9111_INT_CLR_REG); spin_unlock_irqrestore(&dev->spinlock, irq_flags); comedi_handle_events(dev, s); return IRQ_HANDLED; } static int pci9111_ai_eoc(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned long context) { unsigned int status; status = inb(dev->iobase + PCI9111_AI_RANGE_STAT_REG); if (status & PCI9111_AI_STAT_FF_EF) return 0; return -EBUSY; } static int pci9111_ai_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int chan = CR_CHAN(insn->chanspec); unsigned int range = CR_RANGE(insn->chanspec); unsigned int maxdata = s->maxdata; unsigned int invert = (maxdata + 1) >> 1; unsigned int shift = (maxdata == 0xffff) ? 
0 : 4; unsigned int status; int ret; int i; outb(chan, dev->iobase + PCI9111_AI_CHANNEL_REG); status = inb(dev->iobase + PCI9111_AI_RANGE_STAT_REG); if ((status & PCI9111_AI_RANGE_MASK) != range) { outb(PCI9111_AI_RANGE(range), dev->iobase + PCI9111_AI_RANGE_STAT_REG); } pci9111_fifo_reset(dev); for (i = 0; i < insn->n; i++) { /* Generate a software trigger */ outb(0, dev->iobase + PCI9111_SOFT_TRIG_REG); ret = comedi_timeout(dev, s, insn, pci9111_ai_eoc, 0); if (ret) { pci9111_fifo_reset(dev); return ret; } data[i] = inw(dev->iobase + PCI9111_AI_FIFO_REG); data[i] = ((data[i] >> shift) & maxdata) ^ invert; } return i; } static int pci9111_ao_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int chan = CR_CHAN(insn->chanspec); unsigned int val = s->readback[chan]; int i; for (i = 0; i < insn->n; i++) { val = data[i]; outw(val, dev->iobase + PCI9111_AO_REG); } s->readback[chan] = val; return insn->n; } static int pci9111_di_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[1] = inw(dev->iobase + PCI9111_DIO_REG); return insn->n; } static int pci9111_do_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (comedi_dio_update_state(s, data)) outw(s->state, dev->iobase + PCI9111_DIO_REG); data[1] = s->state; return insn->n; } static int pci9111_reset(struct comedi_device *dev) { struct pci9111_private_data *dev_private = dev->private; /* Set trigger source to software */ plx9050_interrupt_control(dev_private->lcr_io_base, true, true, true, true, false); /* disable A/D triggers (software trigger mode) and auto scan off */ outb(0, dev->iobase + PCI9111_AI_TRIG_CTRL_REG); return 0; } static int pci9111_auto_attach(struct comedi_device *dev, unsigned long context_unused) { struct pci_dev *pcidev = comedi_to_pci_dev(dev); struct pci9111_private_data *dev_private; struct 
comedi_subdevice *s; int ret; dev_private = comedi_alloc_devpriv(dev, sizeof(*dev_private)); if (!dev_private) return -ENOMEM; ret = comedi_pci_enable(dev); if (ret) return ret; dev_private->lcr_io_base = pci_resource_start(pcidev, 1); dev->iobase = pci_resource_start(pcidev, 2); pci9111_reset(dev); if (pcidev->irq) { ret = request_irq(pcidev->irq, pci9111_interrupt, IRQF_SHARED, dev->board_name, dev); if (ret == 0) dev->irq = pcidev->irq; } dev->pacer = comedi_8254_init(dev->iobase + PCI9111_8254_BASE_REG, I8254_OSC_BASE_2MHZ, I8254_IO16, 0); if (!dev->pacer) return -ENOMEM; ret = comedi_alloc_subdevices(dev, 4); if (ret) return ret; s = &dev->subdevices[0]; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_COMMON; s->n_chan = 16; s->maxdata = 0xffff; s->range_table = &pci9111_ai_range; s->insn_read = pci9111_ai_insn_read; if (dev->irq) { dev->read_subdev = s; s->subdev_flags |= SDF_CMD_READ; s->len_chanlist = s->n_chan; s->do_cmdtest = pci9111_ai_do_cmd_test; s->do_cmd = pci9111_ai_do_cmd; s->cancel = pci9111_ai_cancel; s->munge = pci9111_ai_munge; } s = &dev->subdevices[1]; s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE | SDF_COMMON; s->n_chan = 1; s->maxdata = 0x0fff; s->len_chanlist = 1; s->range_table = &range_bipolar10; s->insn_write = pci9111_ao_insn_write; ret = comedi_alloc_subdev_readback(s); if (ret) return ret; s = &dev->subdevices[2]; s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE; s->n_chan = 16; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = pci9111_di_insn_bits; s = &dev->subdevices[3]; s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_WRITABLE; s->n_chan = 16; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = pci9111_do_insn_bits; return 0; } static void pci9111_detach(struct comedi_device *dev) { if (dev->iobase) pci9111_reset(dev); comedi_pci_detach(dev); } static struct comedi_driver adl_pci9111_driver = { .driver_name = "adl_pci9111", .module = THIS_MODULE, .auto_attach = 
pci9111_auto_attach, .detach = pci9111_detach, }; static int pci9111_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { return comedi_pci_auto_config(dev, &adl_pci9111_driver, id->driver_data); } static const struct pci_device_id pci9111_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, 0x9111) }, /* { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI9111_HG_DEVICE_ID) }, */ { 0 } }; MODULE_DEVICE_TABLE(pci, pci9111_pci_table); static struct pci_driver adl_pci9111_pci_driver = { .name = "adl_pci9111", .id_table = pci9111_pci_table, .probe = pci9111_pci_probe, .remove = comedi_pci_auto_unconfig, }; module_comedi_pci_driver(adl_pci9111_driver, adl_pci9111_pci_driver); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
freedesktop-unofficial-mirror/openchrome__drm-openchrome
drivers/char/agp/sis-agp.c
2117
11342
/* * SiS AGPGART routines. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/agp_backend.h> #include <linux/delay.h> #include "agp.h" #define SIS_ATTBASE 0x90 #define SIS_APSIZE 0x94 #define SIS_TLBCNTRL 0x97 #define SIS_TLBFLUSH 0x98 #define PCI_DEVICE_ID_SI_662 0x0662 #define PCI_DEVICE_ID_SI_671 0x0671 static bool agp_sis_force_delay = 0; static int agp_sis_agp_spec = -1; static int sis_fetch_size(void) { u8 temp_size; int i; struct aper_size_info_8 *values; pci_read_config_byte(agp_bridge->dev, SIS_APSIZE, &temp_size); values = A_SIZE_8(agp_bridge->driver->aperture_sizes); for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if ((temp_size == values[i].size_value) || ((temp_size & ~(0x07)) == (values[i].size_value & ~(0x07)))) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } static void sis_tlbflush(struct agp_memory *mem) { pci_write_config_byte(agp_bridge->dev, SIS_TLBFLUSH, 0x02); } static int sis_configure(void) { struct aper_size_info_8 *current_size; current_size = A_SIZE_8(agp_bridge->current_size); pci_write_config_byte(agp_bridge->dev, SIS_TLBCNTRL, 0x05); agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR); pci_write_config_dword(agp_bridge->dev, SIS_ATTBASE, agp_bridge->gatt_bus_addr); pci_write_config_byte(agp_bridge->dev, SIS_APSIZE, current_size->size_value); return 0; } static void sis_cleanup(void) { struct aper_size_info_8 *previous_size; previous_size = A_SIZE_8(agp_bridge->previous_size); pci_write_config_byte(agp_bridge->dev, SIS_APSIZE, (previous_size->size_value & ~(0x03))); } static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode) { struct pci_dev *device = NULL; u32 command; int rate; dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n", agp_bridge->major_version, agp_bridge->minor_version); pci_read_config_dword(agp_bridge->dev, 
agp_bridge->capndx + PCI_AGP_STATUS, &command); command = agp_collect_device_status(bridge, mode, command); command |= AGPSTAT_AGP_ENABLE; rate = (command & 0x7) << 2; for_each_pci_dev(device) { u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP); if (!agp) continue; dev_info(&agp_bridge->dev->dev, "putting AGP V3 device at %s into %dx mode\n", pci_name(device), rate); pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command); /* * Weird: on some sis chipsets any rate change in the target * command register triggers a 5ms screwup during which the master * cannot be configured */ if (device->device == bridge->dev->device) { dev_info(&agp_bridge->dev->dev, "SiS delay workaround: giving bridge time to recover\n"); msleep(10); } } } static const struct aper_size_info_8 sis_generic_sizes[7] = { {256, 65536, 6, 99}, {128, 32768, 5, 83}, {64, 16384, 4, 67}, {32, 8192, 3, 51}, {16, 4096, 2, 35}, {8, 2048, 1, 19}, {4, 1024, 0, 3} }; static struct agp_bridge_driver sis_driver = { .owner = THIS_MODULE, .aperture_sizes = sis_generic_sizes, .size_type = U8_APER_SIZE, .num_aperture_sizes = 7, .needs_scratch_page = true, .configure = sis_configure, .fetch_size = sis_fetch_size, .cleanup = sis_cleanup, .tlb_flush = sis_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = NULL, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = agp_generic_insert_memory, .remove_memory = agp_generic_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; // chipsets that require the 'delay hack' static int sis_broken_chipsets[] = { PCI_DEVICE_ID_SI_648, PCI_DEVICE_ID_SI_746, 
0 // terminator }; static void sis_get_driver(struct agp_bridge_data *bridge) { int i; for (i=0; sis_broken_chipsets[i]!=0; ++i) if (bridge->dev->device==sis_broken_chipsets[i]) break; if (sis_broken_chipsets[i] || agp_sis_force_delay) sis_driver.agp_enable=sis_delayed_enable; // sis chipsets that indicate less than agp3.5 // are not actually fully agp3 compliant if ((agp_bridge->major_version == 3 && agp_bridge->minor_version >= 5 && agp_sis_agp_spec!=0) || agp_sis_agp_spec==1) { sis_driver.aperture_sizes = agp3_generic_sizes; sis_driver.size_type = U16_APER_SIZE; sis_driver.num_aperture_sizes = AGP_GENERIC_SIZES_ENTRIES; sis_driver.configure = agp3_generic_configure; sis_driver.fetch_size = agp3_generic_fetch_size; sis_driver.cleanup = agp3_generic_cleanup; sis_driver.tlb_flush = agp3_generic_tlbflush; } } static int agp_sis_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_bridge_data *bridge; u8 cap_ptr; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) return -ENODEV; dev_info(&pdev->dev, "SiS chipset [%04x/%04x]\n", pdev->vendor, pdev->device); bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; bridge->driver = &sis_driver; bridge->dev = pdev; bridge->capndx = cap_ptr; get_agp_version(bridge); /* Fill in the mode register */ pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode); sis_get_driver(bridge); pci_set_drvdata(pdev, bridge); return agp_add_bridge(bridge); } static void agp_sis_remove(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); agp_remove_bridge(bridge); agp_put_bridge(bridge); } #ifdef CONFIG_PM static int agp_sis_suspend(struct pci_dev *pdev, pm_message_t state) { pci_save_state(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } static int agp_sis_resume(struct pci_dev *pdev) { pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); return sis_driver.configure(); } #endif /* CONFIG_PM */ static struct pci_device_id 
agp_sis_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_5591, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_530, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_540, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_550, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_620, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_630, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_635, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_645, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_646, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_648, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_650, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = 
PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_651, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_655, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_661, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_662, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_671, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_730, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_735, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_740, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_741, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_745, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, .device = PCI_DEVICE_ID_SI_746, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { } }; MODULE_DEVICE_TABLE(pci, agp_sis_pci_table); static struct pci_driver agp_sis_pci_driver = { .name = "agpgart-sis", .id_table = agp_sis_pci_table, .probe = 
agp_sis_probe, .remove = agp_sis_remove, #ifdef CONFIG_PM .suspend = agp_sis_suspend, .resume = agp_sis_resume, #endif }; static int __init agp_sis_init(void) { if (agp_off) return -EINVAL; return pci_register_driver(&agp_sis_pci_driver); } static void __exit agp_sis_cleanup(void) { pci_unregister_driver(&agp_sis_pci_driver); } module_init(agp_sis_init); module_exit(agp_sis_cleanup); module_param(agp_sis_force_delay, bool, 0); MODULE_PARM_DESC(agp_sis_force_delay,"forces sis delay hack"); module_param(agp_sis_agp_spec, int, 0); MODULE_PARM_DESC(agp_sis_agp_spec,"0=force sis init, 1=force generic agp3 init, default: autodetect"); MODULE_LICENSE("GPL and additional rights");
gpl-2.0
ssanglee/capstone
Linux-Kernel/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
2373
66166
/*--------------------------------------------------------------------------- FT1000 driver for Flarion Flash OFDM NIC Device Copyright (C) 2002 Flarion Technologies, All rights reserved. Copyright (C) 2006 Patrik Ostrihon, All rights reserved. Copyright (C) 2006 ProWeb Consulting, a.s, All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -----------------------------------------------------------------------------*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/in.h> #include <asm/io.h> #include <asm/bitops.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ioport.h> #include <linux/wait.h> #include <linux/vmalloc.h> #include <linux/firmware.h> #include <linux/ethtool.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> #ifdef FT_DEBUG #define DEBUG(n, args...) printk(KERN_DEBUG args); #else #define DEBUG(n, args...) 
#endif #include <linux/delay.h> #include "ft1000.h" static const struct firmware *fw_entry; static void ft1000_hbchk(u_long data); static struct timer_list poll_timer = { .function = ft1000_hbchk }; static u16 cmdbuffer[1024]; static u8 tempbuffer[1600]; static u8 ft1000_card_present = 0; static u8 flarion_ft1000_cnt = 0; static irqreturn_t ft1000_interrupt(int irq, void *dev_id); static void ft1000_enable_interrupts(struct net_device *dev); static void ft1000_disable_interrupts(struct net_device *dev); /* new kernel */ MODULE_AUTHOR(""); MODULE_DESCRIPTION ("Support for Flarion Flash OFDM NIC Device. Support for PCMCIA when used with ft1000_cs."); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("FT1000"); #define MAX_RCV_LOOP 100 //--------------------------------------------------------------------------- // // Function: ft1000_read_fifo_len // Description: This function will read the ASIC Uplink FIFO status register // which will return the number of bytes remaining in the Uplink FIFO. // Sixteen bytes are subtracted to make sure that the ASIC does not // reach its threshold. // Input: // dev - network device structure // Output: // value - number of bytes available in the ASIC Uplink FIFO. 
//
//---------------------------------------------------------------------------
static inline u16 ft1000_read_fifo_len(struct net_device *dev)
{
	struct ft1000_info *info = netdev_priv(dev);

	// 16 bytes are held back so the caller never pushes the ASIC all the
	// way to its FIFO threshold.
	if (info->AsicID == ELECTRABUZZ_ID)
		return (ft1000_read_reg(dev, FT1000_REG_UFIFO_STAT) - 16);
	else
		return (ft1000_read_reg(dev, FT1000_REG_MAG_UFSR) - 16);
}

//---------------------------------------------------------------------------
//
// Function:   ft1000_read_dpram
// Description: This function will read the specific area of dpram
//             (Electrabuzz ASIC only)
// Input:
//     dev    - device structure
//     offset - index of dpram
// Output:
//     value  - value of dpram
//
//---------------------------------------------------------------------------
u16 ft1000_read_dpram(struct net_device *dev, int offset)
{
	struct ft1000_info *info = netdev_priv(dev);
	unsigned long flags;
	u16 data;

	// Provide mutual exclusive access while reading ASIC registers.
	// DPRAM is addressed indirectly: latch the index in DPRAM_ADDR, then
	// access the data port; the lock keeps the two steps atomic.
	spin_lock_irqsave(&info->dpram_lock, flags);
	ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
	data = ft1000_read_reg(dev, FT1000_REG_DPRAM_DATA);
	spin_unlock_irqrestore(&info->dpram_lock, flags);

	return (data);
}

//---------------------------------------------------------------------------
//
// Function:   ft1000_write_dpram
// Description: This function will write to a specific area of dpram
//             (Electrabuzz ASIC only)
// Input:
//     dev    - device structure
//     offset - index of dpram
//     value  - value to write
// Output:
//     none.
//
//---------------------------------------------------------------------------
static inline void ft1000_write_dpram(struct net_device *dev,
				      int offset, u16 value)
{
	struct ft1000_info *info = netdev_priv(dev);
	unsigned long flags;

	// Provide mutual exclusive access while reading ASIC registers.
	spin_lock_irqsave(&info->dpram_lock, flags);
	ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
	ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, value);
	spin_unlock_irqrestore(&info->dpram_lock, flags);
}

//---------------------------------------------------------------------------
//
// Function:   ft1000_read_dpram_mag_16
// Description: This function will read the specific area of dpram
//             (Magnemite ASIC only)
// Input:
//     dev    - device structure
//     offset - index of dpram
//     Index  - nonzero selects the low 16-bit word, zero the high word
// Output:
//     value  - value of dpram
//
//---------------------------------------------------------------------------
u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index)
{
	struct ft1000_info *info = netdev_priv(dev);
	unsigned long flags;
	u16 data;

	// Provide mutual exclusive access while reading ASIC registers.
	spin_lock_irqsave(&info->dpram_lock, flags);
	ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
	// check if we want to read upper or lower 32-bit word
	if (Index) {
		data = ft1000_read_reg(dev, FT1000_REG_MAG_DPDATAL);
	} else {
		data = ft1000_read_reg(dev, FT1000_REG_MAG_DPDATAH);
	}
	spin_unlock_irqrestore(&info->dpram_lock, flags);

	return (data);
}

//---------------------------------------------------------------------------
//
// Function:   ft1000_write_dpram_mag_16
// Description: This function will write to a specific area of dpram
//             (Magnemite ASIC only)
// Input:
//     dev    - device structure
//     offset - index of dpram
//     value  - value to write
//     Index  - nonzero selects the low 16-bit word, zero the high word
// Output:
//     none.
//
//---------------------------------------------------------------------------
static inline void ft1000_write_dpram_mag_16(struct net_device *dev,
					     int offset, u16 value, int Index)
{
	struct ft1000_info *info = netdev_priv(dev);
	unsigned long flags;

	// Provide mutual exclusive access while reading ASIC registers.
	spin_lock_irqsave(&info->dpram_lock, flags);
	ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
	if (Index) {
		ft1000_write_reg(dev, FT1000_REG_MAG_DPDATAL, value);
	} else {
		ft1000_write_reg(dev, FT1000_REG_MAG_DPDATAH, value);
	}
	spin_unlock_irqrestore(&info->dpram_lock, flags);
}

//---------------------------------------------------------------------------
//
// Function:   ft1000_read_dpram_mag_32
// Description: This function will read the specific area of dpram
//             (Magnemite ASIC only)
// Input:
//     dev    - device structure
//     offset - index of dpram
// Output:
//     value  - value of dpram
//
//---------------------------------------------------------------------------
u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset)
{
	struct ft1000_info *info = netdev_priv(dev);
	unsigned long flags;
	u32 data;

	// Provide mutual exclusive access while reading ASIC registers.
	spin_lock_irqsave(&info->dpram_lock, flags);
	ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
	// 32-bit access goes straight through the I/O port, bypassing the
	// 16-bit register helper.
	data = inl(dev->base_addr + FT1000_REG_MAG_DPDATAL);
	spin_unlock_irqrestore(&info->dpram_lock, flags);

	return (data);
}

//---------------------------------------------------------------------------
//
// Function:   ft1000_write_dpram_mag_32
// Description: This function will write to a specific area of dpram
//             (Magnemite ASIC only)
// Input:
//     dev    - device structure
//     offset - index of dpram
//     value  - value to write
// Output:
//     none.
//
//---------------------------------------------------------------------------
void ft1000_write_dpram_mag_32(struct net_device *dev, int offset, u32 value)
{
	struct ft1000_info *info = netdev_priv(dev);
	unsigned long flags;

	// Provide mutual exclusive access while reading ASIC registers.
	spin_lock_irqsave(&info->dpram_lock, flags);
	ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
	outl(value, dev->base_addr + FT1000_REG_MAG_DPDATAL);
	spin_unlock_irqrestore(&info->dpram_lock, flags);
}

//---------------------------------------------------------------------------
//
// Function:   ft1000_enable_interrupts
// Description: This function will enable interrupts base on the current
//             interrupt mask.
// Input:
//     dev    - device structure
// Output:
//     None.
//
//---------------------------------------------------------------------------
static void ft1000_enable_interrupts(struct net_device *dev)
{
	u16 tempword;

	DEBUG(1, "ft1000_hw:ft1000_enable_interrupts()\n");
	ft1000_write_reg(dev, FT1000_REG_SUP_IMASK, ISR_DEFAULT_MASK);
	// Read the mask back purely for the debug trace.
	tempword = ft1000_read_reg(dev, FT1000_REG_SUP_IMASK);
	DEBUG(1,
	      "ft1000_hw:ft1000_enable_interrupts:current interrupt enable mask = 0x%x\n",
	      tempword);
}

//---------------------------------------------------------------------------
//
// Function:   ft1000_disable_interrupts
// Description: This function will disable all interrupts.
// Input:
//     dev    - device structure
// Output:
//     None.
//
//---------------------------------------------------------------------------
static void ft1000_disable_interrupts(struct net_device *dev)
{
	u16 tempword;

	DEBUG(1, "ft1000_hw: ft1000_disable_interrupts()\n");
	ft1000_write_reg(dev, FT1000_REG_SUP_IMASK, ISR_MASK_ALL);
	tempword = ft1000_read_reg(dev, FT1000_REG_SUP_IMASK);
	DEBUG(1,
	      "ft1000_hw:ft1000_disable_interrupts:current interrupt enable mask = 0x%x\n",
	      tempword);
}

//---------------------------------------------------------------------------
//
// Function:   ft1000_reset_asic
// Description: This function will call the Card Service function to reset the
//             ASIC.
// Input:
//     dev    - device structure
// Output:
//     none
//
//---------------------------------------------------------------------------
static void ft1000_reset_asic(struct net_device *dev)
{
	struct ft1000_info *info = netdev_priv(dev);
	struct ft1000_pcmcia *pcmcia = info->priv;
	u16 tempword;

	DEBUG(1, "ft1000_hw:ft1000_reset_asic called\n");

	// Card-Services level reset supplied by the PCMCIA layer.
	(*info->ft1000_reset) (pcmcia->link);

	// Let's use the register provided by the Magnemite ASIC to reset the
	// ASIC and DSP.
	if (info->AsicID == MAGNEMITE_ID) {
		ft1000_write_reg(dev, FT1000_REG_RESET,
				 (DSP_RESET_BIT | ASIC_RESET_BIT));
	}
	mdelay(1);
	if (info->AsicID == ELECTRABUZZ_ID) {
		// set watermark to -1 in order to not generate an interrupt
		ft1000_write_reg(dev, FT1000_REG_WATERMARK, 0xffff);
	} else {
		// set watermark to -1 in order to not generate an interrupt
		ft1000_write_reg(dev, FT1000_REG_MAG_WATERMARK, 0xffff);
	}

	// clear interrupts (the ISR register is write-one-to-clear; the
	// second read is only for the debug trace)
	tempword = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
	DEBUG(1, "ft1000_hw: interrupt status register = 0x%x\n", tempword);
	ft1000_write_reg(dev, FT1000_REG_SUP_ISR, tempword);
	tempword = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
	DEBUG(1, "ft1000_hw: interrupt status register = 0x%x\n", tempword);
}

//---------------------------------------------------------------------------
//
// Function:   ft1000_reset_card
// Description: This function will reset the card
// Input:
//     dev    - device structure
// Output:
//     status - false (card reset fail)
//              true  (card reset successful)
//
//---------------------------------------------------------------------------
static int ft1000_reset_card(struct net_device *dev)
{
	struct ft1000_info *info = netdev_priv(dev);
	u16 tempword;
	int i;
	unsigned long flags;
	struct prov_record *ptr;

	DEBUG(1, "ft1000_hw:ft1000_reset_card called.....\n");

	info->CardReady = 0;
	info->ProgConStat = 0;
	info->squeseqnum = 0;
	ft1000_disable_interrupts(dev);

	// del_timer(&poll_timer);

	// Make sure we free any memory reserve for provisioning
	while (list_empty(&info->prov_list) == 0) {
		DEBUG(0,
		      "ft1000_hw:ft1000_reset_card:deleting provisioning record\n");
		ptr = list_entry(info->prov_list.next, struct prov_record, list);
		list_del(&ptr->list);
		kfree(ptr->pprov_data);
		kfree(ptr);
	}

	if (info->AsicID == ELECTRABUZZ_ID) {
		DEBUG(1, "ft1000_hw:ft1000_reset_card:resetting DSP\n");
		ft1000_write_reg(dev, FT1000_REG_RESET, DSP_RESET_BIT);
	} else {
		DEBUG(1,
		      "ft1000_hw:ft1000_reset_card:resetting ASIC and DSP\n");
		ft1000_write_reg(dev, FT1000_REG_RESET,
				 (DSP_RESET_BIT | ASIC_RESET_BIT));
	}

	// Copy DSP session record into info block if this is not a coldstart
	if (ft1000_card_present == 1) {
		spin_lock_irqsave(&info->dpram_lock, flags);
		if (info->AsicID == ELECTRABUZZ_ID) {
			ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
					 FT1000_DPRAM_RX_BASE);
			for (i = 0; i < MAX_DSP_SESS_REC; i++) {
				info->DSPSess.Rec[i] =
					ft1000_read_reg(dev,
							FT1000_REG_DPRAM_DATA);
			}
		} else {
			ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
					 FT1000_DPRAM_MAG_RX_BASE);
			// Magnemite session record is read as 32-bit words,
			// hence MAX_DSP_SESS_REC / 2 iterations.
			for (i = 0; i < MAX_DSP_SESS_REC / 2; i++) {
				info->DSPSess.MagRec[i] =
					inl(dev->base_addr + FT1000_REG_MAG_DPDATA);
			}
		}
		spin_unlock_irqrestore(&info->dpram_lock, flags);
	}

	DEBUG(1, "ft1000_hw:ft1000_reset_card:resetting ASIC\n");
	mdelay(10);
	//reset ASIC
	ft1000_reset_asic(dev);

	DEBUG(1, "ft1000_hw:ft1000_reset_card:downloading dsp image\n");

	if (info->AsicID == MAGNEMITE_ID) {
		// Put dsp in reset and take ASIC out of reset
		DEBUG(0,
		      "ft1000_hw:ft1000_reset_card:Put DSP in reset and take ASIC out of reset\n");
		ft1000_write_reg(dev, FT1000_REG_RESET, DSP_RESET_BIT);

		// Setting MAGNEMITE ASIC to big endian mode
		ft1000_write_reg(dev, FT1000_REG_SUP_CTRL, HOST_INTF_BE);
		// Download bootloader
		card_bootload(dev);

		// Take DSP out of reset
		ft1000_write_reg(dev, FT1000_REG_RESET, 0);
		// FLARION_DSP_ACTIVE;
		mdelay(10);
		DEBUG(0, "ft1000_hw:ft1000_reset_card:Take DSP out of reset\n");

		// Wait for 0xfefe indicating dsp ready before starting download
		// (poll up to 50 * 20 ms = 1 s)
		for (i = 0; i < 50; i++) {
			tempword =
				ft1000_read_dpram_mag_16(dev, FT1000_MAG_DPRAM_FEFE,
							 FT1000_MAG_DPRAM_FEFE_INDX);
			if (tempword == 0xfefe) {
				break;
			}
			mdelay(20);
		}

		if (i == 50) {
			DEBUG(0,
			      "ft1000_hw:ft1000_reset_card:No FEFE detected from DSP\n");
			return false;
		}

	} else {
		// Take DSP out of reset
		ft1000_write_reg(dev, FT1000_REG_RESET, ~DSP_RESET_BIT);
		mdelay(10);
	}

	if (card_download(dev, fw_entry->data, fw_entry->size)) {
		DEBUG(1, "card download unsuccessful\n");
		return false;
	} else {
		DEBUG(1, "card download successful\n");
	}

	mdelay(10);

	if (info->AsicID == ELECTRABUZZ_ID) {
		// Need to initialize the FIFO length counter to zero in order to sync up
		// with the DSP
		info->fifo_cnt = 0;
		ft1000_write_dpram(dev, FT1000_FIFO_LEN, info->fifo_cnt);
		// Initialize DSP heartbeat area to ho
		ft1000_write_dpram(dev, FT1000_HI_HO, ho);
		tempword = ft1000_read_dpram(dev, FT1000_HI_HO);
		DEBUG(1, "ft1000_hw:ft1000_reset_asic:hi_ho value = 0x%x\n",
		      tempword);
	} else {
		// Initialize DSP heartbeat area to ho
		ft1000_write_dpram_mag_16(dev, FT1000_MAG_HI_HO, ho_mag,
					  FT1000_MAG_HI_HO_INDX);
		tempword =
			ft1000_read_dpram_mag_16(dev, FT1000_MAG_HI_HO,
						 FT1000_MAG_HI_HO_INDX);
		DEBUG(1, "ft1000_hw:ft1000_reset_card:hi_ho value = 0x%x\n",
		      tempword);
	}

	info->CardReady = 1;
	ft1000_enable_interrupts(dev);

	/* Schedule heartbeat process to run every 2 seconds */
	// poll_timer.expires = jiffies + (2*HZ);
	// poll_timer.data = (u_long)dev;
	// add_timer(&poll_timer);

	return true;
}

//---------------------------------------------------------------------------
//
// Function:   ft1000_chkcard
// Description: This function will check if the device is presently available on
//             the system.
// Input:
//     dev    - device structure
// Output:
//     status - false (device is not present)
//              true  (device is present)
//
//---------------------------------------------------------------------------
static int ft1000_chkcard(struct net_device *dev)
{
	u16 tempword;

	// Mask register is used to check for device presence since it is never
	// set to zero.
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_IMASK); if (tempword == 0) { DEBUG(1, "ft1000_hw:ft1000_chkcard: IMASK = 0 Card not detected\n"); return false; } // The system will return the value of 0xffff for the version register // if the device is not present. tempword = ft1000_read_reg(dev, FT1000_REG_ASIC_ID); if (tempword == 0xffff) { DEBUG(1, "ft1000_hw:ft1000_chkcard: Version = 0xffff Card not detected\n"); return false; } return true; } //--------------------------------------------------------------------------- // // Function: ft1000_hbchk // Description: This function will perform the heart beat check of the DSP as // well as the ASIC. // Input: // dev - device structure // Output: // none // //--------------------------------------------------------------------------- static void ft1000_hbchk(u_long data) { struct net_device *dev = (struct net_device *)data; struct ft1000_info *info; u16 tempword; info = netdev_priv(dev); if (info->CardReady == 1) { // Perform dsp heartbeat check if (info->AsicID == ELECTRABUZZ_ID) { tempword = ft1000_read_dpram(dev, FT1000_HI_HO); } else { tempword = ntohs(ft1000_read_dpram_mag_16 (dev, FT1000_MAG_HI_HO, FT1000_MAG_HI_HO_INDX)); } DEBUG(1, "ft1000_hw:ft1000_hbchk:hi_ho value = 0x%x\n", tempword); // Let's perform another check if ho is not detected if (tempword != ho) { if (info->AsicID == ELECTRABUZZ_ID) { tempword = ft1000_read_dpram(dev, FT1000_HI_HO); } else { tempword = ntohs(ft1000_read_dpram_mag_16(dev, FT1000_MAG_HI_HO, FT1000_MAG_HI_HO_INDX)); } } if (tempword != ho) { printk(KERN_INFO "ft1000: heartbeat failed - no ho detected\n"); if (info->AsicID == ELECTRABUZZ_ID) { info->DSP_TIME[0] = ft1000_read_dpram(dev, FT1000_DSP_TIMER0); info->DSP_TIME[1] = ft1000_read_dpram(dev, FT1000_DSP_TIMER1); info->DSP_TIME[2] = ft1000_read_dpram(dev, FT1000_DSP_TIMER2); info->DSP_TIME[3] = ft1000_read_dpram(dev, FT1000_DSP_TIMER3); } else { info->DSP_TIME[0] = ft1000_read_dpram_mag_16(dev, FT1000_MAG_DSP_TIMER0, 
FT1000_MAG_DSP_TIMER0_INDX); info->DSP_TIME[1] = ft1000_read_dpram_mag_16(dev, FT1000_MAG_DSP_TIMER1, FT1000_MAG_DSP_TIMER1_INDX); info->DSP_TIME[2] = ft1000_read_dpram_mag_16(dev, FT1000_MAG_DSP_TIMER2, FT1000_MAG_DSP_TIMER2_INDX); info->DSP_TIME[3] = ft1000_read_dpram_mag_16(dev, FT1000_MAG_DSP_TIMER3, FT1000_MAG_DSP_TIMER3_INDX); } info->DrvErrNum = DSP_HB_INFO; if (ft1000_reset_card(dev) == 0) { printk(KERN_INFO "ft1000: Hardware Failure Detected - PC Card disabled\n"); info->ProgConStat = 0xff; return; } /* Schedule this module to run every 2 seconds */ poll_timer.expires = jiffies + (2*HZ); poll_timer.data = (u_long)dev; add_timer(&poll_timer); return; } tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL); // Let's check doorbell again if fail if (tempword & FT1000_DB_HB) { tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL); } if (tempword & FT1000_DB_HB) { printk(KERN_INFO "ft1000: heartbeat doorbell not clear by firmware\n"); if (info->AsicID == ELECTRABUZZ_ID) { info->DSP_TIME[0] = ft1000_read_dpram(dev, FT1000_DSP_TIMER0); info->DSP_TIME[1] = ft1000_read_dpram(dev, FT1000_DSP_TIMER1); info->DSP_TIME[2] = ft1000_read_dpram(dev, FT1000_DSP_TIMER2); info->DSP_TIME[3] = ft1000_read_dpram(dev, FT1000_DSP_TIMER3); } else { info->DSP_TIME[0] = ft1000_read_dpram_mag_16(dev, FT1000_MAG_DSP_TIMER0, FT1000_MAG_DSP_TIMER0_INDX); info->DSP_TIME[1] = ft1000_read_dpram_mag_16(dev, FT1000_MAG_DSP_TIMER1, FT1000_MAG_DSP_TIMER1_INDX); info->DSP_TIME[2] = ft1000_read_dpram_mag_16(dev, FT1000_MAG_DSP_TIMER2, FT1000_MAG_DSP_TIMER2_INDX); info->DSP_TIME[3] = ft1000_read_dpram_mag_16(dev, FT1000_MAG_DSP_TIMER3, FT1000_MAG_DSP_TIMER3_INDX); } info->DrvErrNum = DSP_HB_INFO; if (ft1000_reset_card(dev) == 0) { printk(KERN_INFO "ft1000: Hardware Failure Detected - PC Card disabled\n"); info->ProgConStat = 0xff; return; } /* Schedule this module to run every 2 seconds */ poll_timer.expires = jiffies + (2*HZ); poll_timer.data = (u_long)dev; add_timer(&poll_timer); return; } // Set 
dedicated area to hi and ring appropriate doorbell according // to hi/ho heartbeat protocol if (info->AsicID == ELECTRABUZZ_ID) { ft1000_write_dpram(dev, FT1000_HI_HO, hi); } else { ft1000_write_dpram_mag_16(dev, FT1000_MAG_HI_HO, hi_mag, FT1000_MAG_HI_HO_INDX); } if (info->AsicID == ELECTRABUZZ_ID) { tempword = ft1000_read_dpram(dev, FT1000_HI_HO); } else { tempword = ntohs(ft1000_read_dpram_mag_16 (dev, FT1000_MAG_HI_HO, FT1000_MAG_HI_HO_INDX)); } // Let's write hi again if fail if (tempword != hi) { if (info->AsicID == ELECTRABUZZ_ID) { ft1000_write_dpram(dev, FT1000_HI_HO, hi); } else { ft1000_write_dpram_mag_16(dev, FT1000_MAG_HI_HO, hi_mag, FT1000_MAG_HI_HO_INDX); } if (info->AsicID == ELECTRABUZZ_ID) { tempword = ft1000_read_dpram(dev, FT1000_HI_HO); } else { tempword = ntohs(ft1000_read_dpram_mag_16(dev, FT1000_MAG_HI_HO, FT1000_MAG_HI_HO_INDX)); } } if (tempword != hi) { printk(KERN_INFO "ft1000: heartbeat failed - cannot write hi into DPRAM\n"); if (info->AsicID == ELECTRABUZZ_ID) { info->DSP_TIME[0] = ft1000_read_dpram(dev, FT1000_DSP_TIMER0); info->DSP_TIME[1] = ft1000_read_dpram(dev, FT1000_DSP_TIMER1); info->DSP_TIME[2] = ft1000_read_dpram(dev, FT1000_DSP_TIMER2); info->DSP_TIME[3] = ft1000_read_dpram(dev, FT1000_DSP_TIMER3); } else { info->DSP_TIME[0] = ft1000_read_dpram_mag_16(dev, FT1000_MAG_DSP_TIMER0, FT1000_MAG_DSP_TIMER0_INDX); info->DSP_TIME[1] = ft1000_read_dpram_mag_16(dev, FT1000_MAG_DSP_TIMER1, FT1000_MAG_DSP_TIMER1_INDX); info->DSP_TIME[2] = ft1000_read_dpram_mag_16(dev, FT1000_MAG_DSP_TIMER2, FT1000_MAG_DSP_TIMER2_INDX); info->DSP_TIME[3] = ft1000_read_dpram_mag_16(dev, FT1000_MAG_DSP_TIMER3, FT1000_MAG_DSP_TIMER3_INDX); } info->DrvErrNum = DSP_HB_INFO; if (ft1000_reset_card(dev) == 0) { printk(KERN_INFO "ft1000: Hardware Failure Detected - PC Card disabled\n"); info->ProgConStat = 0xff; return; } /* Schedule this module to run every 2 seconds */ poll_timer.expires = jiffies + (2*HZ); poll_timer.data = (u_long)dev; 
add_timer(&poll_timer); return; } ft1000_write_reg(dev, FT1000_REG_DOORBELL, FT1000_DB_HB); } /* Schedule this module to run every 2 seconds */ poll_timer.expires = jiffies + (2 * HZ); poll_timer.data = (u_long) dev; add_timer(&poll_timer); } //--------------------------------------------------------------------------- // // Function: ft1000_send_cmd // Description: // Input: // Output: // //--------------------------------------------------------------------------- static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size, u16 qtype) { struct ft1000_info *info = netdev_priv(dev); int i; u16 tempword; unsigned long flags; size += sizeof(struct pseudo_hdr); // check for odd byte and increment to 16-bit word align value if ((size & 0x0001)) { size++; } DEBUG(1, "FT1000:ft1000_send_cmd:total length = %d\n", size); DEBUG(1, "FT1000:ft1000_send_cmd:length = %d\n", ntohs(*ptempbuffer)); // put message into slow queue area // All messages are in the form total_len + pseudo header + message body spin_lock_irqsave(&info->dpram_lock, flags); // Make sure SLOWQ doorbell is clear tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL); i=0; while (tempword & FT1000_DB_DPRAM_TX) { mdelay(10); i++; if (i==10) { spin_unlock_irqrestore(&info->dpram_lock, flags); return; } tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL); } if (info->AsicID == ELECTRABUZZ_ID) { ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, FT1000_DPRAM_TX_BASE); // Write total length to dpram ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, size); // Write pseudo header and messgae body for (i = 0; i < (size >> 1); i++) { DEBUG(1, "FT1000:ft1000_send_cmd:data %d = 0x%x\n", i, *ptempbuffer); tempword = htons(*ptempbuffer++); ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, tempword); } } else { ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, FT1000_DPRAM_MAG_TX_BASE); // Write total length to dpram ft1000_write_reg(dev, FT1000_REG_MAG_DPDATAH, htons(size)); // Write pseudo header and messgae body 
		ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
				 FT1000_DPRAM_MAG_TX_BASE + 1);
		// Magnemite DPRAM is written as 32-bit words: low then high
		// 16-bit halves per iteration.
		for (i = 0; i < (size >> 2); i++) {
			DEBUG(1, "FT1000:ft1000_send_cmd:data = 0x%x\n",
			      *ptempbuffer);
			outw(*ptempbuffer++,
			     dev->base_addr + FT1000_REG_MAG_DPDATAL);
			DEBUG(1, "FT1000:ft1000_send_cmd:data = 0x%x\n",
			      *ptempbuffer);
			outw(*ptempbuffer++,
			     dev->base_addr + FT1000_REG_MAG_DPDATAH);
		}
		// Flush the trailing (not 4-byte aligned) words.
		DEBUG(1, "FT1000:ft1000_send_cmd:data = 0x%x\n", *ptempbuffer);
		outw(*ptempbuffer++, dev->base_addr + FT1000_REG_MAG_DPDATAL);
		DEBUG(1, "FT1000:ft1000_send_cmd:data = 0x%x\n", *ptempbuffer);
		outw(*ptempbuffer++, dev->base_addr + FT1000_REG_MAG_DPDATAH);
	}
	spin_unlock_irqrestore(&info->dpram_lock, flags);

	// ring doorbell to notify DSP that we have a message ready
	ft1000_write_reg(dev, FT1000_REG_DOORBELL, FT1000_DB_DPRAM_TX);
}

//---------------------------------------------------------------------------
//
// Function:   ft1000_receive_cmd
// Description: This function will read a message from the dpram area.
// Input:
//     dev    - network device structure
//     pbuffer - caller supply address to buffer
//     maxsz  - capacity of pbuffer in bytes; messages longer than this are
//              rejected
//     pnxtph - pointer to next pseudo header
// Output:
//     Status = 0 (unsuccessful)
//            = 1 (successful)
//
//---------------------------------------------------------------------------
static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
			       int maxsz, u16 *pnxtph)
{
	struct ft1000_info *info = netdev_priv(dev);
	u16 size;
	u16 *ppseudohdr;
	int i;
	u16 tempword;
	unsigned long flags;

	if (info->AsicID == ELECTRABUZZ_ID) {
		size = ( ft1000_read_dpram(dev, *pnxtph) ) + sizeof(struct pseudo_hdr);
	} else {
		size =
			ntohs(ft1000_read_dpram_mag_16
			      (dev, FT1000_MAG_PH_LEN,
			       FT1000_MAG_PH_LEN_INDX)) + sizeof(struct pseudo_hdr);
	}
	if (size > maxsz) {
		DEBUG(1,
		      "FT1000:ft1000_receive_cmd:Invalid command length = %d\n",
		      size);
		return false;
	} else {
		ppseudohdr = (u16 *) pbuffer;
		spin_lock_irqsave(&info->dpram_lock, flags);
		if (info->AsicID == ELECTRABUZZ_ID) {
			ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
					 FT1000_DPRAM_RX_BASE
// Input:
//     dev    - device structure
//     pnxtph - pointer to next pseudo header
// Output:
//     none
//
//---------------------------------------------------------------------------
static void ft1000_proc_drvmsg(struct net_device *dev)
{
	struct ft1000_info *info = netdev_priv(dev);
	u16 msgtype;
	u16 tempword;
	struct media_msg *pmediamsg;
	struct dsp_init_msg *pdspinitmsg;
	struct drv_msg *pdrvmsg;
	u16 len;
	u16 i;
	struct prov_record *ptr;
	struct pseudo_hdr *ppseudo_hdr;
	u16 *pmsg;
	struct timeval tv;
	union {
		u8 byte[2];
		u16 wrd;
	} convert;

	if (info->AsicID == ELECTRABUZZ_ID) {
		tempword = FT1000_DPRAM_RX_BASE+2;
	} else {
		tempword = FT1000_DPRAM_MAG_RX_BASE;
	}
	if ( ft1000_receive_cmd(dev, &cmdbuffer[0], MAX_CMD_SQSIZE, &tempword) ) {

		// Get the message type which is total_len + PSEUDO header + msgtype + message body
		pdrvmsg = (struct drv_msg *) & cmdbuffer[0];
		msgtype = ntohs(pdrvmsg->type);
		DEBUG(1, "Command message type = 0x%x\n", msgtype);
		switch (msgtype) {
		case DSP_PROVISION:
			DEBUG(0,
			      "Got a provisioning request message from DSP\n");
			mdelay(25);
			// Drain the queued provisioning records to the DSP.
			while (list_empty(&info->prov_list) == 0) {
				DEBUG(0, "Sending a provisioning message\n");
				// Make sure SLOWQ doorbell is clear
				tempword =
					ft1000_read_reg(dev, FT1000_REG_DOORBELL);
				i = 0;
				while (tempword & FT1000_DB_DPRAM_TX) {
					mdelay(5);
					i++;
					if (i == 10) {
						break;
					}
				}
				ptr =
					list_entry(info->prov_list.next,
						   struct prov_record, list);
				len = *(u16 *) ptr->pprov_data;
				len = htons(len);

				pmsg = (u16 *) ptr->pprov_data;
				ppseudo_hdr = (struct pseudo_hdr *) pmsg;
				// Insert slow queue sequence number
				ppseudo_hdr->seq_num = info->squeseqnum++;
				ppseudo_hdr->portsrc = 0;
				// Calculate new checksum (XOR of the first
				// seven 16-bit words of the pseudo header)
				ppseudo_hdr->checksum = *pmsg++;
				DEBUG(1, "checksum = 0x%x\n",
				      ppseudo_hdr->checksum);
				for (i = 1; i < 7; i++) {
					ppseudo_hdr->checksum ^= *pmsg++;
					DEBUG(1, "checksum = 0x%x\n",
					      ppseudo_hdr->checksum);
				}

				ft1000_send_cmd (dev, (u16 *)ptr->pprov_data, len, SLOWQ_TYPE);
				list_del(&ptr->list);
				kfree(ptr->pprov_data);
				kfree(ptr);
			}
			// Indicate adapter is ready to take application messages after all
			// provisioning messages are sent
			info->CardReady = 1;
			break;
		case MEDIA_STATE:
			pmediamsg = (struct media_msg *) & cmdbuffer[0];
			if (info->ProgConStat != 0xFF) {
				if (pmediamsg->state) {
					DEBUG(1, "Media is up\n");
					if (info->mediastate == 0) {
						netif_carrier_on(dev);
						netif_wake_queue(dev);
						info->mediastate = 1;
						do_gettimeofday(&tv);
						info->ConTm = tv.tv_sec;
					}
				} else {
					DEBUG(1, "Media is down\n");
					if (info->mediastate == 1) {
						info->mediastate = 0;
						netif_carrier_off(dev);
						netif_stop_queue(dev);
						info->ConTm = 0;
					}
				}
			} else {
				DEBUG(1,"Media is down\n");
				if (info->mediastate == 1) {
					info->mediastate = 0;
					netif_carrier_off(dev);
					netif_stop_queue(dev);
					info->ConTm = 0;
				}
			}
			break;
		case DSP_INIT_MSG:
			pdspinitmsg = (struct dsp_init_msg *) & cmdbuffer[0];
			memcpy(info->DspVer, pdspinitmsg->DspVer, DSPVERSZ);
			DEBUG(1, "DSPVER = 0x%2x 0x%2x 0x%2x 0x%2x\n",
			      info->DspVer[0], info->DspVer[1], info->DspVer[2],
			      info->DspVer[3]);
			memcpy(info->HwSerNum, pdspinitmsg->HwSerNum,
			       HWSERNUMSZ);
			memcpy(info->Sku, pdspinitmsg->Sku, SKUSZ);
			memcpy(info->eui64, pdspinitmsg->eui64, EUISZ);
			// Derive the MAC address from the EUI-64 (bytes 3 and
			// 4, the 0xFFFE insert, are skipped).
			dev->dev_addr[0] = info->eui64[0];
			dev->dev_addr[1] = info->eui64[1];
			dev->dev_addr[2] = info->eui64[2];
			dev->dev_addr[3] = info->eui64[5];
			dev->dev_addr[4] = info->eui64[6];
			dev->dev_addr[5] = info->eui64[7];

			if (ntohs(pdspinitmsg->length) ==
			    (sizeof(struct dsp_init_msg) - 20)) {
				memcpy(info->ProductMode,
				       pdspinitmsg->ProductMode, MODESZ);
				memcpy(info->RfCalVer, pdspinitmsg->RfCalVer,
				       CALVERSZ);
				memcpy(info->RfCalDate, pdspinitmsg->RfCalDate,
				       CALDATESZ);
				DEBUG(1, "RFCalVer = 0x%2x 0x%2x\n",
				      info->RfCalVer[0], info->RfCalVer[1]);
			}

			break ;
		case DSP_STORE_INFO:
			DEBUG(1, "FT1000:drivermsg:Got DSP_STORE_INFO\n");
			tempword = ntohs(pdrvmsg->length);
			info->DSPInfoBlklen = tempword;
			if (tempword < (MAX_DSP_SESS_REC - 4)) {
				pmsg = (u16 *) & pdrvmsg->data[0];
				for (i = 0; i < ((tempword + 1) / 2); i++) {
					DEBUG(1,
					      "FT1000:drivermsg:dsp info data = 0x%x\n",
					      *pmsg);
					// First 10 words of DSPInfoBlk are
					// reserved for the reply header.
					info->DSPInfoBlk[i + 10] = *pmsg++;
				}
			}
			break;
		case DSP_GET_INFO:
			DEBUG(1, "FT1000:drivermsg:Got DSP_GET_INFO\n");
			// copy dsp info block to dsp
			// allow any outstanding ioctl to finish
			mdelay(10);
			tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
			if (tempword & FT1000_DB_DPRAM_TX) {
				mdelay(10);
				tempword =
					ft1000_read_reg(dev, FT1000_REG_DOORBELL);
				if (tempword & FT1000_DB_DPRAM_TX) {
					mdelay(10);
				}
			}

			if ((tempword & FT1000_DB_DPRAM_TX) == 0) {
				// Put message into Slow Queue
				// Form Pseudo header
				pmsg = (u16 *) info->DSPInfoBlk;
				ppseudo_hdr = (struct pseudo_hdr *) pmsg;
				ppseudo_hdr->length =
					htons(info->DSPInfoBlklen + 4);
				ppseudo_hdr->source = 0x10;
				ppseudo_hdr->destination = 0x20;
				ppseudo_hdr->portdest = 0;
				ppseudo_hdr->portsrc = 0;
				ppseudo_hdr->sh_str_id = 0;
				ppseudo_hdr->control = 0;
				ppseudo_hdr->rsvd1 = 0;
				ppseudo_hdr->rsvd2 = 0;
				ppseudo_hdr->qos_class = 0;
				// Insert slow queue sequence number
				ppseudo_hdr->seq_num = info->squeseqnum++;
				// Insert application id
				ppseudo_hdr->portsrc = 0;
				// Calculate new checksum
				ppseudo_hdr->checksum = *pmsg++;
				for (i = 1; i < 7; i++) {
					ppseudo_hdr->checksum ^= *pmsg++;
				}
				info->DSPInfoBlk[8] = 0x7200;
				info->DSPInfoBlk[9] =
					htons(info->DSPInfoBlklen);
				ft1000_send_cmd (dev, (u16 *)info->DSPInfoBlk, (u16)(info->DSPInfoBlklen+4), 0);
			}

			break;
		case GET_DRV_ERR_RPT_MSG:
			DEBUG(1, "FT1000:drivermsg:Got GET_DRV_ERR_RPT_MSG\n");
			// copy driver error message to dsp
			// allow any outstanding ioctl to finish
			mdelay(10);
			tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
			if (tempword & FT1000_DB_DPRAM_TX) {
				mdelay(10);
				tempword =
					ft1000_read_reg(dev, FT1000_REG_DOORBELL);
				if (tempword & FT1000_DB_DPRAM_TX) {
					mdelay(10);
				}
			}

			if ((tempword & FT1000_DB_DPRAM_TX) == 0) {
				// Put message into Slow Queue
				// Form Pseudo header
				pmsg = (u16 *) & tempbuffer[0];
				ppseudo_hdr = (struct pseudo_hdr *) pmsg;
				ppseudo_hdr->length = htons(0x0012);
				ppseudo_hdr->source = 0x10;
				ppseudo_hdr->destination = 0x20;
				ppseudo_hdr->portdest = 0;
				ppseudo_hdr->portsrc = 0;
				ppseudo_hdr->sh_str_id = 0;
				ppseudo_hdr->control = 0;
				ppseudo_hdr->rsvd1 = 0;
				ppseudo_hdr->rsvd2 = 0;
				ppseudo_hdr->qos_class = 0;
				// Insert slow queue sequence number
				ppseudo_hdr->seq_num = info->squeseqnum++;
				// Insert application id
				ppseudo_hdr->portsrc = 0;
				// Calculate new checksum
				ppseudo_hdr->checksum = *pmsg++;
				for (i=1; i<7; i++) {
					ppseudo_hdr->checksum ^= *pmsg++;
				}
				pmsg = (u16 *) & tempbuffer[16];
				*pmsg++ = htons(RSP_DRV_ERR_RPT_MSG);
				*pmsg++ = htons(0x000e);
				*pmsg++ = htons(info->DSP_TIME[0]);
				*pmsg++ = htons(info->DSP_TIME[1]);
				*pmsg++ = htons(info->DSP_TIME[2]);
				*pmsg++ = htons(info->DSP_TIME[3]);
				convert.byte[0] = info->DspVer[0];
				convert.byte[1] = info->DspVer[1];
				*pmsg++ = convert.wrd;
				convert.byte[0] = info->DspVer[2];
				convert.byte[1] = info->DspVer[3];
				*pmsg++ = convert.wrd;
				*pmsg++ = htons(info->DrvErrNum);

				ft1000_send_cmd (dev, (u16 *)&tempbuffer[0], (u16)(0x0012), 0);
				info->DrvErrNum = 0;
			}

			break;
		default:
			break;
		}
	}
}

//---------------------------------------------------------------------------
//
// Function:   ft1000_parse_dpram_msg
// Description: This function will parse the message received from the DSP
//             via the DPRAM interface.
// Input:
// dev - device structure
// Output:
// status - FAILURE
// SUCCESS
//
//---------------------------------------------------------------------------
// Dispatches on the DSP doorbell bits: replays the DSP session record on an
// ASIC reset request, acknowledges a DSP-initiated ASIC reset, pulls driver
// messages off the slow queue, and performs a conditional card reset.
// Always returns SUCCESS except on the early DSP-ASIC-reset path (which also
// returns SUCCESS after acking).
static int ft1000_parse_dpram_msg(struct net_device *dev)
{
	struct ft1000_info *info = netdev_priv(dev);
	u16 doorbell;
	u16 portid;
	u16 nxtph;
	u16 total_len;
	int i = 0;
	int cnt;
	unsigned long flags;

	doorbell = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
	DEBUG(1, "Doorbell = 0x%x\n", doorbell);

	if (doorbell & FT1000_ASIC_RESET_REQ) {
		// Copy DSP session record from info block into DPRAM so the
		// DSP can restore state after the reset. The dpram_lock
		// serializes access to the shared DPRAM window.
		spin_lock_irqsave(&info->dpram_lock, flags);
		if (info->AsicID == ELECTRABUZZ_ID) {
			ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
					 FT1000_DPRAM_RX_BASE);
			for (i = 0; i < MAX_DSP_SESS_REC; i++) {
				ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA,
						 info->DSPSess.Rec[i]);
			}
		} else {
			// Magnemite uses 32-bit DPRAM accesses, hence half the
			// record count.
			ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
					 FT1000_DPRAM_MAG_RX_BASE);
			for (i = 0; i < MAX_DSP_SESS_REC / 2; i++) {
				outl(info->DSPSess.MagRec[i],
				     dev->base_addr + FT1000_REG_MAG_DPDATA);
			}
		}
		spin_unlock_irqrestore(&info->dpram_lock, flags);

		// clear ASIC RESET request
		ft1000_write_reg(dev, FT1000_REG_DOORBELL,
				 FT1000_ASIC_RESET_REQ);
		DEBUG(1, "Got an ASIC RESET Request\n");
		// tell the DSP it may proceed with the reset
		ft1000_write_reg(dev, FT1000_REG_DOORBELL,
				 FT1000_ASIC_RESET_DSP);

		if (info->AsicID == MAGNEMITE_ID) {
			// Setting MAGNEMITE ASIC to big endian mode
			ft1000_write_reg(dev, FT1000_REG_SUP_CTRL,
					 HOST_INTF_BE);
		}
	}
	if (doorbell & FT1000_DSP_ASIC_RESET) {
		DEBUG(0,
		      "FT1000:ft1000_parse_dpram_msg: Got a dsp ASIC reset message\n");
		// ack the doorbell and give the ASIC time to settle
		ft1000_write_reg(dev, FT1000_REG_DOORBELL,
				 FT1000_DSP_ASIC_RESET);
		udelay(200);
		return SUCCESS;
	}

	if (doorbell & FT1000_DB_DPRAM_RX) {
		DEBUG(1,
		      "FT1000:ft1000_parse_dpram_msg: Got a slow queue message\n");
		nxtph = FT1000_DPRAM_RX_BASE + 2;
		if (info->AsicID == ELECTRABUZZ_ID) {
			total_len =
			    ft1000_read_dpram(dev, FT1000_DPRAM_RX_BASE);
		} else {
			total_len =
			    ntohs(ft1000_read_dpram_mag_16
				  (dev, FT1000_MAG_TOTAL_LEN,
				   FT1000_MAG_TOTAL_LEN_INDX));
		}
		DEBUG(1, "FT1000:ft1000_parse_dpram_msg:total length = %d\n",
		      total_len);
		// Sanity-check the reported length before touching the queue.
		if ((total_len < MAX_CMD_SQSIZE)
		    && (total_len > sizeof(struct pseudo_hdr))) {
			total_len += nxtph;
			cnt = 0;
			// ft1000_read_reg will return a value that needs to be byteswap
			// in order to get DSP_QID_OFFSET.
			if (info->AsicID == ELECTRABUZZ_ID) {
				portid =
				    (ft1000_read_dpram
				     (dev,
				      DSP_QID_OFFSET + FT1000_DPRAM_RX_BASE +
				      2) >> 8) & 0xff;
			} else {
				portid =
				    (ft1000_read_dpram_mag_16
				     (dev, FT1000_MAG_PORT_ID,
				      FT1000_MAG_PORT_ID_INDX) & 0xff);
			}
			DEBUG(1, "DSP_QID = 0x%x\n", portid);
			if (portid == DRIVERID) {
				// We are assuming one driver message from the
				// DSP at a time.
				ft1000_proc_drvmsg(dev);
			}
		}
		// ack the slow-queue doorbell
		ft1000_write_reg(dev, FT1000_REG_DOORBELL, FT1000_DB_DPRAM_RX);
	}

	if (doorbell & FT1000_DB_COND_RESET) {
		// Reset ASIC and DSP; snapshot the DSP timers first for the
		// error report.
		if (info->AsicID == ELECTRABUZZ_ID) {
			info->DSP_TIME[0] =
			    ft1000_read_dpram(dev, FT1000_DSP_TIMER0);
			info->DSP_TIME[1] =
			    ft1000_read_dpram(dev, FT1000_DSP_TIMER1);
			info->DSP_TIME[2] =
			    ft1000_read_dpram(dev, FT1000_DSP_TIMER2);
			info->DSP_TIME[3] =
			    ft1000_read_dpram(dev, FT1000_DSP_TIMER3);
		} else {
			info->DSP_TIME[0] =
			    ft1000_read_dpram_mag_16(dev,
						     FT1000_MAG_DSP_TIMER0,
						     FT1000_MAG_DSP_TIMER0_INDX);
			info->DSP_TIME[1] =
			    ft1000_read_dpram_mag_16(dev,
						     FT1000_MAG_DSP_TIMER1,
						     FT1000_MAG_DSP_TIMER1_INDX);
			info->DSP_TIME[2] =
			    ft1000_read_dpram_mag_16(dev,
						     FT1000_MAG_DSP_TIMER2,
						     FT1000_MAG_DSP_TIMER2_INDX);
			info->DSP_TIME[3] =
			    ft1000_read_dpram_mag_16(dev,
						     FT1000_MAG_DSP_TIMER3,
						     FT1000_MAG_DSP_TIMER3_INDX);
		}
		info->DrvErrNum = DSP_CONDRESET_INFO;
		DEBUG(1, "ft1000_hw:DSP conditional reset requested\n");
		ft1000_reset_card(dev);
		ft1000_write_reg(dev, FT1000_REG_DOORBELL,
				 FT1000_DB_COND_RESET);
	}
	// let's clear any unexpected doorbells from DSP
	doorbell =
	    doorbell & ~(FT1000_DB_DPRAM_RX | FT1000_ASIC_RESET_REQ |
			 FT1000_DB_COND_RESET | 0xff00);
	if (doorbell) {
		DEBUG(1, "Clearing unexpected doorbell = 0x%x\n", doorbell);
		ft1000_write_reg(dev, FT1000_REG_DOORBELL, doorbell);
	}

	return SUCCESS;
}
//---------------------------------------------------------------------------
//
// Function: ft1000_flush_fifo
// Description: This function will flush one packet from the downlink
// FIFO.
// Input:
// dev - device structure
// drv_err - driver error causing the flush fifo
// Output:
// None.
//
//---------------------------------------------------------------------------
// After MAX_PH_ERR accumulated interface errors the card is reset instead of
// flushed. Otherwise the corrupted packet is drained word-by-word until the
// FIFO status reports empty (or a watchdog limit trips, forcing a reset).
static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
{
	struct ft1000_info *info = netdev_priv(dev);
	struct ft1000_pcmcia *pcmcia = info->priv;
	u16 i;
	u32 templong;
	u16 tempword;

	DEBUG(1, "ft1000:ft1000_hw:ft1000_flush_fifo called\n");
	if (pcmcia->PktIntfErr > MAX_PH_ERR) {
		// Too many pseudo-header errors: record DSP timers for the
		// error report and reset the card rather than flushing.
		if (info->AsicID == ELECTRABUZZ_ID) {
			info->DSP_TIME[0] =
			    ft1000_read_dpram(dev, FT1000_DSP_TIMER0);
			info->DSP_TIME[1] =
			    ft1000_read_dpram(dev, FT1000_DSP_TIMER1);
			info->DSP_TIME[2] =
			    ft1000_read_dpram(dev, FT1000_DSP_TIMER2);
			info->DSP_TIME[3] =
			    ft1000_read_dpram(dev, FT1000_DSP_TIMER3);
		} else {
			info->DSP_TIME[0] =
			    ft1000_read_dpram_mag_16(dev,
						     FT1000_MAG_DSP_TIMER0,
						     FT1000_MAG_DSP_TIMER0_INDX);
			info->DSP_TIME[1] =
			    ft1000_read_dpram_mag_16(dev,
						     FT1000_MAG_DSP_TIMER1,
						     FT1000_MAG_DSP_TIMER1_INDX);
			info->DSP_TIME[2] =
			    ft1000_read_dpram_mag_16(dev,
						     FT1000_MAG_DSP_TIMER2,
						     FT1000_MAG_DSP_TIMER2_INDX);
			info->DSP_TIME[3] =
			    ft1000_read_dpram_mag_16(dev,
						     FT1000_MAG_DSP_TIMER3,
						     FT1000_MAG_DSP_TIMER3_INDX);
		}
		info->DrvErrNum = DrvErrNum;
		ft1000_reset_card(dev);
		return;
	} else {
		// Flush corrupted pkt from FIFO
		i = 0;
		do {
			// Drain one word and re-read the FIFO status.
			if (info->AsicID == ELECTRABUZZ_ID) {
				tempword =
				    ft1000_read_reg(dev, FT1000_REG_DFIFO);
				tempword =
				    ft1000_read_reg(dev,
						    FT1000_REG_DFIFO_STAT);
			} else {
				templong =
				    inl(dev->base_addr + FT1000_REG_MAG_DFR);
				tempword =
				    inw(dev->base_addr + FT1000_REG_MAG_DFSR);
			}
			i++;
			// This should never happen unless the ASIC is broken.
			// We must reset to recover.
			if ((i > 2048) || (tempword == 0)) {
				if (info->AsicID == ELECTRABUZZ_ID) {
					info->DSP_TIME[0] =
					    ft1000_read_dpram(dev,
							      FT1000_DSP_TIMER0);
					info->DSP_TIME[1] =
					    ft1000_read_dpram(dev,
							      FT1000_DSP_TIMER1);
					info->DSP_TIME[2] =
					    ft1000_read_dpram(dev,
							      FT1000_DSP_TIMER2);
					info->DSP_TIME[3] =
					    ft1000_read_dpram(dev,
							      FT1000_DSP_TIMER3);
				} else {
					info->DSP_TIME[0] =
					    ft1000_read_dpram_mag_16(dev,
								     FT1000_MAG_DSP_TIMER0,
								     FT1000_MAG_DSP_TIMER0_INDX);
					info->DSP_TIME[1] =
					    ft1000_read_dpram_mag_16(dev,
								     FT1000_MAG_DSP_TIMER1,
								     FT1000_MAG_DSP_TIMER1_INDX);
					info->DSP_TIME[2] =
					    ft1000_read_dpram_mag_16(dev,
								     FT1000_MAG_DSP_TIMER2,
								     FT1000_MAG_DSP_TIMER2_INDX);
					info->DSP_TIME[3] =
					    ft1000_read_dpram_mag_16(dev,
								     FT1000_MAG_DSP_TIMER3,
								     FT1000_MAG_DSP_TIMER3_INDX);
				}
				if (tempword == 0) {
					// Let's check if ASIC reads are still ok by reading the Mask register
					// which is never zero at this point of the code.
					tempword =
					    inw(dev->base_addr +
						FT1000_REG_SUP_IMASK);
					if (tempword == 0) {
						// This indicates that we can not communicate with the ASIC
						info->DrvErrNum =
						    FIFO_FLUSH_BADCNT;
					} else {
						// Let's assume that we really flush the FIFO
						pcmcia->PktIntfErr++;
						return;
					}
				} else {
					info->DrvErrNum = FIFO_FLUSH_MAXLIMIT;
				}
				return;
			}
			tempword = inw(dev->base_addr + FT1000_REG_SUP_STAT);
		} while ((tempword & 0x03) != 0x03);
		if (info->AsicID == ELECTRABUZZ_ID) {
			i++;
			DEBUG(0, "Flushing FIFO complete = %x\n", tempword);
			// Flush last word in FIFO.
			tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
			// Update FIFO counter for DSP
			i = i * 2;
			DEBUG(0, "Flush Data byte count to dsp = %d\n", i);
			info->fifo_cnt += i;
			ft1000_write_dpram(dev, FT1000_FIFO_LEN,
					   info->fifo_cnt);
		} else {
			DEBUG(0, "Flushing FIFO complete = %x\n", tempword);
			// Flush last word in FIFO
			templong = inl(dev->base_addr + FT1000_REG_MAG_DFR);
			tempword = inw(dev->base_addr + FT1000_REG_SUP_STAT);
			DEBUG(0, "FT1000_REG_SUP_STAT = 0x%x\n", tempword);
			tempword = inw(dev->base_addr + FT1000_REG_MAG_DFSR);
			DEBUG(0, "FT1000_REG_MAG_DFSR = 0x%x\n", tempword);
		}
		if (DrvErrNum) {
			pcmcia->PktIntfErr++;
		}
	}
}

//---------------------------------------------------------------------------
//
// Function: ft1000_copy_up_pkt
// Description: This function will pull Flarion packets out of the Downlink
// FIFO and convert it to an ethernet packet. The ethernet packet will
// then be deliver to the TCP/IP stack.
// Input:
// dev - device structure
// Output:
// status - FAILURE
// SUCCESS
//
//---------------------------------------------------------------------------
// The FIFO contents carry a 7-word pseudo header (XOR checksum in word 7)
// followed by the payload; a fake 12-byte MAC header is prepended before
// handing the skb to netif_rx().
static int ft1000_copy_up_pkt(struct net_device *dev)
{
	u16 tempword;
	struct ft1000_info *info = netdev_priv(dev);
	u16 len;
	struct sk_buff *skb;
	u16 i;
	u8 *pbuffer = NULL;
	u8 *ptemp = NULL;
	u16 chksum;
	u32 *ptemplong;
	u32 templong;

	DEBUG(1, "ft1000_copy_up_pkt\n");
	// Read length
	if (info->AsicID == ELECTRABUZZ_ID) {
		tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
		len = tempword;
	} else {
		tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRL);
		len = ntohs(tempword);
	}
	// the length word is also the first word of the checksum
	chksum = tempword;
	DEBUG(1, "Number of Bytes in FIFO = %d\n", len);

	if (len > ENET_MAX_SIZE) {
		DEBUG(0, "size of ethernet packet invalid\n");
		if (info->AsicID == MAGNEMITE_ID) {
			// Read High word to complete 32 bit access
			tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
		}
		ft1000_flush_fifo(dev, DSP_PKTLEN_INFO);
		info->stats.rx_errors++;
		return FAILURE;
	}

	skb = dev_alloc_skb(len + 12 + 2);

	if (skb == NULL) {
		DEBUG(0, "No Network buffers available\n");
		// Read High word to complete 32 bit access
		if (info->AsicID == MAGNEMITE_ID) {
			tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
		}
		ft1000_flush_fifo(dev, 0);
		info->stats.rx_errors++;
		return FAILURE;
	}
	pbuffer = (u8 *) skb_put(skb, len + 12);

	// Pseudo header: XOR-accumulate the six header words, then read the
	// checksum word itself into tempword for comparison below.
	if (info->AsicID == ELECTRABUZZ_ID) {
		for (i = 1; i < 7; i++) {
			tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
			chksum ^= tempword;
		}
		// read checksum value
		tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
	} else {
		// Magnemite: header words alternate between the high and low
		// halves of the 32-bit FIFO register.
		tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
		DEBUG(1, "Pseudo = 0x%x\n", tempword);
		chksum ^= tempword;

		tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRL);
		DEBUG(1, "Pseudo = 0x%x\n", tempword);
		chksum ^= tempword;

		tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
		DEBUG(1, "Pseudo = 0x%x\n", tempword);
		chksum ^= tempword;

		tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRL);
		DEBUG(1, "Pseudo = 0x%x\n", tempword);
		chksum ^= tempword;

		tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
		DEBUG(1, "Pseudo = 0x%x\n", tempword);
		chksum ^= tempword;

		tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRL);
		DEBUG(1, "Pseudo = 0x%x\n", tempword);
		chksum ^= tempword;

		// read checksum value
		tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
		DEBUG(1, "Pseudo = 0x%x\n", tempword);
	}

	if (chksum != tempword) {
		DEBUG(0, "Packet checksum mismatch 0x%x 0x%x\n", chksum,
		      tempword);
		ft1000_flush_fifo(dev, DSP_PKTPHCKSUM_INFO);
		info->stats.rx_errors++;
		kfree_skb(skb);
		return FAILURE;
	}
	//subtract the number of bytes read already
	ptemp = pbuffer;

	// fake MAC address
	*pbuffer++ = dev->dev_addr[0];
	*pbuffer++ = dev->dev_addr[1];
	*pbuffer++ = dev->dev_addr[2];
	*pbuffer++ = dev->dev_addr[3];
	*pbuffer++ = dev->dev_addr[4];
	*pbuffer++ = dev->dev_addr[5];
	*pbuffer++ = 0x00;
	*pbuffer++ = 0x07;
	*pbuffer++ = 0x35;
	*pbuffer++ = 0xff;
	*pbuffer++ = 0xff;
	*pbuffer++ = 0xfe;

	if (info->AsicID == ELECTRABUZZ_ID) {
		for (i = 0; i < len / 2; i++) {
			tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
			*pbuffer++ = (u8) (tempword >> 8);
			*pbuffer++ = (u8) tempword;
			// bail out if the card was yanked mid-transfer
			if (ft1000_chkcard(dev) == false) {
				kfree_skb(skb);
				return FAILURE;
			}
		}

		// Need to read one more word if odd byte
		if (len & 0x0001) {
			tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
			*pbuffer++ = (u8) (tempword >> 8);
		}
	} else {
		ptemplong = (u32 *) pbuffer;
		for (i = 0; i < len / 4; i++) {
			templong = inl(dev->base_addr + FT1000_REG_MAG_DFR);
			DEBUG(1, "Data = 0x%8x\n", templong);
			*ptemplong++ = templong;
		}

		// Need to read one more word if odd align.
		if (len & 0x0003) {
			templong = inl(dev->base_addr + FT1000_REG_MAG_DFR);
			DEBUG(1, "Data = 0x%8x\n", templong);
			*ptemplong++ = templong;
		}

	}

	DEBUG(1, "Data passed to Protocol layer:\n");
	for (i = 0; i < len + 12; i++) {
		DEBUG(1, "Protocol Data: 0x%x\n ", *ptemp++);
	}

	skb->dev = dev;
	skb->protocol = eth_type_trans(skb, dev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	netif_rx(skb);

	info->stats.rx_packets++;
	// Add on 12 bytes for MAC address which was removed
	info->stats.rx_bytes += (len + 12);

	if (info->AsicID == ELECTRABUZZ_ID) {
		// track how many bytes have been read from FIFO - round up to 16 bit word
		tempword = len + 16;
		if (tempword & 0x01)
			tempword++;
		info->fifo_cnt += tempword;
		ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, FT1000_FIFO_LEN);
		ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, info->fifo_cnt);
	}

	return SUCCESS;
}

//---------------------------------------------------------------------------
//
// Function: ft1000_copy_down_pkt
// Description: This function will take an ethernet packet and convert it to
// a Flarion packet prior to sending it to the ASIC Downlink
// FIFO.
// Input: // dev - device structure // packet - address of ethernet packet // len - length of IP packet // Output: // status - FAILURE // SUCCESS // //--------------------------------------------------------------------------- static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len) { struct ft1000_info *info = netdev_priv(dev); struct ft1000_pcmcia *pcmcia = info->priv; union { struct pseudo_hdr blk; u16 buff[sizeof(struct pseudo_hdr) >> 1]; u8 buffc[sizeof(struct pseudo_hdr)]; } pseudo; int i; u32 *plong; DEBUG(1, "ft1000_hw: copy_down_pkt()\n"); // Check if there is room on the FIFO if (len > ft1000_read_fifo_len(dev)) { udelay(10); if (len > ft1000_read_fifo_len(dev)) { udelay(20); } if (len > ft1000_read_fifo_len(dev)) { udelay(20); } if (len > ft1000_read_fifo_len(dev)) { udelay(20); } if (len > ft1000_read_fifo_len(dev)) { udelay(20); } if (len > ft1000_read_fifo_len(dev)) { udelay(20); } if (len > ft1000_read_fifo_len(dev)) { DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:Transmit FIFO is fulli - pkt drop\n"); info->stats.tx_errors++; return SUCCESS; } } // Create pseudo header and send pseudo/ip to hardware if (info->AsicID == ELECTRABUZZ_ID) { pseudo.blk.length = len; } else { pseudo.blk.length = ntohs(len); } pseudo.blk.source = DSPID; // Need to swap to get in correct order pseudo.blk.destination = HOSTID; pseudo.blk.portdest = NETWORKID; // Need to swap to get in correct order pseudo.blk.portsrc = DSPAIRID; pseudo.blk.sh_str_id = 0; pseudo.blk.control = 0; pseudo.blk.rsvd1 = 0; pseudo.blk.seq_num = 0; pseudo.blk.rsvd2 = pcmcia->packetseqnum++; pseudo.blk.qos_class = 0; /* Calculate pseudo header checksum */ pseudo.blk.checksum = pseudo.buff[0]; for (i = 1; i < 7; i++) { pseudo.blk.checksum ^= pseudo.buff[i]; } // Production Mode if (info->AsicID == ELECTRABUZZ_ID) { // copy first word to UFIFO_BEG reg ft1000_write_reg(dev, FT1000_REG_UFIFO_BEG, pseudo.buff[0]); DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 0 BEG = 0x%04x\n", 
pseudo.buff[0]); // copy subsequent words to UFIFO_MID reg ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[1]); DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 1 MID = 0x%04x\n", pseudo.buff[1]); ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[2]); DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 2 MID = 0x%04x\n", pseudo.buff[2]); ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[3]); DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 3 MID = 0x%04x\n", pseudo.buff[3]); ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[4]); DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 4 MID = 0x%04x\n", pseudo.buff[4]); ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[5]); DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 5 MID = 0x%04x\n", pseudo.buff[5]); ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[6]); DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 6 MID = 0x%04x\n", pseudo.buff[6]); ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[7]); DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 7 MID = 0x%04x\n", pseudo.buff[7]); // Write PPP type + IP Packet into Downlink FIFO for (i = 0; i < (len >> 1) - 1; i++) { ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, htons(*packet)); DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data %d MID = 0x%04x\n", i + 8, htons(*packet)); packet++; } // Check for odd byte if (len & 0x0001) { ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, htons(*packet)); DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data MID = 0x%04x\n", htons(*packet)); packet++; ft1000_write_reg(dev, FT1000_REG_UFIFO_END, htons(*packet)); DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data %d MID = 0x%04x\n", i + 8, htons(*packet)); } else { ft1000_write_reg(dev, FT1000_REG_UFIFO_END, htons(*packet)); DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data %d MID = 0x%04x\n", i + 8, htons(*packet)); } } else { outl(*(u32 *) & pseudo.buff[0], dev->base_addr + FT1000_REG_MAG_UFDR); DEBUG(1, "ft1000_copy_down_pkt: Pseudo = 0x%8x\n", *(u32 *) & pseudo.buff[0]); outl(*(u32 *) & 
pseudo.buff[2], dev->base_addr + FT1000_REG_MAG_UFDR); DEBUG(1, "ft1000_copy_down_pkt: Pseudo = 0x%8x\n", *(u32 *) & pseudo.buff[2]); outl(*(u32 *) & pseudo.buff[4], dev->base_addr + FT1000_REG_MAG_UFDR); DEBUG(1, "ft1000_copy_down_pkt: Pseudo = 0x%8x\n", *(u32 *) & pseudo.buff[4]); outl(*(u32 *) & pseudo.buff[6], dev->base_addr + FT1000_REG_MAG_UFDR); DEBUG(1, "ft1000_copy_down_pkt: Pseudo = 0x%8x\n", *(u32 *) & pseudo.buff[6]); plong = (u32 *) packet; // Write PPP type + IP Packet into Downlink FIFO for (i = 0; i < (len >> 2); i++) { outl(*plong++, dev->base_addr + FT1000_REG_MAG_UFDR); } // Check for odd alignment if (len & 0x0003) { DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data = 0x%8x\n", *plong); outl(*plong++, dev->base_addr + FT1000_REG_MAG_UFDR); } outl(1, dev->base_addr + FT1000_REG_MAG_UFER); } info->stats.tx_packets++; // Add 14 bytes for MAC address plus ethernet type info->stats.tx_bytes += (len + 14); return SUCCESS; } static struct net_device_stats *ft1000_stats(struct net_device *dev) { struct ft1000_info *info = netdev_priv(dev); return (&info->stats); } static int ft1000_open(struct net_device *dev) { DEBUG(0, "ft1000_hw: ft1000_open is called\n"); ft1000_reset_card(dev); DEBUG(0, "ft1000_hw: ft1000_open is ended\n"); /* schedule ft1000_hbchk to perform periodic heartbeat checks on DSP and ASIC */ init_timer(&poll_timer); poll_timer.expires = jiffies + (2 * HZ); poll_timer.data = (u_long) dev; add_timer(&poll_timer); DEBUG(0, "ft1000_hw: ft1000_open is ended2\n"); return 0; } static int ft1000_close(struct net_device *dev) { struct ft1000_info *info = netdev_priv(dev); DEBUG(0, "ft1000_hw: ft1000_close()\n"); info->CardReady = 0; del_timer(&poll_timer); if (ft1000_card_present == 1) { DEBUG(0, "Media is down\n"); netif_stop_queue(dev); ft1000_disable_interrupts(dev); ft1000_write_reg(dev, FT1000_REG_RESET, DSP_RESET_BIT); //reset ASIC ft1000_reset_asic(dev); } return 0; } static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev) { 
struct ft1000_info *info = netdev_priv(dev); u8 *pdata; DEBUG(1, "ft1000_hw: ft1000_start_xmit()\n"); if (skb == NULL) { DEBUG(1, "ft1000_hw: ft1000_start_xmit:skb == NULL!!!\n"); return 0; } DEBUG(1, "ft1000_hw: ft1000_start_xmit:length of packet = %d\n", skb->len); pdata = (u8 *) skb->data; if (info->mediastate == 0) { /* Drop packet is mediastate is down */ DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:mediastate is down\n"); return SUCCESS; } if ((skb->len < ENET_HEADER_SIZE) || (skb->len > ENET_MAX_SIZE)) { /* Drop packet which has invalid size */ DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:invalid ethernet length\n"); return SUCCESS; } ft1000_copy_down_pkt(dev, (u16 *) (pdata + ENET_HEADER_SIZE - 2), skb->len - ENET_HEADER_SIZE + 2); dev_kfree_skb(skb); return 0; } static irqreturn_t ft1000_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct ft1000_info *info = netdev_priv(dev); u16 tempword; u16 inttype; int cnt; DEBUG(1, "ft1000_hw: ft1000_interrupt()\n"); if (info->CardReady == 0) { ft1000_disable_interrupts(dev); return IRQ_HANDLED; } if (ft1000_chkcard(dev) == false) { ft1000_disable_interrupts(dev); return IRQ_HANDLED; } ft1000_disable_interrupts(dev); // Read interrupt type inttype = ft1000_read_reg(dev, FT1000_REG_SUP_ISR); // Make sure we process all interrupt before leaving the ISR due to the edge trigger interrupt type while (inttype) { if (inttype & ISR_DOORBELL_PEND) ft1000_parse_dpram_msg(dev); if (inttype & ISR_RCV) { DEBUG(1, "Data in FIFO\n"); cnt = 0; do { // Check if we have packets in the Downlink FIFO if (info->AsicID == ELECTRABUZZ_ID) { tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO_STAT); } else { tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFSR); } if (tempword & 0x1f) { ft1000_copy_up_pkt(dev); } else { break; } cnt++; } while (cnt < MAX_RCV_LOOP); } // clear interrupts tempword = ft1000_read_reg(dev, FT1000_REG_SUP_ISR); DEBUG(1, "ft1000_hw: interrupt status register = 0x%x\n", tempword); 
ft1000_write_reg(dev, FT1000_REG_SUP_ISR, tempword); // Read interrupt type inttype = ft1000_read_reg (dev, FT1000_REG_SUP_ISR); DEBUG(1,"ft1000_hw: interrupt status register after clear = 0x%x\n",inttype); } ft1000_enable_interrupts(dev); return IRQ_HANDLED; } void stop_ft1000_card(struct net_device *dev) { struct ft1000_info *info = netdev_priv(dev); struct prov_record *ptr; // int cnt; DEBUG(0, "ft1000_hw: stop_ft1000_card()\n"); info->CardReady = 0; ft1000_card_present = 0; netif_stop_queue(dev); ft1000_disable_interrupts(dev); // Make sure we free any memory reserve for provisioning while (list_empty(&info->prov_list) == 0) { ptr = list_entry(info->prov_list.next, struct prov_record, list); list_del(&ptr->list); kfree(ptr->pprov_data); kfree(ptr); } kfree(info->priv); if (info->registered) { unregister_netdev(dev); info->registered = 0; } free_irq(dev->irq, dev); release_region(dev->base_addr,256); release_firmware(fw_entry); flarion_ft1000_cnt--; ft1000CleanupProc(dev); } static void ft1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct ft1000_info *ft_info; ft_info = netdev_priv(dev); strlcpy(info->driver, "ft1000", sizeof(info->driver)); snprintf(info->bus_info, sizeof(info->bus_info), "PCMCIA 0x%lx", dev->base_addr); snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d.%d.%d", ft_info->DspVer[0], ft_info->DspVer[1], ft_info->DspVer[2], ft_info->DspVer[3]); } static u32 ft1000_get_link(struct net_device *dev) { struct ft1000_info *info; info = netdev_priv(dev); return info->mediastate; } static const struct ethtool_ops ops = { .get_drvinfo = ft1000_get_drvinfo, .get_link = ft1000_get_link }; struct net_device *init_ft1000_card(struct pcmcia_device *link, void *ft1000_reset) { struct ft1000_info *info; struct ft1000_pcmcia *pcmcia; struct net_device *dev; static const struct net_device_ops ft1000ops = // Slavius 21.10.2009 due to kernel changes { .ndo_open = &ft1000_open, .ndo_stop = &ft1000_close, .ndo_start_xmit = 
&ft1000_start_xmit, .ndo_get_stats = &ft1000_stats, }; DEBUG(1, "ft1000_hw: init_ft1000_card()\n"); DEBUG(1, "ft1000_hw: irq = %d\n", link->irq); DEBUG(1, "ft1000_hw: port = 0x%04x\n", link->resource[0]->start); flarion_ft1000_cnt++; if (flarion_ft1000_cnt > 1) { flarion_ft1000_cnt--; printk(KERN_INFO "ft1000: This driver can not support more than one instance\n"); return NULL; } dev = alloc_etherdev(sizeof(struct ft1000_info)); if (!dev) { printk(KERN_ERR "ft1000: failed to allocate etherdev\n"); return NULL; } SET_NETDEV_DEV(dev, &link->dev); info = netdev_priv(dev); memset(info, 0, sizeof(struct ft1000_info)); DEBUG(1, "address of dev = 0x%8x\n", (u32) dev); DEBUG(1, "address of dev info = 0x%8x\n", (u32) info); DEBUG(0, "device name = %s\n", dev->name); memset(&info->stats, 0, sizeof(struct net_device_stats)); info->priv = kzalloc(sizeof(struct ft1000_pcmcia), GFP_KERNEL); pcmcia = info->priv; pcmcia->link = link; spin_lock_init(&info->dpram_lock); info->DrvErrNum = 0; info->registered = 1; info->ft1000_reset = ft1000_reset; info->mediastate = 0; info->fifo_cnt = 0; info->CardReady = 0; info->DSP_TIME[0] = 0; info->DSP_TIME[1] = 0; info->DSP_TIME[2] = 0; info->DSP_TIME[3] = 0; flarion_ft1000_cnt = 0; INIT_LIST_HEAD(&info->prov_list); info->squeseqnum = 0; // dev->hard_start_xmit = &ft1000_start_xmit; // dev->get_stats = &ft1000_stats; // dev->open = &ft1000_open; // dev->stop = &ft1000_close; dev->netdev_ops = &ft1000ops; // Slavius 21.10.2009 due to kernel changes DEBUG(0, "device name = %s\n", dev->name); dev->irq = link->irq; dev->base_addr = link->resource[0]->start; if (pcmcia_get_mac_from_cis(link, dev)) { printk(KERN_ERR "ft1000: Could not read mac address\n"); goto err_dev; } if (request_irq(dev->irq, ft1000_interrupt, IRQF_SHARED, dev->name, dev)) { printk(KERN_ERR "ft1000: Could not request_irq\n"); goto err_dev; } if (request_region(dev->base_addr, 256, dev->name) == NULL) { printk(KERN_ERR "ft1000: Could not request_region\n"); goto err_irq; } if 
(register_netdev(dev) != 0) { DEBUG(0, "ft1000: Could not register netdev"); goto err_reg; } info->AsicID = ft1000_read_reg(dev, FT1000_REG_ASIC_ID); if (info->AsicID == ELECTRABUZZ_ID) { DEBUG(0, "ft1000_hw: ELECTRABUZZ ASIC\n"); if (request_firmware(&fw_entry, "ft1000.img", &link->dev) != 0) { printk(KERN_INFO "ft1000: Could not open ft1000.img\n"); goto err_unreg; } } else { DEBUG(0, "ft1000_hw: MAGNEMITE ASIC\n"); if (request_firmware(&fw_entry, "ft2000.img", &link->dev) != 0) { printk(KERN_INFO "ft1000: Could not open ft2000.img\n"); goto err_unreg; } } ft1000_enable_interrupts(dev); ft1000InitProc(dev); ft1000_card_present = 1; SET_ETHTOOL_OPS(dev, &ops); printk(KERN_INFO "ft1000: %s: addr 0x%04lx irq %d, MAC addr %pM\n", dev->name, dev->base_addr, dev->irq, dev->dev_addr); return dev; err_unreg: unregister_netdev(dev); err_reg: release_region(dev->base_addr, 256); err_irq: free_irq(dev->irq, dev); err_dev: free_netdev(dev); return NULL; }
gpl-2.0
mastero9017/kernel_n5_racer
drivers/spi/spi-omap-uwire.c
4933
13572
/* * MicroWire interface driver for OMAP * * Copyright 2003 MontaVista Software Inc. <source@mvista.com> * * Ported to 2.6 OMAP uwire interface. * Copyright (C) 2004 Texas Instruments. * * Generalization patches by Juha Yrjola <juha.yrjola@nokia.com> * * Copyright (C) 2005 David Brownell (ported to 2.6 SPI interface) * Copyright (C) 2006 Nokia * * Many updates by Imre Deak <imre.deak@nokia.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <linux/module.h> #include <asm/irq.h> #include <mach/hardware.h> #include <asm/io.h> #include <asm/mach-types.h> #include <plat/mux.h> #include <plat/omap7xx.h> /* OMAP7XX_IO_CONF registers */ /* FIXME address is now a platform device resource, * and irqs should show there too... */ #define UWIRE_BASE_PHYS 0xFFFB3000 /* uWire Registers: */ #define UWIRE_IO_SIZE 0x20 #define UWIRE_TDR 0x00 #define UWIRE_RDR 0x00 #define UWIRE_CSR 0x01 #define UWIRE_SR1 0x02 #define UWIRE_SR2 0x03 #define UWIRE_SR3 0x04 #define UWIRE_SR4 0x05 #define UWIRE_SR5 0x06 /* CSR bits */ #define RDRB (1 << 15) #define CSRB (1 << 14) #define START (1 << 13) #define CS_CMD (1 << 12) /* SR1 or SR2 bits */ #define UWIRE_READ_FALLING_EDGE 0x0001 #define UWIRE_READ_RISING_EDGE 0x0000 #define UWIRE_WRITE_FALLING_EDGE 0x0000 #define UWIRE_WRITE_RISING_EDGE 0x0002 #define UWIRE_CS_ACTIVE_LOW 0x0000 #define UWIRE_CS_ACTIVE_HIGH 0x0004 #define UWIRE_FREQ_DIV_2 0x0000 #define UWIRE_FREQ_DIV_4 0x0008 #define UWIRE_FREQ_DIV_8 0x0010 #define UWIRE_CHK_READY 0x0020 #define UWIRE_CLK_INVERTED 0x0040 struct uwire_spi { struct spi_bitbang bitbang; struct clk *ck; }; struct uwire_state { unsigned bits_per_word; unsigned div1_idx; }; /* REVISIT compile time constant for idx_shift? */ /* * Or, put it in a structure which is used throughout the driver; * that avoids having to issue two loads for each bit of static data. 
 */
/* Register index shift (register stride differs between OMAP variants) and
 * the ioremapped base of the uWire block; both are set in uwire_probe(). */
static unsigned int uwire_idx_shift;
static void __iomem *uwire_base;

/* Write a 16-bit uWire register by index. */
static inline void uwire_write_reg(int idx, u16 val)
{
	__raw_writew(val, uwire_base + (idx << uwire_idx_shift));
}

/* Read a 16-bit uWire register by index. */
static inline u16 uwire_read_reg(int idx)
{
	return __raw_readw(uwire_base + (idx << uwire_idx_shift));
}

/* Program the 6-bit per-chipselect mode field in SR1 (cs 0/1) or SR2
 * (cs 2/3) from the UWIRE_* flag bits. */
static inline void omap_uwire_configure_mode(u8 cs, unsigned long flags)
{
	u16 w, val = 0;
	int shift, reg;

	/* NOTE(review): this XOR is a dead store -- 'val' is unconditionally
	 * overwritten by the next assignment, so UWIRE_CLK_INVERTED has no
	 * effect here beyond its presence in 'flags'. Intended behavior
	 * unclear; confirm against OMAP uWire TRM before changing. */
	if (flags & UWIRE_CLK_INVERTED)
		val ^= 0x03;

	val = flags & 0x3f;

	if (cs & 1)
		shift = 6;
	else
		shift = 0;
	if (cs <= 1)
		reg = UWIRE_SR1;
	else
		reg = UWIRE_SR2;

	w = uwire_read_reg(reg);
	w &= ~(0x3f << shift);
	w |= val << shift;
	uwire_write_reg(reg, w);
}

/* Busy-wait until (CSR & mask) == val, with a 1-second jiffies timeout.
 * Returns 0 on success, -1 on timeout. If might_not_catch is set, give up
 * (successfully) after 64 polls -- used for pulses too short to observe. */
static int wait_uwire_csr_flag(u16 mask, u16 val, int might_not_catch)
{
	u16 w;
	int c = 0;
	unsigned long max_jiffies = jiffies + HZ;

	for (;;) {
		w = uwire_read_reg(UWIRE_CSR);
		if ((w & mask) == val)
			break;
		if (time_after(jiffies, max_jiffies)) {
			printk(KERN_ERR "%s: timeout. reg=%#06x "
					"mask=%#06x val=%#06x\n",
			       __func__, w, mask, val);
			return -1;
		}
		c++;
		if (might_not_catch && c > 64)
			break;
	}
	return 0;
}

/* Set the 2-bit DIV1 clock divider field in SR3. */
static void uwire_set_clk1_div(int div1_idx)
{
	u16 w;

	w = uwire_read_reg(UWIRE_SR3);
	w &= ~(0x03 << 1);
	w |= div1_idx << 1;
	uwire_write_reg(UWIRE_SR3, w);
}

/* spi_bitbang chipselect hook: deselect the old CS when needed, then (on
 * activate) restore this device's cached DIV1 divider, program clock
 * polarity, and drive the requested CS. */
static void uwire_chipselect(struct spi_device *spi, int value)
{
	struct	uwire_state *ust = spi->controller_state;
	u16	w;
	int	old_cs;


	/* the controller must be idle before the CS lines change */
	BUG_ON(wait_uwire_csr_flag(CSRB, 0, 0));

	w = uwire_read_reg(UWIRE_CSR);
	old_cs = (w >> 10) & 0x03;
	if (value == BITBANG_CS_INACTIVE || old_cs != spi->chip_select) {
		/* Deselect this CS, or the previous CS */
		w &= ~CS_CMD;
		uwire_write_reg(UWIRE_CSR, w);
	}
	/* activate specified chipselect */
	if (value == BITBANG_CS_ACTIVE) {
		/* divider is a global setting; re-apply our cached value in
		 * case another uwire device changed it */
		uwire_set_clk1_div(ust->div1_idx);
		/* invert clock? */
		if (spi->mode & SPI_CPOL)
			uwire_write_reg(UWIRE_SR4, 1);
		else
			uwire_write_reg(UWIRE_SR4, 0);

		w = spi->chip_select << 10;
		w |= CS_CMD;
		uwire_write_reg(UWIRE_CSR, w);
	}
}

/* spi_bitbang txrx_bufs hook: half-duplex PIO transfer of up to 16-bit
 * words. Returns the number of bytes transferred, 0 for an empty transfer,
 * -EPERM for (unsupported) full duplex, or -EIO on controller timeout. */
static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t)
{
	struct uwire_state *ust = spi->controller_state;
	unsigned	len = t->len;
	unsigned	bits = ust->bits_per_word;
	unsigned	bytes;
	u16		val, w;
	int		status = 0;

	if (!t->tx_buf && !t->rx_buf)
		return 0;

	/* Microwire doesn't read and write concurrently */
	if (t->tx_buf && t->rx_buf)
		return -EPERM;

	w = spi->chip_select << 10;
	w |= CS_CMD;

	if (t->tx_buf) {
		const u8	*buf = t->tx_buf;

		/* NOTE:  DMA could be used for TX transfers */

		/* write one or two bytes at a time */
		while (len >= 1) {
			/* tx bit 15 is first sent; we byteswap multibyte words
			 * (msb-first) on the way out from memory.
			 */
			val = *buf++;
			if (bits > 8) {
				bytes = 2;
				val |= *buf++ << 8;
			} else
				bytes = 1;
			val <<= 16 - bits;

#ifdef	VERBOSE
			pr_debug("%s: write-%d =%04x\n",
					dev_name(&spi->dev), bits, val);
#endif
			if (wait_uwire_csr_flag(CSRB, 0, 0))
				goto eio;

			uwire_write_reg(UWIRE_TDR, val);

			/* start write */
			val = START | w | (bits << 5);

			uwire_write_reg(UWIRE_CSR, val);
			len -= bytes;

			/* Wait till write actually starts.
			 * This is needed with MPU clock 60+ MHz.
			 * REVISIT: we may not have time to catch it...
			 */
			if (wait_uwire_csr_flag(CSRB, CSRB, 1))
				goto eio;

			status += bytes;
		}

		/* REVISIT:  save this for later to get more i/o overlap */
		if (wait_uwire_csr_flag(CSRB, 0, 0))
			goto eio;

	} else if (t->rx_buf) {
		u8		*buf = t->rx_buf;

		/* read one or two bytes at a time */
		while (len) {
			if (bits > 8) {
				bytes = 2;
			} else
				bytes = 1;

			/* start read */
			val = START | w | (bits << 0);
			uwire_write_reg(UWIRE_CSR, val);
			len -= bytes;

			/* Wait till read actually starts */
			(void) wait_uwire_csr_flag(CSRB, CSRB, 1);

			if (wait_uwire_csr_flag(RDRB | CSRB,
						RDRB, 0))
				goto eio;

			/* rx bit 0 is last received; multibyte words will
			 * be properly byteswapped on the way to memory.
			 */
			val = uwire_read_reg(UWIRE_RDR);
			val &= (1 << bits) - 1;
			*buf++ = (u8) val;
			if (bytes == 2)
				*buf++ = val >> 8;
			status += bytes;
#ifdef	VERBOSE
			pr_debug("%s: read-%d =%04x\n",
					dev_name(&spi->dev), bits, val);
#endif

		}
	}
	return status;
eio:
	return -EIO;
}

/* spi_bitbang setup_transfer hook: validate chipselect and word size, map
 * spi->mode onto UWIRE_* edge/polarity flags, choose DIV1/DIV2 clock
 * dividers to meet the requested rate, and program the per-CS mode field.
 * Returns 0 or a negative errno. */
static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct uwire_state	*ust = spi->controller_state;
	struct uwire_spi	*uwire;
	unsigned		flags = 0;
	unsigned		bits;
	unsigned		hz;
	unsigned long		rate;
	int			div1_idx;
	int			div1;
	int			div2;
	int			status;

	uwire = spi_master_get_devdata(spi->master);

	/* the hardware supports chipselects 0..3 only */
	if (spi->chip_select > 3) {
		pr_debug("%s: cs%d?\n", dev_name(&spi->dev), spi->chip_select);
		status = -ENODEV;
		goto done;
	}

	bits = spi->bits_per_word;
	if (t != NULL && t->bits_per_word)
		bits = t->bits_per_word;

	if (bits > 16) {
		pr_debug("%s: wordsize %d?\n", dev_name(&spi->dev), bits);
		status = -ENODEV;
		goto done;
	}
	ust->bits_per_word = bits;

	/* mode 0..3, clock inverted separately;
	 * standard nCS signaling;
	 * don't treat DI=high as "not ready"
	 */
	if (spi->mode & SPI_CS_HIGH)
		flags |= UWIRE_CS_ACTIVE_HIGH;

	if (spi->mode & SPI_CPOL)
		flags |= UWIRE_CLK_INVERTED;

	switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
	case SPI_MODE_0:
	case SPI_MODE_3:
		flags |= UWIRE_WRITE_FALLING_EDGE | UWIRE_READ_RISING_EDGE;
		break;
	case SPI_MODE_1:
	case SPI_MODE_2:
		flags |= UWIRE_WRITE_RISING_EDGE | UWIRE_READ_FALLING_EDGE;
		break;
	}

	/* assume it's already enabled */
	rate = clk_get_rate(uwire->ck);

	hz = spi->max_speed_hz;
	if (t != NULL && t->speed_hz)
		hz = t->speed_hz;

	if (!hz) {
		pr_debug("%s: zero speed?\n", dev_name(&spi->dev));
		status = -EINVAL;
		goto done;
	}

	/* F_INT = mpu_xor_clk / DIV1 */
	/* pick the smallest DIV1 whose companion DIV2 (<= 8) still reaches
	 * the requested rate */
	for (div1_idx = 0; div1_idx < 4; div1_idx++) {
		switch (div1_idx) {
		case 0:
			div1 = 2;
			break;
		case 1:
			div1 = 4;
			break;
		case 2:
			div1 = 7;
			break;
		default:
		case 3:
			div1 = 10;
			break;
		}
		div2 = (rate / div1 + hz - 1) / hz;
		if (div2 <= 8)
			break;
	}
	if (div1_idx == 4) {
		pr_debug("%s: lowest clock %ld, need %d\n",
			dev_name(&spi->dev), rate / 10 / 8, hz);
		status = -EDOM;
		goto done;
	}

	/* we have to cache this and reset in uwire_chipselect as this is a
	 * global parameter and another uwire device can change it under
	 * us */
	ust->div1_idx = div1_idx;
	uwire_set_clk1_div(div1_idx);

	rate /= div1;

	switch (div2) {
	case 0:
	case 1:
	case 2:
		flags |= UWIRE_FREQ_DIV_2;
		rate /= 2;
		break;
	case 3:
	case 4:
		flags |= UWIRE_FREQ_DIV_4;
		rate /= 4;
		break;
	case 5:
	case 6:
	case 7:
	case 8:
		flags |= UWIRE_FREQ_DIV_8;
		rate /= 8;
		break;
	}
	omap_uwire_configure_mode(spi->chip_select, flags);
	pr_debug("%s: uwire flags %02x, armxor %lu KHz, SCK %lu KHz\n",
			__func__, flags,
			clk_get_rate(uwire->ck) / 1000,
			rate / 1000);
	status = 0;
done:
	return status;
}

/* spi setup hook: allocate per-device state on first call, then run the
 * transfer-independent part of setup_transfer. */
static int uwire_setup(struct spi_device *spi)
{
	struct uwire_state *ust = spi->controller_state;

	if (ust == NULL) {
		ust = kzalloc(sizeof(*ust), GFP_KERNEL);
		if (ust == NULL)
			return -ENOMEM;
		spi->controller_state = ust;
	}

	return uwire_setup_transfer(spi, NULL);
}

/* spi cleanup hook: free the per-device state allocated in uwire_setup. */
static void uwire_cleanup(struct spi_device *spi)
{
	kfree(spi->controller_state);
}

/* Disable the controller and drop clock/master references. */
static void uwire_off(struct uwire_spi *uwire)
{
	uwire_write_reg(UWIRE_SR3, 0);
	clk_disable(uwire->ck);
	clk_put(uwire->ck);
	spi_master_put(uwire->bitbang.master);
}

/* Platform probe: map the controller, acquire its functional clock, and
 * enable the block. (Function continues beyond this excerpt.)
 * NOTE(review): the clk_get() error path appears to leave uwire_base
 * ioremapped -- verify against the full function body. */
static int __init uwire_probe(struct platform_device *pdev)
{
	struct spi_master	*master;
	struct uwire_spi	*uwire;
	int			status;

	master = spi_alloc_master(&pdev->dev, sizeof *uwire);
	if (!master)
		return -ENODEV;

	uwire = spi_master_get_devdata(master);

	uwire_base = ioremap(UWIRE_BASE_PHYS, UWIRE_IO_SIZE);
	if (!uwire_base) {
		dev_dbg(&pdev->dev, "can't ioremap UWIRE\n");
		spi_master_put(master);
		return -ENOMEM;
	}

	dev_set_drvdata(&pdev->dev, uwire);

	uwire->ck = clk_get(&pdev->dev, "fck");
	if (IS_ERR(uwire->ck)) {
		status = PTR_ERR(uwire->ck);
		dev_dbg(&pdev->dev, "no functional clock?\n");
		spi_master_put(master);
		return status;
	}
	clk_enable(uwire->ck);

	if (cpu_is_omap7xx())
		uwire_idx_shift = 1;
	else
		uwire_idx_shift = 2;

	uwire_write_reg(UWIRE_SR3, 1);

	/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; master->flags = SPI_MASTER_HALF_DUPLEX; master->bus_num = 2; /* "official" */ master->num_chipselect = 4; master->setup = uwire_setup; master->cleanup = uwire_cleanup; uwire->bitbang.master = master; uwire->bitbang.chipselect = uwire_chipselect; uwire->bitbang.setup_transfer = uwire_setup_transfer; uwire->bitbang.txrx_bufs = uwire_txrx; status = spi_bitbang_start(&uwire->bitbang); if (status < 0) { uwire_off(uwire); iounmap(uwire_base); } return status; } static int __exit uwire_remove(struct platform_device *pdev) { struct uwire_spi *uwire = dev_get_drvdata(&pdev->dev); int status; // FIXME remove all child devices, somewhere ... status = spi_bitbang_stop(&uwire->bitbang); uwire_off(uwire); iounmap(uwire_base); return status; } /* work with hotplug and coldplug */ MODULE_ALIAS("platform:omap_uwire"); static struct platform_driver uwire_driver = { .driver = { .name = "omap_uwire", .owner = THIS_MODULE, }, .remove = __exit_p(uwire_remove), // suspend ... unuse ck // resume ... use ck }; static int __init omap_uwire_init(void) { /* FIXME move these into the relevant board init code. also, include * H3 support; it uses tsc2101 like H2 (on a different chipselect). */ if (machine_is_omap_h2()) { /* defaults: W21 SDO, U18 SDI, V19 SCL */ omap_cfg_reg(N14_1610_UWIRE_CS0); omap_cfg_reg(N15_1610_UWIRE_CS1); } if (machine_is_omap_perseus2()) { /* configure pins: MPU_UW_nSCS1, MPU_UW_SDO, MPU_UW_SCLK */ int val = omap_readl(OMAP7XX_IO_CONF_9) & ~0x00EEE000; omap_writel(val | 0x00AAA000, OMAP7XX_IO_CONF_9); } return platform_driver_probe(&uwire_driver, uwire_probe); } static void __exit omap_uwire_exit(void) { platform_driver_unregister(&uwire_driver); } subsys_initcall(omap_uwire_init); module_exit(omap_uwire_exit); MODULE_LICENSE("GPL");
gpl-2.0
omerjerk/CodyKernel-hammerhead
sound/pci/mixart/mixart_hwdep.c
5189
19600
/* * Driver for Digigram miXart soundcards * * DSP firmware management * * Copyright (c) 2003 by Digigram <alsa@digigram.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/firmware.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/module.h> #include <asm/io.h> #include <sound/core.h> #include "mixart.h" #include "mixart_mixer.h" #include "mixart_core.h" #include "mixart_hwdep.h" /** * wait for a value on a peudo register, exit with a timeout * * @param mgr pointer to miXart manager structure * @param offset unsigned pseudo_register base + offset of value * @param value value * @param timeout timeout in centisenconds */ static int mixart_wait_nice_for_register_value(struct mixart_mgr *mgr, u32 offset, int is_egal, u32 value, unsigned long timeout) { unsigned long end_time = jiffies + (timeout * HZ / 100); u32 read; do { /* we may take too long time in this loop. * so give controls back to kernel if needed. 
*/ cond_resched(); read = readl_be( MIXART_MEM( mgr, offset )); if(is_egal) { if(read == value) return 0; } else { /* wait for different value */ if(read != value) return 0; } } while ( time_after_eq(end_time, jiffies) ); return -EBUSY; } /* structures needed to upload elf code packets */ struct snd_mixart_elf32_ehdr { u8 e_ident[16]; u16 e_type; u16 e_machine; u32 e_version; u32 e_entry; u32 e_phoff; u32 e_shoff; u32 e_flags; u16 e_ehsize; u16 e_phentsize; u16 e_phnum; u16 e_shentsize; u16 e_shnum; u16 e_shstrndx; }; struct snd_mixart_elf32_phdr { u32 p_type; u32 p_offset; u32 p_vaddr; u32 p_paddr; u32 p_filesz; u32 p_memsz; u32 p_flags; u32 p_align; }; static int mixart_load_elf(struct mixart_mgr *mgr, const struct firmware *dsp ) { char elf32_magic_number[4] = {0x7f,'E','L','F'}; struct snd_mixart_elf32_ehdr *elf_header; int i; elf_header = (struct snd_mixart_elf32_ehdr *)dsp->data; for( i=0; i<4; i++ ) if ( elf32_magic_number[i] != elf_header->e_ident[i] ) return -EINVAL; if( elf_header->e_phoff != 0 ) { struct snd_mixart_elf32_phdr elf_programheader; for( i=0; i < be16_to_cpu(elf_header->e_phnum); i++ ) { u32 pos = be32_to_cpu(elf_header->e_phoff) + (u32)(i * be16_to_cpu(elf_header->e_phentsize)); memcpy( &elf_programheader, dsp->data + pos, sizeof(elf_programheader) ); if(elf_programheader.p_type != 0) { if( elf_programheader.p_filesz != 0 ) { memcpy_toio( MIXART_MEM( mgr, be32_to_cpu(elf_programheader.p_vaddr)), dsp->data + be32_to_cpu( elf_programheader.p_offset ), be32_to_cpu( elf_programheader.p_filesz )); } } } } return 0; } /* * get basic information and init miXart */ /* audio IDs for request to the board */ #define MIXART_FIRST_ANA_AUDIO_ID 0 #define MIXART_FIRST_DIG_AUDIO_ID 8 static int mixart_enum_connectors(struct mixart_mgr *mgr) { u32 k; int err; struct mixart_msg request; struct mixart_enum_connector_resp *connector; struct mixart_audio_info_req *audio_info_req; struct mixart_audio_info_resp *audio_info; connector = kmalloc(sizeof(*connector), 
GFP_KERNEL); audio_info_req = kmalloc(sizeof(*audio_info_req), GFP_KERNEL); audio_info = kmalloc(sizeof(*audio_info), GFP_KERNEL); if (! connector || ! audio_info_req || ! audio_info) { err = -ENOMEM; goto __error; } audio_info_req->line_max_level = MIXART_FLOAT_P_22_0_TO_HEX; audio_info_req->micro_max_level = MIXART_FLOAT_M_20_0_TO_HEX; audio_info_req->cd_max_level = MIXART_FLOAT____0_0_TO_HEX; request.message_id = MSG_SYSTEM_ENUM_PLAY_CONNECTOR; request.uid = (struct mixart_uid){0,0}; /* board num = 0 */ request.data = NULL; request.size = 0; err = snd_mixart_send_msg(mgr, &request, sizeof(*connector), connector); if((err < 0) || (connector->error_code) || (connector->uid_count > MIXART_MAX_PHYS_CONNECTORS)) { snd_printk(KERN_ERR "error MSG_SYSTEM_ENUM_PLAY_CONNECTOR\n"); err = -EINVAL; goto __error; } for(k=0; k < connector->uid_count; k++) { struct mixart_pipe *pipe; if(k < MIXART_FIRST_DIG_AUDIO_ID) { pipe = &mgr->chip[k/2]->pipe_out_ana; } else { pipe = &mgr->chip[(k-MIXART_FIRST_DIG_AUDIO_ID)/2]->pipe_out_dig; } if(k & 1) { pipe->uid_right_connector = connector->uid[k]; /* odd */ } else { pipe->uid_left_connector = connector->uid[k]; /* even */ } /* snd_printk(KERN_DEBUG "playback connector[%d].object_id = %x\n", k, connector->uid[k].object_id); */ /* TODO: really need send_msg MSG_CONNECTOR_GET_AUDIO_INFO for each connector ? perhaps for analog level caps ? 
*/ request.message_id = MSG_CONNECTOR_GET_AUDIO_INFO; request.uid = connector->uid[k]; request.data = audio_info_req; request.size = sizeof(*audio_info_req); err = snd_mixart_send_msg(mgr, &request, sizeof(*audio_info), audio_info); if( err < 0 ) { snd_printk(KERN_ERR "error MSG_CONNECTOR_GET_AUDIO_INFO\n"); goto __error; } /*snd_printk(KERN_DEBUG "play analog_info.analog_level_present = %x\n", audio_info->info.analog_info.analog_level_present);*/ } request.message_id = MSG_SYSTEM_ENUM_RECORD_CONNECTOR; request.uid = (struct mixart_uid){0,0}; /* board num = 0 */ request.data = NULL; request.size = 0; err = snd_mixart_send_msg(mgr, &request, sizeof(*connector), connector); if((err < 0) || (connector->error_code) || (connector->uid_count > MIXART_MAX_PHYS_CONNECTORS)) { snd_printk(KERN_ERR "error MSG_SYSTEM_ENUM_RECORD_CONNECTOR\n"); err = -EINVAL; goto __error; } for(k=0; k < connector->uid_count; k++) { struct mixart_pipe *pipe; if(k < MIXART_FIRST_DIG_AUDIO_ID) { pipe = &mgr->chip[k/2]->pipe_in_ana; } else { pipe = &mgr->chip[(k-MIXART_FIRST_DIG_AUDIO_ID)/2]->pipe_in_dig; } if(k & 1) { pipe->uid_right_connector = connector->uid[k]; /* odd */ } else { pipe->uid_left_connector = connector->uid[k]; /* even */ } /* snd_printk(KERN_DEBUG "capture connector[%d].object_id = %x\n", k, connector->uid[k].object_id); */ /* TODO: really need send_msg MSG_CONNECTOR_GET_AUDIO_INFO for each connector ? perhaps for analog level caps ? 
*/ request.message_id = MSG_CONNECTOR_GET_AUDIO_INFO; request.uid = connector->uid[k]; request.data = audio_info_req; request.size = sizeof(*audio_info_req); err = snd_mixart_send_msg(mgr, &request, sizeof(*audio_info), audio_info); if( err < 0 ) { snd_printk(KERN_ERR "error MSG_CONNECTOR_GET_AUDIO_INFO\n"); goto __error; } /*snd_printk(KERN_DEBUG "rec analog_info.analog_level_present = %x\n", audio_info->info.analog_info.analog_level_present);*/ } err = 0; __error: kfree(connector); kfree(audio_info_req); kfree(audio_info); return err; } static int mixart_enum_physio(struct mixart_mgr *mgr) { u32 k; int err; struct mixart_msg request; struct mixart_uid get_console_mgr; struct mixart_return_uid console_mgr; struct mixart_uid_enumeration phys_io; /* get the uid for the console manager */ get_console_mgr.object_id = 0; get_console_mgr.desc = MSG_CONSOLE_MANAGER | 0; /* cardindex = 0 */ request.message_id = MSG_CONSOLE_GET_CLOCK_UID; request.uid = get_console_mgr; request.data = &get_console_mgr; request.size = sizeof(get_console_mgr); err = snd_mixart_send_msg(mgr, &request, sizeof(console_mgr), &console_mgr); if( (err < 0) || (console_mgr.error_code != 0) ) { snd_printk(KERN_DEBUG "error MSG_CONSOLE_GET_CLOCK_UID : err=%x\n", console_mgr.error_code); return -EINVAL; } /* used later for clock issues ! 
*/ mgr->uid_console_manager = console_mgr.uid; request.message_id = MSG_SYSTEM_ENUM_PHYSICAL_IO; request.uid = (struct mixart_uid){0,0}; request.data = &console_mgr.uid; request.size = sizeof(console_mgr.uid); err = snd_mixart_send_msg(mgr, &request, sizeof(phys_io), &phys_io); if( (err < 0) || ( phys_io.error_code != 0 ) ) { snd_printk(KERN_ERR "error MSG_SYSTEM_ENUM_PHYSICAL_IO err(%x) error_code(%x)\n", err, phys_io.error_code ); return -EINVAL; } /* min 2 phys io per card (analog in + analog out) */ if (phys_io.nb_uid < MIXART_MAX_CARDS * 2) return -EINVAL; for(k=0; k<mgr->num_cards; k++) { mgr->chip[k]->uid_in_analog_physio = phys_io.uid[k]; mgr->chip[k]->uid_out_analog_physio = phys_io.uid[phys_io.nb_uid/2 + k]; } return 0; } static int mixart_first_init(struct mixart_mgr *mgr) { u32 k; int err; struct mixart_msg request; if((err = mixart_enum_connectors(mgr)) < 0) return err; if((err = mixart_enum_physio(mgr)) < 0) return err; /* send a synchro command to card (necessary to do this before first MSG_STREAM_START_STREAM_GRP_PACKET) */ /* though why not here */ request.message_id = MSG_SYSTEM_SEND_SYNCHRO_CMD; request.uid = (struct mixart_uid){0,0}; request.data = NULL; request.size = 0; /* this command has no data. response is a 32 bit status */ err = snd_mixart_send_msg(mgr, &request, sizeof(k), &k); if( (err < 0) || (k != 0) ) { snd_printk(KERN_ERR "error MSG_SYSTEM_SEND_SYNCHRO_CMD\n"); return err == 0 ? 
-EINVAL : err; } return 0; } /* firmware base addresses (when hard coded) */ #define MIXART_MOTHERBOARD_XLX_BASE_ADDRESS 0x00600000 static int mixart_dsp_load(struct mixart_mgr* mgr, int index, const struct firmware *dsp) { int err, card_index; u32 status_xilinx, status_elf, status_daught; u32 val; /* read motherboard xilinx status */ status_xilinx = readl_be( MIXART_MEM( mgr,MIXART_PSEUDOREG_MXLX_STATUS_OFFSET )); /* read elf status */ status_elf = readl_be( MIXART_MEM( mgr,MIXART_PSEUDOREG_ELF_STATUS_OFFSET )); /* read daughterboard xilinx status */ status_daught = readl_be( MIXART_MEM( mgr,MIXART_PSEUDOREG_DXLX_STATUS_OFFSET )); /* motherboard xilinx status 5 will say that the board is performing a reset */ if (status_xilinx == 5) { snd_printk(KERN_ERR "miXart is resetting !\n"); return -EAGAIN; /* try again later */ } switch (index) { case MIXART_MOTHERBOARD_XLX_INDEX: /* xilinx already loaded ? */ if (status_xilinx == 4) { snd_printk(KERN_DEBUG "xilinx is already loaded !\n"); return 0; } /* the status should be 0 == "idle" */ if (status_xilinx != 0) { snd_printk(KERN_ERR "xilinx load error ! status = %d\n", status_xilinx); return -EIO; /* modprob -r may help ? 
*/ } /* check xilinx validity */ if (((u32*)(dsp->data))[0] == 0xffffffff) return -EINVAL; if (dsp->size % 4) return -EINVAL; /* set xilinx status to copying */ writel_be( 1, MIXART_MEM( mgr, MIXART_PSEUDOREG_MXLX_STATUS_OFFSET )); /* setup xilinx base address */ writel_be( MIXART_MOTHERBOARD_XLX_BASE_ADDRESS, MIXART_MEM( mgr,MIXART_PSEUDOREG_MXLX_BASE_ADDR_OFFSET )); /* setup code size for xilinx file */ writel_be( dsp->size, MIXART_MEM( mgr, MIXART_PSEUDOREG_MXLX_SIZE_OFFSET )); /* copy xilinx code */ memcpy_toio( MIXART_MEM( mgr, MIXART_MOTHERBOARD_XLX_BASE_ADDRESS), dsp->data, dsp->size); /* set xilinx status to copy finished */ writel_be( 2, MIXART_MEM( mgr, MIXART_PSEUDOREG_MXLX_STATUS_OFFSET )); /* return, because no further processing needed */ return 0; case MIXART_MOTHERBOARD_ELF_INDEX: if (status_elf == 4) { snd_printk(KERN_DEBUG "elf file already loaded !\n"); return 0; } /* the status should be 0 == "idle" */ if (status_elf != 0) { snd_printk(KERN_ERR "elf load error ! status = %d\n", status_elf); return -EIO; /* modprob -r may help ? 
*/ } /* wait for xilinx status == 4 */ err = mixart_wait_nice_for_register_value( mgr, MIXART_PSEUDOREG_MXLX_STATUS_OFFSET, 1, 4, 500); /* 5sec */ if (err < 0) { snd_printk(KERN_ERR "xilinx was not loaded or " "could not be started\n"); return err; } /* init some data on the card */ writel_be( 0, MIXART_MEM( mgr, MIXART_PSEUDOREG_BOARDNUMBER ) ); /* set miXart boardnumber to 0 */ writel_be( 0, MIXART_MEM( mgr, MIXART_FLOWTABLE_PTR ) ); /* reset pointer to flow table on miXart */ /* set elf status to copying */ writel_be( 1, MIXART_MEM( mgr, MIXART_PSEUDOREG_ELF_STATUS_OFFSET )); /* process the copying of the elf packets */ err = mixart_load_elf( mgr, dsp ); if (err < 0) return err; /* set elf status to copy finished */ writel_be( 2, MIXART_MEM( mgr, MIXART_PSEUDOREG_ELF_STATUS_OFFSET )); /* wait for elf status == 4 */ err = mixart_wait_nice_for_register_value( mgr, MIXART_PSEUDOREG_ELF_STATUS_OFFSET, 1, 4, 300); /* 3sec */ if (err < 0) { snd_printk(KERN_ERR "elf could not be started\n"); return err; } /* miXart waits at this point on the pointer to the flow table */ writel_be( (u32)mgr->flowinfo.addr, MIXART_MEM( mgr, MIXART_FLOWTABLE_PTR ) ); /* give pointer of flow table to miXart */ return 0; /* return, another xilinx file has to be loaded before */ case MIXART_AESEBUBOARD_XLX_INDEX: default: /* elf and xilinx should be loaded */ if (status_elf != 4 || status_xilinx != 4) { printk(KERN_ERR "xilinx or elf not " "successfully loaded\n"); return -EIO; /* modprob -r may help ? 
*/ } /* wait for daughter detection != 0 */ err = mixart_wait_nice_for_register_value( mgr, MIXART_PSEUDOREG_DBRD_PRESENCE_OFFSET, 0, 0, 30); /* 300msec */ if (err < 0) { snd_printk(KERN_ERR "error starting elf file\n"); return err; } /* the board type can now be retrieved */ mgr->board_type = (DAUGHTER_TYPE_MASK & readl_be( MIXART_MEM( mgr, MIXART_PSEUDOREG_DBRD_TYPE_OFFSET))); if (mgr->board_type == MIXART_DAUGHTER_TYPE_NONE) break; /* no daughter board; the file does not have to be loaded, continue after the switch */ /* only if aesebu daughter board presence (elf code must run) */ if (mgr->board_type != MIXART_DAUGHTER_TYPE_AES ) return -EINVAL; /* daughter should be idle */ if (status_daught != 0) { printk(KERN_ERR "daughter load error ! status = %d\n", status_daught); return -EIO; /* modprob -r may help ? */ } /* check daughterboard xilinx validity */ if (((u32*)(dsp->data))[0] == 0xffffffff) return -EINVAL; if (dsp->size % 4) return -EINVAL; /* inform mixart about the size of the file */ writel_be( dsp->size, MIXART_MEM( mgr, MIXART_PSEUDOREG_DXLX_SIZE_OFFSET )); /* set daughterboard status to 1 */ writel_be( 1, MIXART_MEM( mgr, MIXART_PSEUDOREG_DXLX_STATUS_OFFSET )); /* wait for status == 2 */ err = mixart_wait_nice_for_register_value( mgr, MIXART_PSEUDOREG_DXLX_STATUS_OFFSET, 1, 2, 30); /* 300msec */ if (err < 0) { snd_printk(KERN_ERR "daughter board load error\n"); return err; } /* get the address where to write the file */ val = readl_be( MIXART_MEM( mgr, MIXART_PSEUDOREG_DXLX_BASE_ADDR_OFFSET )); if (!val) return -EINVAL; /* copy daughterboard xilinx code */ memcpy_toio( MIXART_MEM( mgr, val), dsp->data, dsp->size); /* set daughterboard status to 4 */ writel_be( 4, MIXART_MEM( mgr, MIXART_PSEUDOREG_DXLX_STATUS_OFFSET )); /* continue with init */ break; } /* end of switch file index*/ /* wait for daughter status == 3 */ err = mixart_wait_nice_for_register_value( mgr, MIXART_PSEUDOREG_DXLX_STATUS_OFFSET, 1, 3, 300); /* 3sec */ if (err < 0) { 
snd_printk(KERN_ERR "daughter board could not be initialised\n"); return err; } /* init mailbox (communication with embedded) */ snd_mixart_init_mailbox(mgr); /* first communication with embedded */ err = mixart_first_init(mgr); if (err < 0) { snd_printk(KERN_ERR "miXart could not be set up\n"); return err; } /* create devices and mixer in accordance with HW options*/ for (card_index = 0; card_index < mgr->num_cards; card_index++) { struct snd_mixart *chip = mgr->chip[card_index]; if ((err = snd_mixart_create_pcm(chip)) < 0) return err; if (card_index == 0) { if ((err = snd_mixart_create_mixer(chip->mgr)) < 0) return err; } if ((err = snd_card_register(chip->card)) < 0) return err; }; snd_printdd("miXart firmware downloaded and successfully set up\n"); return 0; } #if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE) #if !defined(CONFIG_USE_MIXARTLOADER) && !defined(CONFIG_SND_MIXART) /* built-in kernel */ #define SND_MIXART_FW_LOADER /* use the standard firmware loader */ #endif #endif #ifdef SND_MIXART_FW_LOADER int snd_mixart_setup_firmware(struct mixart_mgr *mgr) { static char *fw_files[3] = { "miXart8.xlx", "miXart8.elf", "miXart8AES.xlx" }; char path[32]; const struct firmware *fw_entry; int i, err; for (i = 0; i < 3; i++) { sprintf(path, "mixart/%s", fw_files[i]); if (request_firmware(&fw_entry, path, &mgr->pci->dev)) { snd_printk(KERN_ERR "miXart: can't load firmware %s\n", path); return -ENOENT; } /* fake hwdep dsp record */ err = mixart_dsp_load(mgr, i, fw_entry); release_firmware(fw_entry); if (err < 0) return err; mgr->dsp_loaded |= 1 << i; } return 0; } MODULE_FIRMWARE("mixart/miXart8.xlx"); MODULE_FIRMWARE("mixart/miXart8.elf"); MODULE_FIRMWARE("mixart/miXart8AES.xlx"); #else /* old style firmware loading */ /* miXart hwdep interface id string */ #define SND_MIXART_HWDEP_ID "miXart Loader" static int mixart_hwdep_dsp_status(struct snd_hwdep *hw, struct snd_hwdep_dsp_status *info) { struct mixart_mgr *mgr = hw->private_data; 
strcpy(info->id, "miXart"); info->num_dsps = MIXART_HARDW_FILES_MAX_INDEX; if (mgr->dsp_loaded & (1 << MIXART_MOTHERBOARD_ELF_INDEX)) info->chip_ready = 1; info->version = MIXART_DRIVER_VERSION; return 0; } static int mixart_hwdep_dsp_load(struct snd_hwdep *hw, struct snd_hwdep_dsp_image *dsp) { struct mixart_mgr* mgr = hw->private_data; struct firmware fw; int err; fw.size = dsp->length; fw.data = vmalloc(dsp->length); if (! fw.data) { snd_printk(KERN_ERR "miXart: cannot allocate image size %d\n", (int)dsp->length); return -ENOMEM; } if (copy_from_user((void *) fw.data, dsp->image, dsp->length)) { vfree(fw.data); return -EFAULT; } err = mixart_dsp_load(mgr, dsp->index, &fw); vfree(fw.data); if (err < 0) return err; mgr->dsp_loaded |= 1 << dsp->index; return err; } int snd_mixart_setup_firmware(struct mixart_mgr *mgr) { int err; struct snd_hwdep *hw; /* only create hwdep interface for first cardX (see "index" module parameter)*/ if ((err = snd_hwdep_new(mgr->chip[0]->card, SND_MIXART_HWDEP_ID, 0, &hw)) < 0) return err; hw->iface = SNDRV_HWDEP_IFACE_MIXART; hw->private_data = mgr; hw->ops.dsp_status = mixart_hwdep_dsp_status; hw->ops.dsp_load = mixart_hwdep_dsp_load; hw->exclusive = 1; sprintf(hw->name, SND_MIXART_HWDEP_ID); mgr->dsp_loaded = 0; return snd_card_register(mgr->chip[0]->card); } #endif /* SND_MIXART_FW_LOADER */
gpl-2.0
AdityaSureka/android_kernel_lge_msm8974-1
arch/mips/pnx8550/common/reset.c
8005
1126
/*. * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## * * Reset the PNX8550 board. * */ #include <linux/kernel.h> #include <asm/processor.h> #include <asm/reboot.h> #include <glb.h> void pnx8550_machine_restart(char *command) { PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST; } void pnx8550_machine_halt(void) { while (1) { if (cpu_wait) cpu_wait(); } }
gpl-2.0
Euphoria-OS-Devices/android_kernel_lge_msm8974
net/dsa/tag_dsa.c
8005
4551
/*
 * net/dsa/tag_dsa.c - (Non-ethertype) DSA tagging
 * Copyright (c) 2008-2009 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include "dsa_priv.h"

/* Length of the (non-ethertype) Marvell DSA header, in bytes. */
#define DSA_HLEN	4

/*
 * Transmit path: tag an outgoing frame with a FROM_CPU DSA header so
 * the switch forwards it out of the correct port, then hand it to the
 * master (CPU-facing) network device.
 */
netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	u8 *dsa_header;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/*
	 * Convert the outermost 802.1q tag to a DSA tag for tagged
	 * packets, or insert a DSA tag between the addresses and
	 * the ethertype field for untagged packets.
	 */
	if (skb->protocol == htons(ETH_P_8021Q)) {
		/* Header is rewritten in place; only need a private head. */
		if (skb_cow_head(skb, 0) < 0)
			goto out_free;

		/*
		 * Construct tagged FROM_CPU DSA tag from 802.1q tag.
		 * The 4-byte 802.1q tag sits right after the two MAC
		 * addresses and is overwritten by the DSA header.
		 */
		dsa_header = skb->data + 2 * ETH_ALEN;
		dsa_header[0] = 0x60 | p->parent->index;
		dsa_header[1] = p->port << 3;

		/*
		 * Move CFI field from byte 2 to byte 1.
		 */
		if (dsa_header[2] & 0x10) {
			dsa_header[1] |= 0x01;
			dsa_header[2] &= ~0x10;
		}
	} else {
		/* Need DSA_HLEN bytes of extra headroom for the tag. */
		if (skb_cow_head(skb, DSA_HLEN) < 0)
			goto out_free;
		skb_push(skb, DSA_HLEN);

		/* Shift the MAC addresses down to open a 4-byte gap. */
		memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN);

		/*
		 * Construct untagged FROM_CPU DSA tag.
		 */
		dsa_header = skb->data + 2 * ETH_ALEN;
		dsa_header[0] = 0x40 | p->parent->index;
		dsa_header[1] = p->port << 3;
		dsa_header[2] = 0x00;
		dsa_header[3] = 0x00;
	}

	skb->protocol = htons(ETH_P_DSA);

	skb->dev = p->parent->dst->master_netdev;
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;

out_free:
	/* Frame consumed either way; report OK so the stack won't requeue. */
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

/*
 * Receive path: parse the DSA header on a frame arriving from the
 * switch, map it to the slave netdevice for the originating switch
 * port, strip/convert the tag, and re-inject the frame into the stack.
 */
static int dsa_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct dsa_switch_tree *dst = dev->dsa_ptr;
	struct dsa_switch *ds;
	u8 *dsa_header;
	int source_device;
	int source_port;

	/* Master device not (or no longer) bound to a switch tree. */
	if (unlikely(dst == NULL))
		goto out_drop;

	/* We rewrite the header in place, so get a private copy. */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto out;

	if (unlikely(!pskb_may_pull(skb, DSA_HLEN)))
		goto out_drop;

	/*
	 * The ethertype field is part of the DSA header: the header
	 * starts 2 bytes before the current data pointer (which sits
	 * just past the ethertype after eth_type_trans).
	 */
	dsa_header = skb->data - 2;

	/*
	 * Check that frame type is either TO_CPU or FORWARD.
	 */
	if ((dsa_header[0] & 0xc0) != 0x00 && (dsa_header[0] & 0xc0) != 0xc0)
		goto out_drop;

	/*
	 * Determine source device and port.
	 */
	source_device = dsa_header[0] & 0x1f;
	source_port = (dsa_header[1] >> 3) & 0x1f;

	/*
	 * Check that the source device exists and that the source
	 * port is a registered DSA port.
	 */
	if (source_device >= dst->pd->nr_chips)
		goto out_drop;
	ds = dst->ds[source_device];
	if (source_port >= DSA_MAX_PORTS || ds->ports[source_port] == NULL)
		goto out_drop;

	/*
	 * Convert the DSA header to an 802.1q header if the 'tagged'
	 * bit in the DSA header is set.  If the 'tagged' bit is clear,
	 * delete the DSA header entirely.
	 */
	if (dsa_header[0] & 0x20) {
		u8 new_header[4];

		/*
		 * Insert 802.1q ethertype and copy the VLAN-related
		 * fields, but clear the bit that will hold CFI (since
		 * DSA uses that bit location for another purpose).
		 */
		new_header[0] = (ETH_P_8021Q >> 8) & 0xff;
		new_header[1] = ETH_P_8021Q & 0xff;
		new_header[2] = dsa_header[2] & ~0x10;
		new_header[3] = dsa_header[3];

		/*
		 * Move CFI bit from its place in the DSA header to
		 * its 802.1q-designated place.
		 */
		if (dsa_header[1] & 0x01)
			new_header[2] |= 0x10;

		/*
		 * Update packet checksum if skb is CHECKSUM_COMPLETE:
		 * add the bytes being written, subtract the bytes being
		 * replaced (only bytes 2-3 of the header change).
		 */
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			__wsum c = skb->csum;
			c = csum_add(c, csum_partial(new_header + 2, 2, 0));
			c = csum_sub(c, csum_partial(dsa_header + 2, 2, 0));
			skb->csum = c;
		}

		memcpy(dsa_header, new_header, DSA_HLEN);
	} else {
		/*
		 * Remove DSA tag and update checksum.
		 */
		skb_pull_rcsum(skb, DSA_HLEN);
		/* Close the 4-byte gap by moving the MAC addresses up. */
		memmove(skb->data - ETH_HLEN,
			skb->data - ETH_HLEN - DSA_HLEN,
			2 * ETH_ALEN);
	}

	/* Deliver via the slave device for the originating switch port. */
	skb->dev = ds->ports[source_port];
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	skb->dev->stats.rx_packets++;
	skb->dev->stats.rx_bytes += skb->len;

	netif_receive_skb(skb);

	return 0;

out_drop:
	kfree_skb(skb);
out:
	return 0;
}

/* Handler registration for the (non-ethertype) DSA protocol number. */
struct packet_type dsa_packet_type __read_mostly = {
	.type	= cpu_to_be16(ETH_P_DSA),
	.func	= dsa_rcv,
};
gpl-2.0
Michael-Pizzileo/lichee-3.0.8-leaked
net/tipc/addr.c
8005
3318
/*
 * net/tipc/addr.c: TIPC address utility routines
 *
 * Copyright (c) 2000-2006, Ericsson AB
 * Copyright (c) 2004-2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "addr.h"

/**
 * tipc_addr_domain_valid - validates a network domain address
 *
 * Accepts <Z.C.N>, <Z.C.0>, <Z.0.0>, and <0.0.0>,
 * where Z, C, and N are non-zero.
 *
 * Returns 1 if domain address is valid, otherwise 0
 */
int tipc_addr_domain_valid(u32 addr)
{
	u32 node = tipc_node(addr);
	u32 cluster = tipc_cluster(addr);
	u32 zone = tipc_zone(addr);

	/* A non-zero node field needs both its zone and cluster set */
	if (node)
		return zone && cluster;
	/* A non-zero cluster field needs its zone set */
	if (cluster)
		return zone != 0;
	/* <Z.0.0> and <0.0.0> are always acceptable */
	return 1;
}

/**
 * tipc_addr_node_valid - validates a proposed network address for this node
 *
 * Accepts <Z.C.N>, where Z, C, and N are non-zero.
 *
 * Returns 1 if address can be used, otherwise 0
 */
int tipc_addr_node_valid(u32 addr)
{
	/* Must name a specific node, and be well-formed as a domain */
	if (!tipc_node(addr))
		return 0;
	return tipc_addr_domain_valid(addr);
}

/**
 * tipc_in_scope - test whether an address falls within a lookup domain
 *
 * A zero domain matches everything; otherwise the domain must equal
 * the address itself, its cluster identifier <Z.C.0>, or its zone
 * identifier <Z.0.0>.
 */
int tipc_in_scope(u32 domain, u32 addr)
{
	return !domain ||
	       domain == addr ||
	       domain == tipc_cluster_mask(addr) ||	/* domain <Z.C.0> */
	       domain == tipc_zone_mask(addr);		/* domain <Z.0.0> */
}

/**
 * tipc_addr_scope - convert message lookup domain to a 2-bit scope value
 */
int tipc_addr_scope(u32 domain)
{
	if (likely(!domain))
		return TIPC_ZONE_SCOPE;

	/* Most specific non-zero field determines the scope */
	if (tipc_node(domain))
		return TIPC_NODE_SCOPE;
	return tipc_cluster(domain) ? TIPC_CLUSTER_SCOPE : TIPC_ZONE_SCOPE;
}

/**
 * tipc_addr_string_fill - render an address as "<Z.C.N>" into a buffer
 *
 * The caller-supplied buffer must hold at least 16 bytes; output is
 * always NUL-terminated.  Returns the buffer for call chaining.
 */
char *tipc_addr_string_fill(char *string, u32 addr)
{
	snprintf(string, 16, "<%u.%u.%u>",
		 tipc_zone(addr), tipc_cluster(addr), tipc_node(addr));
	return string;
}
gpl-2.0
coolshou/htc_k2u_kernel-3.4.10
net/tipc/node_subscr.c
8005
3292
/* * net/tipc/node_subscr.c: TIPC "node down" subscription handling * * Copyright (c) 1995-2006, Ericsson AB * Copyright (c) 2005, 2010-2011, Wind River Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "core.h" #include "node_subscr.h" #include "node.h" /** * tipc_nodesub_subscribe - create "node down" subscription for specified node */ void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr, void *usr_handle, net_ev_handler handle_down) { if (addr == tipc_own_addr) { node_sub->node = NULL; return; } node_sub->node = tipc_node_find(addr); if (!node_sub->node) { warn("Node subscription rejected, unknown node 0x%x\n", addr); return; } node_sub->handle_node_down = handle_down; node_sub->usr_handle = usr_handle; tipc_node_lock(node_sub->node); list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub); tipc_node_unlock(node_sub->node); } /** * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any) */ void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub) { if (!node_sub->node) return; tipc_node_lock(node_sub->node); list_del_init(&node_sub->nodesub_list); tipc_node_unlock(node_sub->node); } /** * tipc_nodesub_notify - notify subscribers that a node is unreachable * * Note: node is locked by caller */ void tipc_nodesub_notify(struct tipc_node *node) { struct tipc_node_subscr *ns; list_for_each_entry(ns, &node->nsub, nodesub_list) { if (ns->handle_node_down) { tipc_k_signal((Handler)ns->handle_node_down, (unsigned long)ns->usr_handle); ns->handle_node_down = NULL; } } }
gpl-2.0
Jovy23/M919_Kernel
net/tipc/node_subscr.c
8005
3292
/* * net/tipc/node_subscr.c: TIPC "node down" subscription handling * * Copyright (c) 1995-2006, Ericsson AB * Copyright (c) 2005, 2010-2011, Wind River Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/

#include "core.h"
#include "node_subscr.h"
#include "node.h"

/**
 * tipc_nodesub_subscribe - create "node down" subscription for specified node
 *
 * Registers @handle_down to be invoked (with @usr_handle as argument) when
 * the node identified by @addr becomes unreachable.  Subscribing to the
 * node's own address is a no-op; an unknown address is rejected with a
 * warning and leaves node_sub->node NULL.
 */
void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
			    void *usr_handle, net_ev_handler handle_down)
{
	if (addr == tipc_own_addr) {
		/* The local node cannot go "down" relative to itself */
		node_sub->node = NULL;
		return;
	}

	node_sub->node = tipc_node_find(addr);
	if (!node_sub->node) {
		warn("Node subscription rejected, unknown node 0x%x\n", addr);
		return;
	}
	node_sub->handle_node_down = handle_down;
	node_sub->usr_handle = usr_handle;

	/* Node lock protects the node's subscription list */
	tipc_node_lock(node_sub->node);
	list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub);
	tipc_node_unlock(node_sub->node);
}

/**
 * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any)
 *
 * Safe to call on a subscription that was never registered
 * (node_sub->node == NULL).
 */
void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
{
	if (!node_sub->node)
		return;

	tipc_node_lock(node_sub->node);
	/* list_del_init() so a repeated unsubscribe remains harmless */
	list_del_init(&node_sub->nodesub_list);
	tipc_node_unlock(node_sub->node);
}

/**
 * tipc_nodesub_notify - notify subscribers that a node is unreachable
 *
 * Note: node is locked by caller
 *
 * Each handler is queued for deferred execution via tipc_k_signal() and
 * then cleared, so a subscription fires at most once.
 */
void tipc_nodesub_notify(struct tipc_node *node)
{
	struct tipc_node_subscr *ns;

	list_for_each_entry(ns, &node->nsub, nodesub_list) {
		if (ns->handle_node_down) {
			tipc_k_signal((Handler)ns->handle_node_down,
				      (unsigned long)ns->usr_handle);
			ns->handle_node_down = NULL;
		}
	}
}
gpl-2.0
Kra1o5/android_kernel_bq_curie2qc
drivers/leds/ledtrig-default-on.c
9797
1071
/* * LED Kernel Default ON Trigger * * Copyright 2008 Nick Forbes <nick.forbes@incepta.com> * * Based on Richard Purdie's ledtrig-timer.c. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/leds.h> #include "leds.h" static void defon_trig_activate(struct led_classdev *led_cdev) { led_set_brightness(led_cdev, led_cdev->max_brightness); } static struct led_trigger defon_led_trigger = { .name = "default-on", .activate = defon_trig_activate, }; static int __init defon_trig_init(void) { return led_trigger_register(&defon_led_trigger); } static void __exit defon_trig_exit(void) { led_trigger_unregister(&defon_led_trigger); } module_init(defon_trig_init); module_exit(defon_trig_exit); MODULE_AUTHOR("Nick Forbes <nick.forbes@incepta.com>"); MODULE_DESCRIPTION("Default-ON LED trigger"); MODULE_LICENSE("GPL");
gpl-2.0
arunthomas/linux
arch/parisc/kernel/topology.c
13893
1142
/*
 * arch/parisc/kernel/topology.c - Populate sysfs with topology information
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cache.h>

/* One 'struct cpu' device per possible CPU, registered with sysfs below */
static DEFINE_PER_CPU(struct cpu, cpu_devices);

/*
 * topology_init - register a CPU device for every present CPU
 *
 * register_cpu()'s return value is deliberately ignored here; failure to
 * register a CPU device only affects sysfs visibility, not operation.
 */
static int __init topology_init(void)
{
	int num;

	for_each_present_cpu(num) {
		register_cpu(&per_cpu(cpu_devices, num), num);
	}
	return 0;
}

subsys_initcall(topology_init);
gpl-2.0
Filmetrics/smartinstr-linux-kernel
arch/parisc/math-emu/sfcmp.c
14149
4514
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC * * File: * @(#) pa/spmath/sfcmp.c $Revision: 1.1 $ * * Purpose: * sgl_cmp: compare two values * * External Interfaces: * sgl_fcmp(leftptr, rightptr, cond, status) * * Internal Interfaces: * * Theory: * <<please update with a overview of the operation of this file>> * * END_DESC */ #include "float.h" #include "sgl_float.h" /* * sgl_cmp: compare two values */ int sgl_fcmp (sgl_floating_point * leftptr, sgl_floating_point * rightptr, unsigned int cond, unsigned int *status) /* The predicate to be tested */ { register unsigned int left, right; register int xorresult; /* Create local copies of the numbers */ left = *leftptr; right = *rightptr; /* * Test for NaN */ if( (Sgl_exponent(left) == SGL_INFINITY_EXPONENT) || (Sgl_exponent(right) == SGL_INFINITY_EXPONENT) ) { /* Check if a NaN is involved. 
Signal an invalid exception when * comparing a signaling NaN or when comparing quiet NaNs and the * low bit of the condition is set */ if( ( (Sgl_exponent(left) == SGL_INFINITY_EXPONENT) && Sgl_isnotzero_mantissa(left) && (Exception(cond) || Sgl_isone_signaling(left))) || ( (Sgl_exponent(right) == SGL_INFINITY_EXPONENT) && Sgl_isnotzero_mantissa(right) && (Exception(cond) || Sgl_isone_signaling(right)) ) ) { if( Is_invalidtrap_enabled() ) { Set_status_cbit(Unordered(cond)); return(INVALIDEXCEPTION); } else Set_invalidflag(); Set_status_cbit(Unordered(cond)); return(NOEXCEPTION); } /* All the exceptional conditions are handled, now special case NaN compares */ else if( ((Sgl_exponent(left) == SGL_INFINITY_EXPONENT) && Sgl_isnotzero_mantissa(left)) || ((Sgl_exponent(right) == SGL_INFINITY_EXPONENT) && Sgl_isnotzero_mantissa(right)) ) { /* NaNs always compare unordered. */ Set_status_cbit(Unordered(cond)); return(NOEXCEPTION); } /* infinities will drop down to the normal compare mechanisms */ } /* First compare for unequal signs => less or greater or * special equal case */ Sgl_xortointp1(left,right,xorresult); if( xorresult < 0 ) { /* left negative => less, left positive => greater. * equal is possible if both operands are zeros. */ if( Sgl_iszero_exponentmantissa(left) && Sgl_iszero_exponentmantissa(right) ) { Set_status_cbit(Equal(cond)); } else if( Sgl_isone_sign(left) ) { Set_status_cbit(Lessthan(cond)); } else { Set_status_cbit(Greaterthan(cond)); } } /* Signs are the same. Treat negative numbers separately * from the positives because of the reversed sense. */ else if( Sgl_all(left) == Sgl_all(right) ) { Set_status_cbit(Equal(cond)); } else if( Sgl_iszero_sign(left) ) { /* Positive compare */ if( Sgl_all(left) < Sgl_all(right) ) { Set_status_cbit(Lessthan(cond)); } else { Set_status_cbit(Greaterthan(cond)); } } else { /* Negative compare. Signed or unsigned compares * both work the same. That distinction is only * important when the sign bits differ. 
*/ if( Sgl_all(left) > Sgl_all(right) ) { Set_status_cbit(Lessthan(cond)); } else { Set_status_cbit(Greaterthan(cond)); } } return(NOEXCEPTION); }
gpl-2.0
MattCrystal/clucking-goose
drivers/usb/host/whci/hw.c
14661
2864
/* * Wireless Host Controller (WHC) hardware access helpers. * * Copyright (C) 2007 Cambridge Silicon Radio Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/dma-mapping.h> #include <linux/uwb/umc.h> #include "../../wusbcore/wusbhc.h" #include "whcd.h" void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val) { unsigned long flags; u32 cmd; spin_lock_irqsave(&whc->lock, flags); cmd = le_readl(whc->base + WUSBCMD); cmd = (cmd & ~mask) | val; le_writel(cmd, whc->base + WUSBCMD); spin_unlock_irqrestore(&whc->lock, flags); } /** * whc_do_gencmd - start a generic command via the WUSBGENCMDSTS register * @whc: the WHCI HC * @cmd: command to start. * @params: parameters for the command (the WUSBGENCMDPARAMS register value). * @addr: pointer to any data for the command (may be NULL). * @len: length of the data (if any). */ int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len) { unsigned long flags; dma_addr_t dma_addr; int t; int ret = 0; mutex_lock(&whc->mutex); /* Wait for previous command to complete. 
*/ t = wait_event_timeout(whc->cmd_wq, (le_readl(whc->base + WUSBGENCMDSTS) & WUSBGENCMDSTS_ACTIVE) == 0, WHC_GENCMD_TIMEOUT_MS); if (t == 0) { dev_err(&whc->umc->dev, "generic command timeout (%04x/%04x)\n", le_readl(whc->base + WUSBGENCMDSTS), le_readl(whc->base + WUSBGENCMDPARAMS)); ret = -ETIMEDOUT; goto out; } if (addr) { memcpy(whc->gen_cmd_buf, addr, len); dma_addr = whc->gen_cmd_buf_dma; } else dma_addr = 0; /* Poke registers to start cmd. */ spin_lock_irqsave(&whc->lock, flags); le_writel(params, whc->base + WUSBGENCMDPARAMS); le_writeq(dma_addr, whc->base + WUSBGENADDR); le_writel(WUSBGENCMDSTS_ACTIVE | WUSBGENCMDSTS_IOC | cmd, whc->base + WUSBGENCMDSTS); spin_unlock_irqrestore(&whc->lock, flags); out: mutex_unlock(&whc->mutex); return ret; } /** * whc_hw_error - recover from a hardware error * @whc: the WHCI HC that broke. * @reason: a description of the failure. * * Recover from broken hardware with a full reset. */ void whc_hw_error(struct whc *whc, const char *reason) { struct wusbhc *wusbhc = &whc->wusbhc; dev_err(&whc->umc->dev, "hardware error: %s\n", reason); wusbhc_reset_all(wusbhc); }
gpl-2.0
davidmueller13/xbmc
lib/timidity/libunimod/load_669.c
70
9603
/*	MikMod sound library
	(c) 1998, 1999 Miodrag Vallat and others - see file AUTHORS for
	complete list.

	This library is free software; you can redistribute it and/or modify
	it under the terms of the GNU Library General Public License as
	published by the Free Software Foundation; either version 2 of
	the License, or (at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
	GNU Library General Public License for more details.

	You should have received a copy of the GNU Library General Public
	License along with this library; if not, write to the Free Software
	Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
	02111-1307, USA.
*/

/*==============================================================================

  $Id: load_669.c,v 1.30 1999/10/25 16:31:41 miod Exp $

  Composer 669 module loader

==============================================================================*/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <string.h>

#include "unimod_priv.h"

/*========== Module structure */

/* header */
typedef struct S69HEADER {
	UBYTE marker[2];	/* "if" (Composer 669) or "JN" (Extended 669) */
	CHAR message[108];	/* 3 lines of 36 characters each */
	UBYTE nos;		/* number of samples (<= 64) */
	UBYTE nop;		/* number of patterns (<= 128) */
	UBYTE looporder;	/* order to restart at when the song loops */
	UBYTE orders[0x80];	/* order list; 0xff terminates */
	UBYTE tempos[0x80];	/* per-pattern tempo (1..32) */
	UBYTE breaks[0x80];	/* per-pattern last row (0..0x3f) */
} S69HEADER;

/* sample information */
typedef struct S69SAMPLE {
	CHAR filename[13];
	SLONG length;
	SLONG loopbeg;
	SLONG loopend;
} S69SAMPLE;

/* encoded note */
typedef struct S69NOTE {
	UBYTE a, b, c;		/* note/instrument, instrument/volume, effect */
} S69NOTE;

/*========== Loader variables */

/* current pattern */
static S69NOTE *s69pat = NULL;
/* Module header */
static S69HEADER *mh = NULL;

/* file type identification */
static CHAR *S69_Version[] = {
	"Composer 669",
	"Extended 669"
};

/*========== Loader code */

/*
 * S69_Test - probe whether modreader holds a 669 module
 *
 * Checks the 2-byte id and then sanity-checks the header tables;
 * returns 1 on a plausible 669 file, 0 otherwise.
 */
BOOL S69_Test(void)
{
	UBYTE buf[0x80];

	if (!_mm_read_UBYTES(buf, 2, modreader))
		return 0;
	/* look for id */
	if (!memcmp(buf, "if", 2) || !memcmp(buf, "JN", 2)) {
		int i;

		/* skip song message */
		_mm_fseek(modreader, 108, SEEK_CUR);
		/* sanity checks */
		if (_mm_read_UBYTE(modreader) > 64)
			return 0;
		if (_mm_read_UBYTE(modreader) > 128)
			return 0;
		if (_mm_read_UBYTE(modreader) > 127)
			return 0;
		/* check order table */
		if (!_mm_read_UBYTES(buf, 0x80, modreader))
			return 0;
		for (i = 0; i < 0x80; i++)
			if ((buf[i] >= 0x80) && (buf[i] != 0xff))
				return 0;
		/* check tempos table */
		if (!_mm_read_UBYTES(buf, 0x80, modreader))
			return 0;
		for (i = 0; i < 0x80; i++)
			if ((!buf[i]) || (buf[i] > 32))
				return 0;
		/* check pattern length table */
		if (!_mm_read_UBYTES(buf, 0x80, modreader))
			return 0;
		for (i = 0; i < 0x80; i++)
			if (buf[i] > 0x3f)
				return 0;
	} else
		return 0;

	return 1;
}

/* S69_Init - allocate the loader's scratch pattern and header buffers */
BOOL S69_Init(void)
{
	if (!(s69pat = (S69NOTE *)_mm_malloc(64 * 8 * sizeof(S69NOTE))))
		return 0;
	if (!(mh = (S69HEADER *)_mm_malloc(sizeof(S69HEADER))))
		return 0;

	return 1;
}

/* S69_Cleanup - release what S69_Init allocated */
void S69_Cleanup(void)
{
	_mm_free(s69pat);
	_mm_free(mh);
}

/*
 * S69_LoadPatterns - read every pattern and translate it to UniMod tracks
 *
 * Each 669 pattern is 64 rows x 8 channels of 3-byte cells; only rows up
 * to the pattern's break location are translated.  Returns 1 on success.
 */
static BOOL S69_LoadPatterns(void)
{
	int track, row, channel;
	UBYTE note, inst, vol, effect, lastfx, lastval;
	S69NOTE *cur;
	int tracks = 0;

	if (!AllocPatterns())
		return 0;
	if (!AllocTracks())
		return 0;

	for (track = 0; track < of.numpat; track++) {
		/* set pattern break locations */
		of.pattrows[track] = mh->breaks[track] + 1;

		/* load the 669 pattern */
		cur = s69pat;
		for (row = 0; row < 64; row++) {
			for (channel = 0; channel < 8; channel++, cur++) {
				cur->a = _mm_read_UBYTE(modreader);
				cur->b = _mm_read_UBYTE(modreader);
				cur->c = _mm_read_UBYTE(modreader);
			}
		}
		if (_mm_eof(modreader)) {
			_mm_errno = MMERR_LOADING_PATTERN;
			return 0;
		}

		/* translate the pattern */
		for (channel = 0; channel < 8; channel++) {
			UniReset();
			/* set pattern tempo */
			UniPTEffect(0xf, 78);
			UniPTEffect(0xf, mh->tempos[track]);

			lastfx = 0xff, lastval = 0;
			for (row = 0; row <= mh->breaks[track]; row++) {
				int a, b, c;

				/* fetch the encoded note */
				a = s69pat[(row * 8) + channel].a;
				b = s69pat[(row * 8) + channel].b;
				c = s69pat[(row * 8) + channel].c;

				/* decode it */
				note = a >> 2;
				inst = ((a & 0x3) << 4) | ((b & 0xf0) >> 4);
				vol = b & 0xf;

				/* a == 0xff: empty cell; a == 0xfe: volume change only */
				if (a < 0xff) {
					if (a < 0xfe) {
						UniInstrument(inst);
						UniNote(note + 2 * OCTAVE);
						lastfx = 0xff;	/* reset background effect memory */
					}
					UniPTEffect(0xc, vol << 2);
				}

				/* c == 0xff continues the remembered effect, if any */
				if ((c != 0xff) || (lastfx != 0xff)) {
					if (c == 0xff)
						c = lastfx, effect = lastval;
					else
						effect = c & 0xf;

					switch (c >> 4) {
					case 0:	/* porta up */
						UniPTEffect(0x1, effect);
						lastfx = c, lastval = effect;
						break;
					case 1:	/* porta down */
						UniPTEffect(0x2, effect);
						lastfx = c, lastval = effect;
						break;
					case 2:	/* porta to note */
						UniPTEffect(0x3, effect);
						lastfx = c, lastval = effect;
						break;
					case 3:	/* frequency adjust */
						/* DMP converts this effect to S3M FF1. Why not ? */
						UniEffect(UNI_S3MEFFECTF, 0xf0 | effect);
						break;
					case 4:	/* vibrato */
						UniPTEffect(0x4, effect);
						lastfx = c, lastval = effect;
						break;
					case 5:	/* set speed */
						if (effect)
							UniPTEffect(0xf, effect);
						else if (mh->marker[0] != 0x69) {
#ifdef MIKMOD_DEBUG
							fprintf(stderr, "\r669: unsupported super fast tempo at pat=%d row=%d chan=%d\n",
								track, row, channel);
#endif
						}
						break;
					}
				}
				UniNewline();
			}
			if (!(of.tracks[tracks++] = UniDup()))
				return 0;
		}
	}

	return 1;
}

/*
 * S69_Load - load the whole module into the global 'of' structure
 *
 * Reads and re-validates the header, fills in module-wide fields, builds
 * the song comment from the 3-line message, loads sample headers and
 * finally the patterns.  Returns 1 on success, 0 on failure.
 */
BOOL S69_Load(BOOL curious)
{
	int i;
	SAMPLE *current;
	S69SAMPLE sample;

	/* module header */
	_mm_read_UBYTES(mh->marker, 2, modreader);
	_mm_read_UBYTES(mh->message, 108, modreader);
	mh->nos = _mm_read_UBYTE(modreader);
	mh->nop = _mm_read_UBYTE(modreader);
	mh->looporder = _mm_read_UBYTE(modreader);
	_mm_read_UBYTES(mh->orders, 0x80, modreader);
	for (i = 0; i < 0x80; i++)
		if ((mh->orders[i] >= 0x80) && (mh->orders[i] != 0xff)) {
			_mm_errno = MMERR_NOT_A_MODULE;
			return 1;
		}
	_mm_read_UBYTES(mh->tempos, 0x80, modreader);
	for (i = 0; i < 0x80; i++)
		if ((!mh->tempos[i]) || (mh->tempos[i] > 32)) {
			_mm_errno = MMERR_NOT_A_MODULE;
			return 1;
		}
	_mm_read_UBYTES(mh->breaks, 0x80, modreader);
	for (i = 0; i < 0x80; i++)
		if (mh->breaks[i] > 0x3f) {
			_mm_errno = MMERR_NOT_A_MODULE;
			return 1;
		}

	/* set module variables */
	of.initspeed = 4;
	of.inittempo = 78;
	of.songname = DupStr(mh->message, 36, 1);
	/* memcmp()==0 selects "Extended 669" (index 1) for the "JN" marker */
	of.modtype = strdup(S69_Version[memcmp(mh->marker, "JN", 2) == 0]);
	of.numchn = 8;
	of.numpat = mh->nop;
	of.numins = of.numsmp = mh->nos;
	of.numtrk = of.numchn * of.numpat;
	of.flags = UF_XMPERIODS | UF_LINEAR;

	/* trim trailing spaces from each of the 3 message lines */
	for (i = 35; (i >= 0) && (mh->message[i] == ' '); i--)
		mh->message[i] = 0;
	for (i = 36 + 35; (i >= 36 + 0) && (mh->message[i] == ' '); i--)
		mh->message[i] = 0;
	for (i = 72 + 35; (i >= 72 + 0) && (mh->message[i] == ' '); i--)
		mh->message[i] = 0;
	if ((mh->message[0]) || (mh->message[36]) || (mh->message[72]))
		if ((of.comment = (CHAR *)_mm_malloc(3 * (36 + 1) + 1))) {
			strncpy(of.comment, mh->message, 36);
			strcat(of.comment, "\r");
			if (mh->message[36])
				strncat(of.comment, mh->message + 36, 36);
			strcat(of.comment, "\r");
			if (mh->message[72])
				strncat(of.comment, mh->message + 72, 36);
			strcat(of.comment, "\r");
			of.comment[3 * (36 + 1)] = 0;
		}

	if (!AllocPositions(0x80))
		return 0;
	for (i = 0; i < 0x80; i++) {
		if (mh->orders[i] >= mh->nop)
			break;
		of.positions[i] = mh->orders[i];
	}
	of.numpos = i;
	of.reppos = mh->looporder < of.numpos ? mh->looporder : 0;

	if (!AllocSamples())
		return 0;
	current = of.samples;
	for (i = 0; i < of.numins; i++) {
		/* sample information */
		_mm_read_UBYTES((UBYTE *)sample.filename, 13, modreader);
		sample.length = _mm_read_I_SLONG(modreader);
		sample.loopbeg = _mm_read_I_SLONG(modreader);
		sample.loopend = _mm_read_I_SLONG(modreader);
		/* 0xfffff is the 669 convention for "no loop" */
		if (sample.loopend == 0xfffff)
			sample.loopend = 0;
		if ((sample.length < 0) || (sample.loopbeg < -1) || (sample.loopend < -1)) {
			_mm_errno = MMERR_LOADING_HEADER;
			return 0;
		}

		current->samplename = DupStr(sample.filename, 13, 1);
		current->seekpos = 0;
		current->speed = 0;
		current->length = sample.length;
		current->loopstart = sample.loopbeg;
		current->loopend = (sample.loopend < sample.length) ? sample.loopend : sample.length;
		current->flags = (sample.loopbeg < sample.loopend) ? SF_LOOP : 0;
		current->volume = 64;

		current++;
	}

	if (!S69_LoadPatterns())
		return 0;

	return 1;
}

/* S69_LoadTitle - return a copy of the first message line (the title) */
CHAR *S69_LoadTitle(void)
{
	CHAR s[36];

	_mm_fseek(modreader, 2, SEEK_SET);
	if (!_mm_read_UBYTES(s, 36, modreader))
		return NULL;

	return (DupStr(s, 36, 1));
}

/*========== Loader information */

MLOADER load_669 = {
	NULL,
	"669",
	"669 (Composer 669, Unis 669)",
	S69_Init,
	S69_Test,
	S69_Load,
	S69_Cleanup,
	S69_LoadTitle
};

/* ex:set ts=4: */
gpl-2.0
w1ndy/linux
arch/arc/mm/fault.c
326
5546
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>

/*
 * handle_vmalloc_fault - fault-in a kernel vmalloc-space address
 *
 * Copies the relevant pmd entry from the reference (init_mm) page table
 * into the current task's page table.  Returns 0 on success, 1 when the
 * reference table has no mapping either (a genuine bad access).
 */
static int handle_vmalloc_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset_fast(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (!pgd_present(*pgd_k))
		goto bad_area;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

	set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}

/*
 * do_page_fault - main page-fault entry point
 * @address: faulting virtual address
 * @regs: exception-time register state
 *
 * Handles vmalloc faults, user VMA lookup/expansion, permission checks,
 * retry after transient failures, and SIGSEGV/SIGBUS/OOM error paths.
 */
void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	siginfo_t info;
	int fault, ret;
	int write = regs->ecr_cause & ECR_C_PROTV_STORE;	/* ST/EX */
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && address <= VMALLOC_END) {
		ret = handle_vmalloc_fault(address);
		if (unlikely(ret))
			goto bad_area_nosemaphore;
		else
			return;
	}

	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	info.si_code = SEGV_ACCERR;

	/* Handle protection violation, execute on heap or stack */

	if ((regs->ecr_vec == ECR_V_PROTV) &&
	    (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		goto bad_area;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	/* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
	if (unlikely(fatal_signal_pending(current))) {
		/* On VM_FAULT_RETRY, handle_mm_fault has already dropped
		 * mmap_sem; only release it here for the other cases */
		if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
			up_read(&mm->mmap_sem);
		if (user_mode(regs))
			return;
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			/* To avoid updating stats twice for retry case */
			if (fault & VM_FAULT_MAJOR) {
				tsk->maj_flt++;
				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
					      regs, address);
			} else {
				tsk->min_flt++;
				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
					      regs, address);
			}

			if (fault & VM_FAULT_RETRY) {
				flags &= ~FAULT_FLAG_ALLOW_RETRY;
				flags |= FAULT_FLAG_TRIED;
				goto retry;
			}
		}

		/* Fault Handled Gracefully */
		up_read(&mm->mmap_sem);
		return;
	}

	if (fault & VM_FAULT_OOM)
		goto out_of_memory;
	else if (fault & VM_FAULT_SIGSEGV)
		goto bad_area;
	else if (fault & VM_FAULT_SIGBUS)
		goto do_sigbus;

	/* no man's land */
	BUG();

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.fault_address = address;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 *  when it acesses user-memory. When it fails in one
	 *  of those points, we find it in a table and do a jump
	 *  to some fixup code that loads an appropriate error
	 *  code)
	 */
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);

out_of_memory:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		pagefault_out_of_memory();
		return;
	}

	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	if (!user_mode(regs))
		goto no_context;

	tsk->thread.fault_address = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGBUS, &info, tsk);
}
gpl-2.0
TaichiN/android_kernel_google_msm
drivers/media/platform/msm/camera_v1/eeprom/msm_camera_eeprom.c
1350
5134
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include "msm_camera_eeprom.h"

/*
 * msm_camera_eeprom_read - read @num_byte bytes from the EEPROM at @reg_addr
 *
 * When @convert_endian is set, bytes are swapped pairwise (16-bit
 * byte-swap) while copying into @data.  Returns the i2c layer's result.
 */
int32_t msm_camera_eeprom_read(struct msm_eeprom_ctrl_t *ectrl,
	uint32_t reg_addr, void *data, uint32_t num_byte,
	uint16_t convert_endian)
{
	int rc = 0;
	if (ectrl->func_tbl.eeprom_set_dev_addr != NULL)
		ectrl->func_tbl.eeprom_set_dev_addr(ectrl, &reg_addr);

	if (!convert_endian) {
		rc = msm_camera_i2c_read_seq(
			&ectrl->i2c_client, reg_addr, data, num_byte);
	} else {
		unsigned char buf[num_byte];
		uint8_t *data_ptr = (uint8_t *) data;
		int i;
		rc = msm_camera_i2c_read_seq(
			&ectrl->i2c_client, reg_addr, buf, num_byte);
		/* Bound the swap at i+1 so an odd num_byte cannot index
		 * one byte past buf[] and data_ptr[] (the old i < num_byte
		 * bound overran both by one byte for odd sizes). */
		for (i = 0; i + 1 < num_byte; i += 2) {
			data_ptr[i] = buf[i+1];
			data_ptr[i+1] = buf[i];
		}
		/* Odd trailing byte has no partner: copy it unswapped */
		if (i < num_byte)
			data_ptr[i] = buf[i];
	}
	return rc;
}

/*
 * msm_camera_eeprom_read_tbl - perform every read described in @read_tbl
 *
 * Stops and returns the error of the first failing read; a NULL table is
 * treated as success.
 */
int32_t msm_camera_eeprom_read_tbl(struct msm_eeprom_ctrl_t *ectrl,
	struct msm_camera_eeprom_read_t *read_tbl, uint16_t tbl_size)
{
	int i, rc = 0;
	CDBG("%s: open\n", __func__);
	if (read_tbl == NULL)
		return rc;

	for (i = 0; i < tbl_size; i++) {
		rc = msm_camera_eeprom_read
			(ectrl, read_tbl[i].reg_addr,
			read_tbl[i].dest_ptr,
			read_tbl[i].num_byte,
			read_tbl[i].convert_endian);
		if (rc < 0) {
			pr_err("%s: read failed\n", __func__);
			return rc;
		}
	}
	CDBG("%s: done\n", __func__);
	return rc;
}

/*
 * msm_camera_eeprom_get_info - copy the controller's static info block
 * into @einfo.  Always succeeds.
 */
int32_t msm_camera_eeprom_get_info(struct msm_eeprom_ctrl_t *ectrl,
	struct msm_camera_eeprom_info_t *einfo)
{
	int rc = 0;
	CDBG("%s: open\n", __func__);
	memcpy(einfo, ectrl->info, ectrl->info_size);
	CDBG("%s: done =%d\n", __func__, rc);
	return rc;
}

/*
 * msm_camera_eeprom_get_data - copy one formatted data block to userspace
 *
 * @edata->index selects the entry in ectrl->data_tbl; returns -EFAULT on
 * an out-of-range index or a failed copy_to_user().
 */
int32_t msm_camera_eeprom_get_data(struct msm_eeprom_ctrl_t *ectrl,
	struct msm_eeprom_data_t *edata)
{
	int rc = 0;
	if (edata->index >= ectrl->data_tbl_size)
		return -EFAULT;
	if (copy_to_user(edata->eeprom_data,
		ectrl->data_tbl[edata->index].data,
		ectrl->data_tbl[edata->index].size))
		rc = -EFAULT;
	return rc;
}

/*
 * msm_eeprom_config - ioctl backend: dispatch a userspace config request
 *
 * Serialized by eeprom_mutex.  Unknown cfgtypes are silently accepted
 * (rc = 0), matching the original behavior callers rely on.
 */
int32_t msm_eeprom_config(struct msm_eeprom_ctrl_t *e_ctrl,
	void __user *argp)
{
	struct msm_eeprom_cfg_data cdata;
	int32_t rc = 0;
	if (copy_from_user(&cdata,
		(void *)argp,
		sizeof(struct msm_eeprom_cfg_data)))
		return -EFAULT;
	mutex_lock(e_ctrl->eeprom_mutex);

	switch (cdata.cfgtype) {
	case CFG_GET_EEPROM_INFO:
		if (e_ctrl->func_tbl.eeprom_get_info == NULL) {
			rc = -EFAULT;
			break;
		}
		rc = e_ctrl->func_tbl.eeprom_get_info(e_ctrl,
			&cdata.cfg.get_info);
		cdata.is_eeprom_supported = 1;
		if (copy_to_user((void *)argp,
			&cdata,
			sizeof(struct msm_eeprom_cfg_data)))
			rc = -EFAULT;
		break;
	case CFG_GET_EEPROM_DATA:
		if (e_ctrl->func_tbl.eeprom_get_data == NULL) {
			rc = -EFAULT;
			break;
		}
		rc = e_ctrl->func_tbl.eeprom_get_data(e_ctrl,
			&cdata.cfg.get_data);
		if (copy_to_user((void *)argp,
			&cdata,
			sizeof(struct msm_eeprom_cfg_data)))
			rc = -EFAULT;
		break;
	default:
		break;
	}
	mutex_unlock(e_ctrl->eeprom_mutex);
	return rc;
}

/* get_ectrl - recover the controller struct embedding this subdev */
struct msm_eeprom_ctrl_t *get_ectrl(struct v4l2_subdev *sd)
{
	return container_of(sd, struct msm_eeprom_ctrl_t, sdev);
}

/*
 * msm_eeprom_subdev_ioctl - v4l2 subdev ioctl entry point
 *
 * Only VIDIOC_MSM_EEPROM_CFG is supported; everything else returns
 * -ENOIOCTLCMD so the v4l2 core can report it as unhandled.
 */
long msm_eeprom_subdev_ioctl(struct v4l2_subdev *sd,
	unsigned int cmd, void *arg)
{
	struct msm_eeprom_ctrl_t *e_ctrl = get_ectrl(sd);
	void __user *argp = (void __user *)arg;
	switch (cmd) {
	case VIDIOC_MSM_EEPROM_CFG:
		return msm_eeprom_config(e_ctrl, argp);
	default:
		return -ENOIOCTLCMD;
	}
}

/*
 * msm_eeprom_i2c_probe - i2c probe: bind client, read out the EEPROM
 * contents once, format them, and register the v4l2 subdev.
 */
int32_t msm_eeprom_i2c_probe(struct i2c_client *client,
	const struct i2c_device_id *id)
{
	int rc = 0;
	struct msm_eeprom_ctrl_t *e_ctrl_t = NULL;
	CDBG("%s called\n", __func__);
	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		pr_err("i2c_check_functionality failed\n");
		rc = -ENODEV;	/* report a real error instead of rc = 0 */
		goto probe_failure;
	}

	/* The matched id carries the board-specific controller struct */
	e_ctrl_t = (struct msm_eeprom_ctrl_t *)(id->driver_data);
	e_ctrl_t->i2c_client.client = client;

	if (e_ctrl_t->i2c_addr != 0)
		e_ctrl_t->i2c_client.client->addr = e_ctrl_t->i2c_addr;

	CDBG("%s client = %x\n", __func__, (unsigned int) client);

	/* Assign name for sub device */
	snprintf(e_ctrl_t->sdev.name, sizeof(e_ctrl_t->sdev.name),
		"%s", e_ctrl_t->i2c_driver->driver.name);

	if (e_ctrl_t->func_tbl.eeprom_init != NULL) {
		rc = e_ctrl_t->func_tbl.eeprom_init(e_ctrl_t,
			e_ctrl_t->i2c_client.client->adapter);
	}
	/* Cache the EEPROM contents at probe time */
	msm_camera_eeprom_read_tbl(e_ctrl_t,
		e_ctrl_t->read_tbl, e_ctrl_t->read_tbl_size);

	if (e_ctrl_t->func_tbl.eeprom_format_data != NULL)
		e_ctrl_t->func_tbl.eeprom_format_data();

	if (e_ctrl_t->func_tbl.eeprom_release != NULL)
		rc = e_ctrl_t->func_tbl.eeprom_release(e_ctrl_t);

	/* Initialize sub device */
	v4l2_i2c_subdev_init(&e_ctrl_t->sdev,
		e_ctrl_t->i2c_client.client,
		e_ctrl_t->eeprom_v4l2_subdev_ops);
	CDBG("%s success resut=%d\n", __func__, rc);
	return rc;

probe_failure:
	pr_err("%s failed! rc = %d\n", __func__, rc);
	return rc;
}
gpl-2.0
mtitinger/linux-next
drivers/video/fbdev/p9100.c
1350
9122
/* p9100.c: P9100 frame buffer driver
 *
 * Copyright (C) 2003, 2006 David S. Miller (davem@davemloft.net)
 * Copyright 1999 Derrick J Brashear (shadow@dementia.org)
 *
 * Driver layout based loosely on tgafb.c, see that file for credits.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/fb.h>
#include <linux/mm.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/fbio.h>

#include "sbuslib.h"

/*
 * Local functions.
 */

static int p9100_setcolreg(unsigned, unsigned, unsigned, unsigned,
			   unsigned, struct fb_info *);
static int p9100_blank(int, struct fb_info *);

static int p9100_mmap(struct fb_info *, struct vm_area_struct *);
static int p9100_ioctl(struct fb_info *, unsigned int, unsigned long);

/*
 *  Frame buffer operations
 */

static struct fb_ops p9100_ops = {
	.owner			= THIS_MODULE,
	.fb_setcolreg		= p9100_setcolreg,
	.fb_blank		= p9100_blank,
	.fb_fillrect		= cfb_fillrect,
	.fb_copyarea		= cfb_copyarea,
	.fb_imageblit		= cfb_imageblit,
	.fb_mmap		= p9100_mmap,
	.fb_ioctl		= p9100_ioctl,
#ifdef CONFIG_COMPAT
	.fb_compat_ioctl	= sbusfb_compat_ioctl,
#endif
};

/* P9100 control registers */
#define P9100_SYSCTL_OFF	0x0UL
#define P9100_VIDEOCTL_OFF	0x100UL
#define P9100_VRAMCTL_OFF	0x180UL
#define P9100_RAMDAC_OFF	0x200UL
#define P9100_VIDEOCOPROC_OFF	0x400UL

/* P9100 command registers */
#define P9100_CMD_OFF		0x0UL

/* P9100 framebuffer memory */
#define P9100_FB_OFF		0x0UL

/* 3 bits: 2=8bpp 3=16bpp 5=32bpp 7=24bpp */
#define SYS_CONFIG_PIXELSIZE_SHIFT 26

#define SCREENPAINT_TIMECTL1_ENABLE_VIDEO 0x20 /* 0 = off, 1 = on */

/* Memory-mapped register layout of the P9100 chip; the _xxx arrays are
 * pad fields covering unused/undocumented register space. */
struct p9100_regs {
	/* Registers for the system control */
	u32 sys_base;
	u32 sys_config;
	u32 sys_intr;
	u32 sys_int_ena;
	u32 sys_alt_rd;
	u32 sys_alt_wr;
	u32 sys_xxx[58];

	/* Registers for the video control */
	u32 vid_base;
	u32 vid_hcnt;
	u32 vid_htotal;
	u32 vid_hsync_rise;
	u32 vid_hblank_rise;
	u32 vid_hblank_fall;
	u32 vid_hcnt_preload;
	u32 vid_vcnt;
	u32 vid_vlen;
	u32 vid_vsync_rise;
	u32 vid_vblank_rise;
	u32 vid_vblank_fall;
	u32 vid_vcnt_preload;
	u32 vid_screenpaint_addr;
	u32 vid_screenpaint_timectl1;
	u32 vid_screenpaint_qsfcnt;
	u32 vid_screenpaint_timectl2;
	u32 vid_xxx[15];

	/* Registers for the video control */
	u32 vram_base;
	u32 vram_memcfg;
	u32 vram_refresh_pd;
	u32 vram_refresh_cnt;
	u32 vram_raslo_max;
	u32 vram_raslo_cur;
	u32 pwrup_cfg;
	u32 vram_xxx[25];

	/* Registers for IBM RGB528 Palette */
	u32 ramdac_cmap_wridx;
	u32 ramdac_palette_data;
	u32 ramdac_pixel_mask;
	u32 ramdac_palette_rdaddr;
	u32 ramdac_idx_lo;
	u32 ramdac_idx_hi;
	u32 ramdac_idx_data;
	u32 ramdac_idx_ctl;
	u32 ramdac_xxx[1784];
};

/* Parameter-engine command block (declared for completeness; unused here). */
struct p9100_cmd_parameng {
	u32 parameng_status;
	u32 parameng_bltcmd;
	u32 parameng_quadcmd;
};

/* Per-device driver state, stored in fb_info->par. */
struct p9100_par {
	spinlock_t		lock;		/* serializes register access */
	struct p9100_regs	__iomem *regs;	/* mapped control registers */
	u32			flags;
#define P9100_FLAG_BLANKED	0x00000001	/* display currently blanked */
	unsigned long		which_io;	/* SBUS I/O space of the fb */
};

/**
 * p9100_setcolreg - Optional function. Sets a color register.
 * @regno: palette index to program (0-255)
 * @red: red component on a 16-bit scale (only the top 8 bits are used)
 * @green: green component on a 16-bit scale
 * @blue: blue component on a 16-bit scale
 * @transp: alpha component (ignored by this hardware)
 * @info: frame buffer info structure
 *
 * Writes one RGB triple into the RAMDAC palette under the register lock.
 * Returns 1 for an out-of-range index, 0 on success.
 */
static int p9100_setcolreg(unsigned regno,
			   unsigned red, unsigned green, unsigned blue,
			   unsigned transp, struct fb_info *info)
{
	struct p9100_par *par = (struct p9100_par *) info->par;
	struct p9100_regs __iomem *regs = par->regs;
	unsigned long flags;

	if (regno >= 256)
		return 1;

	/* Keep only the high byte of each 16-bit component. */
	red >>= 8;
	green >>= 8;
	blue >>= 8;

	spin_lock_irqsave(&par->lock, flags);

	/* RAMDAC auto-increments: write the index once, then R, G, B. */
	sbus_writel((regno << 16), &regs->ramdac_cmap_wridx);
	sbus_writel((red << 16), &regs->ramdac_palette_data);
	sbus_writel((green << 16), &regs->ramdac_palette_data);
	sbus_writel((blue << 16), &regs->ramdac_palette_data);

	spin_unlock_irqrestore(&par->lock, flags);

	return 0;
}

/**
 * p9100_blank - Optional function. Blanks the display.
* @blank: blanking mode requested (one of the FB_BLANK_* constants)
 * @info: frame buffer structure that represents a single frame buffer
 *
 * Toggles the ENABLE_VIDEO bit in the screenpaint timing register and
 * mirrors the state in par->flags.  Always returns 0.
 */
static int p9100_blank(int blank, struct fb_info *info)
{
	struct p9100_par *par = (struct p9100_par *) info->par;
	struct p9100_regs __iomem *regs = par->regs;
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&par->lock, flags);

	switch (blank) {
	case FB_BLANK_UNBLANK: /* Unblanking */
		val = sbus_readl(&regs->vid_screenpaint_timectl1);
		val |= SCREENPAINT_TIMECTL1_ENABLE_VIDEO;
		sbus_writel(val, &regs->vid_screenpaint_timectl1);
		par->flags &= ~P9100_FLAG_BLANKED;
		break;

	case FB_BLANK_NORMAL: /* Normal blanking */
	case FB_BLANK_VSYNC_SUSPEND: /* VESA blank (vsync off) */
	case FB_BLANK_HSYNC_SUSPEND: /* VESA blank (hsync off) */
	case FB_BLANK_POWERDOWN: /* Poweroff */
		/* All blank levels are treated identically: video off. */
		val = sbus_readl(&regs->vid_screenpaint_timectl1);
		val &= ~SCREENPAINT_TIMECTL1_ENABLE_VIDEO;
		sbus_writel(val, &regs->vid_screenpaint_timectl1);
		par->flags |= P9100_FLAG_BLANKED;
		break;
	}

	spin_unlock_irqrestore(&par->lock, flags);

	return 0;
}

/* Single mmap window: the whole framebuffer at the cg3-compatible offset. */
static struct sbus_mmap_map p9100_mmap_map[] = {
	{ CG3_MMAP_OFFSET, 0, SBUS_MMAP_FBSIZE(1) },
	{ 0,		   0, 0		    }
};

/* Map the framebuffer into user space via the generic SBUS helper. */
static int p9100_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct p9100_par *par = (struct p9100_par *)info->par;

	return sbusfb_mmap_helper(p9100_mmap_map,
				  info->fix.smem_start, info->fix.smem_len,
				  par->which_io, vma);
}

static int p9100_ioctl(struct fb_info *info, unsigned int cmd,
		       unsigned long arg)
{
	/* Make it look like a cg3.
	 */
	return sbusfb_ioctl_helper(cmd, arg, info,
				   FBTYPE_SUN3COLOR, 8, info->fix.smem_len);
}

/*
 *  Initialisation
 */

/* Fill the fixed fb parameters (id, pixel format, stride). */
static void p9100_init_fix(struct fb_info *info, int linebytes,
			   struct device_node *dp)
{
	strlcpy(info->fix.id, dp->name, sizeof(info->fix.id));

	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_PSEUDOCOLOR;

	info->fix.line_length = linebytes;

	info->fix.accel = FB_ACCEL_SUN_CGTHREE;
}

/* Probe: map registers and VRAM, set up fb_info and register it.
 * Error paths unwind via the goto chain below (continues past this span). */
static int p9100_probe(struct platform_device *op)
{
	struct device_node *dp = op->dev.of_node;
	struct fb_info *info;
	struct p9100_par *par;
	int linebytes, err;

	info = framebuffer_alloc(sizeof(struct p9100_par), &op->dev);

	err = -ENOMEM;
	if (!info)
		goto out_err;
	par = info->par;

	spin_lock_init(&par->lock);

	/* This is the framebuffer and the only resource apps can mmap.  */
	info->fix.smem_start = op->resource[2].start;
	par->which_io = op->resource[2].flags & IORESOURCE_BITS;

	sbusfb_fill_var(&info->var, dp, 8);
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;

	linebytes = of_getintprop_default(dp, "linebytes", info->var.xres);
	info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);

	par->regs = of_ioremap(&op->resource[0], 0,
			       sizeof(struct p9100_regs), "p9100 regs");
	if (!par->regs)
		goto out_release_fb;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &p9100_ops;
	info->screen_base = of_ioremap(&op->resource[2], 0,
				       info->fix.smem_len, "p9100 ram");
	if (!info->screen_base)
		goto out_unmap_regs;

	p9100_blank(FB_BLANK_UNBLANK, info);

	if (fb_alloc_cmap(&info->cmap, 256, 0))
		goto out_unmap_screen;

	p9100_init_fix(info, linebytes, dp);

	err = register_framebuffer(info);
	if (err < 0)
		goto out_dealloc_cmap;

	fb_set_cmap(&info->cmap, info);

	dev_set_drvdata(&op->dev, info);

	printk(KERN_INFO "%s: p9100 at %lx:%lx\n",
	       dp->full_name,
	       par->which_io, info->fix.smem_start);

	return 0;

out_dealloc_cmap:
	fb_dealloc_cmap(&info->cmap);

out_unmap_screen:
	of_iounmap(&op->resource[2], info->screen_base, info->fix.smem_len);

out_unmap_regs:
of_iounmap(&op->resource[0], par->regs, sizeof(struct p9100_regs)); out_release_fb: framebuffer_release(info); out_err: return err; } static int p9100_remove(struct platform_device *op) { struct fb_info *info = dev_get_drvdata(&op->dev); struct p9100_par *par = info->par; unregister_framebuffer(info); fb_dealloc_cmap(&info->cmap); of_iounmap(&op->resource[0], par->regs, sizeof(struct p9100_regs)); of_iounmap(&op->resource[2], info->screen_base, info->fix.smem_len); framebuffer_release(info); return 0; } static const struct of_device_id p9100_match[] = { { .name = "p9100", }, {}, }; MODULE_DEVICE_TABLE(of, p9100_match); static struct platform_driver p9100_driver = { .driver = { .name = "p9100", .of_match_table = p9100_match, }, .probe = p9100_probe, .remove = p9100_remove, }; static int __init p9100_init(void) { if (fb_get_options("p9100fb", NULL)) return -ENODEV; return platform_driver_register(&p9100_driver); } static void __exit p9100_exit(void) { platform_driver_unregister(&p9100_driver); } module_init(p9100_init); module_exit(p9100_exit); MODULE_DESCRIPTION("framebuffer driver for P9100 chipsets"); MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL");
gpl-2.0
lazy404/kernel
fs/hfs/string.c
2118
3807
/*
 * linux/fs/hfs/string.c
 *
 * Copyright (C) 1995-1997 Paul H. Hargrove
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 * This file may be distributed under the terms of the GNU General Public License.
 *
 * This file contains the string comparison function for the
 * Macintosh character set.
 *
 * The code in this file is derived from code which is copyright
 * 1986, 1989, 1990 by Abacus Research and Development, Inc. (ARDI)
 * It is used here by the permission of ARDI's president Cliff Matthews.
 */

#include "hfs_fs.h"
#include <linux/dcache.h>

/*================ File-local variables ================*/

/*
 * unsigned char caseorder[]
 *
 * Defines the lexical ordering of characters on the Macintosh
 * (maps each byte to its case-folded collation weight, so comparing
 * caseorder[a] with caseorder[b] gives case-insensitive ordering).
 *
 * Composition of the 'casefold' and 'order' tables from ARDI's code
 * with the entry for 0x20 changed to match that for 0xCA to remove
 * special case for those two characters.
 */
static unsigned char caseorder[256] = {
	0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,0x0E,0x0F,
	0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18,0x19,0x1A,0x1B,0x1C,0x1D,0x1E,0x1F,
	0x20,0x22,0x23,0x28,0x29,0x2A,0x2B,0x2C,0x2F,0x30,0x31,0x32,0x33,0x34,0x35,0x36,
	0x37,0x38,0x39,0x3A,0x3B,0x3C,0x3D,0x3E,0x3F,0x40,0x41,0x42,0x43,0x44,0x45,0x46,
	0x47,0x48,0x57,0x59,0x5D,0x5F,0x66,0x68,0x6A,0x6C,0x72,0x74,0x76,0x78,0x7A,0x7E,
	0x8C,0x8E,0x90,0x92,0x95,0x97,0x9E,0xA0,0xA2,0xA4,0xA7,0xA9,0xAA,0xAB,0xAC,0xAD,
	0x4E,0x48,0x57,0x59,0x5D,0x5F,0x66,0x68,0x6A,0x6C,0x72,0x74,0x76,0x78,0x7A,0x7E,
	0x8C,0x8E,0x90,0x92,0x95,0x97,0x9E,0xA0,0xA2,0xA4,0xA7,0xAF,0xB0,0xB1,0xB2,0xB3,
	0x4A,0x4C,0x5A,0x60,0x7B,0x7F,0x98,0x4F,0x49,0x51,0x4A,0x4B,0x4C,0x5A,0x60,0x63,
	0x64,0x65,0x6E,0x6F,0x70,0x71,0x7B,0x84,0x85,0x86,0x7F,0x80,0x9A,0x9B,0x9C,0x98,
	0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0x94,0xBB,0xBC,0xBD,0xBE,0xBF,0xC0,0x4D,0x81,
	0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0x55,0x8A,0xCC,0x4D,0x81,
	0xCD,0xCE,0xCF,0xD0,0xD1,0xD2,0xD3,0x26,0x27,0xD4,0x20,0x49,0x4B,0x80,0x82,0x82,
	0xD5,0xD6,0x24,0x25,0x2D,0x2E,0xD7,0xD8,0xA6,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF,
	0xE0,0xE1,0xE2,0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xEB,0xEC,0xED,0xEE,0xEF,
	0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF
};

/*================ Global functions ================*/

/*
 * Hash a string to an integer in a case-independent way
 * (each byte is folded through caseorder before hashing, so names that
 * compare equal under hfs_compare_dentry() hash identically; names are
 * truncated to HFS_NAMELEN, matching the comparison below).
 */
int hfs_hash_dentry(const struct dentry *dentry, struct qstr *this)
{
	const unsigned char *name = this->name;
	unsigned int hash, len = this->len;

	if (len > HFS_NAMELEN)
		len = HFS_NAMELEN;

	hash = init_name_hash();
	for (; len; len--)
		hash = partial_name_hash(caseorder[*name++], hash);
	this->hash = end_name_hash(hash);
	return 0;
}

/*
 * Compare two strings in the HFS filename character ordering
 * Returns positive, negative, or zero, not just 0 or (+/-)1
 *
 * Equivalent to ARDI's call:
 *	ROMlib_RelString(s1+1, s2+1, true, false, (s1[0]<<16) | s2[0])
 */
int hfs_strcmp(const unsigned char *s1, unsigned int len1,
	       const unsigned char *s2, unsigned int len2)
{
	int len, tmp;

	/* Compare the common prefix byte by byte, then break ties on
	 * length (shorter string collates first). */
	len = (len1 > len2) ? len2 : len1;
	while (len--) {
		tmp = (int)caseorder[*(s1++)] - (int)caseorder[*(s2++)];
		if (tmp)
			return tmp;
	}
	return len1 - len2;
}

/*
 * Test for equality of two strings in the HFS filename character ordering.
 * return 1 on failure and 0 on success
 *
 * Both names are effectively truncated to HFS_NAMELEN before comparing,
 * mirroring hfs_hash_dentry() above.
 */
int hfs_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
		unsigned int len, const char *str, const struct qstr *name)
{
	const unsigned char *n1, *n2;

	if (len >= HFS_NAMELEN) {
		if (name->len < HFS_NAMELEN)
			return 1;
		len = HFS_NAMELEN;
	} else if (len != name->len)
		return 1;
	n1 = str;
	n2 = name->name;
	while (len--) {
		if (caseorder[*n1++] != caseorder[*n2++])
			return 1;
	}
	return 0;
}
gpl-2.0
andrewevans01/T889_Kernel_Recharged
drivers/infiniband/hw/nes/nes_nic.c
2374
62562
/* * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/if_arp.h> #include <linux/if_vlan.h> #include <linux/ethtool.h> #include <linux/slab.h> #include <net/tcp.h> #include <net/inet_common.h> #include <linux/inet.h> #include "nes.h" static struct nic_qp_map nic_qp_mapping_0[] = { {16,0,0,1},{24,4,0,0},{28,8,0,0},{32,12,0,0}, {20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0}, {18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0}, {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0} }; static struct nic_qp_map nic_qp_mapping_1[] = { {18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0}, {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0} }; static struct nic_qp_map nic_qp_mapping_2[] = { {20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0} }; static struct nic_qp_map nic_qp_mapping_3[] = { {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0} }; static struct nic_qp_map nic_qp_mapping_4[] = { {28,8,0,0},{32,12,0,0} }; static struct nic_qp_map nic_qp_mapping_5[] = { {29,9,1,0},{33,13,1,0} }; static struct nic_qp_map nic_qp_mapping_6[] = { {30,10,2,0},{34,14,2,0} }; static struct nic_qp_map nic_qp_mapping_7[] = { {31,11,3,0},{35,15,3,0} }; static struct nic_qp_map *nic_qp_mapping_per_function[] = { nic_qp_mapping_0, nic_qp_mapping_1, nic_qp_mapping_2, nic_qp_mapping_3, nic_qp_mapping_4, nic_qp_mapping_5, nic_qp_mapping_6, nic_qp_mapping_7 }; static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; static int debug = -1; static int nics_per_function = 1; /** * nes_netdev_poll */ static int nes_netdev_poll(struct napi_struct *napi, int budget) { struct nes_vnic *nesvnic = container_of(napi, struct nes_vnic, napi); struct nes_device *nesdev = nesvnic->nesdev; struct nes_hw_nic_cq *nescq = &nesvnic->nic_cq; nesvnic->budget = budget; nescq->cqes_pending = 0; nescq->rx_cqes_completed = 0; nescq->cqe_allocs_pending = 0; 
nescq->rx_pkts_indicated = 0; nes_nic_ce_handler(nesdev, nescq); if (nescq->cqes_pending == 0) { napi_complete(napi); /* clear out completed cqes and arm */ nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | nescq->cq_number | (nescq->cqe_allocs_pending << 16)); nes_read32(nesdev->regs+NES_CQE_ALLOC); } else { /* clear out completed cqes but don't arm */ nes_write32(nesdev->regs+NES_CQE_ALLOC, nescq->cq_number | (nescq->cqe_allocs_pending << 16)); nes_debug(NES_DBG_NETDEV, "%s: exiting with work pending\n", nesvnic->netdev->name); } return nescq->rx_pkts_indicated; } /** * nes_netdev_open - Activate the network interface; ifconfig * ethx up. */ static int nes_netdev_open(struct net_device *netdev) { u32 macaddr_low; u16 macaddr_high; struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; int ret; int i; struct nes_vnic *first_nesvnic = NULL; u32 nic_active_bit; u32 nic_active; struct list_head *list_pos, *list_temp; unsigned long flags; assert(nesdev != NULL); if (nesvnic->netdev_open == 1) return 0; if (netif_msg_ifup(nesvnic)) printk(KERN_INFO PFX "%s: enabling interface\n", netdev->name); ret = nes_init_nic_qp(nesdev, netdev); if (ret) { return ret; } netif_carrier_off(netdev); netif_stop_queue(netdev); if ((!nesvnic->of_device_registered) && (nesvnic->rdma_enabled)) { nesvnic->nesibdev = nes_init_ofa_device(netdev); if (nesvnic->nesibdev == NULL) { printk(KERN_ERR PFX "%s: nesvnic->nesibdev alloc failed", netdev->name); } else { nesvnic->nesibdev->nesvnic = nesvnic; ret = nes_register_ofa_device(nesvnic->nesibdev); if (ret) { printk(KERN_ERR PFX "%s: Unable to register RDMA device, ret = %d\n", netdev->name, ret); } } } /* Set packet filters */ nic_active_bit = 1 << nesvnic->nic_index; nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE); nic_active |= nic_active_bit; nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active); nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE); 
nic_active |= nic_active_bit; nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active); nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON); nic_active |= nic_active_bit; nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active); macaddr_high = ((u16)netdev->dev_addr[0]) << 8; macaddr_high += (u16)netdev->dev_addr[1]; macaddr_low = ((u32)netdev->dev_addr[2]) << 24; macaddr_low += ((u32)netdev->dev_addr[3]) << 16; macaddr_low += ((u32)netdev->dev_addr[4]) << 8; macaddr_low += (u32)netdev->dev_addr[5]; /* Program the various MAC regs */ for (i = 0; i < NES_MAX_PORT_COUNT; i++) { if (nesvnic->qp_nic_index[i] == 0xf) { break; } nes_debug(NES_DBG_NETDEV, "i=%d, perfect filter table index= %d, PERF FILTER LOW" " (Addr:%08X) = %08X, HIGH = %08X.\n", i, nesvnic->qp_nic_index[i], NES_IDX_PERFECT_FILTER_LOW+ (nesvnic->qp_nic_index[i] * 8), macaddr_low, (u32)macaddr_high | NES_MAC_ADDR_VALID | ((((u32)nesvnic->nic_index) << 16))); nes_write_indexed(nesdev, NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8), macaddr_low); nes_write_indexed(nesdev, NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8), (u32)macaddr_high | NES_MAC_ADDR_VALID | ((((u32)nesvnic->nic_index) << 16))); } nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | nesvnic->nic_cq.cq_number); nes_read32(nesdev->regs+NES_CQE_ALLOC); list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) { first_nesvnic = container_of(list_pos, struct nes_vnic, list); if (first_nesvnic->netdev_open == 1) break; } if (first_nesvnic->netdev_open == 0) { nes_debug(NES_DBG_INIT, "Setting up MAC interrupt mask.\n"); nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK + (0x200 * nesdev->mac_index), ~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT | NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR)); first_nesvnic = nesvnic; } if (first_nesvnic->linkup) { /* Enable network packets */ nesvnic->linkup = 1; netif_start_queue(netdev); 
netif_carrier_on(netdev); } spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags); if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_SFP_D) { if (nesdev->link_recheck) cancel_delayed_work(&nesdev->work); nesdev->link_recheck = 1; schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY); } spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags); spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags); if (nesvnic->of_device_registered) { nesdev->nesadapter->send_term_ok = 1; if (nesvnic->linkup == 1) { if (nesdev->iw_status == 0) { nesdev->iw_status = 1; nes_port_ibevent(nesvnic); } } else { nesdev->iw_status = 0; } } spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags); napi_enable(&nesvnic->napi); nesvnic->netdev_open = 1; return 0; } /** * nes_netdev_stop */ static int nes_netdev_stop(struct net_device *netdev) { struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; u32 nic_active_mask; u32 nic_active; struct nes_vnic *first_nesvnic = NULL; struct list_head *list_pos, *list_temp; unsigned long flags; nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n", nesvnic, nesdev, netdev, netdev->name); if (nesvnic->netdev_open == 0) return 0; if (netif_msg_ifdown(nesvnic)) printk(KERN_INFO PFX "%s: disabling interface\n", netdev->name); netif_carrier_off(netdev); /* Disable network packets */ napi_disable(&nesvnic->napi); netif_stop_queue(netdev); list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) { first_nesvnic = container_of(list_pos, struct nes_vnic, list); if ((first_nesvnic->netdev_open == 1) && (first_nesvnic != nesvnic)) break; } if ((first_nesvnic->netdev_open == 1) && (first_nesvnic != nesvnic) && (PCI_FUNC(first_nesvnic->nesdev->pcidev->devfn) != PCI_FUNC(nesvnic->nesdev->pcidev->devfn))) { nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+ (0x200*nesdev->mac_index), 0xffffffff); nes_write_indexed(first_nesvnic->nesdev, 
NES_IDX_MAC_INT_MASK+ (0x200*first_nesvnic->nesdev->mac_index), ~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT | NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR)); } else { nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+(0x200*nesdev->mac_index), 0xffffffff); } nic_active_mask = ~((u32)(1 << nesvnic->nic_index)); nes_write_indexed(nesdev, NES_IDX_PERFECT_FILTER_HIGH+ (nesvnic->perfect_filter_index*8), 0); nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE); nic_active &= nic_active_mask; nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active); nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL); nic_active &= nic_active_mask; nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active); nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE); nic_active &= nic_active_mask; nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active); nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL); nic_active &= nic_active_mask; nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active); nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON); nic_active &= nic_active_mask; nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active); spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags); if (nesvnic->of_device_registered) { nesdev->nesadapter->send_term_ok = 0; nesdev->iw_status = 0; if (nesvnic->linkup == 1) nes_port_ibevent(nesvnic); } del_timer_sync(&nesvnic->event_timer); nesvnic->event_timer.function = NULL; spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags); nes_destroy_nic_qp(nesvnic); nesvnic->netdev_open = 0; return 0; } /** * nes_nic_send */ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev) { struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; struct nes_hw_nic *nesnic = &nesvnic->nic; struct nes_hw_nic_sq_wqe *nic_sqe; struct tcphdr *tcph; __le16 *wqe_fragment_length; u32 wqe_misc; u16 wqe_fragment_index = 1; /* first 
fragment (0) is used by copy buffer */ u16 skb_fragment_index; dma_addr_t bus_address; nic_sqe = &nesnic->sq_vbase[nesnic->sq_head]; wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX]; /* setup the VLAN tag if present */ if (vlan_tx_tag_present(skb)) { nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n", netdev->name, vlan_tx_tag_get(skb)); wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE; wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb); } else wqe_misc = 0; /* bump past the vlan tag */ wqe_fragment_length++; /* wqe_fragment_address = (u64 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX]; */ if (skb->ip_summed == CHECKSUM_PARTIAL) { tcph = tcp_hdr(skb); if (1) { if (skb_is_gso(skb)) { /* nes_debug(NES_DBG_NIC_TX, "%s: TSO request... seg size = %u\n", netdev->name, skb_is_gso(skb)); */ wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE | NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb); set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX, ((u32)tcph->doff) | (((u32)(((unsigned char *)tcph) - skb->data)) << 4)); } else { wqe_misc |= NES_NIC_SQ_WQE_COMPLETION; } } } else { /* CHECKSUM_HW */ wqe_misc |= NES_NIC_SQ_WQE_DISABLE_CHKSUM | NES_NIC_SQ_WQE_COMPLETION; } set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX, skb->len); memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer, skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE), skb_headlen(skb))); wqe_fragment_length[0] = cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE), skb_headlen(skb))); wqe_fragment_length[1] = 0; if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) { if ((skb_shinfo(skb)->nr_frags + 1) > 4) { nes_debug(NES_DBG_NIC_TX, "%s: Packet with %u fragments not sent, skb_headlen=%u\n", netdev->name, skb_shinfo(skb)->nr_frags + 2, skb_headlen(skb)); kfree_skb(skb); nesvnic->tx_sw_dropped++; return NETDEV_TX_LOCKED; } set_bit(nesnic->sq_head, nesnic->first_frag_overflow); bus_address = pci_map_single(nesdev->pcidev, skb->data 
/*
 * NOTE(review): this range begins inside nes_nic_send() — the function's
 * opening lies before this window.  It contains the remainder of
 * nes_nic_send(), all of nes_netdev_start_xmit(), and the opening of
 * nes_netdev_get_stats().
 */
/* nes_nic_send() continued: finish mapping the linear-data overflow that did
 * not fit in the per-WQE copy buffer (NES_FIRST_FRAG_SIZE bytes), record its
 * DMA address/length in fragment slot 1, and keep the skb for completion. */
+ NES_FIRST_FRAG_SIZE, skb_headlen(skb) - NES_FIRST_FRAG_SIZE, PCI_DMA_TODEVICE);
wqe_fragment_length[wqe_fragment_index++] = cpu_to_le16(skb_headlen(skb) - NES_FIRST_FRAG_SIZE);
wqe_fragment_length[wqe_fragment_index] = 0;
set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX, ((u64)(bus_address)));
nesnic->tx_skb[nesnic->sq_head] = skb; }
/* Entirely-linear skb that fit in the copy buffer: no extra fragments. */
if (skb_headlen(skb) == skb->len) { if (skb_headlen(skb) <= NES_FIRST_FRAG_SIZE) { nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_2_1_IDX] = 0; nesnic->tx_skb[nesnic->sq_head] = skb; } } else {
/* Deal with Fragments: DMA-map each page fragment into successive WQE
 * fragment slots (at most 5 per WQE counting the copy buffer). */
nesnic->tx_skb[nesnic->sq_head] = skb; for (skb_fragment_index = 0; skb_fragment_index < skb_shinfo(skb)->nr_frags; skb_fragment_index++) { bus_address = pci_map_page( nesdev->pcidev, skb_shinfo(skb)->frags[skb_fragment_index].page, skb_shinfo(skb)->frags[skb_fragment_index].page_offset, skb_shinfo(skb)->frags[skb_fragment_index].size, PCI_DMA_TODEVICE); wqe_fragment_length[wqe_fragment_index] = cpu_to_le16(skb_shinfo(skb)->frags[skb_fragment_index].size); set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index), bus_address); wqe_fragment_index++; if (wqe_fragment_index < 5) wqe_fragment_length[wqe_fragment_index] = 0; } }
/* Finalize the WQE and advance the SQ head (sq_size is a power of two,
 * so the mask performs the ring wrap). */
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX, wqe_misc);
nesnic->sq_head++; nesnic->sq_head &= nesnic->sq_size - 1; return NETDEV_TX_OK; }

/**
 * nes_netdev_start_xmit - ndo_start_xmit entry point.
 *
 * Fast path: hand the skb to nes_nic_send().  Slow path (more than 4
 * fragments): if the skb is GSO, segment it in software across multiple
 * WQEs, rewriting the copied TCP/IP header per segment; otherwise
 * linearize the skb and retry nes_nic_send().
 */
static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; struct nes_hw_nic *nesnic = &nesvnic->nic; struct nes_hw_nic_sq_wqe *nic_sqe; struct tcphdr *tcph;
/* struct udphdr *udph; */
#define NES_MAX_TSO_FRAGS MAX_SKB_FRAGS
/* 64K segment plus overflow on each side */
dma_addr_t tso_bus_address[NES_MAX_TSO_FRAGS]; dma_addr_t bus_address; u32 tso_frag_index; u32 tso_frag_count; u32 tso_wqe_length; u32 curr_tcp_seq; u32 wqe_count=1; u32 send_rc;
struct iphdr *iph; __le16 *wqe_fragment_length; u32 nr_frags; u32 original_first_length;
/* u64 *wqe_fragment_address; */
/* first fragment (0) is used by copy buffer */
u16 wqe_fragment_index=1; u16 hoffset; u16 nhoffset; u16 wqes_needed; u16 wqes_available; u32 wqe_misc;
/*
 * nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
 *		" (%u frags), tso_size=%u\n",
 *		netdev->name, skb->len, skb_headlen(skb),
 *		skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
 */
if (!netif_carrier_ok(netdev)) return NETDEV_TX_OK;
if (netif_queue_stopped(netdev)) return NETDEV_TX_BUSY;
/* Check if SQ is full.  The (volatile u16) cast on the re-read after
 * barrier() forces a fresh load of sq_tail to close the race with the
 * completion path restarting the queue. */
if ((((nesnic->sq_tail+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) == 1) { if (!netif_queue_stopped(netdev)) { netif_stop_queue(netdev); barrier(); if ((((((volatile u16)nesnic->sq_tail)+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) != 1) { netif_start_queue(netdev); goto sq_no_longer_full; } } nesvnic->sq_full++; return NETDEV_TX_BUSY; }
sq_no_longer_full:
nr_frags = skb_shinfo(skb)->nr_frags;
/* The linear-data overflow beyond the copy buffer consumes a fragment slot. */
if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) { nr_frags++; }
/* Check if too many fragments */
if (unlikely((nr_frags > 4))) { if (skb_is_gso(skb)) { nesvnic->segmented_tso_requests++; nesvnic->tso_requests++;
/* Basically 4 fragments available per WQE with extended fragments */
wqes_needed = nr_frags >> 2; wqes_needed += (nr_frags&3)?1:0;
wqes_available = (((nesnic->sq_tail+nesnic->sq_size)-nesnic->sq_head) - 1) & (nesnic->sq_size - 1);
if (unlikely(wqes_needed > wqes_available)) { if (!netif_queue_stopped(netdev)) { netif_stop_queue(netdev); barrier(); wqes_available = (((((volatile u16)nesnic->sq_tail)+nesnic->sq_size)-nesnic->sq_head) - 1) & (nesnic->sq_size - 1); if (wqes_needed <= wqes_available) { netif_start_queue(netdev); goto tso_sq_no_longer_full; } } nesvnic->sq_full++; nes_debug(NES_DBG_NIC_TX, "%s: HNIC SQ full- TSO request has too many frags!\n", netdev->name); return NETDEV_TX_BUSY; }
tso_sq_no_longer_full:
/* Map all the buffers */
for (tso_frag_count=0; tso_frag_count < skb_shinfo(skb)->nr_frags; tso_frag_count++) { tso_bus_address[tso_frag_count] = pci_map_page( nesdev->pcidev, skb_shinfo(skb)->frags[tso_frag_count].page, skb_shinfo(skb)->frags[tso_frag_count].page_offset, skb_shinfo(skb)->frags[tso_frag_count].size, PCI_DMA_TODEVICE); }
tso_frag_index = 0;
curr_tcp_seq = ntohl(tcp_hdr(skb)->seq);
hoffset = skb_transport_header(skb) - skb->data;
nhoffset = skb_network_header(skb) - skb->data;
/* original_first_length = offset of TCP payload = end of TCP header. */
original_first_length = hoffset + ((((struct tcphdr *)skb_transport_header(skb))->doff)<<2);
/* One iteration per WQE: copy the headers into the WQE's copy buffer,
 * patch TCP flags/seq for the middle segments, then attach up to 4
 * page fragments. */
for (wqe_count=0; wqe_count<((u32)wqes_needed); wqe_count++) { tso_wqe_length = 0; nic_sqe = &nesnic->sq_vbase[nesnic->sq_head]; wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
/* setup the VLAN tag if present */
if (vlan_tx_tag_present(skb)) { nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n", netdev->name, vlan_tx_tag_get(skb) ); wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE; wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb); } else wqe_misc = 0;
/* bump past the vlan tag */
wqe_fragment_length++;
/* Assumes header totally fits in allocated buffer and is in first fragment */
if (original_first_length > NES_FIRST_FRAG_SIZE) { nes_debug(NES_DBG_NIC_TX, "ERROR: SKB header too big, headlen=%u, FIRST_FRAG_SIZE=%u\n", original_first_length, NES_FIRST_FRAG_SIZE); nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u," " (%u frags), tso_size=%u\n", netdev->name, skb->len, skb_headlen(skb), skb_shinfo(skb)->nr_frags, skb_is_gso(skb)); }
memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer, skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE), original_first_length));
/* iph/tcph point into the per-WQE header copy, so edits below do not
 * touch the original skb data. */
iph = (struct iphdr *) (&nesnic->first_frag_vbase[nesnic->sq_head].buffer[nhoffset]);
tcph = (struct tcphdr *) (&nesnic->first_frag_vbase[nesnic->sq_head].buffer[hoffset]);
/* Not the last segment: clear end-of-stream flags. */
if ((wqe_count+1)!=(u32)wqes_needed) { tcph->fin = 0; tcph->psh = 0; tcph->rst = 0; tcph->urg = 0; }
/* Not the first segment: SYN only belongs on the first. */
if (wqe_count) { tcph->syn = 0; }
tcph->seq = htonl(curr_tcp_seq);
wqe_fragment_length[0] = cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE), original_first_length));
wqe_fragment_index = 1;
/* First WQE also carries any linear data beyond the copied headers. */
if ((wqe_count==0) && (skb_headlen(skb) > original_first_length)) { set_bit(nesnic->sq_head, nesnic->first_frag_overflow); bus_address = pci_map_single(nesdev->pcidev, skb->data + original_first_length, skb_headlen(skb) - original_first_length, PCI_DMA_TODEVICE); wqe_fragment_length[wqe_fragment_index++] = cpu_to_le16(skb_headlen(skb) - original_first_length); wqe_fragment_length[wqe_fragment_index] = 0; set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX, bus_address); tso_wqe_length += skb_headlen(skb) - original_first_length; }
while (wqe_fragment_index < 5) { wqe_fragment_length[wqe_fragment_index] = cpu_to_le16(skb_shinfo(skb)->frags[tso_frag_index].size); set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index), (u64)tso_bus_address[tso_frag_index]); wqe_fragment_index++; tso_wqe_length += skb_shinfo(skb)->frags[tso_frag_index++].size; if (wqe_fragment_index < 5) wqe_fragment_length[wqe_fragment_index] = 0; if (tso_frag_index == tso_frag_count) break; }
/* Only the last WQE owns the skb for completion/free. */
if ((wqe_count+1) == (u32)wqes_needed) { nesnic->tx_skb[nesnic->sq_head] = skb; } else { nesnic->tx_skb[nesnic->sq_head] = NULL; }
/* NOTE(review): skb_is_gso() is used here as the MSS value (gso_size),
 * not as a boolean — relies on that helper returning gso_size; verify
 * against the kernel version this driver targets. */
wqe_misc |= NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb);
if ((tso_wqe_length + original_first_length) > skb_is_gso(skb)) { wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE; } else {
/* Segment fits in one MSS: send it un-LSO'd with a corrected IP length. */
iph->tot_len = htons(tso_wqe_length + original_first_length - nhoffset); }
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX, wqe_misc);
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX, ((u32)tcph->doff) | (((u32)hoffset) << 4));
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX, tso_wqe_length + original_first_length);
curr_tcp_seq += tso_wqe_length;
nesnic->sq_head++;
nesnic->sq_head &= nesnic->sq_size-1; } } else {
/* Too many frags but not GSO: linearize (preserving header offsets)
 * and retry the simple send path. */
nesvnic->linearized_skbs++; hoffset = skb_transport_header(skb) - skb->data; nhoffset = skb_network_header(skb) - skb->data; skb_linearize(skb); skb_set_transport_header(skb, hoffset); skb_set_network_header(skb, nhoffset); send_rc = nes_nic_send(skb, netdev);
/* NOTE(review): a non-OK send_rc is swallowed and reported as TX_OK;
 * presumably nes_nic_send() already freed/accounted the skb — confirm. */
if (send_rc != NETDEV_TX_OK) return NETDEV_TX_OK; } } else { send_rc = nes_nic_send(skb, netdev); if (send_rc != NETDEV_TX_OK) return NETDEV_TX_OK; }
barrier();
/* Ring the doorbell once for all WQEs posted above. */
if (wqe_count) nes_write32(nesdev->regs+NES_WQE_ALLOC, (wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id);
netdev->trans_start = jiffies;
return NETDEV_TX_OK;
}

/**
 * nes_netdev_get_stats - accumulate hardware counters into netstats.
 * (Continues past the end of this range.)
 */
static struct net_device_stats *nes_netdev_get_stats(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; u64 u64temp; u32 u32temp;
/* Per-NIC endnode counters live at 0x200-byte strides per nic_index;
 * HI/LO register pairs are combined into 64-bit values. */
u32temp = nes_read_indexed(nesdev, NES_IDX_ENDNODE0_NSTAT_RX_DISCARD + (nesvnic->nic_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->endnode_nstat_rx_discard += u32temp;
u64temp = (u64)nes_read_indexed(nesdev, NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO + (nesvnic->nic_index*0x200));
u64temp += ((u64)nes_read_indexed(nesdev, NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32;
nesvnic->endnode_nstat_rx_octets += u64temp;
nesvnic->netstats.rx_bytes += u64temp;
u64temp = (u64)nes_read_indexed(nesdev, NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO + (nesvnic->nic_index*0x200));
u64temp += ((u64)nes_read_indexed(nesdev, NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32;
nesvnic->endnode_nstat_rx_frames += u64temp;
nesvnic->netstats.rx_packets += u64temp;
u64temp = (u64)nes_read_indexed(nesdev, NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO + (nesvnic->nic_index*0x200));
u64temp += ((u64)nes_read_indexed(nesdev, NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32;
nesvnic->endnode_nstat_tx_octets += u64temp;
nesvnic->netstats.tx_bytes += u64temp;
u64temp = (u64)nes_read_indexed(nesdev,
/* nes_netdev_get_stats() continued: finish the TX frame counters, then fold
 * in per-MAC error counters (0x200-byte stride per mac_index).  Short,
 * oversized, jabber and symbol-error frames all count as rx_dropped. */
NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO + (nesvnic->nic_index*0x200));
u64temp += ((u64)nes_read_indexed(nesdev, NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32;
nesvnic->endnode_nstat_tx_frames += u64temp;
nesvnic->netstats.tx_packets += u64temp;
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_short_frames += u32temp;
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_oversized_frames += u32temp;
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_jabber_frames += u32temp;
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_length_errors += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_crc_errors += u32temp;
nesvnic->netstats.rx_crc_errors += u32temp;
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_tx_errors += u32temp;
nesvnic->netstats.tx_errors += u32temp;
return &nesvnic->netstats;
}

/**
 * nes_netdev_tx_timeout - ndo_tx_timeout hook; debug log only, no recovery.
 */
static void nes_netdev_tx_timeout(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
if (netif_msg_timer(nesvnic)) nes_debug(NES_DBG_NIC_TX, "%s: tx timeout\n", netdev->name);
}

/**
 * nes_netdev_set_mac_address - program a new MAC into the adapter's
 * perfect-filter registers for every QP this vnic owns.
 *
 * Returns -EADDRNOTAVAIL for an invalid ethernet address, 0 on success.
 */
static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
{
struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; struct sockaddr *mac_addr = p; int i; u32 macaddr_low; u16 macaddr_high;
if (!is_valid_ether_addr(mac_addr->sa_data)) return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
printk(PFX "%s: Address length = %d, Address = %pM\n", __func__, netdev->addr_len, mac_addr->sa_data);
/* Split the 6-byte MAC into the 16-bit-high / 32-bit-low register pair. */
macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
macaddr_high += (u16)netdev->dev_addr[1];
macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
macaddr_low += (u32)netdev->dev_addr[5];
/* qp_nic_index[] is terminated by 0xf. */
for (i = 0; i < NES_MAX_PORT_COUNT; i++) { if (nesvnic->qp_nic_index[i] == 0xf) { break; } nes_write_indexed(nesdev, NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8), macaddr_low); nes_write_indexed(nesdev, NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8), (u32)macaddr_high | NES_MAC_ADDR_VALID | ((((u32)nesvnic->nic_index) << 16))); }
return 0;
}

/* Enable all-multicast reception for the given NIC bit (and make sure
 * unicast-all is off for it). */
static void set_allmulti(struct nes_device *nesdev, u32 nic_active_bit)
{
u32 nic_active;
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
nic_active &= ~nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
}

/* Address i within a flat array of ETH_ALEN-byte MAC addresses. */
#define get_addr(addrs, index) ((addrs) + (index) * ETH_ALEN)

/**
 * nes_netdev_set_multicast_list - ndo_set_multicast_list hook.
 * (Body continues past the end of this range.)
 */
static void nes_netdev_set_multicast_list(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter =
/* nes_netdev_set_multicast_list() continued: decide between promiscuous,
 * all-multicast, and per-address perfect-filter programming. */
nesvnic->nesdev->nesadapter;
u32 nic_active_bit; u32 nic_active; u32 perfect_filter_register_address; u32 macaddr_low; u16 macaddr_high; u8 mc_all_on = 0; u8 mc_index; int mc_nic_index = -1;
/* PFT entries below this mark are reserved for unicast/per-function use. */
u8 pft_entries_preallocated = max(nesadapter->adapter_fcn_count * nics_per_function, 4);
u8 max_pft_entries_avaiable = NES_PFT_SIZE - pft_entries_preallocated;
unsigned long flags;
int mc_count = netdev_mc_count(netdev);
spin_lock_irqsave(&nesadapter->resource_lock, flags);
nic_active_bit = 1 << nesvnic->nic_index;
if (netdev->flags & IFF_PROMISC) {
/* Promiscuous: enable both multicast-all and unicast-all for this NIC. */
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL); nic_active |= nic_active_bit; nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active); nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL); nic_active |= nic_active_bit; nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active); mc_all_on = 1;
} else if ((netdev->flags & IFF_ALLMULTI) || (nesvnic->nic_index > 3)) {
/* nic_index > 3 has no perfect-filter slots, so fall back to allmulti. */
set_allmulti(nesdev, nic_active_bit); mc_all_on = 1;
} else {
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL); nic_active &= ~nic_active_bit; nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active); nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL); nic_active &= ~nic_active_bit; nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
}
nes_debug(NES_DBG_NIC_RX, "Number of MC entries = %d, Promiscuous = %d, All Multicast = %d.\n", mc_count, !!(netdev->flags & IFF_PROMISC), !!(netdev->flags & IFF_ALLMULTI));
if (!mc_all_on) {
char *addrs; int i; struct netdev_hw_addr *ha;
/* Snapshot the MC list so it can be walked with the lock held. */
addrs = kmalloc(ETH_ALEN * mc_count, GFP_ATOMIC);
if (!addrs) { set_allmulti(nesdev, nic_active_bit); goto unlock; }
i = 0;
netdev_for_each_mc_addr(ha, netdev) memcpy(get_addr(addrs, i++), ha->addr, ETH_ALEN);
perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW + pft_entries_preallocated * 0x8;
for (i = 0, mc_index = 0; mc_index < max_pft_entries_avaiable; mc_index++) {
/* mcrq_mcast_filter may redirect an address to another nic_index;
 * a return of 0 means "skip this address". */
while (i < mc_count && nesvnic->mcrq_mcast_filter && ((mc_nic_index = nesvnic->mcrq_mcast_filter(nesvnic, get_addr(addrs, i++))) == 0));
if (mc_nic_index < 0) mc_nic_index = nesvnic->nic_index;
/* Skip PFT slots already claimed by another NIC. */
while (nesadapter->pft_mcast_map[mc_index] < 16 && nesadapter->pft_mcast_map[mc_index] != nesvnic->nic_index && mc_index < max_pft_entries_avaiable) { nes_debug(NES_DBG_NIC_RX, "mc_index=%d skipping nic_index=%d, " "used for=%d \n", mc_index, nesvnic->nic_index, nesadapter->pft_mcast_map[mc_index]); mc_index++; }
if (mc_index >= max_pft_entries_avaiable) break;
if (i < mc_count) {
char *addr = get_addr(addrs, i++);
nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %pM to register 0x%04X nic_idx=%d\n", addr, perfect_filter_register_address+(mc_index * 8), mc_nic_index);
macaddr_high = ((u16) addr[0]) << 8;
macaddr_high += (u16) addr[1];
macaddr_low = ((u32) addr[2]) << 24;
macaddr_low += ((u32) addr[3]) << 16;
macaddr_low += ((u32) addr[4]) << 8;
macaddr_low += (u32) addr[5];
nes_write_indexed(nesdev, perfect_filter_register_address+(mc_index * 8), macaddr_low);
nes_write_indexed(nesdev, perfect_filter_register_address+4+(mc_index * 8), (u32)macaddr_high | NES_MAC_ADDR_VALID | ((((u32)(1<<mc_nic_index)) << 16)));
nesadapter->pft_mcast_map[mc_index] = nesvnic->nic_index;
} else {
/* No more addresses: invalidate the remaining slots we own. */
nes_debug(NES_DBG_NIC_RX, "Clearing MC Address at register 0x%04X\n", perfect_filter_register_address+(mc_index * 8));
nes_write_indexed(nesdev, perfect_filter_register_address+4+(mc_index * 8), 0);
nesadapter->pft_mcast_map[mc_index] = 255;
} }
kfree(addrs);
/* PFT is not large enough */
if (i < mc_count) set_allmulti(nesdev, nic_active_bit);
}
unlock:
spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
}

/**
 * nes_netdev_change_mtu - ndo_change_mtu hook.
 * (Body continues past the end of this range.)
 */
static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; int ret = 0; u8 jumbomode = 0; u32 nic_active; u32 nic_active_bit; u32 uc_all_active; u32 mc_all_active;
if ((new_mtu <
/* nes_netdev_change_mtu() continued: validate the new MTU, retune the
 * interrupt timer for jumbo frames, and — if the interface is up — bounce
 * it while preserving the multicast-all/unicast-all enable bits. */
ETH_ZLEN) || (new_mtu > max_mtu)) return -EINVAL;
netdev->mtu = new_mtu;
nesvnic->max_frame_size = new_mtu + VLAN_ETH_HLEN;
if (netdev->mtu > 1500) { jumbomode=1; }
nes_nic_init_timer_defaults(nesdev, jumbomode);
if (netif_running(netdev)) {
nic_active_bit = 1 << nesvnic->nic_index;
/* Save the all-RX enable bits: stop/open would otherwise lose them. */
mc_all_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL) & nic_active_bit;
uc_all_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL) & nic_active_bit;
nes_netdev_stop(netdev);
nes_netdev_open(netdev);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
nic_active |= mc_all_active;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
nic_active |= uc_all_active;
nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
}
return ret;
}

/* Ethtool statistics labels.  Order MUST match the value-fill order in
 * nes_netdev_get_ethtool_stats() below. */
static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = { "Link Change Interrupts", "Linearized SKBs", "T/GSO Requests", "Pause Frames Sent", "Pause Frames Received", "Internal Routing Errors", "SQ SW Dropped SKBs", "SQ Full", "Segmented TSO Requests", "Rx Symbol Errors", "Rx Jabber Errors", "Rx Oversized Frames", "Rx Short Frames", "Rx Length Errors", "Rx CRC Errors", "Rx Port Discard", "Endnode Rx Discards", "Endnode Rx Octets", "Endnode Rx Frames", "Endnode Tx Octets", "Endnode Tx Frames", "Tx Errors", "mh detected", "mh pauses", "Retransmission Count", "CM Connects", "CM Accepts", "Disconnects", "Connected Events", "Connect Requests", "CM Rejects", "ModifyQP Timeouts", "CreateQPs", "SW DestroyQPs", "DestroyQPs", "CM Closes", "CM Packets Sent", "CM Packets Bounced", "CM Packets Created", "CM Packets Rcvd", "CM Packets Dropped", "CM Packets Retrans", "CM Listens Created", "CM Listens Destroyed", "CM Backlog Drops", "CM Loopbacks", "CM Nodes Created", "CM Nodes Destroyed", "CM Accel Drops", "CM Resets Received", "Free 4Kpbls", "Free 256pbls", "Timer Inits", "LRO aggregated", "LRO flushed", "LRO no_desc", };
#define NES_ETHTOOL_STAT_COUNT ARRAY_SIZE(nes_ethtool_stringset)

/**
 * nes_netdev_get_sset_count - number of ethtool stats strings.
 */
static int nes_netdev_get_sset_count(struct net_device *netdev, int stringset)
{
if (stringset == ETH_SS_STATS) return NES_ETHTOOL_STAT_COUNT;
else return -EINVAL;
}

/**
 * nes_netdev_get_strings - copy out the statistics label table.
 */
static void nes_netdev_get_strings(struct net_device *netdev, u32 stringset, u8 *ethtool_strings)
{
if (stringset == ETH_SS_STATS) memcpy(ethtool_strings, &nes_ethtool_stringset, sizeof(nes_ethtool_stringset));
}

/**
 * nes_netdev_get_ethtool_stats - fill target_stat_values[] in the exact
 * order of nes_ethtool_stringset.  (Body continues past this range.)
 */
static void nes_netdev_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *target_ethtool_stats, u64 *target_stat_values)
{
u64 u64temp; struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; struct nes_adapter *nesadapter = nesdev->nesadapter; u32 nic_count; u32 u32temp; u32 index = 0;
target_ethtool_stats->n_stats = NES_ETHTOOL_STAT_COUNT;
target_stat_values[index] = nesvnic->nesdev->link_status_interrupts;
target_stat_values[++index] = nesvnic->linearized_skbs;
target_stat_values[++index] = nesvnic->tso_requests;
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_pause_frames_sent += u32temp;
target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_sent;
/* The rx-pause value is accumulated here but only stored into
 * target_stat_values after the per-NIC loop below. */
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_RX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_pause_frames_received += u32temp;
u32temp = nes_read_indexed(nesdev, NES_IDX_PORT_RX_DISCARDS + (nesvnic->nesdev->mac_index*0x40));
nesvnic->nesdev->port_rx_discards += u32temp;
nesvnic->netstats.rx_dropped += u32temp;
u32temp = nes_read_indexed(nesdev, NES_IDX_PORT_TX_DISCARDS + (nesvnic->nesdev->mac_index*0x40));
nesvnic->nesdev->port_tx_discards += u32temp;
nesvnic->netstats.tx_dropped += u32temp;
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
/* nes_netdev_get_ethtool_stats() continued: accumulate per-MAC error
 * counters, then per-QP endnode counters, then store the remaining stat
 * values in stringset order. */
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_short_frames += u32temp;
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_oversized_frames += u32temp;
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_jabber_frames += u32temp;
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_length_errors += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_crc_errors += u32temp;
nesvnic->netstats.rx_crc_errors += u32temp;
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_tx_errors += u32temp;
nesvnic->netstats.tx_errors += u32temp;
/* Sum endnode counters over every QP owned by this vnic (list is
 * terminated by qp_nic_index == 0xf); HI/LO pairs form 64-bit values. */
for (nic_count = 0; nic_count < NES_MAX_PORT_COUNT; nic_count++) {
if (nesvnic->qp_nic_index[nic_count] == 0xf) break;
u32temp = nes_read_indexed(nesdev, NES_IDX_ENDNODE0_NSTAT_RX_DISCARD + (nesvnic->qp_nic_index[nic_count]*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->endnode_nstat_rx_discard += u32temp;
u64temp = (u64)nes_read_indexed(nesdev, NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO + (nesvnic->qp_nic_index[nic_count]*0x200));
u64temp += ((u64)nes_read_indexed(nesdev, NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI + (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
nesvnic->endnode_nstat_rx_octets += u64temp;
nesvnic->netstats.rx_bytes += u64temp;
u64temp = (u64)nes_read_indexed(nesdev, NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO + (nesvnic->qp_nic_index[nic_count]*0x200));
u64temp += ((u64)nes_read_indexed(nesdev, NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI + (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
nesvnic->endnode_nstat_rx_frames += u64temp;
nesvnic->netstats.rx_packets += u64temp;
u64temp = (u64)nes_read_indexed(nesdev, NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO + (nesvnic->qp_nic_index[nic_count]*0x200));
u64temp += ((u64)nes_read_indexed(nesdev, NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI + (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
nesvnic->endnode_nstat_tx_octets += u64temp;
nesvnic->netstats.tx_bytes += u64temp;
u64temp = (u64)nes_read_indexed(nesdev, NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO + (nesvnic->qp_nic_index[nic_count]*0x200));
u64temp += ((u64)nes_read_indexed(nesdev, NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI + (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
nesvnic->endnode_nstat_tx_frames += u64temp;
nesvnic->netstats.tx_packets += u64temp;
u32temp = nes_read_indexed(nesdev, NES_IDX_IPV4_TCP_REXMITS + (nesvnic->qp_nic_index[nic_count]*0x200));
nesvnic->endnode_ipv4_tcp_retransmits += u32temp;
}
/* Remaining values, still in nes_ethtool_stringset order. */
target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_received;
target_stat_values[++index] = nesdev->nesadapter->nic_rx_eth_route_err;
target_stat_values[++index] = nesvnic->tx_sw_dropped;
target_stat_values[++index] = nesvnic->sq_full;
target_stat_values[++index] = nesvnic->segmented_tso_requests;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_symbol_err_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_jabber_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_oversized_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_short_frames;
target_stat_values[++index] = nesvnic->netstats.rx_length_errors;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_crc_errors;
target_stat_values[++index] = nesvnic->nesdev->port_rx_discards;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_discard;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_octets;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_frames;
target_stat_values[++index] = nesvnic->endnode_nstat_tx_octets;
target_stat_values[++index] = nesvnic->endnode_nstat_tx_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_tx_errors;
target_stat_values[++index] = mh_detected;
target_stat_values[++index] = mh_pauses_sent;
target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
target_stat_values[++index] = atomic_read(&cm_connects);
target_stat_values[++index] = atomic_read(&cm_accepts);
target_stat_values[++index] = atomic_read(&cm_disconnects);
target_stat_values[++index] = atomic_read(&cm_connecteds);
target_stat_values[++index] = atomic_read(&cm_connect_reqs);
target_stat_values[++index] = atomic_read(&cm_rejects);
target_stat_values[++index] = atomic_read(&mod_qp_timouts);
target_stat_values[++index] = atomic_read(&qps_created);
target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
target_stat_values[++index] = atomic_read(&qps_destroyed);
target_stat_values[++index] = atomic_read(&cm_closes);
target_stat_values[++index] = cm_packets_sent;
target_stat_values[++index] = cm_packets_bounced;
target_stat_values[++index] = cm_packets_created;
target_stat_values[++index] = cm_packets_received;
target_stat_values[++index] = cm_packets_dropped;
target_stat_values[++index] = cm_packets_retrans;
target_stat_values[++index] = atomic_read(&cm_listens_created);
target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
target_stat_values[++index] = cm_backlog_drops;
target_stat_values[++index] = atomic_read(&cm_loopbacks);
target_stat_values[++index] = atomic_read(&cm_nodes_created);
target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
/* nes_netdev_get_ethtool_stats() continued: last few stat values. */
target_stat_values[++index] = atomic_read(&cm_resets_recvd);
target_stat_values[++index] = nesadapter->free_4kpbl;
target_stat_values[++index] = nesadapter->free_256pbl;
target_stat_values[++index] = int_mod_timer_init;
target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
}

/**
 * nes_netdev_get_drvinfo - fill ethtool driver identification strings.
 */
static void nes_netdev_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
strcpy(drvinfo->driver, DRV_NAME);
strcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev));
/* NOTE(review): the minor version mask 0x000000ff keeps only 8 bits while
 * the major uses >>16 — verify against the firmware version layout. */
sprintf(drvinfo->fw_version, "%u.%u", nesadapter->firmware_version>>16, nesadapter->firmware_version & 0x000000ff);
strcpy(drvinfo->version, DRV_VERSION);
drvinfo->testinfo_len = 0;
drvinfo->eedump_len = 0;
drvinfo->regdump_len = 0;
}

/**
 * nes_netdev_set_coalesce - ethtool set_coalesce hook.
 *
 * Zero-valued fields are treated as "leave unchanged".  Adaptive mode
 * switches the shared tune timer to dynamic limits; fixed mode programs
 * the periodic-control register directly.
 */
static int nes_netdev_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *et_coalesce)
{
struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; struct nes_adapter *nesadapter = nesdev->nesadapter; struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer; unsigned long flags;
spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
if (et_coalesce->rx_max_coalesced_frames_low) { shared_timer->threshold_low = et_coalesce->rx_max_coalesced_frames_low; }
if (et_coalesce->rx_max_coalesced_frames_irq) { shared_timer->threshold_target = et_coalesce->rx_max_coalesced_frames_irq; }
if (et_coalesce->rx_max_coalesced_frames_high) { shared_timer->threshold_high = et_coalesce->rx_max_coalesced_frames_high; }
if (et_coalesce->rx_coalesce_usecs_low) { shared_timer->timer_in_use_min = et_coalesce->rx_coalesce_usecs_low; }
if (et_coalesce->rx_coalesce_usecs_high) { shared_timer->timer_in_use_max = et_coalesce->rx_coalesce_usecs_high; }
spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
/* using this to drive total interrupt moderation */
nesadapter->et_rx_coalesce_usecs_irq = et_coalesce->rx_coalesce_usecs_irq;
if (et_coalesce->use_adaptive_rx_coalesce) {
nesadapter->et_use_adaptive_rx_coalesce = 1;
nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC;
nesadapter->et_rx_coalesce_usecs_irq = 0;
if (et_coalesce->pkt_rate_low) { nesadapter->et_pkt_rate_low = et_coalesce->pkt_rate_low; }
} else {
nesadapter->et_use_adaptive_rx_coalesce = 0;
nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT;
if (nesadapter->et_rx_coalesce_usecs_irq) { nes_write32(nesdev->regs+NES_PERIODIC_CONTROL, 0x80000000 | ((u32)(nesadapter->et_rx_coalesce_usecs_irq*8))); }
}
return 0;
}

/**
 * nes_netdev_get_coalesce - ethtool get_coalesce hook; reports the current
 * shared tune-timer settings via a locally built copy.
 */
static int nes_netdev_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *et_coalesce)
{
struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; struct nes_adapter *nesadapter = nesdev->nesadapter; struct ethtool_coalesce temp_et_coalesce; struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer; unsigned long flags;
memset(&temp_et_coalesce, 0, sizeof(temp_et_coalesce));
temp_et_coalesce.rx_coalesce_usecs_irq = nesadapter->et_rx_coalesce_usecs_irq;
temp_et_coalesce.use_adaptive_rx_coalesce = nesadapter->et_use_adaptive_rx_coalesce;
temp_et_coalesce.rate_sample_interval = nesadapter->et_rate_sample_interval;
temp_et_coalesce.pkt_rate_low = nesadapter->et_pkt_rate_low;
spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
temp_et_coalesce.rx_max_coalesced_frames_low = shared_timer->threshold_low;
temp_et_coalesce.rx_max_coalesced_frames_irq = shared_timer->threshold_target;
temp_et_coalesce.rx_max_coalesced_frames_high = shared_timer->threshold_high;
temp_et_coalesce.rx_coalesce_usecs_low = shared_timer->timer_in_use_min;
temp_et_coalesce.rx_coalesce_usecs_high = shared_timer->timer_in_use_max;
if (nesadapter->et_use_adaptive_rx_coalesce) { temp_et_coalesce.rx_coalesce_usecs_irq = shared_timer->timer_in_use; }
spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
memcpy(et_coalesce, &temp_et_coalesce, sizeof(*et_coalesce));
return 0;
}

/**
 * nes_netdev_get_pauseparam - report flow-control state (autoneg off).
 */
static void nes_netdev_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *et_pauseparam)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
et_pauseparam->autoneg = 0;
et_pauseparam->rx_pause = (nesvnic->nesdev->disable_rx_flow_control == 0) ? 1:0;
et_pauseparam->tx_pause = (nesvnic->nesdev->disable_tx_flow_control == 0) ? 1:0;
}

/**
 * nes_netdev_set_pauseparam - toggle TX pause (MAC TX config register) and
 * RX pause (MPP debug register) when the requested state differs from the
 * cached one.  Autoneg requests are accepted but ignored (see TODO).
 */
static int nes_netdev_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *et_pauseparam)
{
struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; u32 u32temp;
if (et_pauseparam->autoneg) {
/* TODO: should return unsupported */
return 0; }
if ((et_pauseparam->tx_pause == 1) && (nesdev->disable_tx_flow_control == 1)) {
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
nesdev->disable_tx_flow_control = 0;
} else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
nesdev->disable_tx_flow_control = 1;
}
if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
u32temp = nes_read_indexed(nesdev, NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40));
u32temp &= ~NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE;
nes_write_indexed(nesdev, NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp);
nesdev->disable_rx_flow_control = 0;
} else if ((et_pauseparam->rx_pause == 0) && (nesdev->disable_rx_flow_control == 0)) {
u32temp = nes_read_indexed(nesdev, NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40));
u32temp |= NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE;
nes_write_indexed(nesdev, NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp);
nesdev->disable_rx_flow_control = 1;
}
return 0;
}

/**
 * nes_netdev_get_settings - ethtool get_settings hook.
 *
 * 1G mode: PUMA PHY is internal/fixed; other 1G PHYs report autoneg from
 * MII register 0 bit 12.  10G: fibre for ARGUS/SFP_D/KR, else internal.
 */
static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
{
struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; struct nes_adapter *nesadapter = nesdev->nesadapter; u32 mac_index = nesdev->mac_index; u8 phy_type = nesadapter->phy_type[mac_index]; u8 phy_index = nesadapter->phy_index[mac_index]; u16 phy_data;
et_cmd->duplex = DUPLEX_FULL;
et_cmd->port = PORT_MII;
et_cmd->maxtxpkt = 511;
et_cmd->maxrxpkt = 511;
if (nesadapter->OneG_Mode) {
ethtool_cmd_speed_set(et_cmd, SPEED_1000);
if (phy_type == NES_PHY_TYPE_PUMA_1G) {
et_cmd->supported = SUPPORTED_1000baseT_Full;
et_cmd->advertising = ADVERTISED_1000baseT_Full;
et_cmd->autoneg = AUTONEG_DISABLE;
et_cmd->transceiver = XCVR_INTERNAL;
et_cmd->phy_address = mac_index;
} else {
unsigned long flags;
et_cmd->supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg;
et_cmd->advertising = ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg;
spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
/* Bit 12 of MII control register = autoneg enable. */
if (phy_data & 0x1000) et_cmd->autoneg = AUTONEG_ENABLE;
else et_cmd->autoneg = AUTONEG_DISABLE;
et_cmd->transceiver = XCVR_EXTERNAL;
et_cmd->phy_address = phy_index;
}
return 0;
}
if ((phy_type == NES_PHY_TYPE_ARGUS) || (phy_type == NES_PHY_TYPE_SFP_D) || (phy_type == NES_PHY_TYPE_KR)) {
et_cmd->transceiver = XCVR_EXTERNAL;
et_cmd->port = PORT_FIBRE;
et_cmd->supported = SUPPORTED_FIBRE;
et_cmd->advertising = ADVERTISED_FIBRE;
et_cmd->phy_address = phy_index;
} else {
et_cmd->transceiver = XCVR_INTERNAL;
et_cmd->supported = SUPPORTED_10000baseT_Full;
et_cmd->advertising = ADVERTISED_10000baseT_Full;
et_cmd->phy_address = mac_index;
}
ethtool_cmd_speed_set(et_cmd, SPEED_10000);
et_cmd->autoneg = AUTONEG_DISABLE;
return 0;
}

/**
 * nes_netdev_set_settings - ethtool set_settings hook; only autoneg
 * enable/disable on external 1G PHYs is honored, everything else is fixed.
 */
static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
{
struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; struct nes_adapter *nesadapter = nesdev->nesadapter;
if ((nesadapter->OneG_Mode) && (nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G)) {
unsigned long flags;
u16 phy_data;
u8 phy_index = nesadapter->phy_index[nesdev->mac_index];
spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
if (et_cmd->autoneg) {
/* Turn on Full duplex, Autoneg, and restart autonegotiation */
phy_data |= 0x1300;
} else {
/* Turn off autoneg */
phy_data &= ~0x1000;
}
nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
return 0;
}

/* Ethtool operations table for the nes netdev. */
static const struct ethtool_ops nes_ethtool_ops = { .get_link = ethtool_op_get_link, .get_settings = nes_netdev_get_settings, .set_settings = nes_netdev_set_settings, .get_strings = nes_netdev_get_strings, .get_sset_count = nes_netdev_get_sset_count, .get_ethtool_stats = nes_netdev_get_ethtool_stats, .get_drvinfo = nes_netdev_get_drvinfo, .get_coalesce = nes_netdev_get_coalesce, .set_coalesce = nes_netdev_set_coalesce, .get_pauseparam = nes_netdev_get_pauseparam, .set_pauseparam = nes_netdev_set_pauseparam, };

/* NOTE(review): nes_netdev_vlan_rx_register() is truncated at the edge of
 * this window; its body continues past the visible source. */
static void nes_netdev_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; struct nes_adapter *nesadapter = nesdev->nesadapter; u32 u32temp; unsigned long flags;
spin_lock_irqsave(&nesadapter->phy_lock, flags);
nesvnic->vlan_grp = grp;
nes_debug(NES_DBG_NETDEV, "%s: %s\n", __func__,
netdev->name); /* Enable/Disable VLAN Stripping */ u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG); if (grp) u32temp &= 0xfdffffff; else u32temp |= 0x02000000; nes_write_indexed(nesdev, NES_IDX_PCIX_DIAG, u32temp); spin_unlock_irqrestore(&nesadapter->phy_lock, flags); } static const struct net_device_ops nes_netdev_ops = { .ndo_open = nes_netdev_open, .ndo_stop = nes_netdev_stop, .ndo_start_xmit = nes_netdev_start_xmit, .ndo_get_stats = nes_netdev_get_stats, .ndo_tx_timeout = nes_netdev_tx_timeout, .ndo_set_mac_address = nes_netdev_set_mac_address, .ndo_set_multicast_list = nes_netdev_set_multicast_list, .ndo_change_mtu = nes_netdev_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_register = nes_netdev_vlan_rx_register, }; /** * nes_netdev_init - initialize network device */ struct net_device *nes_netdev_init(struct nes_device *nesdev, void __iomem *mmio_addr) { u64 u64temp; struct nes_vnic *nesvnic; struct net_device *netdev; struct nic_qp_map *curr_qp_map; u8 phy_type = nesdev->nesadapter->phy_type[nesdev->mac_index]; netdev = alloc_etherdev(sizeof(struct nes_vnic)); if (!netdev) { printk(KERN_ERR PFX "nesvnic etherdev alloc failed"); return NULL; } nesvnic = netdev_priv(netdev); nes_debug(NES_DBG_INIT, "netdev = %p, %s\n", netdev, netdev->name); SET_NETDEV_DEV(netdev, &nesdev->pcidev->dev); netdev->watchdog_timeo = NES_TX_TIMEOUT; netdev->irq = nesdev->pcidev->irq; netdev->mtu = ETH_DATA_LEN; netdev->hard_header_len = ETH_HLEN; netdev->addr_len = ETH_ALEN; netdev->type = ARPHRD_ETHER; netdev->features = NETIF_F_HIGHDMA; netdev->netdev_ops = &nes_netdev_ops; netdev->ethtool_ops = &nes_ethtool_ops; netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128); nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n"); netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; /* Fill in the port structure */ nesvnic->netdev = netdev; nesvnic->nesdev = nesdev; nesvnic->msg_enable = netif_msg_init(debug, default_msg); nesvnic->netdev_index = 
nesdev->netdev_count; nesvnic->perfect_filter_index = nesdev->nesadapter->netdev_count; nesvnic->max_frame_size = netdev->mtu + netdev->hard_header_len + VLAN_HLEN; curr_qp_map = nic_qp_mapping_per_function[PCI_FUNC(nesdev->pcidev->devfn)]; nesvnic->nic.qp_id = curr_qp_map[nesdev->netdev_count].qpid; nesvnic->nic_index = curr_qp_map[nesdev->netdev_count].nic_index; nesvnic->logical_port = curr_qp_map[nesdev->netdev_count].logical_port; /* Setup the burned in MAC address */ u64temp = (u64)nesdev->nesadapter->mac_addr_low; u64temp += ((u64)nesdev->nesadapter->mac_addr_high) << 32; u64temp += nesvnic->nic_index; netdev->dev_addr[0] = (u8)(u64temp>>40); netdev->dev_addr[1] = (u8)(u64temp>>32); netdev->dev_addr[2] = (u8)(u64temp>>24); netdev->dev_addr[3] = (u8)(u64temp>>16); netdev->dev_addr[4] = (u8)(u64temp>>8); netdev->dev_addr[5] = (u8)u64temp; memcpy(netdev->perm_addr, netdev->dev_addr, 6); netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM; if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) netdev->hw_features |= NETIF_F_TSO; netdev->features |= netdev->hw_features; netdev->hw_features |= NETIF_F_LRO; nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d," " nic_index = %d, logical_port = %d, mac_index = %d.\n", nesvnic, (unsigned long)netdev->features, nesvnic->nic.qp_id, nesvnic->nic_index, nesvnic->logical_port, nesdev->mac_index); if (nesvnic->nesdev->nesadapter->port_count == 1 && nesvnic->nesdev->nesadapter->adapter_fcn_count == 1) { nesvnic->qp_nic_index[0] = nesvnic->nic_index; nesvnic->qp_nic_index[1] = nesvnic->nic_index + 1; if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT) { nesvnic->qp_nic_index[2] = 0xf; nesvnic->qp_nic_index[3] = 0xf; } else { nesvnic->qp_nic_index[2] = nesvnic->nic_index + 2; nesvnic->qp_nic_index[3] = nesvnic->nic_index + 3; } } else { if (nesvnic->nesdev->nesadapter->port_count == 2 || (nesvnic->nesdev->nesadapter->port_count == 1 && 
nesvnic->nesdev->nesadapter->adapter_fcn_count == 2)) { nesvnic->qp_nic_index[0] = nesvnic->nic_index; nesvnic->qp_nic_index[1] = nesvnic->nic_index + 2; nesvnic->qp_nic_index[2] = 0xf; nesvnic->qp_nic_index[3] = 0xf; } else { nesvnic->qp_nic_index[0] = nesvnic->nic_index; nesvnic->qp_nic_index[1] = 0xf; nesvnic->qp_nic_index[2] = 0xf; nesvnic->qp_nic_index[3] = 0xf; } } nesvnic->next_qp_nic_index = 0; if (nesdev->netdev_count == 0) { nesvnic->rdma_enabled = 1; } else { nesvnic->rdma_enabled = 0; } nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id; init_timer(&nesvnic->event_timer); nesvnic->event_timer.function = NULL; spin_lock_init(&nesvnic->tx_lock); spin_lock_init(&nesvnic->port_ibevent_lock); nesdev->netdev[nesdev->netdev_count] = netdev; nes_debug(NES_DBG_INIT, "Adding nesvnic (%p) to the adapters nesvnic_list for MAC%d.\n", nesvnic, nesdev->mac_index); list_add_tail(&nesvnic->list, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]); if ((nesdev->netdev_count == 0) && ((PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index) || ((phy_type == NES_PHY_TYPE_PUMA_1G) && (((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) || ((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) { u32 u32temp; u32 link_mask = 0; u32 link_val = 0; u16 temp_phy_data; u16 phy_data = 0; unsigned long flags; u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + (0x200 * (nesdev->mac_index & 1))); if (phy_type != NES_PHY_TYPE_PUMA_1G) { u32temp |= 0x00200000; nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + (0x200 * (nesdev->mac_index & 1)), u32temp); } /* Check and set linkup here. 
This is for back to back */ /* configuration where second port won't get link interrupt */ switch (phy_type) { case NES_PHY_TYPE_PUMA_1G: if (nesdev->mac_index < 2) { link_mask = 0x01010000; link_val = 0x01010000; } else { link_mask = 0x02020000; link_val = 0x02020000; } break; case NES_PHY_TYPE_SFP_D: spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags); nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 1, 0x9003); temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 3, 0x0021); nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 3, 0x0021); phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags); phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0; break; default: link_mask = 0x0f1f0000; link_val = 0x0f0f0000; break; } u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + (0x200 * (nesdev->mac_index & 1))); if (phy_type == NES_PHY_TYPE_SFP_D) { if (phy_data & 0x0004) nesvnic->linkup = 1; } else { if ((u32temp & link_mask) == link_val) nesvnic->linkup = 1; } /* clear the MAC interrupt status, assumes direct logical to physical mapping */ u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index)); nes_debug(NES_DBG_INIT, "Phy interrupt status = 0x%X.\n", u32temp); nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index), u32temp); nes_init_phy(nesdev); } return netdev; } /** * nes_netdev_destroy - destroy network device structure */ void nes_netdev_destroy(struct net_device *netdev) { struct nes_vnic *nesvnic = netdev_priv(netdev); /* make sure 'stop' method is called by Linux stack */ /* nes_netdev_stop(netdev); */ list_del(&nesvnic->list); if (nesvnic->of_device_registered) { 
nes_destroy_ofa_device(nesvnic->nesibdev); } free_netdev(netdev); } /** * nes_nic_cm_xmit -- CM calls this to send out pkts */ int nes_nic_cm_xmit(struct sk_buff *skb, struct net_device *netdev) { int ret; skb->dev = netdev; ret = dev_queue_xmit(skb); if (ret) { nes_debug(NES_DBG_CM, "Bad return code from dev_queue_xmit %d\n", ret); } return ret; }
gpl-2.0
AdrianoMartins/android_kernel_lge_v500
sound/soc/samsung/i2s.c
4166
26172
/* sound/soc/samsung/i2s.c * * ALSA SoC Audio Layer - Samsung I2S Controller driver * * Copyright (c) 2010 Samsung Electronics Co. Ltd. * Jaswinder Singh <jassisinghbrar@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <sound/soc.h> #include <sound/pcm_params.h> #include <plat/audio.h> #include "dma.h" #include "idma.h" #include "i2s.h" #include "i2s-regs.h" #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) struct i2s_dai { /* Platform device for this DAI */ struct platform_device *pdev; /* IOREMAP'd SFRs */ void __iomem *addr; /* Physical base address of SFRs */ u32 base; /* Rate of RCLK source clock */ unsigned long rclk_srcrate; /* Frame Clock */ unsigned frmclk; /* * Specifically requested RCLK,BCLK by MACHINE Driver. * 0 indicates CPU driver is free to choose any value. 
*/ unsigned rfs, bfs; /* I2S Controller's core clock */ struct clk *clk; /* Clock for generating I2S signals */ struct clk *op_clk; /* Array of clock names for op_clk */ const char **src_clk; /* Pointer to the Primary_Fifo if this is Sec_Fifo, NULL otherwise */ struct i2s_dai *pri_dai; /* Pointer to the Secondary_Fifo if it has one, NULL otherwise */ struct i2s_dai *sec_dai; #define DAI_OPENED (1 << 0) /* Dai is opened */ #define DAI_MANAGER (1 << 1) /* Dai is the manager */ unsigned mode; /* Driver for this DAI */ struct snd_soc_dai_driver i2s_dai_drv; /* DMA parameters */ struct s3c_dma_params dma_playback; struct s3c_dma_params dma_capture; struct s3c_dma_params idma_playback; u32 quirks; u32 suspend_i2smod; u32 suspend_i2scon; u32 suspend_i2spsr; }; /* Lock for cross i/f checks */ static DEFINE_SPINLOCK(lock); /* If this is the 'overlay' stereo DAI */ static inline bool is_secondary(struct i2s_dai *i2s) { return i2s->pri_dai ? true : false; } /* If operating in SoC-Slave mode */ static inline bool is_slave(struct i2s_dai *i2s) { return (readl(i2s->addr + I2SMOD) & MOD_SLAVE) ? true : false; } /* If this interface of the controller is transmitting data */ static inline bool tx_active(struct i2s_dai *i2s) { u32 active; if (!i2s) return false; active = readl(i2s->addr + I2SCON); if (is_secondary(i2s)) active &= CON_TXSDMA_ACTIVE; else active &= CON_TXDMA_ACTIVE; return active ? true : false; } /* If the other interface of the controller is transmitting data */ static inline bool other_tx_active(struct i2s_dai *i2s) { struct i2s_dai *other = i2s->pri_dai ? 
: i2s->sec_dai; return tx_active(other); } /* If any interface of the controller is transmitting data */ static inline bool any_tx_active(struct i2s_dai *i2s) { return tx_active(i2s) || other_tx_active(i2s); } /* If this interface of the controller is receiving data */ static inline bool rx_active(struct i2s_dai *i2s) { u32 active; if (!i2s) return false; active = readl(i2s->addr + I2SCON) & CON_RXDMA_ACTIVE; return active ? true : false; } /* If the other interface of the controller is receiving data */ static inline bool other_rx_active(struct i2s_dai *i2s) { struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai; return rx_active(other); } /* If any interface of the controller is receiving data */ static inline bool any_rx_active(struct i2s_dai *i2s) { return rx_active(i2s) || other_rx_active(i2s); } /* If the other DAI is transmitting or receiving data */ static inline bool other_active(struct i2s_dai *i2s) { return other_rx_active(i2s) || other_tx_active(i2s); } /* If this DAI is transmitting or receiving data */ static inline bool this_active(struct i2s_dai *i2s) { return tx_active(i2s) || rx_active(i2s); } /* If the controller is active anyway */ static inline bool any_active(struct i2s_dai *i2s) { return this_active(i2s) || other_active(i2s); } static inline struct i2s_dai *to_info(struct snd_soc_dai *dai) { return snd_soc_dai_get_drvdata(dai); } static inline bool is_opened(struct i2s_dai *i2s) { if (i2s && (i2s->mode & DAI_OPENED)) return true; else return false; } static inline bool is_manager(struct i2s_dai *i2s) { if (is_opened(i2s) && (i2s->mode & DAI_MANAGER)) return true; else return false; } /* Read RCLK of I2S (in multiples of LRCLK) */ static inline unsigned get_rfs(struct i2s_dai *i2s) { u32 rfs = (readl(i2s->addr + I2SMOD) >> 3) & 0x3; switch (rfs) { case 3: return 768; case 2: return 384; case 1: return 512; default: return 256; } } /* Write RCLK of I2S (in multiples of LRCLK) */ static inline void set_rfs(struct i2s_dai *i2s, unsigned rfs) { 
u32 mod = readl(i2s->addr + I2SMOD); mod &= ~MOD_RCLK_MASK; switch (rfs) { case 768: mod |= MOD_RCLK_768FS; break; case 512: mod |= MOD_RCLK_512FS; break; case 384: mod |= MOD_RCLK_384FS; break; default: mod |= MOD_RCLK_256FS; break; } writel(mod, i2s->addr + I2SMOD); } /* Read Bit-Clock of I2S (in multiples of LRCLK) */ static inline unsigned get_bfs(struct i2s_dai *i2s) { u32 bfs = (readl(i2s->addr + I2SMOD) >> 1) & 0x3; switch (bfs) { case 3: return 24; case 2: return 16; case 1: return 48; default: return 32; } } /* Write Bit-Clock of I2S (in multiples of LRCLK) */ static inline void set_bfs(struct i2s_dai *i2s, unsigned bfs) { u32 mod = readl(i2s->addr + I2SMOD); mod &= ~MOD_BCLK_MASK; switch (bfs) { case 48: mod |= MOD_BCLK_48FS; break; case 32: mod |= MOD_BCLK_32FS; break; case 24: mod |= MOD_BCLK_24FS; break; case 16: mod |= MOD_BCLK_16FS; break; default: dev_err(&i2s->pdev->dev, "Wrong BCLK Divider!\n"); return; } writel(mod, i2s->addr + I2SMOD); } /* Sample-Size */ static inline int get_blc(struct i2s_dai *i2s) { int blc = readl(i2s->addr + I2SMOD); blc = (blc >> 13) & 0x3; switch (blc) { case 2: return 24; case 1: return 8; default: return 16; } } /* TX Channel Control */ static void i2s_txctrl(struct i2s_dai *i2s, int on) { void __iomem *addr = i2s->addr; u32 con = readl(addr + I2SCON); u32 mod = readl(addr + I2SMOD) & ~MOD_MASK; if (on) { con |= CON_ACTIVE; con &= ~CON_TXCH_PAUSE; if (is_secondary(i2s)) { con |= CON_TXSDMA_ACTIVE; con &= ~CON_TXSDMA_PAUSE; } else { con |= CON_TXDMA_ACTIVE; con &= ~CON_TXDMA_PAUSE; } if (any_rx_active(i2s)) mod |= MOD_TXRX; else mod |= MOD_TXONLY; } else { if (is_secondary(i2s)) { con |= CON_TXSDMA_PAUSE; con &= ~CON_TXSDMA_ACTIVE; } else { con |= CON_TXDMA_PAUSE; con &= ~CON_TXDMA_ACTIVE; } if (other_tx_active(i2s)) { writel(con, addr + I2SCON); return; } con |= CON_TXCH_PAUSE; if (any_rx_active(i2s)) mod |= MOD_RXONLY; else con &= ~CON_ACTIVE; } writel(mod, addr + I2SMOD); writel(con, addr + I2SCON); } /* RX Channel 
Control */ static void i2s_rxctrl(struct i2s_dai *i2s, int on) { void __iomem *addr = i2s->addr; u32 con = readl(addr + I2SCON); u32 mod = readl(addr + I2SMOD) & ~MOD_MASK; if (on) { con |= CON_RXDMA_ACTIVE | CON_ACTIVE; con &= ~(CON_RXDMA_PAUSE | CON_RXCH_PAUSE); if (any_tx_active(i2s)) mod |= MOD_TXRX; else mod |= MOD_RXONLY; } else { con |= CON_RXDMA_PAUSE | CON_RXCH_PAUSE; con &= ~CON_RXDMA_ACTIVE; if (any_tx_active(i2s)) mod |= MOD_TXONLY; else con &= ~CON_ACTIVE; } writel(mod, addr + I2SMOD); writel(con, addr + I2SCON); } /* Flush FIFO of an interface */ static inline void i2s_fifo(struct i2s_dai *i2s, u32 flush) { void __iomem *fic; u32 val; if (!i2s) return; if (is_secondary(i2s)) fic = i2s->addr + I2SFICS; else fic = i2s->addr + I2SFIC; /* Flush the FIFO */ writel(readl(fic) | flush, fic); /* Be patient */ val = msecs_to_loops(1) / 1000; /* 1 usec */ while (--val) cpu_relax(); writel(readl(fic) & ~flush, fic); } static int i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int rfs, int dir) { struct i2s_dai *i2s = to_info(dai); struct i2s_dai *other = i2s->pri_dai ? 
: i2s->sec_dai; u32 mod = readl(i2s->addr + I2SMOD); switch (clk_id) { case SAMSUNG_I2S_CDCLK: /* Shouldn't matter in GATING(CLOCK_IN) mode */ if (dir == SND_SOC_CLOCK_IN) rfs = 0; if ((rfs && other->rfs && (other->rfs != rfs)) || (any_active(i2s) && (((dir == SND_SOC_CLOCK_IN) && !(mod & MOD_CDCLKCON)) || ((dir == SND_SOC_CLOCK_OUT) && (mod & MOD_CDCLKCON))))) { dev_err(&i2s->pdev->dev, "%s:%d Other DAI busy\n", __func__, __LINE__); return -EAGAIN; } if (dir == SND_SOC_CLOCK_IN) mod |= MOD_CDCLKCON; else mod &= ~MOD_CDCLKCON; i2s->rfs = rfs; break; case SAMSUNG_I2S_RCLKSRC_0: /* clock corrsponding to IISMOD[10] := 0 */ case SAMSUNG_I2S_RCLKSRC_1: /* clock corrsponding to IISMOD[10] := 1 */ if ((i2s->quirks & QUIRK_NO_MUXPSR) || (clk_id == SAMSUNG_I2S_RCLKSRC_0)) clk_id = 0; else clk_id = 1; if (!any_active(i2s)) { if (i2s->op_clk) { if ((clk_id && !(mod & MOD_IMS_SYSMUX)) || (!clk_id && (mod & MOD_IMS_SYSMUX))) { clk_disable(i2s->op_clk); clk_put(i2s->op_clk); } else { i2s->rclk_srcrate = clk_get_rate(i2s->op_clk); return 0; } } i2s->op_clk = clk_get(&i2s->pdev->dev, i2s->src_clk[clk_id]); clk_enable(i2s->op_clk); i2s->rclk_srcrate = clk_get_rate(i2s->op_clk); /* Over-ride the other's */ if (other) { other->op_clk = i2s->op_clk; other->rclk_srcrate = i2s->rclk_srcrate; } } else if ((!clk_id && (mod & MOD_IMS_SYSMUX)) || (clk_id && !(mod & MOD_IMS_SYSMUX))) { dev_err(&i2s->pdev->dev, "%s:%d Other DAI busy\n", __func__, __LINE__); return -EAGAIN; } else { /* Call can't be on the active DAI */ i2s->op_clk = other->op_clk; i2s->rclk_srcrate = other->rclk_srcrate; return 0; } if (clk_id == 0) mod &= ~MOD_IMS_SYSMUX; else mod |= MOD_IMS_SYSMUX; break; default: dev_err(&i2s->pdev->dev, "We don't serve that!\n"); return -EINVAL; } writel(mod, i2s->addr + I2SMOD); return 0; } static int i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct i2s_dai *i2s = to_info(dai); u32 mod = readl(i2s->addr + I2SMOD); u32 tmp = 0; /* Format is priority */ switch (fmt & 
SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_RIGHT_J: tmp |= MOD_LR_RLOW; tmp |= MOD_SDF_MSB; break; case SND_SOC_DAIFMT_LEFT_J: tmp |= MOD_LR_RLOW; tmp |= MOD_SDF_LSB; break; case SND_SOC_DAIFMT_I2S: tmp |= MOD_SDF_IIS; break; default: dev_err(&i2s->pdev->dev, "Format not supported\n"); return -EINVAL; } /* * INV flag is relative to the FORMAT flag - if set it simply * flips the polarity specified by the Standard */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_NB_IF: if (tmp & MOD_LR_RLOW) tmp &= ~MOD_LR_RLOW; else tmp |= MOD_LR_RLOW; break; default: dev_err(&i2s->pdev->dev, "Polarity not supported\n"); return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: tmp |= MOD_SLAVE; break; case SND_SOC_DAIFMT_CBS_CFS: /* Set default source clock in Master mode */ if (i2s->rclk_srcrate == 0) i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0, 0, SND_SOC_CLOCK_IN); break; default: dev_err(&i2s->pdev->dev, "master/slave format not supported\n"); return -EINVAL; } if (any_active(i2s) && ((mod & (MOD_SDF_MASK | MOD_LR_RLOW | MOD_SLAVE)) != tmp)) { dev_err(&i2s->pdev->dev, "%s:%d Other DAI busy\n", __func__, __LINE__); return -EAGAIN; } mod &= ~(MOD_SDF_MASK | MOD_LR_RLOW | MOD_SLAVE); mod |= tmp; writel(mod, i2s->addr + I2SMOD); return 0; } static int i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct i2s_dai *i2s = to_info(dai); u32 mod = readl(i2s->addr + I2SMOD); if (!is_secondary(i2s)) mod &= ~(MOD_DC2_EN | MOD_DC1_EN); switch (params_channels(params)) { case 6: mod |= MOD_DC2_EN; case 4: mod |= MOD_DC1_EN; break; case 2: if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) i2s->dma_playback.dma_size = 4; else i2s->dma_capture.dma_size = 4; break; case 1: if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) i2s->dma_playback.dma_size = 2; else i2s->dma_capture.dma_size = 2; break; default: dev_err(&i2s->pdev->dev, "%d channels 
not supported\n", params_channels(params)); return -EINVAL; } if (is_secondary(i2s)) mod &= ~MOD_BLCS_MASK; else mod &= ~MOD_BLCP_MASK; if (is_manager(i2s)) mod &= ~MOD_BLC_MASK; switch (params_format(params)) { case SNDRV_PCM_FORMAT_S8: if (is_secondary(i2s)) mod |= MOD_BLCS_8BIT; else mod |= MOD_BLCP_8BIT; if (is_manager(i2s)) mod |= MOD_BLC_8BIT; break; case SNDRV_PCM_FORMAT_S16_LE: if (is_secondary(i2s)) mod |= MOD_BLCS_16BIT; else mod |= MOD_BLCP_16BIT; if (is_manager(i2s)) mod |= MOD_BLC_16BIT; break; case SNDRV_PCM_FORMAT_S24_LE: if (is_secondary(i2s)) mod |= MOD_BLCS_24BIT; else mod |= MOD_BLCP_24BIT; if (is_manager(i2s)) mod |= MOD_BLC_24BIT; break; default: dev_err(&i2s->pdev->dev, "Format(%d) not supported\n", params_format(params)); return -EINVAL; } writel(mod, i2s->addr + I2SMOD); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) snd_soc_dai_set_dma_data(dai, substream, (void *)&i2s->dma_playback); else snd_soc_dai_set_dma_data(dai, substream, (void *)&i2s->dma_capture); i2s->frmclk = params_rate(params); return 0; } /* We set constraints on the substream acc to the version of I2S */ static int i2s_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct i2s_dai *i2s = to_info(dai); struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai; unsigned long flags; spin_lock_irqsave(&lock, flags); i2s->mode |= DAI_OPENED; if (is_manager(other)) i2s->mode &= ~DAI_MANAGER; else i2s->mode |= DAI_MANAGER; /* Enforce set_sysclk in Master mode */ i2s->rclk_srcrate = 0; spin_unlock_irqrestore(&lock, flags); return 0; } static void i2s_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct i2s_dai *i2s = to_info(dai); struct i2s_dai *other = i2s->pri_dai ? 
: i2s->sec_dai; unsigned long flags; spin_lock_irqsave(&lock, flags); i2s->mode &= ~DAI_OPENED; i2s->mode &= ~DAI_MANAGER; if (is_opened(other)) other->mode |= DAI_MANAGER; /* Reset any constraint on RFS and BFS */ i2s->rfs = 0; i2s->bfs = 0; spin_unlock_irqrestore(&lock, flags); /* Gate CDCLK by default */ if (!is_opened(other)) i2s_set_sysclk(dai, SAMSUNG_I2S_CDCLK, 0, SND_SOC_CLOCK_IN); } static int config_setup(struct i2s_dai *i2s) { struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai; unsigned rfs, bfs, blc; u32 psr; blc = get_blc(i2s); bfs = i2s->bfs; if (!bfs && other) bfs = other->bfs; /* Select least possible multiple(2) if no constraint set */ if (!bfs) bfs = blc * 2; rfs = i2s->rfs; if (!rfs && other) rfs = other->rfs; if ((rfs == 256 || rfs == 512) && (blc == 24)) { dev_err(&i2s->pdev->dev, "%d-RFS not supported for 24-blc\n", rfs); return -EINVAL; } if (!rfs) { if (bfs == 16 || bfs == 32) rfs = 256; else rfs = 384; } /* If already setup and running */ if (any_active(i2s) && (get_rfs(i2s) != rfs || get_bfs(i2s) != bfs)) { dev_err(&i2s->pdev->dev, "%s:%d Other DAI busy\n", __func__, __LINE__); return -EAGAIN; } /* Don't bother RFS, BFS & PSR in Slave mode */ if (is_slave(i2s)) return 0; set_bfs(i2s, bfs); set_rfs(i2s, rfs); if (!(i2s->quirks & QUIRK_NO_MUXPSR)) { psr = i2s->rclk_srcrate / i2s->frmclk / rfs; writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR); dev_dbg(&i2s->pdev->dev, "RCLK_SRC=%luHz PSR=%u, RCLK=%dfs, BCLK=%dfs\n", i2s->rclk_srcrate, psr, rfs, bfs); } return 0; } static int i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE); struct snd_soc_pcm_runtime *rtd = substream->private_data; struct i2s_dai *i2s = to_info(rtd->cpu_dai); unsigned long flags; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: local_irq_save(flags); if (config_setup(i2s)) { local_irq_restore(flags); 
return -EINVAL; } if (capture) i2s_rxctrl(i2s, 1); else i2s_txctrl(i2s, 1); local_irq_restore(flags); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: local_irq_save(flags); if (capture) { i2s_rxctrl(i2s, 0); i2s_fifo(i2s, FIC_RXFLUSH); } else { i2s_txctrl(i2s, 0); i2s_fifo(i2s, FIC_TXFLUSH); } local_irq_restore(flags); break; } return 0; } static int i2s_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div) { struct i2s_dai *i2s = to_info(dai); struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai; switch (div_id) { case SAMSUNG_I2S_DIV_BCLK: if ((any_active(i2s) && div && (get_bfs(i2s) != div)) || (other && other->bfs && (other->bfs != div))) { dev_err(&i2s->pdev->dev, "%s:%d Other DAI busy\n", __func__, __LINE__); return -EAGAIN; } i2s->bfs = div; break; default: dev_err(&i2s->pdev->dev, "Invalid clock divider(%d)\n", div_id); return -EINVAL; } return 0; } static snd_pcm_sframes_t i2s_delay(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct i2s_dai *i2s = to_info(dai); u32 reg = readl(i2s->addr + I2SFIC); snd_pcm_sframes_t delay; if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) delay = FIC_RXCOUNT(reg); else if (is_secondary(i2s)) delay = FICS_TXCOUNT(readl(i2s->addr + I2SFICS)); else delay = FIC_TXCOUNT(reg); return delay; } #ifdef CONFIG_PM static int i2s_suspend(struct snd_soc_dai *dai) { struct i2s_dai *i2s = to_info(dai); if (dai->active) { i2s->suspend_i2smod = readl(i2s->addr + I2SMOD); i2s->suspend_i2scon = readl(i2s->addr + I2SCON); i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR); } return 0; } static int i2s_resume(struct snd_soc_dai *dai) { struct i2s_dai *i2s = to_info(dai); if (dai->active) { writel(i2s->suspend_i2scon, i2s->addr + I2SCON); writel(i2s->suspend_i2smod, i2s->addr + I2SMOD); writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR); } return 0; } #else #define i2s_suspend NULL #define i2s_resume NULL #endif static int samsung_i2s_dai_probe(struct snd_soc_dai *dai) { 
struct i2s_dai *i2s = to_info(dai); struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai; if (other && other->clk) /* If this is probe on secondary */ goto probe_exit; i2s->addr = ioremap(i2s->base, 0x100); if (i2s->addr == NULL) { dev_err(&i2s->pdev->dev, "cannot ioremap registers\n"); return -ENXIO; } i2s->clk = clk_get(&i2s->pdev->dev, "iis"); if (IS_ERR(i2s->clk)) { dev_err(&i2s->pdev->dev, "failed to get i2s_clock\n"); iounmap(i2s->addr); return -ENOENT; } clk_enable(i2s->clk); if (other) { other->addr = i2s->addr; other->clk = i2s->clk; } if (i2s->quirks & QUIRK_NEED_RSTCLR) writel(CON_RSTCLR, i2s->addr + I2SCON); if (i2s->quirks & QUIRK_SEC_DAI) idma_reg_addr_init(i2s->addr, i2s->sec_dai->idma_playback.dma_addr); probe_exit: /* Reset any constraint on RFS and BFS */ i2s->rfs = 0; i2s->bfs = 0; i2s_txctrl(i2s, 0); i2s_rxctrl(i2s, 0); i2s_fifo(i2s, FIC_TXFLUSH); i2s_fifo(other, FIC_TXFLUSH); i2s_fifo(i2s, FIC_RXFLUSH); /* Gate CDCLK by default */ if (!is_opened(other)) i2s_set_sysclk(dai, SAMSUNG_I2S_CDCLK, 0, SND_SOC_CLOCK_IN); return 0; } static int samsung_i2s_dai_remove(struct snd_soc_dai *dai) { struct i2s_dai *i2s = snd_soc_dai_get_drvdata(dai); struct i2s_dai *other = i2s->pri_dai ? 
: i2s->sec_dai; if (!other || !other->clk) { if (i2s->quirks & QUIRK_NEED_RSTCLR) writel(0, i2s->addr + I2SCON); clk_disable(i2s->clk); clk_put(i2s->clk); iounmap(i2s->addr); } i2s->clk = NULL; return 0; } static const struct snd_soc_dai_ops samsung_i2s_dai_ops = { .trigger = i2s_trigger, .hw_params = i2s_hw_params, .set_fmt = i2s_set_fmt, .set_clkdiv = i2s_set_clkdiv, .set_sysclk = i2s_set_sysclk, .startup = i2s_startup, .shutdown = i2s_shutdown, .delay = i2s_delay, }; #define SAMSUNG_I2S_RATES SNDRV_PCM_RATE_8000_96000 #define SAMSUNG_I2S_FMTS (SNDRV_PCM_FMTBIT_S8 | \ SNDRV_PCM_FMTBIT_S16_LE | \ SNDRV_PCM_FMTBIT_S24_LE) static __devinit struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec) { struct i2s_dai *i2s; i2s = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dai), GFP_KERNEL); if (i2s == NULL) return NULL; i2s->pdev = pdev; i2s->pri_dai = NULL; i2s->sec_dai = NULL; i2s->i2s_dai_drv.symmetric_rates = 1; i2s->i2s_dai_drv.probe = samsung_i2s_dai_probe; i2s->i2s_dai_drv.remove = samsung_i2s_dai_remove; i2s->i2s_dai_drv.ops = &samsung_i2s_dai_ops; i2s->i2s_dai_drv.suspend = i2s_suspend; i2s->i2s_dai_drv.resume = i2s_resume; i2s->i2s_dai_drv.playback.channels_min = 2; i2s->i2s_dai_drv.playback.channels_max = 2; i2s->i2s_dai_drv.playback.rates = SAMSUNG_I2S_RATES; i2s->i2s_dai_drv.playback.formats = SAMSUNG_I2S_FMTS; if (!sec) { i2s->i2s_dai_drv.capture.channels_min = 1; i2s->i2s_dai_drv.capture.channels_max = 2; i2s->i2s_dai_drv.capture.rates = SAMSUNG_I2S_RATES; i2s->i2s_dai_drv.capture.formats = SAMSUNG_I2S_FMTS; } else { /* Create a new platform_device for Secondary */ i2s->pdev = platform_device_register_resndata(NULL, pdev->name, pdev->id + SAMSUNG_I2S_SECOFF, NULL, 0, NULL, 0); if (IS_ERR(i2s->pdev)) return NULL; } /* Pre-assign snd_soc_dai_set_drvdata */ dev_set_drvdata(&i2s->pdev->dev, i2s); return i2s; } static __devinit int samsung_i2s_probe(struct platform_device *pdev) { u32 dma_pl_chan, dma_cp_chan, dma_pl_sec_chan; struct i2s_dai 
*pri_dai, *sec_dai = NULL; struct s3c_audio_pdata *i2s_pdata; struct samsung_i2s *i2s_cfg; struct resource *res; u32 regs_base, quirks; int ret = 0; /* Call during Seconday interface registration */ if (pdev->id >= SAMSUNG_I2S_SECOFF) { sec_dai = dev_get_drvdata(&pdev->dev); snd_soc_register_dai(&sec_dai->pdev->dev, &sec_dai->i2s_dai_drv); return 0; } i2s_pdata = pdev->dev.platform_data; if (i2s_pdata == NULL) { dev_err(&pdev->dev, "Can't work without s3c_audio_pdata\n"); return -EINVAL; } res = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!res) { dev_err(&pdev->dev, "Unable to get I2S-TX dma resource\n"); return -ENXIO; } dma_pl_chan = res->start; res = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (!res) { dev_err(&pdev->dev, "Unable to get I2S-RX dma resource\n"); return -ENXIO; } dma_cp_chan = res->start; res = platform_get_resource(pdev, IORESOURCE_DMA, 2); if (res) dma_pl_sec_chan = res->start; else dma_pl_sec_chan = 0; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "Unable to get I2S SFR address\n"); return -ENXIO; } if (!request_mem_region(res->start, resource_size(res), "samsung-i2s")) { dev_err(&pdev->dev, "Unable to request SFR region\n"); return -EBUSY; } regs_base = res->start; i2s_cfg = &i2s_pdata->type.i2s; quirks = i2s_cfg->quirks; pri_dai = i2s_alloc_dai(pdev, false); if (!pri_dai) { dev_err(&pdev->dev, "Unable to alloc I2S_pri\n"); ret = -ENOMEM; goto err; } pri_dai->dma_playback.dma_addr = regs_base + I2STXD; pri_dai->dma_capture.dma_addr = regs_base + I2SRXD; pri_dai->dma_playback.client = (struct s3c2410_dma_client *)&pri_dai->dma_playback; pri_dai->dma_capture.client = (struct s3c2410_dma_client *)&pri_dai->dma_capture; pri_dai->dma_playback.channel = dma_pl_chan; pri_dai->dma_capture.channel = dma_cp_chan; pri_dai->src_clk = i2s_cfg->src_clk; pri_dai->dma_playback.dma_size = 4; pri_dai->dma_capture.dma_size = 4; pri_dai->base = regs_base; pri_dai->quirks = quirks; if (quirks & 
QUIRK_PRI_6CHAN) pri_dai->i2s_dai_drv.playback.channels_max = 6; if (quirks & QUIRK_SEC_DAI) { sec_dai = i2s_alloc_dai(pdev, true); if (!sec_dai) { dev_err(&pdev->dev, "Unable to alloc I2S_sec\n"); ret = -ENOMEM; goto err; } sec_dai->dma_playback.dma_addr = regs_base + I2STXDS; sec_dai->dma_playback.client = (struct s3c2410_dma_client *)&sec_dai->dma_playback; /* Use iDMA always if SysDMA not provided */ sec_dai->dma_playback.channel = dma_pl_sec_chan ? : -1; sec_dai->src_clk = i2s_cfg->src_clk; sec_dai->dma_playback.dma_size = 4; sec_dai->base = regs_base; sec_dai->quirks = quirks; sec_dai->idma_playback.dma_addr = i2s_cfg->idma_addr; sec_dai->pri_dai = pri_dai; pri_dai->sec_dai = sec_dai; } if (i2s_pdata->cfg_gpio && i2s_pdata->cfg_gpio(pdev)) { dev_err(&pdev->dev, "Unable to configure gpio\n"); ret = -EINVAL; goto err; } snd_soc_register_dai(&pri_dai->pdev->dev, &pri_dai->i2s_dai_drv); pm_runtime_enable(&pdev->dev); return 0; err: release_mem_region(regs_base, resource_size(res)); return ret; } static __devexit int samsung_i2s_remove(struct platform_device *pdev) { struct i2s_dai *i2s, *other; struct resource *res; i2s = dev_get_drvdata(&pdev->dev); other = i2s->pri_dai ? : i2s->sec_dai; if (other) { other->pri_dai = NULL; other->sec_dai = NULL; } else { pm_runtime_disable(&pdev->dev); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res) release_mem_region(res->start, resource_size(res)); } i2s->pri_dai = NULL; i2s->sec_dai = NULL; snd_soc_unregister_dai(&pdev->dev); return 0; } static struct platform_driver samsung_i2s_driver = { .probe = samsung_i2s_probe, .remove = __devexit_p(samsung_i2s_remove), .driver = { .name = "samsung-i2s", .owner = THIS_MODULE, }, }; module_platform_driver(samsung_i2s_driver); /* Module information */ MODULE_AUTHOR("Jaswinder Singh, <jassisinghbrar@gmail.com>"); MODULE_DESCRIPTION("Samsung I2S Interface"); MODULE_ALIAS("platform:samsung-i2s"); MODULE_LICENSE("GPL");
gpl-2.0
zeroblade1984/LG_MSM8974
lib/mpi/mpi-mpow.c
4934
3074
/* mpi-mpow.c - MPI functions
 * Copyright (C) 1998, 1999, 2000 Free Software Foundation, Inc.
 *
 * This file is part of GnuPG.
 *
 * GnuPG is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * GnuPG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 */

#include "mpi-internal.h"
#include "longlong.h"

/*
 * Gather bit number (t - i) of each of the k exponents into a k-bit table
 * index: exparray[k-1] contributes the most significant bit, exparray[0]
 * the least significant.  Used by mpi_mulpowm() below to select which
 * precomputed base product to multiply in at each step.
 */
static int build_index(const MPI *exparray, int k, int i, int t)
{
	int j, bitno;
	int index = 0;

	bitno = t - i;
	for (j = k - 1; j >= 0; j--) {
		index <<= 1;
		if (mpi_test_bit(exparray[j], bitno))
			index |= 1;
	}
	return index;
}

/****************
 * RES = (BASE[0] ^ EXP[0]) * (BASE[1] ^ EXP[1]) * ...
 * mod M
 *
 * Simultaneous multi-exponentiation: one square-and-multiply pass over the
 * exponent bits, using a lazily filled table G of 2^k base products.
 * basearray and exparray are NULL-terminated and must have the same number
 * of entries.  Returns 0 on success, -ENOMEM on allocation failure; BUG()s
 * on contract violations (no bases, mismatched array lengths, zero-bit
 * exponents, or k >= 10).
 */
int mpi_mulpowm(MPI res, MPI *basearray, MPI *exparray, MPI m)
{
	int rc = -ENOMEM;
	int k;			/* number of elements */
	int t;			/* bit size of largest exponent */
	int i, j, idx;
	MPI *G = NULL;		/* table with precomputed values of size 2^k */
	MPI tmp = NULL;

	/* Count the bases; basearray is NULL-terminated. */
	for (k = 0; basearray[k]; k++)
		;
	if (!k) {
		pr_emerg("mpi_mulpowm: assert(k) failed\n");
		BUG();
	}
	/* Find the bit size of the largest exponent; tmp is reused as a cursor. */
	for (t = 0, i = 0; (tmp = exparray[i]); i++) {
		j = mpi_get_nbits(tmp);
		if (j > t)
			t = j;
	}
	/* exparray must have exactly as many entries as basearray. */
	if (i != k) {
		pr_emerg("mpi_mulpowm: assert(i==k) failed\n");
		BUG();
	}
	if (!t) {
		pr_emerg("mpi_mulpowm: assert(t) failed\n");
		BUG();
	}
	/* The table below has 2^k entries; bound k to keep it small. */
	if (k >= 10) {
		pr_emerg("mpi_mulpowm: assert(k<10) failed\n");
		BUG();
	}
	G = kzalloc((1 << k) * sizeof *G, GFP_KERNEL);
	if (!G)
		goto err_out;	/* tmp points into exparray here; must not be freed */

	/* and calculate */
	tmp = mpi_alloc(mpi_get_nlimbs(m) + 1);
	if (!tmp)
		goto nomem;
	if (mpi_set_ui(res, 1) < 0)
		goto nomem;
	/*
	 * Left-to-right scan over the exponent bits (bit t-1 down to bit 0):
	 * square the accumulator, then multiply by the product of the bases
	 * whose exponent has the current bit set.  G[idx] caches that
	 * product, computed on first use.
	 */
	for (i = 1; i <= t; i++) {
		if (mpi_mulm(tmp, res, res, m) < 0)
			goto nomem;
		idx = build_index(exparray, k, i, t);
		if (!(idx >= 0 && idx < (1 << k))) {
			pr_emerg("mpi_mulpowm: assert(idx >= 0 && idx < (1<<k)) failed\n");
			BUG();
		}
		if (!G[idx]) {
			if (!idx) {
				/* Empty bit set: the multiplier is 1. */
				G[0] = mpi_alloc_set_ui(1);
				if (!G[0])
					goto nomem;
			} else {
				/* Product of all bases selected by idx, mod m. */
				for (j = 0; j < k; j++) {
					if ((idx & (1 << j))) {
						if (!G[idx]) {
							if (mpi_copy
							    (&G[idx],
							     basearray[j]) < 0)
								goto nomem;
						} else {
							if (mpi_mulm
							    (G[idx], G[idx],
							     basearray[j],
							     m) < 0)
								goto nomem;
						}
					}
				}
				/* NOTE(review): this branch looks unreachable
				 * (idx != 0 implies some bit j is set, so
				 * G[idx] was assigned above) -- kept as a
				 * defensive fallback; confirm. */
				if (!G[idx]) {
					G[idx] = mpi_alloc(0);
					if (!G[idx])
						goto nomem;
				}
			}
		}
		if (mpi_mulm(res, tmp, G[idx], m) < 0)
			goto nomem;
	}
	rc = 0;
nomem:
	/* cleanup */
	mpi_free(tmp);	/* assumes mpi_free(NULL) is a no-op -- TODO confirm */
	for (i = 0; i < (1 << k); i++)
		mpi_free(G[i]);
	kfree(G);
err_out:
	return rc;
}
gpl-2.0
hroark13/Z750C_2_WARP
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
5446
11321
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"

/* Map a DRM object embedded in struct vmw_display_unit back to its LDU. */
#define vmw_crtc_to_ldu(x) \
	container_of(x, struct vmw_legacy_display_unit, base.crtc)
#define vmw_encoder_to_ldu(x) \
	container_of(x, struct vmw_legacy_display_unit, base.encoder)
#define vmw_connector_to_ldu(x) \
	container_of(x, struct vmw_legacy_display_unit, base.connector)

/* Per-device state of the legacy display system. */
struct vmw_legacy_display {
	struct list_head active;	/* active units, sorted by unit number */

	unsigned num_active;
	unsigned last_num_active;

	struct vmw_framebuffer *fb;	/* single fb shared by all active units */
};

/**
 * Display unit using the legacy register interface.
 */
struct vmw_legacy_display_unit {
	struct vmw_display_unit base;

	struct list_head active;	/* entry on vmw_legacy_display::active */
};

/* Tear down an LDU: unlink it from the active list and free it. */
static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
{
	list_del_init(&ldu->active);
	vmw_display_unit_cleanup(&ldu->base);
	kfree(ldu);
}

/*
 * Legacy Display Unit CRTC functions
 */

static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
{
	vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
}

/*
 * Push the current set of active display units to the device registers.
 * Returns 0 on success or the error from vmw_kms_write_svga().
 */
static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
	struct vmw_legacy_display *lds = dev_priv->ldu_priv;
	struct vmw_legacy_display_unit *entry;
	struct vmw_display_unit *du = NULL;
	struct drm_framebuffer *fb = NULL;
	struct drm_crtc *crtc = NULL;
	int i = 0, ret;

	/* If there is no display topology the host just assumes
	 * that the guest will set the same layout as the host.
	 */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) {
		int w = 0, h = 0;

		/* Compute the bounding box of all active CRTCs. */
		list_for_each_entry(entry, &lds->active, active) {
			crtc = &entry->base.crtc;
			w = max(w, crtc->x + crtc->mode.hdisplay);
			h = max(h, crtc->y + crtc->mode.vdisplay);
			i++;
		}

		if (crtc == NULL)
			return 0;
		/*
		 * Bug fix: the original read entry->base.crtc.fb here, but
		 * after list_for_each_entry() has run to completion, entry
		 * is the bogus container of the list head and must not be
		 * dereferenced.  crtc still points at the last real element,
		 * so take the framebuffer from it instead.
		 */
		fb = crtc->fb;

		return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
					  fb->bits_per_pixel, fb->depth);
	}

	if (!list_empty(&lds->active)) {
		entry = list_entry(lds->active.next, typeof(*entry), active);
		fb = entry->base.crtc.fb;

		vmw_kms_write_svga(dev_priv, fb->width, fb->height,
				   fb->pitches[0], fb->bits_per_pixel,
				   fb->depth);
	}

	/* Make sure we always show something. */
	vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS,
		  lds->num_active ? lds->num_active : 1);

	/* Program position and size of each active display. */
	i = 0;
	list_for_each_entry(entry, &lds->active, active) {
		crtc = &entry->base.crtc;

		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, crtc->x);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);

		i++;
	}

	BUG_ON(i != lds->num_active);

	lds->last_num_active = lds->num_active;

	/* Find the first du with a cursor. */
	list_for_each_entry(entry, &lds->active, active) {
		du = &entry->base;

		if (!du->cursor_dmabuf)
			continue;

		ret = vmw_cursor_update_dmabuf(dev_priv, du->cursor_dmabuf,
					       64, 64, du->hotspot_x,
					       du->hotspot_y);
		if (ret == 0)
			break;

		DRM_ERROR("Could not update cursor image\n");
	}

	return 0;
}

/*
 * Remove an LDU from the active list; when the last one goes away the
 * shared framebuffer is unpinned and dropped.  Always returns 0.
 */
static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
			      struct vmw_legacy_display_unit *ldu)
{
	struct vmw_legacy_display *ld = vmw_priv->ldu_priv;

	if (list_empty(&ldu->active))
		return 0;

	/* Must init otherwise list_empty(&ldu->active) will not work. */
	list_del_init(&ldu->active);
	if (--(ld->num_active) == 0) {
		BUG_ON(!ld->fb);
		if (ld->fb->unpin)
			ld->fb->unpin(ld->fb);
		ld->fb = NULL;
	}

	return 0;
}

/*
 * Add an LDU to the active list (kept sorted by unit number) and make vfb
 * the shared framebuffer, swapping pin/unpin as needed.  Always returns 0.
 */
static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
			      struct vmw_legacy_display_unit *ldu,
			      struct vmw_framebuffer *vfb)
{
	struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
	struct vmw_legacy_display_unit *entry;
	struct list_head *at;

	BUG_ON(!ld->num_active && ld->fb);
	if (vfb != ld->fb) {
		if (ld->fb && ld->fb->unpin)
			ld->fb->unpin(ld->fb);
		if (vfb->pin)
			vfb->pin(vfb);
		ld->fb = vfb;
	}

	if (!list_empty(&ldu->active))
		return 0;

	/* Insert sorted by unit number. */
	at = &ld->active;
	list_for_each_entry(entry, &ld->active, active) {
		if (entry->base.unit > ldu->base.unit)
			break;

		at = &entry->active;
	}

	list_add(&ldu->active, at);

	ld->num_active++;

	return 0;
}

/*
 * drm_crtc_funcs::set_config for the legacy display unit.  Validates the
 * request, turns the CRTC off when no mode/fb/connector is given, otherwise
 * programs the new mode and commits the active list to the device.
 * Returns 0 or a negative errno.
 */
static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
{
	struct vmw_private *dev_priv;
	struct vmw_legacy_display_unit *ldu;
	struct drm_connector *connector;
	struct drm_display_mode *mode;
	struct drm_encoder *encoder;
	struct vmw_framebuffer *vfb;
	struct drm_framebuffer *fb;
	struct drm_crtc *crtc;

	if (!set)
		return -EINVAL;

	if (!set->crtc)
		return -EINVAL;

	/* get the ldu */
	crtc = set->crtc;
	ldu = vmw_crtc_to_ldu(crtc);
	vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
	dev_priv = vmw_priv(crtc->dev);

	if (set->num_connectors > 1) {
		DRM_ERROR("too many connectors\n");
		return -EINVAL;
	}

	if (set->num_connectors == 1 &&
	    set->connectors[0] != &ldu->base.connector) {
		DRM_ERROR("connector doesn't match %p %p\n",
			  set->connectors[0], &ldu->base.connector);
		return -EINVAL;
	}

	/* ldu only supports one fb active at the time */
	if (dev_priv->ldu_priv->fb && vfb &&
	    !(dev_priv->ldu_priv->num_active == 1 &&
	      !list_empty(&ldu->active)) &&
	    dev_priv->ldu_priv->fb != vfb) {
		DRM_ERROR("Multiple framebuffers not supported\n");
		return -EINVAL;
	}

	/* since they always map one to one these are safe */
	connector = &ldu->base.connector;
	encoder = &ldu->base.encoder;

	/* should we turn the crtc off? */
	if (set->num_connectors == 0 || !set->mode || !set->fb) {

		connector->encoder = NULL;
		encoder->crtc = NULL;
		crtc->fb = NULL;

		vmw_ldu_del_active(dev_priv, ldu);

		return vmw_ldu_commit_list(dev_priv);
	}

	/* we now know we want to set a mode */
	mode = set->mode;
	fb = set->fb;

	if (set->x + mode->hdisplay > fb->width ||
	    set->y + mode->vdisplay > fb->height) {
		DRM_ERROR("set outside of framebuffer\n");
		return -EINVAL;
	}

	vmw_fb_off(dev_priv);

	crtc->fb = fb;
	encoder->crtc = crtc;
	connector->encoder = encoder;
	crtc->x = set->x;
	crtc->y = set->y;
	crtc->mode = *mode;

	vmw_ldu_add_active(dev_priv, ldu, vfb);

	return vmw_ldu_commit_list(dev_priv);
}

static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
	.save = vmw_du_crtc_save,
	.restore = vmw_du_crtc_restore,
	.cursor_set = vmw_du_crtc_cursor_set,
	.cursor_move = vmw_du_crtc_cursor_move,
	.gamma_set = vmw_du_crtc_gamma_set,
	.destroy = vmw_ldu_crtc_destroy,
	.set_config = vmw_ldu_crtc_set_config,
};

/*
 * Legacy Display Unit encoder functions
 */

static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder)
{
	vmw_ldu_destroy(vmw_encoder_to_ldu(encoder));
}

static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
	.destroy = vmw_ldu_encoder_destroy,
};

/*
 * Legacy Display Unit connector functions
 */

static void vmw_ldu_connector_destroy(struct drm_connector *connector)
{
	vmw_ldu_destroy(vmw_connector_to_ldu(connector));
}

static struct drm_connector_funcs vmw_legacy_connector_funcs = {
	.dpms = vmw_du_connector_dpms,
	.save = vmw_du_connector_save,
	.restore = vmw_du_connector_restore,
	.detect = vmw_du_connector_detect,
	.fill_modes = vmw_du_connector_fill_modes,
	.set_property = vmw_du_connector_set_property,
	.destroy = vmw_ldu_connector_destroy,
};

/*
 * Allocate and register one legacy display unit (connector + encoder +
 * CRTC).  Returns 0 on success or -ENOMEM.
 */
static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
{
	struct vmw_legacy_display_unit *ldu;
	struct drm_device *dev = dev_priv->dev;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;

	ldu = kzalloc(sizeof(*ldu), GFP_KERNEL);
	if (!ldu)
		return -ENOMEM;

	ldu->base.unit = unit;
	crtc = &ldu->base.crtc;
	encoder = &ldu->base.encoder;
	connector = &ldu->base.connector;

	INIT_LIST_HEAD(&ldu->active);

	/* Only unit 0 is preferred-active by default. */
	ldu->base.pref_active = (unit == 0);
	ldu->base.pref_width = dev_priv->initial_width;
	ldu->base.pref_height = dev_priv->initial_height;
	ldu->base.pref_mode = NULL;
	ldu->base.is_implicit = true;

	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
			   DRM_MODE_CONNECTOR_VIRTUAL);
	connector->status = vmw_du_connector_detect(connector, true);

	drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
			 DRM_MODE_ENCODER_VIRTUAL);
	drm_mode_connector_attach_encoder(connector, encoder);
	encoder->possible_crtcs = (1 << unit);
	encoder->possible_clones = 0;

	drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);

	drm_mode_crtc_set_gamma_size(crtc, 256);

	drm_connector_attach_property(connector,
				      dev->mode_config.dirty_info_property,
				      1);

	return 0;
}

/*
 * Bring up the legacy display system: allocate the per-device state,
 * initialize vblank support and create the display units (one per head on
 * multimon-capable hardware, otherwise a single one).
 */
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int i, ret;

	if (dev_priv->ldu_priv) {
		DRM_INFO("ldu system already on\n");
		return -EINVAL;
	}

	dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
	if (!dev_priv->ldu_priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
	dev_priv->ldu_priv->num_active = 0;
	dev_priv->ldu_priv->last_num_active = 0;
	dev_priv->ldu_priv->fb = NULL;

	/* for old hardware without multimon only enable one display */
	if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
		ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
	else
		ret = drm_vblank_init(dev, 1);
	if (ret != 0)
		goto err_free;

	ret = drm_mode_create_dirty_info_property(dev);
	if (ret != 0)
		goto err_vblank_cleanup;

	if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
		for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
			vmw_ldu_init(dev_priv, i);
	else
		vmw_ldu_init(dev_priv, 0);

	return 0;

err_vblank_cleanup:
	drm_vblank_cleanup(dev);
err_free:
	kfree(dev_priv->ldu_priv);
	dev_priv->ldu_priv = NULL;
	return ret;
}

/* Tear down the legacy display system; all units must be inactive. */
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (!dev_priv->ldu_priv)
		return -ENOSYS;

	drm_vblank_cleanup(dev);

	BUG_ON(!list_empty(&dev_priv->ldu_priv->active));

	kfree(dev_priv->ldu_priv);

	return 0;
}
gpl-2.0
wzhy90/android_kernel_sony_msm8974ab
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
5446
11321
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"

/* Map a DRM object embedded in struct vmw_display_unit back to its LDU. */
#define vmw_crtc_to_ldu(x) \
	container_of(x, struct vmw_legacy_display_unit, base.crtc)
#define vmw_encoder_to_ldu(x) \
	container_of(x, struct vmw_legacy_display_unit, base.encoder)
#define vmw_connector_to_ldu(x) \
	container_of(x, struct vmw_legacy_display_unit, base.connector)

/* Per-device state of the legacy display system. */
struct vmw_legacy_display {
	struct list_head active;	/* active units, sorted by unit number */

	unsigned num_active;
	unsigned last_num_active;

	struct vmw_framebuffer *fb;	/* single fb shared by all active units */
};

/**
 * Display unit using the legacy register interface.
 */
struct vmw_legacy_display_unit {
	struct vmw_display_unit base;

	struct list_head active;	/* entry on vmw_legacy_display::active */
};

/* Tear down an LDU: unlink it from the active list and free it. */
static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
{
	list_del_init(&ldu->active);
	vmw_display_unit_cleanup(&ldu->base);
	kfree(ldu);
}

/*
 * Legacy Display Unit CRTC functions
 */

static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
{
	vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
}

/*
 * Push the current set of active display units to the device registers.
 * Returns 0 on success or the error from vmw_kms_write_svga().
 */
static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
	struct vmw_legacy_display *lds = dev_priv->ldu_priv;
	struct vmw_legacy_display_unit *entry;
	struct vmw_display_unit *du = NULL;
	struct drm_framebuffer *fb = NULL;
	struct drm_crtc *crtc = NULL;
	int i = 0, ret;

	/* If there is no display topology the host just assumes
	 * that the guest will set the same layout as the host.
	 */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) {
		int w = 0, h = 0;

		/* Compute the bounding box of all active CRTCs. */
		list_for_each_entry(entry, &lds->active, active) {
			crtc = &entry->base.crtc;
			w = max(w, crtc->x + crtc->mode.hdisplay);
			h = max(h, crtc->y + crtc->mode.vdisplay);
			i++;
		}

		if (crtc == NULL)
			return 0;
		/*
		 * Bug fix: the original read entry->base.crtc.fb here, but
		 * after list_for_each_entry() has run to completion, entry
		 * is the bogus container of the list head and must not be
		 * dereferenced.  crtc still points at the last real element,
		 * so take the framebuffer from it instead.
		 */
		fb = crtc->fb;

		return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
					  fb->bits_per_pixel, fb->depth);
	}

	if (!list_empty(&lds->active)) {
		entry = list_entry(lds->active.next, typeof(*entry), active);
		fb = entry->base.crtc.fb;

		vmw_kms_write_svga(dev_priv, fb->width, fb->height,
				   fb->pitches[0], fb->bits_per_pixel,
				   fb->depth);
	}

	/* Make sure we always show something. */
	vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS,
		  lds->num_active ? lds->num_active : 1);

	/* Program position and size of each active display. */
	i = 0;
	list_for_each_entry(entry, &lds->active, active) {
		crtc = &entry->base.crtc;

		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, crtc->x);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);

		i++;
	}

	BUG_ON(i != lds->num_active);

	lds->last_num_active = lds->num_active;

	/* Find the first du with a cursor. */
	list_for_each_entry(entry, &lds->active, active) {
		du = &entry->base;

		if (!du->cursor_dmabuf)
			continue;

		ret = vmw_cursor_update_dmabuf(dev_priv, du->cursor_dmabuf,
					       64, 64, du->hotspot_x,
					       du->hotspot_y);
		if (ret == 0)
			break;

		DRM_ERROR("Could not update cursor image\n");
	}

	return 0;
}

/*
 * Remove an LDU from the active list; when the last one goes away the
 * shared framebuffer is unpinned and dropped.  Always returns 0.
 */
static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
			      struct vmw_legacy_display_unit *ldu)
{
	struct vmw_legacy_display *ld = vmw_priv->ldu_priv;

	if (list_empty(&ldu->active))
		return 0;

	/* Must init otherwise list_empty(&ldu->active) will not work. */
	list_del_init(&ldu->active);
	if (--(ld->num_active) == 0) {
		BUG_ON(!ld->fb);
		if (ld->fb->unpin)
			ld->fb->unpin(ld->fb);
		ld->fb = NULL;
	}

	return 0;
}

/*
 * Add an LDU to the active list (kept sorted by unit number) and make vfb
 * the shared framebuffer, swapping pin/unpin as needed.  Always returns 0.
 */
static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
			      struct vmw_legacy_display_unit *ldu,
			      struct vmw_framebuffer *vfb)
{
	struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
	struct vmw_legacy_display_unit *entry;
	struct list_head *at;

	BUG_ON(!ld->num_active && ld->fb);
	if (vfb != ld->fb) {
		if (ld->fb && ld->fb->unpin)
			ld->fb->unpin(ld->fb);
		if (vfb->pin)
			vfb->pin(vfb);
		ld->fb = vfb;
	}

	if (!list_empty(&ldu->active))
		return 0;

	/* Insert sorted by unit number. */
	at = &ld->active;
	list_for_each_entry(entry, &ld->active, active) {
		if (entry->base.unit > ldu->base.unit)
			break;

		at = &entry->active;
	}

	list_add(&ldu->active, at);

	ld->num_active++;

	return 0;
}

/*
 * drm_crtc_funcs::set_config for the legacy display unit.  Validates the
 * request, turns the CRTC off when no mode/fb/connector is given, otherwise
 * programs the new mode and commits the active list to the device.
 * Returns 0 or a negative errno.
 */
static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
{
	struct vmw_private *dev_priv;
	struct vmw_legacy_display_unit *ldu;
	struct drm_connector *connector;
	struct drm_display_mode *mode;
	struct drm_encoder *encoder;
	struct vmw_framebuffer *vfb;
	struct drm_framebuffer *fb;
	struct drm_crtc *crtc;

	if (!set)
		return -EINVAL;

	if (!set->crtc)
		return -EINVAL;

	/* get the ldu */
	crtc = set->crtc;
	ldu = vmw_crtc_to_ldu(crtc);
	vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
	dev_priv = vmw_priv(crtc->dev);

	if (set->num_connectors > 1) {
		DRM_ERROR("too many connectors\n");
		return -EINVAL;
	}

	if (set->num_connectors == 1 &&
	    set->connectors[0] != &ldu->base.connector) {
		DRM_ERROR("connector doesn't match %p %p\n",
			  set->connectors[0], &ldu->base.connector);
		return -EINVAL;
	}

	/* ldu only supports one fb active at the time */
	if (dev_priv->ldu_priv->fb && vfb &&
	    !(dev_priv->ldu_priv->num_active == 1 &&
	      !list_empty(&ldu->active)) &&
	    dev_priv->ldu_priv->fb != vfb) {
		DRM_ERROR("Multiple framebuffers not supported\n");
		return -EINVAL;
	}

	/* since they always map one to one these are safe */
	connector = &ldu->base.connector;
	encoder = &ldu->base.encoder;

	/* should we turn the crtc off? */
	if (set->num_connectors == 0 || !set->mode || !set->fb) {

		connector->encoder = NULL;
		encoder->crtc = NULL;
		crtc->fb = NULL;

		vmw_ldu_del_active(dev_priv, ldu);

		return vmw_ldu_commit_list(dev_priv);
	}

	/* we now know we want to set a mode */
	mode = set->mode;
	fb = set->fb;

	if (set->x + mode->hdisplay > fb->width ||
	    set->y + mode->vdisplay > fb->height) {
		DRM_ERROR("set outside of framebuffer\n");
		return -EINVAL;
	}

	vmw_fb_off(dev_priv);

	crtc->fb = fb;
	encoder->crtc = crtc;
	connector->encoder = encoder;
	crtc->x = set->x;
	crtc->y = set->y;
	crtc->mode = *mode;

	vmw_ldu_add_active(dev_priv, ldu, vfb);

	return vmw_ldu_commit_list(dev_priv);
}

static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
	.save = vmw_du_crtc_save,
	.restore = vmw_du_crtc_restore,
	.cursor_set = vmw_du_crtc_cursor_set,
	.cursor_move = vmw_du_crtc_cursor_move,
	.gamma_set = vmw_du_crtc_gamma_set,
	.destroy = vmw_ldu_crtc_destroy,
	.set_config = vmw_ldu_crtc_set_config,
};

/*
 * Legacy Display Unit encoder functions
 */

static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder)
{
	vmw_ldu_destroy(vmw_encoder_to_ldu(encoder));
}

static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
	.destroy = vmw_ldu_encoder_destroy,
};

/*
 * Legacy Display Unit connector functions
 */

static void vmw_ldu_connector_destroy(struct drm_connector *connector)
{
	vmw_ldu_destroy(vmw_connector_to_ldu(connector));
}

static struct drm_connector_funcs vmw_legacy_connector_funcs = {
	.dpms = vmw_du_connector_dpms,
	.save = vmw_du_connector_save,
	.restore = vmw_du_connector_restore,
	.detect = vmw_du_connector_detect,
	.fill_modes = vmw_du_connector_fill_modes,
	.set_property = vmw_du_connector_set_property,
	.destroy = vmw_ldu_connector_destroy,
};

/*
 * Allocate and register one legacy display unit (connector + encoder +
 * CRTC).  Returns 0 on success or -ENOMEM.
 */
static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
{
	struct vmw_legacy_display_unit *ldu;
	struct drm_device *dev = dev_priv->dev;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;

	ldu = kzalloc(sizeof(*ldu), GFP_KERNEL);
	if (!ldu)
		return -ENOMEM;

	ldu->base.unit = unit;
	crtc = &ldu->base.crtc;
	encoder = &ldu->base.encoder;
	connector = &ldu->base.connector;

	INIT_LIST_HEAD(&ldu->active);

	/* Only unit 0 is preferred-active by default. */
	ldu->base.pref_active = (unit == 0);
	ldu->base.pref_width = dev_priv->initial_width;
	ldu->base.pref_height = dev_priv->initial_height;
	ldu->base.pref_mode = NULL;
	ldu->base.is_implicit = true;

	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
			   DRM_MODE_CONNECTOR_VIRTUAL);
	connector->status = vmw_du_connector_detect(connector, true);

	drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
			 DRM_MODE_ENCODER_VIRTUAL);
	drm_mode_connector_attach_encoder(connector, encoder);
	encoder->possible_crtcs = (1 << unit);
	encoder->possible_clones = 0;

	drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);

	drm_mode_crtc_set_gamma_size(crtc, 256);

	drm_connector_attach_property(connector,
				      dev->mode_config.dirty_info_property,
				      1);

	return 0;
}

/*
 * Bring up the legacy display system: allocate the per-device state,
 * initialize vblank support and create the display units (one per head on
 * multimon-capable hardware, otherwise a single one).
 */
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int i, ret;

	if (dev_priv->ldu_priv) {
		DRM_INFO("ldu system already on\n");
		return -EINVAL;
	}

	dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
	if (!dev_priv->ldu_priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
	dev_priv->ldu_priv->num_active = 0;
	dev_priv->ldu_priv->last_num_active = 0;
	dev_priv->ldu_priv->fb = NULL;

	/* for old hardware without multimon only enable one display */
	if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
		ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
	else
		ret = drm_vblank_init(dev, 1);
	if (ret != 0)
		goto err_free;

	ret = drm_mode_create_dirty_info_property(dev);
	if (ret != 0)
		goto err_vblank_cleanup;

	if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
		for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
			vmw_ldu_init(dev_priv, i);
	else
		vmw_ldu_init(dev_priv, 0);

	return 0;

err_vblank_cleanup:
	drm_vblank_cleanup(dev);
err_free:
	kfree(dev_priv->ldu_priv);
	dev_priv->ldu_priv = NULL;
	return ret;
}

/* Tear down the legacy display system; all units must be inactive. */
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (!dev_priv->ldu_priv)
		return -ENOSYS;

	drm_vblank_cleanup(dev);

	BUG_ON(!list_empty(&dev_priv->ldu_priv->active));

	kfree(dev_priv->ldu_priv);

	return 0;
}
gpl-2.0
londbell/ZTE_U988S_JellyBean-4.2.2-Kernel-3.4.35
sound/ppc/tumbler.c
8006
39532
/* * PMac Tumbler/Snapper lowlevel functions * * Copyright (c) by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Rene Rebe <rene.rebe@gmx.net>: * * update from shadow registers on wakeup and headphone plug * * automatically toggle DRC on headphone plug * */ #include <linux/init.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/string.h> #include <sound/core.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> #include "pmac.h" #include "tumbler_volume.h" #undef DEBUG #ifdef DEBUG #define DBG(fmt...) printk(KERN_DEBUG fmt) #else #define DBG(fmt...) 
#endif #define IS_G4DA (of_machine_is_compatible("PowerMac3,4")) /* i2c address for tumbler */ #define TAS_I2C_ADDR 0x34 /* registers */ #define TAS_REG_MCS 0x01 /* main control */ #define TAS_REG_DRC 0x02 #define TAS_REG_VOL 0x04 #define TAS_REG_TREBLE 0x05 #define TAS_REG_BASS 0x06 #define TAS_REG_INPUT1 0x07 #define TAS_REG_INPUT2 0x08 /* tas3001c */ #define TAS_REG_PCM TAS_REG_INPUT1 /* tas3004 */ #define TAS_REG_LMIX TAS_REG_INPUT1 #define TAS_REG_RMIX TAS_REG_INPUT2 #define TAS_REG_MCS2 0x43 /* main control 2 */ #define TAS_REG_ACS 0x40 /* analog control */ /* mono volumes for tas3001c/tas3004 */ enum { VOL_IDX_PCM_MONO, /* tas3001c only */ VOL_IDX_BASS, VOL_IDX_TREBLE, VOL_IDX_LAST_MONO }; /* stereo volumes for tas3004 */ enum { VOL_IDX_PCM, VOL_IDX_PCM2, VOL_IDX_ADC, VOL_IDX_LAST_MIX }; struct pmac_gpio { unsigned int addr; u8 active_val; u8 inactive_val; u8 active_state; }; struct pmac_tumbler { struct pmac_keywest i2c; struct pmac_gpio audio_reset; struct pmac_gpio amp_mute; struct pmac_gpio line_mute; struct pmac_gpio line_detect; struct pmac_gpio hp_mute; struct pmac_gpio hp_detect; int headphone_irq; int lineout_irq; unsigned int save_master_vol[2]; unsigned int master_vol[2]; unsigned int save_master_switch[2]; unsigned int master_switch[2]; unsigned int mono_vol[VOL_IDX_LAST_MONO]; unsigned int mix_vol[VOL_IDX_LAST_MIX][2]; /* stereo volumes for tas3004 */ int drc_range; int drc_enable; int capture_source; int anded_reset; int auto_mute_notify; int reset_on_sleep; u8 acs; }; /* */ static int send_init_client(struct pmac_keywest *i2c, unsigned int *regs) { while (*regs > 0) { int err, count = 10; do { err = i2c_smbus_write_byte_data(i2c->client, regs[0], regs[1]); if (err >= 0) break; DBG("(W) i2c error %d\n", err); mdelay(10); } while (count--); if (err < 0) return -ENXIO; regs += 2; } return 0; } static int tumbler_init_client(struct pmac_keywest *i2c) { static unsigned int regs[] = { /* normal operation, SCLK=64fps, i2s output, i2s input, 16bit 
width */ TAS_REG_MCS, (1<<6)|(2<<4)|(2<<2)|0, 0, /* terminator */ }; DBG("(I) tumbler init client\n"); return send_init_client(i2c, regs); } static int snapper_init_client(struct pmac_keywest *i2c) { static unsigned int regs[] = { /* normal operation, SCLK=64fps, i2s output, 16bit width */ TAS_REG_MCS, (1<<6)|(2<<4)|0, /* normal operation, all-pass mode */ TAS_REG_MCS2, (1<<1), /* normal output, no deemphasis, A input, power-up, line-in */ TAS_REG_ACS, 0, 0, /* terminator */ }; DBG("(I) snapper init client\n"); return send_init_client(i2c, regs); } /* * gpio access */ #define do_gpio_write(gp, val) \ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, (gp)->addr, val) #define do_gpio_read(gp) \ pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, (gp)->addr, 0) #define tumbler_gpio_free(gp) /* NOP */ static void write_audio_gpio(struct pmac_gpio *gp, int active) { if (! gp->addr) return; active = active ? gp->active_val : gp->inactive_val; do_gpio_write(gp, active); DBG("(I) gpio %x write %d\n", gp->addr, active); } static int check_audio_gpio(struct pmac_gpio *gp) { int ret; if (! gp->addr) return 0; ret = do_gpio_read(gp); return (ret & 0x1) == (gp->active_val & 0x1); } static int read_audio_gpio(struct pmac_gpio *gp) { int ret; if (! gp->addr) return 0; ret = do_gpio_read(gp); ret = (ret & 0x02) !=0; return ret == gp->active_state; } /* * update master volume */ static int tumbler_set_master_volume(struct pmac_tumbler *mix) { unsigned char block[6]; unsigned int left_vol, right_vol; if (! mix->i2c.client) return -ENODEV; if (! mix->master_switch[0]) left_vol = 0; else { left_vol = mix->master_vol[0]; if (left_vol >= ARRAY_SIZE(master_volume_table)) left_vol = ARRAY_SIZE(master_volume_table) - 1; left_vol = master_volume_table[left_vol]; } if (! 
	/* NOTE(review): continuation of tumbler_set_master_volume(); the left
	 * channel half of this function lies before this line. */
	mix->master_switch[1])
		right_vol = 0;
	else {
		right_vol = mix->master_vol[1];
		/* Clamp to the last table entry before the lookup. */
		if (right_vol >= ARRAY_SIZE(master_volume_table))
			right_vol = ARRAY_SIZE(master_volume_table) - 1;
		right_vol = master_volume_table[right_vol];
	}

	/* Pack both 24-bit channel volumes big-endian into one 6-byte write. */
	block[0] = (left_vol >> 16) & 0xff;
	block[1] = (left_vol >> 8) & 0xff;
	block[2] = (left_vol >> 0) & 0xff;

	block[3] = (right_vol >> 16) & 0xff;
	block[4] = (right_vol >> 8) & 0xff;
	block[5] = (right_vol >> 0) & 0xff;
	if (i2c_smbus_write_i2c_block_data(mix->i2c.client, TAS_REG_VOL, 6,
					   block) < 0) {
		snd_printk(KERN_ERR "failed to set volume \n");
		return -EINVAL;
	}
	DBG("(I) succeeded to set volume (%u, %u)\n", left_vol, right_vol);
	return 0;
}

/* output volume */
/* ALSA info callback: master volume is a stereo integer control whose
 * range indexes master_volume_table. */
static int tumbler_info_master_volume(struct snd_kcontrol *kcontrol,
				      struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = ARRAY_SIZE(master_volume_table) - 1;
	return 0;
}

/* ALSA get callback: report the cached (software) master volume. */
static int tumbler_get_master_volume(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
	struct pmac_tumbler *mix = chip->mixer_data;

	ucontrol->value.integer.value[0] = mix->master_vol[0];
	ucontrol->value.integer.value[1] = mix->master_vol[1];
	return 0;
}

/* ALSA put callback: validate, cache and apply a new master volume.
 * Returns 1 if the value changed, 0 if unchanged, negative on error. */
static int tumbler_put_master_volume(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
	struct pmac_tumbler *mix = chip->mixer_data;
	unsigned int vol[2];
	int change;

	/* Negative user values wrap to large unsigned ints and are caught
	 * by the range check below. */
	vol[0] = ucontrol->value.integer.value[0];
	vol[1] = ucontrol->value.integer.value[1];
	if (vol[0] >= ARRAY_SIZE(master_volume_table) ||
	    vol[1] >= ARRAY_SIZE(master_volume_table))
		return -EINVAL;
	change = mix->master_vol[0] != vol[0] ||
		mix->master_vol[1] != vol[1];
	if (change) {
		mix->master_vol[0] = vol[0];
		mix->master_vol[1] = vol[1];
		tumbler_set_master_volume(mix);
	}
	return change;
}

/* output switch */
/* ALSA get callback for the stereo master output switch. */
static int tumbler_get_master_switch(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
	struct pmac_tumbler *mix = chip->mixer_data;

	ucontrol->value.integer.value[0] = mix->master_switch[0];
	ucontrol->value.integer.value[1] = mix->master_switch[1];
	return 0;
}

/* ALSA put callback: update the per-channel output switches and reapply
 * the master volume (a muted channel is written as volume 0).
 * Returns 1 if anything changed, 0 otherwise. */
static int tumbler_put_master_switch(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
	struct pmac_tumbler *mix = chip->mixer_data;
	int change;

	change = mix->master_switch[0] != ucontrol->value.integer.value[0] ||
		mix->master_switch[1] != ucontrol->value.integer.value[1];
	if (change) {
		mix->master_switch[0] = !!ucontrol->value.integer.value[0];
		mix->master_switch[1] = !!ucontrol->value.integer.value[1];
		tumbler_set_master_volume(mix);
	}
	return change;
}

/*
 * TAS3001c dynamic range compression
 */
#define TAS3001_DRC_MAX 0x5f

/* Program the TAS3001c DRC register: enable 3:1 compression with the
 * cached range (clamped into the valid register window), or write zeros
 * to disable DRC entirely. */
static int tumbler_set_drc(struct pmac_tumbler *mix)
{
	unsigned char val[2];

	if (! mix->i2c.client)
		return -ENODEV;

	if (mix->drc_enable) {
		val[0] = 0xc1; /* enable, 3:1 compression */
		if (mix->drc_range > TAS3001_DRC_MAX)
			val[1] = 0xf0;
		else if (mix->drc_range < 0)
			val[1] = 0x91;
		else
			val[1] = mix->drc_range + 0x91;
	} else {
		val[0] = 0;
		val[1] = 0;
	}

	if (i2c_smbus_write_i2c_block_data(mix->i2c.client, TAS_REG_DRC,
					   2, val) < 0) {
		snd_printk(KERN_ERR "failed to set DRC\n");
		return -EINVAL;
	}
	DBG("(I) succeeded to set DRC (%u, %u)\n", val[0], val[1]);
	return 0;
}

/*
 * TAS3004
 */
#define TAS3004_DRC_MAX 0xef

/* Program the TAS3004 DRC register block; TAS3001c variant above. */
static int snapper_set_drc(struct pmac_tumbler *mix)
{
	unsigned char val[6];

	if (!
	mix->i2c.client)
		return -ENODEV;

	if (mix->drc_enable)
		val[0] = 0x50; /* 3:1 above threshold */
	else
		val[0] = 0x51; /* disabled */
	val[1] = 0x02; /* 1:1 below threshold */
	/* Clamp the cached range into the register's 0x00..0xef window. */
	if (mix->drc_range > 0xef)
		val[2] = 0xef;
	else if (mix->drc_range < 0)
		val[2] = 0x00;
	else
		val[2] = mix->drc_range;
	/* NOTE(review): the three bytes below are fixed values carried over
	 * from the original driver; their register meaning is not derivable
	 * from this file -- verify against the TAS3004 datasheet. */
	val[3] = 0xb0;
	val[4] = 0x60;
	val[5] = 0xa0;
	if (i2c_smbus_write_i2c_block_data(mix->i2c.client, TAS_REG_DRC,
					   6, val) < 0) {
		snd_printk(KERN_ERR "failed to set DRC\n");
		return -EINVAL;
	}
	DBG("(I) succeeded to set DRC (%u, %u)\n", val[0], val[1]);
	return 0;
}

/* ALSA info callback: the DRC range maximum depends on the codec model. */
static int tumbler_info_drc_value(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_info *uinfo)
{
	struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max =
		chip->model == PMAC_TUMBLER ? TAS3001_DRC_MAX : TAS3004_DRC_MAX;
	return 0;
}

/* ALSA get callback: report the cached DRC range. */
static int tumbler_get_drc_value(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
	struct pmac_tumbler *mix;
	if (! (mix = chip->mixer_data))
		return -ENODEV;
	ucontrol->value.integer.value[0] = mix->drc_range;
	return 0;
}

/* ALSA put callback: validate against the per-model maximum, cache the
 * new range, and push it to whichever codec is present.
 * Returns 1 on change, 0 if unchanged, negative on error. */
static int tumbler_put_drc_value(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
	struct pmac_tumbler *mix;
	unsigned int val;
	int change;

	if (! (mix = chip->mixer_data))
		return -ENODEV;
	val = ucontrol->value.integer.value[0];
	if (chip->model == PMAC_TUMBLER) {
		if (val > TAS3001_DRC_MAX)
			return -EINVAL;
	} else {
		if (val > TAS3004_DRC_MAX)
			return -EINVAL;
	}
	change = mix->drc_range != val;
	if (change) {
		mix->drc_range = val;
		if (chip->model == PMAC_TUMBLER)
			tumbler_set_drc(mix);
		else
			snapper_set_drc(mix);
	}
	return change;
}

/* ALSA get callback for the DRC enable switch. */
static int tumbler_get_drc_switch(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
	struct pmac_tumbler *mix;
	if (!
(mix = chip->mixer_data)) return -ENODEV; ucontrol->value.integer.value[0] = mix->drc_enable; return 0; } static int tumbler_put_drc_switch(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); struct pmac_tumbler *mix; int change; if (! (mix = chip->mixer_data)) return -ENODEV; change = mix->drc_enable != ucontrol->value.integer.value[0]; if (change) { mix->drc_enable = !!ucontrol->value.integer.value[0]; if (chip->model == PMAC_TUMBLER) tumbler_set_drc(mix); else snapper_set_drc(mix); } return change; } /* * mono volumes */ struct tumbler_mono_vol { int index; int reg; int bytes; unsigned int max; unsigned int *table; }; static int tumbler_set_mono_volume(struct pmac_tumbler *mix, struct tumbler_mono_vol *info) { unsigned char block[4]; unsigned int vol; int i; if (! mix->i2c.client) return -ENODEV; vol = mix->mono_vol[info->index]; if (vol >= info->max) vol = info->max - 1; vol = info->table[vol]; for (i = 0; i < info->bytes; i++) block[i] = (vol >> ((info->bytes - i - 1) * 8)) & 0xff; if (i2c_smbus_write_i2c_block_data(mix->i2c.client, info->reg, info->bytes, block) < 0) { snd_printk(KERN_ERR "failed to set mono volume %d\n", info->index); return -EINVAL; } return 0; } static int tumbler_info_mono(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct tumbler_mono_vol *info = (struct tumbler_mono_vol *)kcontrol->private_value; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = info->max - 1; return 0; } static int tumbler_get_mono(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct tumbler_mono_vol *info = (struct tumbler_mono_vol *)kcontrol->private_value; struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); struct pmac_tumbler *mix; if (! 
(mix = chip->mixer_data)) return -ENODEV; ucontrol->value.integer.value[0] = mix->mono_vol[info->index]; return 0; } static int tumbler_put_mono(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct tumbler_mono_vol *info = (struct tumbler_mono_vol *)kcontrol->private_value; struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); struct pmac_tumbler *mix; unsigned int vol; int change; if (! (mix = chip->mixer_data)) return -ENODEV; vol = ucontrol->value.integer.value[0]; if (vol >= info->max) return -EINVAL; change = mix->mono_vol[info->index] != vol; if (change) { mix->mono_vol[info->index] = vol; tumbler_set_mono_volume(mix, info); } return change; } /* TAS3001c mono volumes */ static struct tumbler_mono_vol tumbler_pcm_vol_info = { .index = VOL_IDX_PCM_MONO, .reg = TAS_REG_PCM, .bytes = 3, .max = ARRAY_SIZE(mixer_volume_table), .table = mixer_volume_table, }; static struct tumbler_mono_vol tumbler_bass_vol_info = { .index = VOL_IDX_BASS, .reg = TAS_REG_BASS, .bytes = 1, .max = ARRAY_SIZE(bass_volume_table), .table = bass_volume_table, }; static struct tumbler_mono_vol tumbler_treble_vol_info = { .index = VOL_IDX_TREBLE, .reg = TAS_REG_TREBLE, .bytes = 1, .max = ARRAY_SIZE(treble_volume_table), .table = treble_volume_table, }; /* TAS3004 mono volumes */ static struct tumbler_mono_vol snapper_bass_vol_info = { .index = VOL_IDX_BASS, .reg = TAS_REG_BASS, .bytes = 1, .max = ARRAY_SIZE(snapper_bass_volume_table), .table = snapper_bass_volume_table, }; static struct tumbler_mono_vol snapper_treble_vol_info = { .index = VOL_IDX_TREBLE, .reg = TAS_REG_TREBLE, .bytes = 1, .max = ARRAY_SIZE(snapper_treble_volume_table), .table = snapper_treble_volume_table, }; #define DEFINE_MONO(xname,type) { \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,\ .name = xname, \ .info = tumbler_info_mono, \ .get = tumbler_get_mono, \ .put = tumbler_put_mono, \ .private_value = (unsigned long)(&tumbler_##type##_vol_info), \ } #define DEFINE_SNAPPER_MONO(xname,type) { \ .iface = 
SNDRV_CTL_ELEM_IFACE_MIXER,\ .name = xname, \ .info = tumbler_info_mono, \ .get = tumbler_get_mono, \ .put = tumbler_put_mono, \ .private_value = (unsigned long)(&snapper_##type##_vol_info), \ } /* * snapper mixer volumes */ static int snapper_set_mix_vol1(struct pmac_tumbler *mix, int idx, int ch, int reg) { int i, j, vol; unsigned char block[9]; vol = mix->mix_vol[idx][ch]; if (vol >= ARRAY_SIZE(mixer_volume_table)) { vol = ARRAY_SIZE(mixer_volume_table) - 1; mix->mix_vol[idx][ch] = vol; } for (i = 0; i < 3; i++) { vol = mix->mix_vol[i][ch]; vol = mixer_volume_table[vol]; for (j = 0; j < 3; j++) block[i * 3 + j] = (vol >> ((2 - j) * 8)) & 0xff; } if (i2c_smbus_write_i2c_block_data(mix->i2c.client, reg, 9, block) < 0) { snd_printk(KERN_ERR "failed to set mono volume %d\n", reg); return -EINVAL; } return 0; } static int snapper_set_mix_vol(struct pmac_tumbler *mix, int idx) { if (! mix->i2c.client) return -ENODEV; if (snapper_set_mix_vol1(mix, idx, 0, TAS_REG_LMIX) < 0 || snapper_set_mix_vol1(mix, idx, 1, TAS_REG_RMIX) < 0) return -EINVAL; return 0; } static int snapper_info_mix(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = ARRAY_SIZE(mixer_volume_table) - 1; return 0; } static int snapper_get_mix(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int idx = (int)kcontrol->private_value; struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); struct pmac_tumbler *mix; if (! (mix = chip->mixer_data)) return -ENODEV; ucontrol->value.integer.value[0] = mix->mix_vol[idx][0]; ucontrol->value.integer.value[1] = mix->mix_vol[idx][1]; return 0; } static int snapper_put_mix(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int idx = (int)kcontrol->private_value; struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); struct pmac_tumbler *mix; unsigned int vol[2]; int change; if (! 
(mix = chip->mixer_data)) return -ENODEV; vol[0] = ucontrol->value.integer.value[0]; vol[1] = ucontrol->value.integer.value[1]; if (vol[0] >= ARRAY_SIZE(mixer_volume_table) || vol[1] >= ARRAY_SIZE(mixer_volume_table)) return -EINVAL; change = mix->mix_vol[idx][0] != vol[0] || mix->mix_vol[idx][1] != vol[1]; if (change) { mix->mix_vol[idx][0] = vol[0]; mix->mix_vol[idx][1] = vol[1]; snapper_set_mix_vol(mix, idx); } return change; } /* * mute switches. FIXME: Turn that into software mute when both outputs are muted * to avoid codec reset on ibook M7 */ enum { TUMBLER_MUTE_HP, TUMBLER_MUTE_AMP, TUMBLER_MUTE_LINE }; static int tumbler_get_mute_switch(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); struct pmac_tumbler *mix; struct pmac_gpio *gp; if (! (mix = chip->mixer_data)) return -ENODEV; switch(kcontrol->private_value) { case TUMBLER_MUTE_HP: gp = &mix->hp_mute; break; case TUMBLER_MUTE_AMP: gp = &mix->amp_mute; break; case TUMBLER_MUTE_LINE: gp = &mix->line_mute; break; default: gp = NULL; } if (gp == NULL) return -EINVAL; ucontrol->value.integer.value[0] = !check_audio_gpio(gp); return 0; } static int tumbler_put_mute_switch(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); struct pmac_tumbler *mix; struct pmac_gpio *gp; int val; #ifdef PMAC_SUPPORT_AUTOMUTE if (chip->update_automute && chip->auto_mute) return 0; /* don't touch in the auto-mute mode */ #endif if (! (mix = chip->mixer_data)) return -ENODEV; switch(kcontrol->private_value) { case TUMBLER_MUTE_HP: gp = &mix->hp_mute; break; case TUMBLER_MUTE_AMP: gp = &mix->amp_mute; break; case TUMBLER_MUTE_LINE: gp = &mix->line_mute; break; default: gp = NULL; } if (gp == NULL) return -EINVAL; val = ! check_audio_gpio(gp); if (val != ucontrol->value.integer.value[0]) { write_audio_gpio(gp, ! 
ucontrol->value.integer.value[0]); return 1; } return 0; } static int snapper_set_capture_source(struct pmac_tumbler *mix) { if (! mix->i2c.client) return -ENODEV; if (mix->capture_source) mix->acs |= 2; else mix->acs &= ~2; return i2c_smbus_write_byte_data(mix->i2c.client, TAS_REG_ACS, mix->acs); } static int snapper_info_capture_source(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static char *texts[2] = { "Line", "Mic" }; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = 2; if (uinfo->value.enumerated.item > 1) uinfo->value.enumerated.item = 1; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int snapper_get_capture_source(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); struct pmac_tumbler *mix = chip->mixer_data; ucontrol->value.enumerated.item[0] = mix->capture_source; return 0; } static int snapper_put_capture_source(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); struct pmac_tumbler *mix = chip->mixer_data; int change; change = ucontrol->value.enumerated.item[0] != mix->capture_source; if (change) { mix->capture_source = !!ucontrol->value.enumerated.item[0]; snapper_set_capture_source(mix); } return change; } #define DEFINE_SNAPPER_MIX(xname,idx,ofs) { \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,\ .name = xname, \ .info = snapper_info_mix, \ .get = snapper_get_mix, \ .put = snapper_put_mix, \ .index = idx,\ .private_value = ofs, \ } /* */ static struct snd_kcontrol_new tumbler_mixers[] __devinitdata = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Master Playback Volume", .info = tumbler_info_master_volume, .get = tumbler_get_master_volume, .put = tumbler_put_master_volume }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Master Playback Switch", .info = snd_pmac_boolean_stereo_info, .get = 
tumbler_get_master_switch, .put = tumbler_put_master_switch }, DEFINE_MONO("Tone Control - Bass", bass), DEFINE_MONO("Tone Control - Treble", treble), DEFINE_MONO("PCM Playback Volume", pcm), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "DRC Range", .info = tumbler_info_drc_value, .get = tumbler_get_drc_value, .put = tumbler_put_drc_value }, }; static struct snd_kcontrol_new snapper_mixers[] __devinitdata = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Master Playback Volume", .info = tumbler_info_master_volume, .get = tumbler_get_master_volume, .put = tumbler_put_master_volume }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Master Playback Switch", .info = snd_pmac_boolean_stereo_info, .get = tumbler_get_master_switch, .put = tumbler_put_master_switch }, DEFINE_SNAPPER_MIX("PCM Playback Volume", 0, VOL_IDX_PCM), /* Alternative PCM is assigned to Mic analog loopback on iBook G4 */ DEFINE_SNAPPER_MIX("Mic Playback Volume", 0, VOL_IDX_PCM2), DEFINE_SNAPPER_MIX("Monitor Mix Volume", 0, VOL_IDX_ADC), DEFINE_SNAPPER_MONO("Tone Control - Bass", bass), DEFINE_SNAPPER_MONO("Tone Control - Treble", treble), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "DRC Range", .info = tumbler_info_drc_value, .get = tumbler_get_drc_value, .put = tumbler_put_drc_value }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Input Source", /* FIXME: "Capture Source" doesn't work properly */ .info = snapper_info_capture_source, .get = snapper_get_capture_source, .put = snapper_put_capture_source }, }; static struct snd_kcontrol_new tumbler_hp_sw __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Headphone Playback Switch", .info = snd_pmac_boolean_mono_info, .get = tumbler_get_mute_switch, .put = tumbler_put_mute_switch, .private_value = TUMBLER_MUTE_HP, }; static struct snd_kcontrol_new tumbler_speaker_sw __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Speaker Playback Switch", .info = snd_pmac_boolean_mono_info, .get = tumbler_get_mute_switch, .put = 
tumbler_put_mute_switch, .private_value = TUMBLER_MUTE_AMP, }; static struct snd_kcontrol_new tumbler_lineout_sw __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Line Out Playback Switch", .info = snd_pmac_boolean_mono_info, .get = tumbler_get_mute_switch, .put = tumbler_put_mute_switch, .private_value = TUMBLER_MUTE_LINE, }; static struct snd_kcontrol_new tumbler_drc_sw __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "DRC Switch", .info = snd_pmac_boolean_mono_info, .get = tumbler_get_drc_switch, .put = tumbler_put_drc_switch }; #ifdef PMAC_SUPPORT_AUTOMUTE /* * auto-mute stuffs */ static int tumbler_detect_headphone(struct snd_pmac *chip) { struct pmac_tumbler *mix = chip->mixer_data; int detect = 0; if (mix->hp_detect.addr) detect |= read_audio_gpio(&mix->hp_detect); return detect; } static int tumbler_detect_lineout(struct snd_pmac *chip) { struct pmac_tumbler *mix = chip->mixer_data; int detect = 0; if (mix->line_detect.addr) detect |= read_audio_gpio(&mix->line_detect); return detect; } static void check_mute(struct snd_pmac *chip, struct pmac_gpio *gp, int val, int do_notify, struct snd_kcontrol *sw) { if (check_audio_gpio(gp) != val) { write_audio_gpio(gp, val); if (do_notify) snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &sw->id); } } static struct work_struct device_change; static struct snd_pmac *device_change_chip; static void device_change_handler(struct work_struct *work) { struct snd_pmac *chip = device_change_chip; struct pmac_tumbler *mix; int headphone, lineout; if (!chip) return; mix = chip->mixer_data; if (snd_BUG_ON(!mix)) return; headphone = tumbler_detect_headphone(chip); lineout = tumbler_detect_lineout(chip); DBG("headphone: %d, lineout: %d\n", headphone, lineout); if (headphone || lineout) { /* unmute headphone/lineout & mute speaker */ if (headphone) check_mute(chip, &mix->hp_mute, 0, mix->auto_mute_notify, chip->master_sw_ctl); if (lineout && mix->line_mute.addr != 0) check_mute(chip, 
&mix->line_mute, 0, mix->auto_mute_notify, chip->lineout_sw_ctl); if (mix->anded_reset) msleep(10); check_mute(chip, &mix->amp_mute, !IS_G4DA, mix->auto_mute_notify, chip->speaker_sw_ctl); } else { /* unmute speaker, mute others */ check_mute(chip, &mix->amp_mute, 0, mix->auto_mute_notify, chip->speaker_sw_ctl); if (mix->anded_reset) msleep(10); check_mute(chip, &mix->hp_mute, 1, mix->auto_mute_notify, chip->master_sw_ctl); if (mix->line_mute.addr != 0) check_mute(chip, &mix->line_mute, 1, mix->auto_mute_notify, chip->lineout_sw_ctl); } if (mix->auto_mute_notify) snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &chip->hp_detect_ctl->id); #ifdef CONFIG_SND_POWERMAC_AUTO_DRC mix->drc_enable = ! (headphone || lineout); if (mix->auto_mute_notify) snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &chip->drc_sw_ctl->id); if (chip->model == PMAC_TUMBLER) tumbler_set_drc(mix); else snapper_set_drc(mix); #endif /* reset the master volume so the correct amplification is applied */ tumbler_set_master_volume(mix); } static void tumbler_update_automute(struct snd_pmac *chip, int do_notify) { if (chip->auto_mute) { struct pmac_tumbler *mix; mix = chip->mixer_data; if (snd_BUG_ON(!mix)) return; mix->auto_mute_notify = do_notify; schedule_work(&device_change); } } #endif /* PMAC_SUPPORT_AUTOMUTE */ /* interrupt - headphone plug changed */ static irqreturn_t headphone_intr(int irq, void *devid) { struct snd_pmac *chip = devid; if (chip->update_automute && chip->initialized) { chip->update_automute(chip, 1); return IRQ_HANDLED; } return IRQ_NONE; } /* look for audio-gpio device */ static struct device_node *find_audio_device(const char *name) { struct device_node *gpiop; struct device_node *np; gpiop = of_find_node_by_name(NULL, "gpio"); if (! 
gpiop) return NULL; for (np = of_get_next_child(gpiop, NULL); np; np = of_get_next_child(gpiop, np)) { const char *property = of_get_property(np, "audio-gpio", NULL); if (property && strcmp(property, name) == 0) break; } of_node_put(gpiop); return np; } /* look for audio-gpio device */ static struct device_node *find_compatible_audio_device(const char *name) { struct device_node *gpiop; struct device_node *np; gpiop = of_find_node_by_name(NULL, "gpio"); if (!gpiop) return NULL; for (np = of_get_next_child(gpiop, NULL); np; np = of_get_next_child(gpiop, np)) { if (of_device_is_compatible(np, name)) break; } of_node_put(gpiop); return np; } /* find an audio device and get its address */ static long tumbler_find_device(const char *device, const char *platform, struct pmac_gpio *gp, int is_compatible) { struct device_node *node; const u32 *base; u32 addr; long ret; if (is_compatible) node = find_compatible_audio_device(device); else node = find_audio_device(device); if (! node) { DBG("(W) cannot find audio device %s !\n", device); snd_printdd("cannot find device %s\n", device); return -ENODEV; } base = of_get_property(node, "AAPL,address", NULL); if (! base) { base = of_get_property(node, "reg", NULL); if (!base) { DBG("(E) cannot find address for device %s !\n", device); snd_printd("cannot find address for device %s\n", device); of_node_put(node); return -ENODEV; } addr = *base; if (addr < 0x50) addr += 0x50; } else addr = *base; gp->addr = addr & 0x0000ffff; /* Try to find the active state, default to 0 ! */ base = of_get_property(node, "audio-gpio-active-state", NULL); if (base) { gp->active_state = *base; gp->active_val = (*base) ? 0x5 : 0x4; gp->inactive_val = (*base) ? 
0x4 : 0x5; } else { const u32 *prop = NULL; gp->active_state = IS_G4DA && !strncmp(device, "keywest-gpio1", 13); gp->active_val = 0x4; gp->inactive_val = 0x5; /* Here are some crude hacks to extract the GPIO polarity and * open collector informations out of the do-platform script * as we don't yet have an interpreter for these things */ if (platform) prop = of_get_property(node, platform, NULL); if (prop) { if (prop[3] == 0x9 && prop[4] == 0x9) { gp->active_val = 0xd; gp->inactive_val = 0xc; } if (prop[3] == 0x1 && prop[4] == 0x1) { gp->active_val = 0x5; gp->inactive_val = 0x4; } } } DBG("(I) GPIO device %s found, offset: %x, active state: %d !\n", device, gp->addr, gp->active_state); ret = irq_of_parse_and_map(node, 0); of_node_put(node); return ret; } /* reset audio */ static void tumbler_reset_audio(struct snd_pmac *chip) { struct pmac_tumbler *mix = chip->mixer_data; if (mix->anded_reset) { DBG("(I) codec anded reset !\n"); write_audio_gpio(&mix->hp_mute, 0); write_audio_gpio(&mix->amp_mute, 0); msleep(200); write_audio_gpio(&mix->hp_mute, 1); write_audio_gpio(&mix->amp_mute, 1); msleep(100); write_audio_gpio(&mix->hp_mute, 0); write_audio_gpio(&mix->amp_mute, 0); msleep(100); } else { DBG("(I) codec normal reset !\n"); write_audio_gpio(&mix->audio_reset, 0); msleep(200); write_audio_gpio(&mix->audio_reset, 1); msleep(100); write_audio_gpio(&mix->audio_reset, 0); msleep(100); } } #ifdef CONFIG_PM /* suspend mixer */ static void tumbler_suspend(struct snd_pmac *chip) { struct pmac_tumbler *mix = chip->mixer_data; if (mix->headphone_irq >= 0) disable_irq(mix->headphone_irq); if (mix->lineout_irq >= 0) disable_irq(mix->lineout_irq); mix->save_master_switch[0] = mix->master_switch[0]; mix->save_master_switch[1] = mix->master_switch[1]; mix->save_master_vol[0] = mix->master_vol[0]; mix->save_master_vol[1] = mix->master_vol[1]; mix->master_switch[0] = mix->master_switch[1] = 0; tumbler_set_master_volume(mix); if (!mix->anded_reset) { write_audio_gpio(&mix->amp_mute, 
1); write_audio_gpio(&mix->hp_mute, 1); } if (chip->model == PMAC_SNAPPER) { mix->acs |= 1; i2c_smbus_write_byte_data(mix->i2c.client, TAS_REG_ACS, mix->acs); } if (mix->anded_reset) { write_audio_gpio(&mix->amp_mute, 1); write_audio_gpio(&mix->hp_mute, 1); } else write_audio_gpio(&mix->audio_reset, 1); } /* resume mixer */ static void tumbler_resume(struct snd_pmac *chip) { struct pmac_tumbler *mix = chip->mixer_data; mix->acs &= ~1; mix->master_switch[0] = mix->save_master_switch[0]; mix->master_switch[1] = mix->save_master_switch[1]; mix->master_vol[0] = mix->save_master_vol[0]; mix->master_vol[1] = mix->save_master_vol[1]; tumbler_reset_audio(chip); if (mix->i2c.client && mix->i2c.init_client) { if (mix->i2c.init_client(&mix->i2c) < 0) printk(KERN_ERR "tumbler_init_client error\n"); } else printk(KERN_ERR "tumbler: i2c is not initialized\n"); if (chip->model == PMAC_TUMBLER) { tumbler_set_mono_volume(mix, &tumbler_pcm_vol_info); tumbler_set_mono_volume(mix, &tumbler_bass_vol_info); tumbler_set_mono_volume(mix, &tumbler_treble_vol_info); tumbler_set_drc(mix); } else { snapper_set_mix_vol(mix, VOL_IDX_PCM); snapper_set_mix_vol(mix, VOL_IDX_PCM2); snapper_set_mix_vol(mix, VOL_IDX_ADC); tumbler_set_mono_volume(mix, &snapper_bass_vol_info); tumbler_set_mono_volume(mix, &snapper_treble_vol_info); snapper_set_drc(mix); snapper_set_capture_source(mix); } tumbler_set_master_volume(mix); if (chip->update_automute) chip->update_automute(chip, 0); if (mix->headphone_irq >= 0) { unsigned char val; enable_irq(mix->headphone_irq); /* activate headphone status interrupts */ val = do_gpio_read(&mix->hp_detect); do_gpio_write(&mix->hp_detect, val | 0x80); } if (mix->lineout_irq >= 0) enable_irq(mix->lineout_irq); } #endif /* initialize tumbler */ static int __devinit tumbler_init(struct snd_pmac *chip) { int irq; struct pmac_tumbler *mix = chip->mixer_data; if (tumbler_find_device("audio-hw-reset", "platform-do-hw-reset", &mix->audio_reset, 0) < 0) 
tumbler_find_device("hw-reset", "platform-do-hw-reset", &mix->audio_reset, 1); if (tumbler_find_device("amp-mute", "platform-do-amp-mute", &mix->amp_mute, 0) < 0) tumbler_find_device("amp-mute", "platform-do-amp-mute", &mix->amp_mute, 1); if (tumbler_find_device("headphone-mute", "platform-do-headphone-mute", &mix->hp_mute, 0) < 0) tumbler_find_device("headphone-mute", "platform-do-headphone-mute", &mix->hp_mute, 1); if (tumbler_find_device("line-output-mute", "platform-do-lineout-mute", &mix->line_mute, 0) < 0) tumbler_find_device("line-output-mute", "platform-do-lineout-mute", &mix->line_mute, 1); irq = tumbler_find_device("headphone-detect", NULL, &mix->hp_detect, 0); if (irq <= NO_IRQ) irq = tumbler_find_device("headphone-detect", NULL, &mix->hp_detect, 1); if (irq <= NO_IRQ) irq = tumbler_find_device("keywest-gpio15", NULL, &mix->hp_detect, 1); mix->headphone_irq = irq; irq = tumbler_find_device("line-output-detect", NULL, &mix->line_detect, 0); if (irq <= NO_IRQ) irq = tumbler_find_device("line-output-detect", NULL, &mix->line_detect, 1); if (IS_G4DA && irq <= NO_IRQ) irq = tumbler_find_device("keywest-gpio16", NULL, &mix->line_detect, 1); mix->lineout_irq = irq; tumbler_reset_audio(chip); return 0; } static void tumbler_cleanup(struct snd_pmac *chip) { struct pmac_tumbler *mix = chip->mixer_data; if (! mix) return; if (mix->headphone_irq >= 0) free_irq(mix->headphone_irq, chip); if (mix->lineout_irq >= 0) free_irq(mix->lineout_irq, chip); tumbler_gpio_free(&mix->audio_reset); tumbler_gpio_free(&mix->amp_mute); tumbler_gpio_free(&mix->hp_mute); tumbler_gpio_free(&mix->hp_detect); snd_pmac_keywest_cleanup(&mix->i2c); kfree(mix); chip->mixer_data = NULL; } /* exported */ int __devinit snd_pmac_tumbler_init(struct snd_pmac *chip) { int i, err; struct pmac_tumbler *mix; const u32 *paddr; struct device_node *tas_node, *np; char *chipname; request_module("i2c-powermac"); mix = kzalloc(sizeof(*mix), GFP_KERNEL); if (! 
mix) return -ENOMEM; mix->headphone_irq = -1; chip->mixer_data = mix; chip->mixer_free = tumbler_cleanup; mix->anded_reset = 0; mix->reset_on_sleep = 1; for (np = chip->node->child; np; np = np->sibling) { if (!strcmp(np->name, "sound")) { if (of_get_property(np, "has-anded-reset", NULL)) mix->anded_reset = 1; if (of_get_property(np, "layout-id", NULL)) mix->reset_on_sleep = 0; break; } } if ((err = tumbler_init(chip)) < 0) return err; /* set up TAS */ tas_node = of_find_node_by_name(NULL, "deq"); if (tas_node == NULL) tas_node = of_find_node_by_name(NULL, "codec"); if (tas_node == NULL) return -ENODEV; paddr = of_get_property(tas_node, "i2c-address", NULL); if (paddr == NULL) paddr = of_get_property(tas_node, "reg", NULL); if (paddr) mix->i2c.addr = (*paddr) >> 1; else mix->i2c.addr = TAS_I2C_ADDR; of_node_put(tas_node); DBG("(I) TAS i2c address is: %x\n", mix->i2c.addr); if (chip->model == PMAC_TUMBLER) { mix->i2c.init_client = tumbler_init_client; mix->i2c.name = "TAS3001c"; chipname = "Tumbler"; } else { mix->i2c.init_client = snapper_init_client; mix->i2c.name = "TAS3004"; chipname = "Snapper"; } if ((err = snd_pmac_keywest_init(&mix->i2c)) < 0) return err; /* * build mixers */ sprintf(chip->card->mixername, "PowerMac %s", chipname); if (chip->model == PMAC_TUMBLER) { for (i = 0; i < ARRAY_SIZE(tumbler_mixers); i++) { if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&tumbler_mixers[i], chip))) < 0) return err; } } else { for (i = 0; i < ARRAY_SIZE(snapper_mixers); i++) { if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snapper_mixers[i], chip))) < 0) return err; } } chip->master_sw_ctl = snd_ctl_new1(&tumbler_hp_sw, chip); if ((err = snd_ctl_add(chip->card, chip->master_sw_ctl)) < 0) return err; chip->speaker_sw_ctl = snd_ctl_new1(&tumbler_speaker_sw, chip); if ((err = snd_ctl_add(chip->card, chip->speaker_sw_ctl)) < 0) return err; if (mix->line_mute.addr != 0) { chip->lineout_sw_ctl = snd_ctl_new1(&tumbler_lineout_sw, chip); if ((err = snd_ctl_add(chip->card, 
chip->lineout_sw_ctl)) < 0) return err; } chip->drc_sw_ctl = snd_ctl_new1(&tumbler_drc_sw, chip); if ((err = snd_ctl_add(chip->card, chip->drc_sw_ctl)) < 0) return err; /* set initial DRC range to 60% */ if (chip->model == PMAC_TUMBLER) mix->drc_range = (TAS3001_DRC_MAX * 6) / 10; else mix->drc_range = (TAS3004_DRC_MAX * 6) / 10; mix->drc_enable = 1; /* will be changed later if AUTO_DRC is set */ if (chip->model == PMAC_TUMBLER) tumbler_set_drc(mix); else snapper_set_drc(mix); #ifdef CONFIG_PM chip->suspend = tumbler_suspend; chip->resume = tumbler_resume; #endif INIT_WORK(&device_change, device_change_handler); device_change_chip = chip; #ifdef PMAC_SUPPORT_AUTOMUTE if ((mix->headphone_irq >=0 || mix->lineout_irq >= 0) && (err = snd_pmac_add_automute(chip)) < 0) return err; chip->detect_headphone = tumbler_detect_headphone; chip->update_automute = tumbler_update_automute; tumbler_update_automute(chip, 0); /* update the status only */ /* activate headphone status interrupts */ if (mix->headphone_irq >= 0) { unsigned char val; if ((err = request_irq(mix->headphone_irq, headphone_intr, 0, "Sound Headphone Detection", chip)) < 0) return 0; /* activate headphone status interrupts */ val = do_gpio_read(&mix->hp_detect); do_gpio_write(&mix->hp_detect, val | 0x80); } if (mix->lineout_irq >= 0) { unsigned char val; if ((err = request_irq(mix->lineout_irq, headphone_intr, 0, "Sound Lineout Detection", chip)) < 0) return 0; /* activate headphone status interrupts */ val = do_gpio_read(&mix->line_detect); do_gpio_write(&mix->line_detect, val | 0x80); } #endif return 0; }
gpl-2.0
codename13/android_kernel_ba2x_2.0
drivers/media/dvb/frontends/lnbp21.c
9286
5105
/*
 * lnbp21.c - driver for lnb supply and control ic lnbp21
 *
 * Copyright (C) 2006, 2009 Oliver Endriss <o.endriss@gmx.de>
 * Copyright (C) 2009 Igor M. Liplianin <liplianin@netup.ru>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
 *
 *
 * the project's page is at http://www.linuxtv.org
 */

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>

#include "dvb_frontend.h"
#include "lnbp21.h"
#include "lnbh24.h"

/* Per-frontend driver state, hung off fe->sec_priv. */
struct lnbp21 {
	u8			config;		/* shadow of the chip's config byte */
	u8			override_or;	/* bits forced to '1' on every write */
	u8			override_and;	/* bits forced to '0' on every write */
	struct i2c_adapter	*i2c;
	u8			i2c_addr;
};

/*
 * Apply the caller-supplied override masks to the shadow config byte
 * and write it out to the chip over i2c.
 *
 * Returns 0 on success, -EIO if the i2c transfer fails.
 */
static int lnbp21_write_config(struct lnbp21 *lnbp21)
{
	struct i2c_msg msg = {
		.addr  = lnbp21->i2c_addr,
		.flags = 0,
		.buf   = &lnbp21->config,
		.len   = sizeof(lnbp21->config)
	};

	lnbp21->config |= lnbp21->override_or;
	lnbp21->config &= lnbp21->override_and;

	return (i2c_transfer(lnbp21->i2c, &msg, 1) == 1) ? 0 : -EIO;
}

/*
 * Frontend set_voltage callback: select LNB supply voltage
 * (off / 13V / 18V) via the EN and VSEL bits.
 */
static int lnbp21_set_voltage(struct dvb_frontend *fe,
			      fe_sec_voltage_t voltage)
{
	struct lnbp21 *lnbp21 = fe->sec_priv;

	lnbp21->config &= ~(LNBP21_VSEL | LNBP21_EN);

	switch (voltage) {
	case SEC_VOLTAGE_OFF:
		break;
	case SEC_VOLTAGE_13:
		lnbp21->config |= LNBP21_EN;
		break;
	case SEC_VOLTAGE_18:
		lnbp21->config |= (LNBP21_EN | LNBP21_VSEL);
		break;
	default:
		return -EINVAL;
	}

	return lnbp21_write_config(lnbp21);
}

/*
 * Frontend enable_high_lnb_voltage callback: raise the LNB voltage
 * slightly (LLC bit) to compensate for long-cable losses.
 */
static int lnbp21_enable_high_lnb_voltage(struct dvb_frontend *fe, long arg)
{
	struct lnbp21 *lnbp21 = fe->sec_priv;

	if (arg)
		lnbp21->config |= LNBP21_LLC;
	else
		lnbp21->config &= ~LNBP21_LLC;

	return lnbp21_write_config(lnbp21);
}

/*
 * Frontend set_tone callback: gate the 22kHz continuous tone
 * via the TEN bit. Only installed when tone control is not left
 * to the demodulator (see lnbx2x_attach()).
 */
static int lnbp21_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
{
	struct lnbp21 *lnbp21 = fe->sec_priv;

	switch (tone) {
	case SEC_TONE_OFF:
		lnbp21->config &= ~LNBP21_TEN;
		break;
	case SEC_TONE_ON:
		lnbp21->config |= LNBP21_TEN;
		break;
	default:
		return -EINVAL;
	}

	return lnbp21_write_config(lnbp21);
}

/* Frontend release_sec callback: power the LNB off and free our state. */
static void lnbp21_release(struct dvb_frontend *fe)
{
	/* LNBP power off */
	lnbp21_set_voltage(fe, SEC_VOLTAGE_OFF);

	/* free data */
	kfree(fe->sec_priv);
	fe->sec_priv = NULL;
}

/*
 * Common attach helper for the lnbp21/lnbh24 family.
 *
 * @fe:             frontend to hook the SEC callbacks into
 * @i2c:            adapter the chip sits on
 * @override_set:   config bits the board requires forced to '1'
 * @override_clear: config bits the board requires forced to '0'
 * @i2c_addr:       7-bit i2c address of the chip
 * @config:         initial config byte (chip-variant specific)
 *
 * Probes the chip by writing the "voltage off" config; on success
 * installs the release/voltage (and optionally tone) callbacks and
 * returns @fe, otherwise returns NULL.
 */
static struct dvb_frontend *lnbx2x_attach(struct dvb_frontend *fe,
					  struct i2c_adapter *i2c,
					  u8 override_set, u8 override_clear,
					  u8 i2c_addr, u8 config)
{
	struct lnbp21 *lnbp21 = kmalloc(sizeof(*lnbp21), GFP_KERNEL);

	if (!lnbp21)
		return NULL;

	/* default configuration */
	lnbp21->config = config;
	lnbp21->i2c = i2c;
	lnbp21->i2c_addr = i2c_addr;
	fe->sec_priv = lnbp21;

	/* bits which should be forced to '1' */
	lnbp21->override_or = override_set;

	/* bits which should be forced to '0' */
	lnbp21->override_and = ~override_clear;

	/* detect if it is present or not */
	if (lnbp21_set_voltage(fe, SEC_VOLTAGE_OFF)) {
		kfree(lnbp21);
		fe->sec_priv = NULL;
		return NULL;
	}

	/* install release callback */
	fe->ops.release_sec = lnbp21_release;

	/* override frontend ops */
	fe->ops.set_voltage = lnbp21_set_voltage;
	fe->ops.enable_high_lnb_voltage = lnbp21_enable_high_lnb_voltage;
	if (!(override_clear & LNBH24_TEN)) /*22kHz logic controlled by demod*/
		fe->ops.set_tone = lnbp21_set_tone;
	printk(KERN_INFO "LNBx2x attached on addr=%x\n", lnbp21->i2c_addr);

	return fe;
}

/* Public attach entry point for the lnbh24 chip variant. */
struct dvb_frontend *lnbh24_attach(struct dvb_frontend *fe,
				   struct i2c_adapter *i2c,
				   u8 override_set, u8 override_clear,
				   u8 i2c_addr)
{
	return lnbx2x_attach(fe, i2c, override_set, override_clear,
			     i2c_addr, LNBH24_TTX);
}
EXPORT_SYMBOL(lnbh24_attach);

/* Public attach entry point for the lnbp21 chip (fixed addr 0x08). */
struct dvb_frontend *lnbp21_attach(struct dvb_frontend *fe,
				   struct i2c_adapter *i2c,
				   u8 override_set, u8 override_clear)
{
	return lnbx2x_attach(fe, i2c, override_set, override_clear,
			     0x08, LNBP21_ISEL);
}
EXPORT_SYMBOL(lnbp21_attach);

MODULE_DESCRIPTION("Driver for lnb supply and control ic lnbp21, lnbh24");
MODULE_AUTHOR("Oliver Endriss, Igor M. Liplianin");
MODULE_LICENSE("GPL");
gpl-2.0
zhenyw/linux
drivers/scsi/cxlflash/main.c
71
72671
/* * CXL Flash Device Driver * * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation * * Copyright (C) 2015 IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/delay.h> #include <linux/list.h> #include <linux/module.h> #include <linux/pci.h> #include <asm/unaligned.h> #include <misc/cxl.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_host.h> #include <uapi/scsi/cxlflash_ioctl.h> #include "main.h" #include "sislite.h" #include "common.h" MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME); MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>"); MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>"); MODULE_LICENSE("GPL"); /** * cmd_checkout() - checks out an AFU command * @afu: AFU to checkout from. * * Commands are checked out in a round-robin fashion. Note that since * the command pool is larger than the hardware queue, the majority of * times we will only loop once or twice before getting a command. The * buffer and CDB within the command are initialized (zeroed) prior to * returning. * * Return: The checked out command or NULL when command pool is empty. */ static struct afu_cmd *cmd_checkout(struct afu *afu) { int k, dec = CXLFLASH_NUM_CMDS; struct afu_cmd *cmd; while (dec--) { k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1)); cmd = &afu->cmd[k]; if (!atomic_dec_if_positive(&cmd->free)) { pr_devel("%s: returning found index=%d cmd=%p\n", __func__, cmd->slot, cmd); memset(cmd->buf, 0, CMD_BUFSIZE); memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb)); return cmd; } } return NULL; } /** * cmd_checkin() - checks in an AFU command * @cmd: AFU command to checkin. * * Safe to pass commands that have already been checked in. 
Several * internal tracking fields are reset as part of the checkin. Note * that these are intentionally reset prior to toggling the free bit * to avoid clobbering values in the event that the command is checked * out right away. */ static void cmd_checkin(struct afu_cmd *cmd) { cmd->rcb.scp = NULL; cmd->rcb.timeout = 0; cmd->sa.ioasc = 0; cmd->cmd_tmf = false; cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */ if (unlikely(atomic_inc_return(&cmd->free) != 1)) { pr_err("%s: Freeing cmd (%d) that is not in use!\n", __func__, cmd->slot); return; } pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot); } /** * process_cmd_err() - command error handler * @cmd: AFU command that experienced the error. * @scp: SCSI command associated with the AFU command in error. * * Translates error bits from AFU command to SCSI command results. */ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp) { struct sisl_ioarcb *ioarcb; struct sisl_ioasa *ioasa; u32 resid; if (unlikely(!cmd)) return; ioarcb = &(cmd->rcb); ioasa = &(cmd->sa); if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) { resid = ioasa->resid; scsi_set_resid(scp, resid); pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n", __func__, cmd, scp, resid); } if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) { pr_debug("%s: cmd underrun cmd = %p scp = %p\n", __func__, cmd, scp); scp->result = (DID_ERROR << 16); } pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d " "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n", __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra); if (ioasa->rc.scsi_rc) { /* We have a SCSI status */ if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) { memcpy(scp->sense_buffer, ioasa->sense_data, SISL_SENSE_DATA_LEN); scp->result = ioasa->rc.scsi_rc; } else scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16); } /* * We encountered an error. Set scp->result based on nature * of error. 
*/ if (ioasa->rc.fc_rc) { /* We have an FC status */ switch (ioasa->rc.fc_rc) { case SISL_FC_RC_LINKDOWN: scp->result = (DID_REQUEUE << 16); break; case SISL_FC_RC_RESID: /* This indicates an FCP resid underrun */ if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) { /* If the SISL_RC_FLAGS_OVERRUN flag was set, * then we will handle this error else where. * If not then we must handle it here. * This is probably an AFU bug. */ scp->result = (DID_ERROR << 16); } break; case SISL_FC_RC_RESIDERR: /* Resid mismatch between adapter and device */ case SISL_FC_RC_TGTABORT: case SISL_FC_RC_ABORTOK: case SISL_FC_RC_ABORTFAIL: case SISL_FC_RC_NOLOGI: case SISL_FC_RC_ABORTPEND: case SISL_FC_RC_WRABORTPEND: case SISL_FC_RC_NOEXP: case SISL_FC_RC_INUSE: scp->result = (DID_ERROR << 16); break; } } if (ioasa->rc.afu_rc) { /* We have an AFU error */ switch (ioasa->rc.afu_rc) { case SISL_AFU_RC_NO_CHANNELS: scp->result = (DID_NO_CONNECT << 16); break; case SISL_AFU_RC_DATA_DMA_ERR: switch (ioasa->afu_extra) { case SISL_AFU_DMA_ERR_PAGE_IN: /* Retry */ scp->result = (DID_IMM_RETRY << 16); break; case SISL_AFU_DMA_ERR_INVALID_EA: default: scp->result = (DID_ERROR << 16); } break; case SISL_AFU_RC_OUT_OF_DATA_BUFS: /* Retry */ scp->result = (DID_ALLOC_FAILURE << 16); break; default: scp->result = (DID_ERROR << 16); } } } /** * cmd_complete() - command completion handler * @cmd: AFU command that has completed. * * Prepares and submits command that has either completed or timed out to * the SCSI stack. Checks AFU command back into command pool for non-internal * (rcb.scp populated) commands. 
*/ static void cmd_complete(struct afu_cmd *cmd) { struct scsi_cmnd *scp; ulong lock_flags; struct afu *afu = cmd->parent; struct cxlflash_cfg *cfg = afu->parent; bool cmd_is_tmf; spin_lock_irqsave(&cmd->slock, lock_flags); cmd->sa.host_use_b[0] |= B_DONE; spin_unlock_irqrestore(&cmd->slock, lock_flags); if (cmd->rcb.scp) { scp = cmd->rcb.scp; if (unlikely(cmd->sa.ioasc)) process_cmd_err(cmd, scp); else scp->result = (DID_OK << 16); cmd_is_tmf = cmd->cmd_tmf; cmd_checkin(cmd); /* Don't use cmd after here */ pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X " "ioasc=%d\n", __func__, scp, scp->result, cmd->sa.ioasc); scsi_dma_unmap(scp); scp->scsi_done(scp); if (cmd_is_tmf) { spin_lock_irqsave(&cfg->tmf_slock, lock_flags); cfg->tmf_active = false; wake_up_all_locked(&cfg->tmf_waitq); spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); } } else complete(&cmd->cevent); } /** * context_reset() - timeout handler for AFU commands * @cmd: AFU command that timed out. * * Sends a reset to the AFU. */ static void context_reset(struct afu_cmd *cmd) { int nretry = 0; u64 rrin = 0x1; u64 room = 0; struct afu *afu = cmd->parent; ulong lock_flags; pr_debug("%s: cmd=%p\n", __func__, cmd); spin_lock_irqsave(&cmd->slock, lock_flags); /* Already completed? */ if (cmd->sa.host_use_b[0] & B_DONE) { spin_unlock_irqrestore(&cmd->slock, lock_flags); return; } cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT); spin_unlock_irqrestore(&cmd->slock, lock_flags); /* * We really want to send this reset at all costs, so spread * out wait time on successive retries for available room. 
*/ do { room = readq_be(&afu->host_map->cmd_room); atomic64_set(&afu->room, room); if (room) goto write_rrin; udelay(1 << nretry); } while (nretry++ < MC_ROOM_RETRY_CNT); pr_err("%s: no cmd_room to send reset\n", __func__); return; write_rrin: nretry = 0; writeq_be(rrin, &afu->host_map->ioarrin); do { rrin = readq_be(&afu->host_map->ioarrin); if (rrin != 0x1) break; /* Double delay each time */ udelay(1 << nretry); } while (nretry++ < MC_ROOM_RETRY_CNT); } /** * send_cmd() - sends an AFU command * @afu: AFU associated with the host. * @cmd: AFU command to send. * * Return: * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure */ static int send_cmd(struct afu *afu, struct afu_cmd *cmd) { struct cxlflash_cfg *cfg = afu->parent; struct device *dev = &cfg->dev->dev; int nretry = 0; int rc = 0; u64 room; long newval; /* * This routine is used by critical users such an AFU sync and to * send a task management function (TMF). Thus we want to retry a * bit before returning an error. To avoid the performance penalty * of MMIO, we spread the update of 'room' over multiple commands. */ retry: newval = atomic64_dec_if_positive(&afu->room); if (!newval) { do { room = readq_be(&afu->host_map->cmd_room); atomic64_set(&afu->room, room); if (room) goto write_ioarrin; udelay(1 << nretry); } while (nretry++ < MC_ROOM_RETRY_CNT); dev_err(dev, "%s: no cmd_room to send 0x%X\n", __func__, cmd->rcb.cdb[0]); goto no_room; } else if (unlikely(newval < 0)) { /* This should be rare. i.e. Only if two threads race and * decrement before the MMIO read is done. In this case * just benefit from the other thread having updated * afu->room. 
*/ if (nretry++ < MC_ROOM_RETRY_CNT) { udelay(1 << nretry); goto retry; } goto no_room; } write_ioarrin: writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin); out: pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd, cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc); return rc; no_room: afu->read_room = true; kref_get(&cfg->afu->mapcount); schedule_work(&cfg->work_q); rc = SCSI_MLQUEUE_HOST_BUSY; goto out; } /** * wait_resp() - polls for a response or timeout to a sent AFU command * @afu: AFU associated with the host. * @cmd: AFU command that was sent. */ static void wait_resp(struct afu *afu, struct afu_cmd *cmd) { ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000); timeout = wait_for_completion_timeout(&cmd->cevent, timeout); if (!timeout) context_reset(cmd); if (unlikely(cmd->sa.ioasc != 0)) pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, " "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0], cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc, cmd->sa.rc.fc_rc); } /** * send_tmf() - sends a Task Management Function (TMF) * @afu: AFU to checkout from. * @scp: SCSI command from stack. * @tmfcmd: TMF command to send. 
* * Return: * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure */ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd) { struct afu_cmd *cmd; u32 port_sel = scp->device->channel + 1; short lflag = 0; struct Scsi_Host *host = scp->device->host; struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata; struct device *dev = &cfg->dev->dev; ulong lock_flags; int rc = 0; ulong to; cmd = cmd_checkout(afu); if (unlikely(!cmd)) { dev_err(dev, "%s: could not get a free command\n", __func__); rc = SCSI_MLQUEUE_HOST_BUSY; goto out; } /* When Task Management Function is active do not send another */ spin_lock_irqsave(&cfg->tmf_slock, lock_flags); if (cfg->tmf_active) wait_event_interruptible_lock_irq(cfg->tmf_waitq, !cfg->tmf_active, cfg->tmf_slock); cfg->tmf_active = true; cmd->cmd_tmf = true; spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); cmd->rcb.ctx_id = afu->ctx_hndl; cmd->rcb.port_sel = port_sel; cmd->rcb.lun_id = lun_to_lunid(scp->device->lun); lflag = SISL_REQ_FLAGS_TMF_CMD; cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID | SISL_REQ_FLAGS_SUP_UNDERRUN | lflag); /* Stash the scp in the reserved field, for reuse during interrupt */ cmd->rcb.scp = scp; /* Copy the CDB from the cmd passed in */ memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd)); /* Send the command */ rc = send_cmd(afu, cmd); if (unlikely(rc)) { cmd_checkin(cmd); spin_lock_irqsave(&cfg->tmf_slock, lock_flags); cfg->tmf_active = false; spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); goto out; } spin_lock_irqsave(&cfg->tmf_slock, lock_flags); to = msecs_to_jiffies(5000); to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq, !cfg->tmf_active, cfg->tmf_slock, to); if (!to) { cfg->tmf_active = false; dev_err(dev, "%s: TMF timed out!\n", __func__); rc = -1; } spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); out: return rc; } static void afu_unmap(struct kref *ref) { struct afu *afu = container_of(ref, struct afu, mapcount); if (likely(afu->afu_map)) { 
cxl_psa_unmap((void __iomem *)afu->afu_map); afu->afu_map = NULL; } } /** * cxlflash_driver_info() - information handler for this host driver * @host: SCSI host associated with device. * * Return: A string describing the device. */ static const char *cxlflash_driver_info(struct Scsi_Host *host) { return CXLFLASH_ADAPTER_NAME; } /** * cxlflash_queuecommand() - sends a mid-layer request * @host: SCSI host associated with device. * @scp: SCSI command to send. * * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure */ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) { struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata; struct afu *afu = cfg->afu; struct device *dev = &cfg->dev->dev; struct afu_cmd *cmd; u32 port_sel = scp->device->channel + 1; int nseg, i, ncount; struct scatterlist *sg; ulong lock_flags; short lflag = 0; int rc = 0; int kref_got = 0; dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu " "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp, host->host_no, scp->device->channel, scp->device->id, scp->device->lun, get_unaligned_be32(&((u32 *)scp->cmnd)[0]), get_unaligned_be32(&((u32 *)scp->cmnd)[1]), get_unaligned_be32(&((u32 *)scp->cmnd)[2]), get_unaligned_be32(&((u32 *)scp->cmnd)[3])); /* * If a Task Management Function is active, wait for it to complete * before continuing with regular commands. 
*/ spin_lock_irqsave(&cfg->tmf_slock, lock_flags); if (cfg->tmf_active) { spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); rc = SCSI_MLQUEUE_HOST_BUSY; goto out; } spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); switch (cfg->state) { case STATE_RESET: dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__); rc = SCSI_MLQUEUE_HOST_BUSY; goto out; case STATE_FAILTERM: dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__); scp->result = (DID_NO_CONNECT << 16); scp->scsi_done(scp); rc = 0; goto out; default: break; } cmd = cmd_checkout(afu); if (unlikely(!cmd)) { dev_err(dev, "%s: could not get a free command\n", __func__); rc = SCSI_MLQUEUE_HOST_BUSY; goto out; } kref_get(&cfg->afu->mapcount); kref_got = 1; cmd->rcb.ctx_id = afu->ctx_hndl; cmd->rcb.port_sel = port_sel; cmd->rcb.lun_id = lun_to_lunid(scp->device->lun); if (scp->sc_data_direction == DMA_TO_DEVICE) lflag = SISL_REQ_FLAGS_HOST_WRITE; else lflag = SISL_REQ_FLAGS_HOST_READ; cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID | SISL_REQ_FLAGS_SUP_UNDERRUN | lflag); /* Stash the scp in the reserved field, for reuse during interrupt */ cmd->rcb.scp = scp; nseg = scsi_dma_map(scp); if (unlikely(nseg < 0)) { dev_err(dev, "%s: Fail DMA map! nseg=%d\n", __func__, nseg); rc = SCSI_MLQUEUE_HOST_BUSY; goto out; } ncount = scsi_sg_count(scp); scsi_for_each_sg(scp, sg, ncount, i) { cmd->rcb.data_len = sg_dma_len(sg); cmd->rcb.data_ea = sg_dma_address(sg); } /* Copy the CDB from the scsi_cmnd passed in */ memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb)); /* Send the command */ rc = send_cmd(afu, cmd); if (unlikely(rc)) { cmd_checkin(cmd); scsi_dma_unmap(scp); } out: if (kref_got) kref_put(&afu->mapcount, afu_unmap); pr_devel("%s: returning rc=%d\n", __func__, rc); return rc; } /** * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe * @cfg: Internal structure associated with the host. 
*/ static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg) { struct pci_dev *pdev = cfg->dev; if (pci_channel_offline(pdev)) wait_event_timeout(cfg->reset_waitq, !pci_channel_offline(pdev), CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT); } /** * free_mem() - free memory associated with the AFU * @cfg: Internal structure associated with the host. */ static void free_mem(struct cxlflash_cfg *cfg) { int i; char *buf = NULL; struct afu *afu = cfg->afu; if (cfg->afu) { for (i = 0; i < CXLFLASH_NUM_CMDS; i++) { buf = afu->cmd[i].buf; if (!((u64)buf & (PAGE_SIZE - 1))) free_page((ulong)buf); } free_pages((ulong)afu, get_order(sizeof(struct afu))); cfg->afu = NULL; } } /** * stop_afu() - stops the AFU command timers and unmaps the MMIO space * @cfg: Internal structure associated with the host. * * Safe to call with AFU in a partially allocated/initialized state. * * Cleans up all state associated with the command queue, and unmaps * the MMIO space. * * - complete() will take care of commands we initiated (they'll be checked * in as part of the cleanup that occurs after the completion) * * - cmd_checkin() will take care of entries that we did not initiate and that * have not (and will not) complete because they are sitting on a [now stale] * hardware queue */ static void stop_afu(struct cxlflash_cfg *cfg) { int i; struct afu *afu = cfg->afu; struct afu_cmd *cmd; if (likely(afu)) { for (i = 0; i < CXLFLASH_NUM_CMDS; i++) { cmd = &afu->cmd[i]; complete(&cmd->cevent); if (!atomic_read(&cmd->free)) cmd_checkin(cmd); } if (likely(afu->afu_map)) { cxl_psa_unmap((void __iomem *)afu->afu_map); afu->afu_map = NULL; } kref_put(&afu->mapcount, afu_unmap); } } /** * term_intr() - disables all AFU interrupts * @cfg: Internal structure associated with the host. * @level: Depth of allocation, where to begin waterfall tear down. * * Safe to call with AFU/MC in partially allocated/initialized state. 
*/ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level) { struct afu *afu = cfg->afu; struct device *dev = &cfg->dev->dev; if (!afu || !cfg->mcctx) { dev_err(dev, "%s: returning with NULL afu or MC\n", __func__); return; } switch (level) { case UNMAP_THREE: cxl_unmap_afu_irq(cfg->mcctx, 3, afu); case UNMAP_TWO: cxl_unmap_afu_irq(cfg->mcctx, 2, afu); case UNMAP_ONE: cxl_unmap_afu_irq(cfg->mcctx, 1, afu); case FREE_IRQ: cxl_free_afu_irqs(cfg->mcctx); /* fall through */ case UNDO_NOOP: /* No action required */ break; } } /** * term_mc() - terminates the master context * @cfg: Internal structure associated with the host. * @level: Depth of allocation, where to begin waterfall tear down. * * Safe to call with AFU/MC in partially allocated/initialized state. */ static void term_mc(struct cxlflash_cfg *cfg) { int rc = 0; struct afu *afu = cfg->afu; struct device *dev = &cfg->dev->dev; if (!afu || !cfg->mcctx) { dev_err(dev, "%s: returning with NULL afu or MC\n", __func__); return; } rc = cxl_stop_context(cfg->mcctx); WARN_ON(rc); cfg->mcctx = NULL; } /** * term_afu() - terminates the AFU * @cfg: Internal structure associated with the host. * * Safe to call with AFU/MC in partially allocated/initialized state. */ static void term_afu(struct cxlflash_cfg *cfg) { /* * Tear down is carefully orchestrated to ensure * no interrupts can come in when the problem state * area is unmapped. * * 1) Disable all AFU interrupts * 2) Unmap the problem state area * 3) Stop the master context */ term_intr(cfg, UNMAP_THREE); if (cfg->afu) stop_afu(cfg); term_mc(cfg); pr_debug("%s: returning\n", __func__); } /** * cxlflash_remove() - PCI entry point to tear down host * @pdev: PCI device associated with the host. * * Safe to use as a cleanup in partially allocated/initialized state. 
*/ static void cxlflash_remove(struct pci_dev *pdev) { struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); ulong lock_flags; /* If a Task Management Function is active, wait for it to complete * before continuing with remove. */ spin_lock_irqsave(&cfg->tmf_slock, lock_flags); if (cfg->tmf_active) wait_event_interruptible_lock_irq(cfg->tmf_waitq, !cfg->tmf_active, cfg->tmf_slock); spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); cfg->state = STATE_FAILTERM; cxlflash_stop_term_user_contexts(cfg); switch (cfg->init_state) { case INIT_STATE_SCSI: cxlflash_term_local_luns(cfg); scsi_remove_host(cfg->host); /* fall through */ case INIT_STATE_AFU: cancel_work_sync(&cfg->work_q); term_afu(cfg); case INIT_STATE_PCI: pci_disable_device(pdev); case INIT_STATE_NONE: free_mem(cfg); scsi_host_put(cfg->host); break; } pr_debug("%s: returning\n", __func__); } /** * alloc_mem() - allocates the AFU and its command pool * @cfg: Internal structure associated with the host. * * A partially allocated state remains on failure. * * Return: * 0 on success * -ENOMEM on failure to allocate memory */ static int alloc_mem(struct cxlflash_cfg *cfg) { int rc = 0; int i; char *buf = NULL; struct device *dev = &cfg->dev->dev; /* AFU is ~12k, i.e. 
only one 64k page or up to four 4k pages */ cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(sizeof(struct afu))); if (unlikely(!cfg->afu)) { dev_err(dev, "%s: cannot get %d free pages\n", __func__, get_order(sizeof(struct afu))); rc = -ENOMEM; goto out; } cfg->afu->parent = cfg; cfg->afu->afu_map = NULL; for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) { if (!((u64)buf & (PAGE_SIZE - 1))) { buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); if (unlikely(!buf)) { dev_err(dev, "%s: Allocate command buffers fail!\n", __func__); rc = -ENOMEM; free_mem(cfg); goto out; } } cfg->afu->cmd[i].buf = buf; atomic_set(&cfg->afu->cmd[i].free, 1); cfg->afu->cmd[i].slot = i; } out: return rc; } /** * init_pci() - initializes the host as a PCI device * @cfg: Internal structure associated with the host. * * Return: 0 on success, -errno on failure */ static int init_pci(struct cxlflash_cfg *cfg) { struct pci_dev *pdev = cfg->dev; int rc = 0; rc = pci_enable_device(pdev); if (rc || pci_channel_offline(pdev)) { if (pci_channel_offline(pdev)) { cxlflash_wait_for_pci_err_recovery(cfg); rc = pci_enable_device(pdev); } if (rc) { dev_err(&pdev->dev, "%s: Cannot enable adapter\n", __func__); cxlflash_wait_for_pci_err_recovery(cfg); goto out; } } out: pr_debug("%s: returning rc=%d\n", __func__, rc); return rc; } /** * init_scsi() - adds the host to the SCSI stack and kicks off host scan * @cfg: Internal structure associated with the host. * * Return: 0 on success, -errno on failure */ static int init_scsi(struct cxlflash_cfg *cfg) { struct pci_dev *pdev = cfg->dev; int rc = 0; rc = scsi_add_host(cfg->host, &pdev->dev); if (rc) { dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n", __func__, rc); goto out; } scsi_scan_host(cfg->host); out: pr_debug("%s: returning rc=%d\n", __func__, rc); return rc; } /** * set_port_online() - transitions the specified host FC port to online state * @fc_regs: Top of MMIO region defined for specified port. 
* * The provided MMIO region must be mapped prior to call. Online state means * that the FC link layer has synced, completed the handshaking process, and * is ready for login to start. */ static void set_port_online(__be64 __iomem *fc_regs) { u64 cmdcfg; cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]); cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */ cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE); /* set ON_LINE */ writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]); } /** * set_port_offline() - transitions the specified host FC port to offline state * @fc_regs: Top of MMIO region defined for specified port. * * The provided MMIO region must be mapped prior to call. */ static void set_port_offline(__be64 __iomem *fc_regs) { u64 cmdcfg; cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]); cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE); /* clear ON_LINE */ cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE); /* set OFF_LINE */ writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]); } /** * wait_port_online() - waits for the specified host FC port come online * @fc_regs: Top of MMIO region defined for specified port. * @delay_us: Number of microseconds to delay between reading port status. * @nretry: Number of cycles to retry reading port status. * * The provided MMIO region must be mapped prior to call. This will timeout * when the cable is not plugged in. 
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 *	-EINVAL when @delay_us is less than 1000
 *
 * NOTE(review): -EINVAL is non-zero, so callers that only test
 * !wait_port_online() will treat an invalid delay as success — all callers
 * in this file pass the FC_PORT_STATUS_RETRY_INTERVAL_US constant, which
 * avoids the issue, but new callers should beware.
 */
static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	/* msleep() granularity is 1ms, so sub-millisecond delays are
	 * rejected outright */
	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	/* Poll the MTIP status register until ONLINE or retries exhausted */
	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port go offline
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	/* Poll the MTIP status register until OFFLINE or retries exhausted */
	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @wwpn:	The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online.
 This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 *
 * Return: 0 always. Failures to quiesce the port (and therefore program the
 * WWPN) are logged and overridden so the port is always brought back online.
 */
static int afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			u64 wwpn)
{
	int rc = 0;

	set_port_offline(fc_regs);

	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		pr_debug("%s: wait on port %d to go offline timed out\n",
			 __func__, port);
		rc = -1; /* but continue on to leave the port back online */
	}

	/* Only program the WWPN when the port actually quiesced */
	if (rc == 0)
		writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	/* Always return success after programming WWPN */
	rc = 0;

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);

	return rc;
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching to host to use
 * the alternate port exclusively while the reset takes place.
 * failure to come online is overridden.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	/* Global sync so the AFU picks up the new port selection before
	 * the port is bounced */
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go offline timed out\n",
		       __func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
}

/*
 * Asynchronous interrupt information table
 *
 * Each entry: status bit, human-readable description, owning FC port,
 * and the action flags to take when the bit is raised.
 */
static const struct asyc_intr_info ainfo[] = {
	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, SCAN_HOST},
	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, SCAN_HOST},
	{0x0, "", 0, 0}		/* terminator */
};

/**
 * find_ainfo() - locates and returns asynchronous interrupt information
 * @status:	Status code set by AFU on error.
 *
 * Return: The located information or NULL when the status code is invalid.
 */
static const struct asyc_intr_info *find_ainfo(u64 status)
{
	const struct asyc_intr_info *info;

	/* Linear scan; the table is terminated by a zero status entry */
	for (info = &ainfo[0]; info->status; info++)
		if (info->status == status)
			return info;

	return NULL;
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:	AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	int i;
	u64 reg;

	/* global async interrupts: AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents
	 * the AFU from sending further async interrupts when
	 * there is
	 * nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to master context */
	reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		writeq_be(0xFFFFFFFFU,
			  &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */
	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
	writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
}

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&afu->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	/* Nothing of interest pending — spurious interrupt */
	if (reg_unmasked == 0UL) {
		pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
		       __func__, (u64)afu, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
	       __func__, (u64)afu, reg);

	/* Acknowledge only the bits we observed unmasked */
	writeq_be(reg_unmasked, &afu->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
	return IRQ_HANDLED;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct afu_cmd *cmd;
	bool toggle = afu->toggle;
	u64 entry,
	    *hrrq_start = afu->hrrq_start,
	    *hrrq_end = afu->hrrq_end,
	    *hrrq_curr = afu->hrrq_curr;

	/* Process however many RRQ entries that are ready */
	while (true) {
		entry = *hrrq_curr;

		/* The toggle bit indicates whether this entry belongs to the
		 * current generation of the ring; stop at the first stale one */
		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		/* Entry payload is the command pointer with the toggle bit
		 * masked off */
		cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
		cmd_complete(cmd);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}
	}

	afu->hrrq_curr = hrrq_curr;
	afu->toggle = toggle;

	return IRQ_HANDLED;
}

/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg_unmasked;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	u64 reg;
	u8 port;
	int i;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (reg_unmasked == 0) {
		dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
		info = find_ainfo(1ULL << i);
		if (((reg_unmasked & 0x1) == 0) || !info)
			continue;

		port = info->port;

		dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
			__func__, port, info->desc,
			readq_be(&global->fc_regs[port][FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			/* Hold a map reference for the worker; released by
			 * the work handler */
			kref_get(&cfg->afu->mapcount);
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */
			dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n",
				__func__, port, reg);

			writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
			writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			kref_get(&cfg->afu->mapcount);
			schedule_work(&cfg->work_q);
		}
	}

out:
	dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
	return IRQ_HANDLED;
}

/**
 * start_context() - starts the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg)
{
	int rc = 0;

	rc = cxl_start_context(cfg->mcctx,
			       cfg->afu->work.work_element_descriptor,
			       NULL);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg:	Internal structure associated with the host.
 * @wwpn:	Array of size NUM_FC_PORTS to pass back WWPNs
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct pci_dev *dev = cfg->dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };

	/* Get the VPD data from the device */
	vpd_size = cxl_read_adapter_vpd(dev, vpd_data, sizeof(vpd_data));
	if (unlikely(vpd_size <= 0)) {
		dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
				    PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(&dev->dev, "%s: VPD Read-only data not found\n",
			__func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section size, cap when extends beyond read VPD */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
			 __func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 */
	for (k = 0; k < NUM_FC_PORTS; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (unlikely(i < 0)) {
			dev_err(&dev->dev, "%s: Port %d WWPN not found "
				"in VPD\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(&dev->dev, "%s: Port %d WWPN incomplete or "
				"VPD corrupt\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
		/* NOTE(review): WWPN_LEN is passed as the *base* argument of
		 * kstrtoul(); this only works because WWPN_LEN happens to be
		 * 16 (hex, correct for a WWPN). Prefer an explicit 16 —
		 * TODO confirm intent. */
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(&dev->dev, "%s: Fail to convert port %d WWPN "
				"to integer\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg:	Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/* Disrupt any clients that could be running */
		/* e.g.
		   clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* Copy frequently used fields into afu */
	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;

	/* Program the Endian Control for the master context */
	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);

	/* Initialize cmd fields that never change */
	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
		afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
		afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
		afu->cmd[i].rcb.rrq = 0x0;
	}
}

/**
 * init_global() - initialize AFU global registers
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	u64 wwpn[NUM_FC_PORTS];	/* wwpn of AFU ports */
	int i = 0, num_ports = 0;
	int rc = 0;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);

	/* Set up RRQ in AFU for master issued cmds */
	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);

	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
	/* enable all auto retry options and control endianness */
	/* leave others at default: */
	/* CTX_CAP write protected, mbox_r does not clear on read and */
	/* checker on if dual afu */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS - 1;
	} else {
		writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
		num_ports =
			   NUM_FC_PORTS;
	}

	for (i = 0; i < num_ports; i++) {
		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
		/* Clear CRC error cnt & set a threshold */
		(void)readq_be(&afu->afu_map->global.
			       fc_regs[i][FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
			  [FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0 &&
		    afu_set_wwpn(afu, i,
				 &afu->afu_map->global.fc_regs[i][0],
				 wwpn[i])) {
			dev_err(dev, "%s: failed to set WWPN on port %d\n",
				__func__, i);
			rc = -EIO;
			goto out;
		}
		/* Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	/* Set up master's own CTX_CAP to allow real mode, host translation */
	/* tables, afu cmds and read/write GSCSI cmds. */
	/* First, unlock ctx_cap write by reading mbox */
	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
		  &afu->ctrl_map->ctx_cap);
	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);

out:
	return rc;
}

/**
 * start_afu() - initializes and starts the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, negative errno on failure (from init_global()).
 */
static int start_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd;

	int i = 0;
	int rc = 0;

	/* Prepare per-command synchronization primitives */
	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
		cmd = &afu->cmd[i];

		init_completion(&cmd->cevent);
		spin_lock_init(&cmd->slock);
		cmd->parent = afu;
	}

	init_pcr(cfg);

	/* After an AFU reset, RRQ entries are stale, clear them */
	memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));

	/* Initialize RRQ pointers */
	afu->hrrq_start = &afu->rrq_entry[0];
	afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
	afu->hrrq_curr = afu->hrrq_start;
	afu->toggle = 1;

	rc = init_global(cfg);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_intr() - setup interrupt handlers for the master context
 * @cfg:	Internal structure associated with the host.
 * @ctx:	CXL context to map the interrupts against.
 *
 * Return: UNDO_NOOP on success; otherwise the undo level describing how
 * far interrupt setup progressed before failing (used by term_intr()).
 */
static enum undo_level init_intr(struct cxlflash_cfg *cfg,
				 struct cxl_context *ctx)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	enum undo_level level = UNDO_NOOP;

	rc = cxl_allocate_afu_irqs(ctx, 3);
	if (unlikely(rc)) {
		dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
			__func__, rc);
		level = UNDO_NOOP;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
			     "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
			__func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
			     "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
			__func__);
		level = UNMAP_ONE;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
			     "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
			__func__);
		level = UNMAP_TWO;
		goto out;
	}
out:
	return level;
}

/**
 * init_mc() - create and register as the master context
 * @cfg:	Internal structure associated with the host.
* * Return: 0 on success, -errno on failure */ static int init_mc(struct cxlflash_cfg *cfg) { struct cxl_context *ctx; struct device *dev = &cfg->dev->dev; int rc = 0; enum undo_level level; ctx = cxl_get_context(cfg->dev); if (unlikely(!ctx)) { rc = -ENOMEM; goto ret; } cfg->mcctx = ctx; /* Set it up as a master with the CXL */ cxl_set_master(ctx); /* During initialization reset the AFU to start from a clean slate */ rc = cxl_afu_reset(cfg->mcctx); if (unlikely(rc)) { dev_err(dev, "%s: initial AFU reset failed rc=%d\n", __func__, rc); goto ret; } level = init_intr(cfg, ctx); if (unlikely(level)) { dev_err(dev, "%s: setting up interrupts failed rc=%d\n", __func__, rc); goto out; } /* This performs the equivalent of the CXL_IOCTL_START_WORK. * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process * element (pe) that is embedded in the context (ctx) */ rc = start_context(cfg); if (unlikely(rc)) { dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc); level = UNMAP_THREE; goto out; } ret: pr_debug("%s: returning rc=%d\n", __func__, rc); return rc; out: term_intr(cfg, level); goto ret; } /** * init_afu() - setup as master context and start AFU * @cfg: Internal structure associated with the host. * * This routine is a higher level of control for configuring the * AFU on probe and reset paths. 
 *
 * Return: 0 on success, -errno on failure
 */
static int init_afu(struct cxlflash_cfg *cfg)
{
	u64 reg;
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	cxl_perst_reloads_same_image(cfg->cxl_afu, true);

	rc = init_mc(cfg);
	if (rc) {
		dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
			__func__, rc);
		goto out;
	}

	/* Map the entire MMIO space of the AFU */
	afu->afu_map = cxl_psa_map(cfg->mcctx);
	if (!afu->afu_map) {
		dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}
	kref_init(&afu->mapcount);

	/* No byte reverse on reading afu_version or string will be backwards */
	reg = readq(&afu->afu_map->global.regs.afu_version);
	memcpy(afu->version, &reg, sizeof(reg));
	afu->interface_version =
	    readq_be(&afu->afu_map->global.regs.interface_version);
	/* All-ones interface version (reads as -1) marks a back-level AFU */
	if ((afu->interface_version + 1) == 0) {
		pr_err("Back level AFU, please upgrade. AFU version %s "
		       "interface version 0x%llx\n", afu->version,
		       afu->interface_version);
		rc = -EINVAL;
		goto err2;
	}

	pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
		 afu->version, afu->interface_version);

	rc = start_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
			__func__, rc);
		goto err2;
	}

	afu_err_intr_init(cfg->afu);
	atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));

	/* Restore the LUN mappings */
	cxlflash_restore_luntable(cfg);
out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;

err2:
	kref_put(&afu->mapcount, afu_unmap);
err1:
	term_intr(cfg, UNMAP_THREE);
	term_mc(cfg);
	goto out;
}

/**
 * cxlflash_afu_sync() - builds and sends an AFU sync command
 * @afu:	AFU associated with the host.
 * @ctx_hndl_u:	Identifies context requesting sync.
 * @res_hndl_u:	Identifies resource requesting sync.
 * @mode:	Type of sync to issue (lightweight, heavyweight, global).
 *
 * The AFU can only take 1 sync command at a time. This routine enforces this
 * limitation by using a mutex to provide exclusive access to the AFU during
 * the sync.
 This design point requires calling threads to not be on interrupt
 * context due to the possibility of sleeping during concurrent sync operations.
 *
 * AFU sync operations are only necessary and allowed when the device is
 * operating normally. When not operating normally, sync requests can occur as
 * part of cleaning up resources associated with an adapter prior to removal.
 * In this scenario, these requests are simply ignored (safe due to the AFU
 * going away).
 *
 * Return:
 *	0 on success
 *	-1 on failure
 */
int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
		      res_hndl_t res_hndl_u, u8 mode)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = NULL;
	int rc = 0;
	int retry_cnt = 0;
	static DEFINE_MUTEX(sync_active);

	if (cfg->state != STATE_NORMAL) {
		pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state);
		return 0;
	}

	mutex_lock(&sync_active);
retry:
	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		retry_cnt++;
		/* NOTE(review): busy-wait grows linearly with each retry;
		 * udelay() with millisecond-scale arguments is discouraged —
		 * consider mdelay()/msleep() here. TODO confirm. */
		udelay(1000 * retry_cnt);
		if (retry_cnt < MC_RETRY_CNT)
			goto retry;
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = -1;
		goto out;
	}

	pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);

	memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));

	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	cmd->rcb.port_sel = 0x0;	/* NA */
	cmd->rcb.lun_id = 0x0;	/* NA */
	cmd->rcb.data_len = 0x0;
	cmd->rcb.data_ea = 0x0;
	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;

	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
	cmd->rcb.cdb[1] = mode;

	/* The cdb is aligned, no unaligned accessors required */
	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);

	rc = send_cmd(afu, cmd);
	if (unlikely(rc))
		goto out;

	wait_resp(afu, cmd);

	/* Set on timeout */
	if (unlikely((cmd->sa.ioasc != 0) ||
		     (cmd->sa.host_use_b[0] & B_ERROR)))
		rc = -1;
out:
	mutex_unlock(&sync_active);
	if (cmd)
		cmd_checkin(cmd);
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * afu_reset()
 - resets the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_reset(struct cxlflash_cfg *cfg)
{
	int rc = 0;

	/* Stop the context before the reset. Since the context is
	 * no longer available restart it after the reset is complete
	 */
	term_afu(cfg);

	rc = init_afu(cfg);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_eh_device_reset_handler() - reset a single LUN
 * @scp:	SCSI command to send.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct afu *afu = cfg->afu;
	int rcr = 0;

	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
		 host->host_no, scp->device->channel,
		 scp->device->id, scp->device->lun,
		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		/* Issue a LUN reset task management function to the device */
		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
		if (unlikely(rcr))
			rc = FAILED;
		break;
	case STATE_RESET:
		/* An adapter reset is in flight; wait for it to settle and
		 * re-evaluate the state */
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		goto retry;
	default:
		rc = FAILED;
		break;
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_eh_host_reset_handler() - reset the host adapter
 * @scp:	SCSI command from stack identifying host.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	int rcr = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;

	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
		 host->host_no, scp->device->channel,
		 scp->device->id, scp->device->lun,
		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	switch (cfg->state) {
	case STATE_NORMAL:
		/* Take ownership of the reset: transition to STATE_RESET so
		 * concurrent resets block on reset_waitq below */
		cfg->state = STATE_RESET;
		cxlflash_mark_contexts_error(cfg);
		rcr = afu_reset(cfg);
		if (rcr) {
			rc = FAILED;
			cfg->state = STATE_FAILTERM;
		} else
			cfg->state = STATE_NORMAL;
		wake_up_all(&cfg->reset_waitq);
		break;
	case STATE_RESET:
		/* Another thread owns the reset; piggyback on its outcome */
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			break;
		/* fall through */
	default:
		rc = FAILED;
		break;
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_change_queue_depth() - change the queue depth for the device
 * @sdev:	SCSI device destined for queue depth change.
 * @qdepth:	Requested queue depth value to set.
 *
 * The requested queue depth is capped to the maximum supported value.
 *
 * Return: The actual queue depth set.
 */
static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
{

	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}

/**
 * cxlflash_show_port_status() - queries and presents the current port status
 * @port:	Desired port for status reporting.
 * @afu:	AFU owning the specified port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
*/ static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf) { char *disp_status; u64 status; __be64 __iomem *fc_regs; if (port >= NUM_FC_PORTS) return 0; fc_regs = &afu->afu_map->global.fc_regs[port][0]; status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]); status &= FC_MTIP_STATUS_MASK; if (status == FC_MTIP_STATUS_ONLINE) disp_status = "online"; else if (status == FC_MTIP_STATUS_OFFLINE) disp_status = "offline"; else disp_status = "unknown"; return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status); } /** * port0_show() - queries and presents the current status of port 0 * @dev: Generic device associated with the host owning the port. * @attr: Device attribute representing the port. * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. * * Return: The size of the ASCII string returned in @buf. */ static ssize_t port0_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; struct afu *afu = cfg->afu; return cxlflash_show_port_status(0, afu, buf); } /** * port1_show() - queries and presents the current status of port 1 * @dev: Generic device associated with the host owning the port. * @attr: Device attribute representing the port. * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. * * Return: The size of the ASCII string returned in @buf. */ static ssize_t port1_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; struct afu *afu = cfg->afu; return cxlflash_show_port_status(1, afu, buf); } /** * lun_mode_show() - presents the current LUN mode of the host * @dev: Generic device associated with the host. * @attr: Device attribute representing the LUN mode. * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII. 
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t lun_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
}

/**
 * lun_mode_store() - sets the LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count:	Length of data resizing in @buf.
 *
 * The CXL Flash AFU supports a dummy LUN mode where the external
 * links and storage are not required. Space on the FPGA is used
 * to create 1 or 2 small LUNs which are presented to the system
 * as if they were a normal storage device. This feature is useful
 * during development and also provides manufacturing with a way
 * to test the AFU without an actual device.
 *
 * 0 = external LUN[s] (default)
 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t lun_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;
	int rc;
	u32 lun_mode;

	rc = kstrtouint(buf, 10, &lun_mode);
	/* Only act on a valid mode (0-4) that actually changes the setting */
	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
		afu->internal_lun = lun_mode;

		/*
		 * When configured for internal LUN, there is only one channel,
		 * channel number 0, else there will be 2 (default).
*/ if (afu->internal_lun) shost->max_channel = 0; else shost->max_channel = NUM_FC_PORTS - 1; afu_reset(cfg); scsi_scan_host(cfg->host); } return count; } /** * ioctl_version_show() - presents the current ioctl version of the host * @dev: Generic device associated with the host. * @attr: Device attribute representing the ioctl version. * @buf: Buffer of length PAGE_SIZE to report back the ioctl version. * * Return: The size of the ASCII string returned in @buf. */ static ssize_t ioctl_version_show(struct device *dev, struct device_attribute *attr, char *buf) { return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0); } /** * cxlflash_show_port_lun_table() - queries and presents the port LUN table * @port: Desired port for status reporting. * @afu: AFU owning the specified port. * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. * * Return: The size of the ASCII string returned in @buf. */ static ssize_t cxlflash_show_port_lun_table(u32 port, struct afu *afu, char *buf) { int i; ssize_t bytes = 0; __be64 __iomem *fc_port; if (port >= NUM_FC_PORTS) return 0; fc_port = &afu->afu_map->global.fc_port[port][0]; for (i = 0; i < CXLFLASH_NUM_VLUNS; i++) bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, "%03d: %016llX\n", i, readq_be(&fc_port[i])); return bytes; } /** * port0_lun_table_show() - presents the current LUN table of port 0 * @dev: Generic device associated with the host owning the port. * @attr: Device attribute representing the port. * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. * * Return: The size of the ASCII string returned in @buf. 
*/ static ssize_t port0_lun_table_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; struct afu *afu = cfg->afu; return cxlflash_show_port_lun_table(0, afu, buf); } /** * port1_lun_table_show() - presents the current LUN table of port 1 * @dev: Generic device associated with the host owning the port. * @attr: Device attribute representing the port. * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. * * Return: The size of the ASCII string returned in @buf. */ static ssize_t port1_lun_table_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; struct afu *afu = cfg->afu; return cxlflash_show_port_lun_table(1, afu, buf); } /** * mode_show() - presents the current mode of the device * @dev: Generic device associated with the device. * @attr: Device attribute representing the device mode. * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII. * * Return: The size of the ASCII string returned in @buf. */ static ssize_t mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); return scnprintf(buf, PAGE_SIZE, "%s\n", sdev->hostdata ? 
"superpipe" : "legacy"); } /* * Host attributes */ static DEVICE_ATTR_RO(port0); static DEVICE_ATTR_RO(port1); static DEVICE_ATTR_RW(lun_mode); static DEVICE_ATTR_RO(ioctl_version); static DEVICE_ATTR_RO(port0_lun_table); static DEVICE_ATTR_RO(port1_lun_table); static struct device_attribute *cxlflash_host_attrs[] = { &dev_attr_port0, &dev_attr_port1, &dev_attr_lun_mode, &dev_attr_ioctl_version, &dev_attr_port0_lun_table, &dev_attr_port1_lun_table, NULL }; /* * Device attributes */ static DEVICE_ATTR_RO(mode); static struct device_attribute *cxlflash_dev_attrs[] = { &dev_attr_mode, NULL }; /* * Host template */ static struct scsi_host_template driver_template = { .module = THIS_MODULE, .name = CXLFLASH_ADAPTER_NAME, .info = cxlflash_driver_info, .ioctl = cxlflash_ioctl, .proc_name = CXLFLASH_NAME, .queuecommand = cxlflash_queuecommand, .eh_device_reset_handler = cxlflash_eh_device_reset_handler, .eh_host_reset_handler = cxlflash_eh_host_reset_handler, .change_queue_depth = cxlflash_change_queue_depth, .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN, .can_queue = CXLFLASH_MAX_CMDS, .this_id = -1, .sg_tablesize = SG_NONE, /* No scatter gather support */ .max_sectors = CXLFLASH_MAX_SECTORS, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = cxlflash_host_attrs, .sdev_attrs = cxlflash_dev_attrs, }; /* * Device dependent values */ static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS }; static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS }; /* * PCI device binding table */ static struct pci_device_id cxlflash_pci_table[] = { {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals}, {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals}, {} }; MODULE_DEVICE_TABLE(pci, cxlflash_pci_table); /** * cxlflash_worker_thread() - work thread handler for the AFU * @work: Work structure contained within cxlflash associated with host. 
* * Handles the following events: * - Link reset which cannot be performed on interrupt context due to * blocking up to a few seconds * - Read AFU command room * - Rescan the host */ static void cxlflash_worker_thread(struct work_struct *work) { struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg, work_q); struct afu *afu = cfg->afu; struct device *dev = &cfg->dev->dev; int port; ulong lock_flags; /* Avoid MMIO if the device has failed */ if (cfg->state != STATE_NORMAL) return; spin_lock_irqsave(cfg->host->host_lock, lock_flags); if (cfg->lr_state == LINK_RESET_REQUIRED) { port = cfg->lr_port; if (port < 0) dev_err(dev, "%s: invalid port index %d\n", __func__, port); else { spin_unlock_irqrestore(cfg->host->host_lock, lock_flags); /* The reset can block... */ afu_link_reset(afu, port, &afu->afu_map->global.fc_regs[port][0]); spin_lock_irqsave(cfg->host->host_lock, lock_flags); } cfg->lr_state = LINK_RESET_COMPLETE; } if (afu->read_room) { atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room)); afu->read_room = false; } spin_unlock_irqrestore(cfg->host->host_lock, lock_flags); if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0) scsi_scan_host(cfg->host); kref_put(&afu->mapcount, afu_unmap); } /** * cxlflash_probe() - PCI entry point to add host * @pdev: PCI device associated with the host. * @dev_id: PCI device id associated with device. 
* * Return: 0 on success, -errno on failure */ static int cxlflash_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) { struct Scsi_Host *host; struct cxlflash_cfg *cfg = NULL; struct dev_dependent_vals *ddv; int rc = 0; dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n", __func__, pdev->irq); ddv = (struct dev_dependent_vals *)dev_id->driver_data; driver_template.max_sectors = ddv->max_sectors; host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg)); if (!host) { dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n", __func__); rc = -ENOMEM; goto out; } host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS; host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET; host->max_channel = NUM_FC_PORTS - 1; host->unique_id = host->host_no; host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; cfg = (struct cxlflash_cfg *)host->hostdata; cfg->host = host; rc = alloc_mem(cfg); if (rc) { dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n", __func__); rc = -ENOMEM; scsi_host_put(cfg->host); goto out; } cfg->init_state = INIT_STATE_NONE; cfg->dev = pdev; cfg->cxl_fops = cxlflash_cxl_fops; /* * The promoted LUNs move to the top of the LUN table. The rest stay * on the bottom half. The bottom half grows from the end * (index = 255), whereas the top half grows from the beginning * (index = 0). 
*/ cfg->promote_lun_index = 0; cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1; cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1; cfg->dev_id = (struct pci_device_id *)dev_id; init_waitqueue_head(&cfg->tmf_waitq); init_waitqueue_head(&cfg->reset_waitq); INIT_WORK(&cfg->work_q, cxlflash_worker_thread); cfg->lr_state = LINK_RESET_INVALID; cfg->lr_port = -1; spin_lock_init(&cfg->tmf_slock); mutex_init(&cfg->ctx_tbl_list_mutex); mutex_init(&cfg->ctx_recovery_mutex); init_rwsem(&cfg->ioctl_rwsem); INIT_LIST_HEAD(&cfg->ctx_err_recovery); INIT_LIST_HEAD(&cfg->lluns); pci_set_drvdata(pdev, cfg); cfg->cxl_afu = cxl_pci_to_afu(pdev); rc = init_pci(cfg); if (rc) { dev_err(&pdev->dev, "%s: call to init_pci " "failed rc=%d!\n", __func__, rc); goto out_remove; } cfg->init_state = INIT_STATE_PCI; rc = init_afu(cfg); if (rc) { dev_err(&pdev->dev, "%s: call to init_afu " "failed rc=%d!\n", __func__, rc); goto out_remove; } cfg->init_state = INIT_STATE_AFU; rc = init_scsi(cfg); if (rc) { dev_err(&pdev->dev, "%s: call to init_scsi " "failed rc=%d!\n", __func__, rc); goto out_remove; } cfg->init_state = INIT_STATE_SCSI; out: pr_debug("%s: returning rc=%d\n", __func__, rc); return rc; out_remove: cxlflash_remove(pdev); goto out; } /** * drain_ioctls() - wait until all currently executing ioctls have completed * @cfg: Internal structure associated with the host. * * Obtain write access to read/write semaphore that wraps ioctl * handling to 'drain' ioctls currently executing. */ static void drain_ioctls(struct cxlflash_cfg *cfg) { down_write(&cfg->ioctl_rwsem); up_write(&cfg->ioctl_rwsem); } /** * cxlflash_pci_error_detected() - called when a PCI error is detected * @pdev: PCI device struct. * @state: PCI channel state. 
* * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT */ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { int rc = 0; struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); struct device *dev = &cfg->dev->dev; dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state); switch (state) { case pci_channel_io_frozen: cfg->state = STATE_RESET; scsi_block_requests(cfg->host); drain_ioctls(cfg); rc = cxlflash_mark_contexts_error(cfg); if (unlikely(rc)) dev_err(dev, "%s: Failed to mark user contexts!(%d)\n", __func__, rc); term_afu(cfg); return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: cfg->state = STATE_FAILTERM; wake_up_all(&cfg->reset_waitq); scsi_unblock_requests(cfg->host); return PCI_ERS_RESULT_DISCONNECT; default: break; } return PCI_ERS_RESULT_NEED_RESET; } /** * cxlflash_pci_slot_reset() - called when PCI slot has been reset * @pdev: PCI device struct. * * This routine is called by the pci error recovery code after the PCI * slot has been reset, just before we should resume normal operations. * * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT */ static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev) { int rc = 0; struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); struct device *dev = &cfg->dev->dev; dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); rc = init_afu(cfg); if (unlikely(rc)) { dev_err(dev, "%s: EEH recovery failed! 
(%d)\n", __func__, rc); return PCI_ERS_RESULT_DISCONNECT; } return PCI_ERS_RESULT_RECOVERED; } /** * cxlflash_pci_resume() - called when normal operation can resume * @pdev: PCI device struct */ static void cxlflash_pci_resume(struct pci_dev *pdev) { struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); struct device *dev = &cfg->dev->dev; dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); cfg->state = STATE_NORMAL; wake_up_all(&cfg->reset_waitq); scsi_unblock_requests(cfg->host); } static const struct pci_error_handlers cxlflash_err_handler = { .error_detected = cxlflash_pci_error_detected, .slot_reset = cxlflash_pci_slot_reset, .resume = cxlflash_pci_resume, }; /* * PCI device structure */ static struct pci_driver cxlflash_driver = { .name = CXLFLASH_NAME, .id_table = cxlflash_pci_table, .probe = cxlflash_probe, .remove = cxlflash_remove, .err_handler = &cxlflash_err_handler, }; /** * init_cxlflash() - module entry point * * Return: 0 on success, -errno on failure */ static int __init init_cxlflash(void) { pr_info("%s: %s\n", __func__, CXLFLASH_ADAPTER_NAME); cxlflash_list_init(); return pci_register_driver(&cxlflash_driver); } /** * exit_cxlflash() - module exit point */ static void __exit exit_cxlflash(void) { cxlflash_term_global_luns(); cxlflash_free_errpage(); pci_unregister_driver(&cxlflash_driver); } module_init(init_cxlflash); module_exit(exit_cxlflash);
gpl-2.0
impl/rbppc-linux
net/ipv6/mcast.c
71
69948
/* * Multicast support for IPv6 * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ /* Changes: * * yoshfuji : fix format of router-alert option * YOSHIFUJI Hideaki @USAGI: * Fixed source address for MLD message based on * <draft-ietf-magma-mld-source-05.txt>. * YOSHIFUJI Hideaki @USAGI: * - Ignore Queries for invalid addresses. * - MLD for link-local addresses. * David L Stevens <dlstevens@us.ibm.com>: * - MLDv2 support */ #include <linux/module.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/jiffies.h> #include <linux/times.h> #include <linux/net.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/route.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/pkt_sched.h> #include <net/mld.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv6.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/if_inet6.h> #include <net/ndisc.h> #include <net/addrconf.h> #include <net/ip6_route.h> #include <net/inet_common.h> #include <net/ip6_checksum.h> /* Set to 3 to get tracing... */ #define MCAST_DEBUG 2 #if MCAST_DEBUG >= 3 #define MDBG(x) printk x #else #define MDBG(x) #endif /* Ensure that we have struct in6_addr aligned on 32bit word. 
*/ static void *__mld2_query_bugs[] __attribute__((__unused__)) = { BUILD_BUG_ON_NULL(offsetof(struct mld2_query, mld2q_srcs) % 4), BUILD_BUG_ON_NULL(offsetof(struct mld2_report, mld2r_grec) % 4), BUILD_BUG_ON_NULL(offsetof(struct mld2_grec, grec_mca) % 4) }; static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT; /* Big mc list lock for all the sockets */ static DEFINE_SPINLOCK(ipv6_sk_mc_lock); static void igmp6_join_group(struct ifmcaddr6 *ma); static void igmp6_leave_group(struct ifmcaddr6 *ma); static void igmp6_timer_handler(unsigned long data); static void mld_gq_timer_expire(unsigned long data); static void mld_ifc_timer_expire(unsigned long data); static void mld_ifc_event(struct inet6_dev *idev); static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc); static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr); static void mld_clear_delrec(struct inet6_dev *idev); static bool mld_in_v1_mode(const struct inet6_dev *idev); static int sf_setstate(struct ifmcaddr6 *pmc); static void sf_markstate(struct ifmcaddr6 *pmc); static void ip6_mc_clear_src(struct ifmcaddr6 *pmc); static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca, int sfmode, int sfcount, const struct in6_addr *psfsrc, int delta); static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca, int sfmode, int sfcount, const struct in6_addr *psfsrc, int delta); static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, struct inet6_dev *idev); #define MLD_QRV_DEFAULT 2 /* RFC3810, 9.2. Query Interval */ #define MLD_QI_DEFAULT (125 * HZ) /* RFC3810, 9.3. 
Query Response Interval */ #define MLD_QRI_DEFAULT (10 * HZ) /* RFC3810, 8.1 Query Version Distinctions */ #define MLD_V1_QUERY_LEN 24 #define MLD_V2_QUERY_LEN_MIN 28 #define IPV6_MLD_MAX_MSF 64 int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF; /* * socket join on multicast group */ #define for_each_pmc_rcu(np, pmc) \ for (pmc = rcu_dereference(np->ipv6_mc_list); \ pmc != NULL; \ pmc = rcu_dereference(pmc->next)) static int unsolicited_report_interval(struct inet6_dev *idev) { int iv; if (mld_in_v1_mode(idev)) iv = idev->cnf.mldv1_unsolicited_report_interval; else iv = idev->cnf.mldv2_unsolicited_report_interval; return iv > 0 ? iv : 1; } int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) { struct net_device *dev = NULL; struct ipv6_mc_socklist *mc_lst; struct ipv6_pinfo *np = inet6_sk(sk); struct net *net = sock_net(sk); int err; if (!ipv6_addr_is_multicast(addr)) return -EINVAL; rcu_read_lock(); for_each_pmc_rcu(np, mc_lst) { if ((ifindex == 0 || mc_lst->ifindex == ifindex) && ipv6_addr_equal(&mc_lst->addr, addr)) { rcu_read_unlock(); return -EADDRINUSE; } } rcu_read_unlock(); mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL); if (mc_lst == NULL) return -ENOMEM; mc_lst->next = NULL; mc_lst->addr = *addr; rcu_read_lock(); if (ifindex == 0) { struct rt6_info *rt; rt = rt6_lookup(net, addr, NULL, 0, 0); if (rt) { dev = rt->dst.dev; ip6_rt_put(rt); } } else dev = dev_get_by_index_rcu(net, ifindex); if (dev == NULL) { rcu_read_unlock(); sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); return -ENODEV; } mc_lst->ifindex = dev->ifindex; mc_lst->sfmode = MCAST_EXCLUDE; rwlock_init(&mc_lst->sflock); mc_lst->sflist = NULL; /* * now add/increase the group membership on the device */ err = ipv6_dev_mc_inc(dev, addr); if (err) { rcu_read_unlock(); sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); return err; } spin_lock(&ipv6_sk_mc_lock); mc_lst->next = np->ipv6_mc_list; rcu_assign_pointer(np->ipv6_mc_list, mc_lst); 
spin_unlock(&ipv6_sk_mc_lock); rcu_read_unlock(); return 0; } /* * socket leave on multicast group */ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_mc_socklist *mc_lst; struct ipv6_mc_socklist __rcu **lnk; struct net *net = sock_net(sk); if (!ipv6_addr_is_multicast(addr)) return -EINVAL; spin_lock(&ipv6_sk_mc_lock); for (lnk = &np->ipv6_mc_list; (mc_lst = rcu_dereference_protected(*lnk, lockdep_is_held(&ipv6_sk_mc_lock))) !=NULL ; lnk = &mc_lst->next) { if ((ifindex == 0 || mc_lst->ifindex == ifindex) && ipv6_addr_equal(&mc_lst->addr, addr)) { struct net_device *dev; *lnk = mc_lst->next; spin_unlock(&ipv6_sk_mc_lock); rcu_read_lock(); dev = dev_get_by_index_rcu(net, mc_lst->ifindex); if (dev != NULL) { struct inet6_dev *idev = __in6_dev_get(dev); (void) ip6_mc_leave_src(sk, mc_lst, idev); if (idev) __ipv6_dev_mc_dec(idev, &mc_lst->addr); } else (void) ip6_mc_leave_src(sk, mc_lst, NULL); rcu_read_unlock(); atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); kfree_rcu(mc_lst, rcu); return 0; } } spin_unlock(&ipv6_sk_mc_lock); return -EADDRNOTAVAIL; } /* called with rcu_read_lock() */ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net, const struct in6_addr *group, int ifindex) { struct net_device *dev = NULL; struct inet6_dev *idev = NULL; if (ifindex == 0) { struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0); if (rt) { dev = rt->dst.dev; ip6_rt_put(rt); } } else dev = dev_get_by_index_rcu(net, ifindex); if (!dev) return NULL; idev = __in6_dev_get(dev); if (!idev) return NULL; read_lock_bh(&idev->lock); if (idev->dead) { read_unlock_bh(&idev->lock); return NULL; } return idev; } void ipv6_sock_mc_close(struct sock *sk) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_mc_socklist *mc_lst; struct net *net = sock_net(sk); if (!rcu_access_pointer(np->ipv6_mc_list)) return; spin_lock(&ipv6_sk_mc_lock); while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list, 
lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) { struct net_device *dev; np->ipv6_mc_list = mc_lst->next; spin_unlock(&ipv6_sk_mc_lock); rcu_read_lock(); dev = dev_get_by_index_rcu(net, mc_lst->ifindex); if (dev) { struct inet6_dev *idev = __in6_dev_get(dev); (void) ip6_mc_leave_src(sk, mc_lst, idev); if (idev) __ipv6_dev_mc_dec(idev, &mc_lst->addr); } else (void) ip6_mc_leave_src(sk, mc_lst, NULL); rcu_read_unlock(); atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); kfree_rcu(mc_lst, rcu); spin_lock(&ipv6_sk_mc_lock); } spin_unlock(&ipv6_sk_mc_lock); } int ip6_mc_source(int add, int omode, struct sock *sk, struct group_source_req *pgsr) { struct in6_addr *source, *group; struct ipv6_mc_socklist *pmc; struct inet6_dev *idev; struct ipv6_pinfo *inet6 = inet6_sk(sk); struct ip6_sf_socklist *psl; struct net *net = sock_net(sk); int i, j, rv; int leavegroup = 0; int pmclocked = 0; int err; source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr; group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr; if (!ipv6_addr_is_multicast(group)) return -EINVAL; rcu_read_lock(); idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface); if (!idev) { rcu_read_unlock(); return -ENODEV; } err = -EADDRNOTAVAIL; for_each_pmc_rcu(inet6, pmc) { if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface) continue; if (ipv6_addr_equal(&pmc->addr, group)) break; } if (!pmc) { /* must have a prior join */ err = -EINVAL; goto done; } /* if a source filter was set, must be the same mode as before */ if (pmc->sflist) { if (pmc->sfmode != omode) { err = -EINVAL; goto done; } } else if (pmc->sfmode != omode) { /* allow mode switches for empty-set filters */ ip6_mc_add_src(idev, group, omode, 0, NULL, 0); ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0); pmc->sfmode = omode; } write_lock(&pmc->sflock); pmclocked = 1; psl = pmc->sflist; if (!add) { if (!psl) goto done; /* err = -EADDRNOTAVAIL */ rv = !0; for (i=0; i<psl->sl_count; i++) { rv = 
!ipv6_addr_equal(&psl->sl_addr[i], source); if (rv == 0) break; } if (rv) /* source not found */ goto done; /* err = -EADDRNOTAVAIL */ /* special case - (INCLUDE, empty) == LEAVE_GROUP */ if (psl->sl_count == 1 && omode == MCAST_INCLUDE) { leavegroup = 1; goto done; } /* update the interface filter */ ip6_mc_del_src(idev, group, omode, 1, source, 1); for (j=i+1; j<psl->sl_count; j++) psl->sl_addr[j-1] = psl->sl_addr[j]; psl->sl_count--; err = 0; goto done; } /* else, add a new source to the filter */ if (psl && psl->sl_count >= sysctl_mld_max_msf) { err = -ENOBUFS; goto done; } if (!psl || psl->sl_count == psl->sl_max) { struct ip6_sf_socklist *newpsl; int count = IP6_SFBLOCK; if (psl) count += psl->sl_max; newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC); if (!newpsl) { err = -ENOBUFS; goto done; } newpsl->sl_max = count; newpsl->sl_count = count - IP6_SFBLOCK; if (psl) { for (i=0; i<psl->sl_count; i++) newpsl->sl_addr[i] = psl->sl_addr[i]; sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max)); } pmc->sflist = psl = newpsl; } rv = 1; /* > 0 for insert logic below if sl_count is 0 */ for (i=0; i<psl->sl_count; i++) { rv = !ipv6_addr_equal(&psl->sl_addr[i], source); if (rv == 0) /* There is an error in the address. 
*/ goto done; } for (j=psl->sl_count-1; j>=i; j--) psl->sl_addr[j+1] = psl->sl_addr[j]; psl->sl_addr[i] = *source; psl->sl_count++; err = 0; /* update the interface list */ ip6_mc_add_src(idev, group, omode, 1, source, 1); done: if (pmclocked) write_unlock(&pmc->sflock); read_unlock_bh(&idev->lock); rcu_read_unlock(); if (leavegroup) return ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group); return err; } int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) { const struct in6_addr *group; struct ipv6_mc_socklist *pmc; struct inet6_dev *idev; struct ipv6_pinfo *inet6 = inet6_sk(sk); struct ip6_sf_socklist *newpsl, *psl; struct net *net = sock_net(sk); int leavegroup = 0; int i, err; group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr; if (!ipv6_addr_is_multicast(group)) return -EINVAL; if (gsf->gf_fmode != MCAST_INCLUDE && gsf->gf_fmode != MCAST_EXCLUDE) return -EINVAL; rcu_read_lock(); idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface); if (!idev) { rcu_read_unlock(); return -ENODEV; } err = 0; if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) { leavegroup = 1; goto done; } for_each_pmc_rcu(inet6, pmc) { if (pmc->ifindex != gsf->gf_interface) continue; if (ipv6_addr_equal(&pmc->addr, group)) break; } if (!pmc) { /* must have a prior join */ err = -EINVAL; goto done; } if (gsf->gf_numsrc) { newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc), GFP_ATOMIC); if (!newpsl) { err = -ENOBUFS; goto done; } newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc; for (i=0; i<newpsl->sl_count; ++i) { struct sockaddr_in6 *psin6; psin6 = (struct sockaddr_in6 *)&gsf->gf_slist[i]; newpsl->sl_addr[i] = psin6->sin6_addr; } err = ip6_mc_add_src(idev, group, gsf->gf_fmode, newpsl->sl_count, newpsl->sl_addr, 0); if (err) { sock_kfree_s(sk, newpsl, IP6_SFLSIZE(newpsl->sl_max)); goto done; } } else { newpsl = NULL; (void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0); } write_lock(&pmc->sflock); psl = pmc->sflist; if (psl) { (void) 
ip6_mc_del_src(idev, group, pmc->sfmode, psl->sl_count, psl->sl_addr, 0); sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max)); } else (void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0); pmc->sflist = newpsl; pmc->sfmode = gsf->gf_fmode; write_unlock(&pmc->sflock); err = 0; done: read_unlock_bh(&idev->lock); rcu_read_unlock(); if (leavegroup) err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group); return err; } int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, struct group_filter __user *optval, int __user *optlen) { int err, i, count, copycount; const struct in6_addr *group; struct ipv6_mc_socklist *pmc; struct inet6_dev *idev; struct ipv6_pinfo *inet6 = inet6_sk(sk); struct ip6_sf_socklist *psl; struct net *net = sock_net(sk); group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr; if (!ipv6_addr_is_multicast(group)) return -EINVAL; rcu_read_lock(); idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface); if (!idev) { rcu_read_unlock(); return -ENODEV; } err = -EADDRNOTAVAIL; /* * changes to the ipv6_mc_list require the socket lock and * a read lock on ip6_sk_mc_lock. We have the socket lock, * so reading the list is safe. */ for_each_pmc_rcu(inet6, pmc) { if (pmc->ifindex != gsf->gf_interface) continue; if (ipv6_addr_equal(group, &pmc->addr)) break; } if (!pmc) /* must have a prior join */ goto done; gsf->gf_fmode = pmc->sfmode; psl = pmc->sflist; count = psl ? psl->sl_count : 0; read_unlock_bh(&idev->lock); rcu_read_unlock(); copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc; gsf->gf_numsrc = count; if (put_user(GROUP_FILTER_SIZE(copycount), optlen) || copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) { return -EFAULT; } /* changes to psl require the socket lock, a read lock on * on ipv6_sk_mc_lock and a write lock on pmc->sflock. We * have the socket lock, so reading here is safe. 
*/ for (i=0; i<copycount; i++) { struct sockaddr_in6 *psin6; struct sockaddr_storage ss; psin6 = (struct sockaddr_in6 *)&ss; memset(&ss, 0, sizeof(ss)); psin6->sin6_family = AF_INET6; psin6->sin6_addr = psl->sl_addr[i]; if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss))) return -EFAULT; } return 0; done: read_unlock_bh(&idev->lock); rcu_read_unlock(); return err; } bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, const struct in6_addr *src_addr) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_mc_socklist *mc; struct ip6_sf_socklist *psl; bool rv = true; rcu_read_lock(); for_each_pmc_rcu(np, mc) { if (ipv6_addr_equal(&mc->addr, mc_addr)) break; } if (!mc) { rcu_read_unlock(); return true; } read_lock(&mc->sflock); psl = mc->sflist; if (!psl) { rv = mc->sfmode == MCAST_EXCLUDE; } else { int i; for (i=0; i<psl->sl_count; i++) { if (ipv6_addr_equal(&psl->sl_addr[i], src_addr)) break; } if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count) rv = false; if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count) rv = false; } read_unlock(&mc->sflock); rcu_read_unlock(); return rv; } static void ma_put(struct ifmcaddr6 *mc) { if (atomic_dec_and_test(&mc->mca_refcnt)) { in6_dev_put(mc->idev); kfree(mc); } } static void igmp6_group_added(struct ifmcaddr6 *mc) { struct net_device *dev = mc->idev->dev; char buf[MAX_ADDR_LEN]; if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL) return; spin_lock_bh(&mc->mca_lock); if (!(mc->mca_flags&MAF_LOADED)) { mc->mca_flags |= MAF_LOADED; if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) dev_mc_add(dev, buf); } spin_unlock_bh(&mc->mca_lock); if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT)) return; if (mld_in_v1_mode(mc->idev)) { igmp6_join_group(mc); return; } /* else v2 */ mc->mca_crcount = mc->idev->mc_qrv; mld_ifc_event(mc->idev); } static void igmp6_group_dropped(struct ifmcaddr6 *mc) { struct net_device *dev = mc->idev->dev; char buf[MAX_ADDR_LEN]; if 
(IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	spin_lock_bh(&mc->mca_lock);
	if (mc->mca_flags&MAF_LOADED) {
		mc->mca_flags &= ~MAF_LOADED;
		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
			dev_mc_del(dev, buf);
	}

	if (mc->mca_flags & MAF_NOREPORT)
		goto done;
	spin_unlock_bh(&mc->mca_lock);

	/* Send a leave report unless the interface is being torn down. */
	if (!mc->idev->dead)
		igmp6_leave_group(mc);

	spin_lock_bh(&mc->mca_lock);
	/* del_timer() returning nonzero means the pending timer held a ref */
	if (del_timer(&mc->mca_timer))
		atomic_dec(&mc->mca_refcnt);
done:
	ip6_mc_clear_src(mc);
	spin_unlock_bh(&mc->mca_lock);
}

/*
 * deleted ifmcaddr6 manipulation
 */

/* Record a copy of the deleted group @im on idev->mc_tomb so that
 * pending MLDv2 state-change reports for it can still be generated
 * (the tomb list is consumed by mld_send_cr()).
 */
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
	struct ifmcaddr6 *pmc;

	/* this is an "ifmcaddr6" for convenience; only the fields below
	 * are actually used. In particular, the refcnt and users are not
	 * used for management of the delete list. Using the same structure
	 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCA's.
	 */
	pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC);
	if (!pmc)
		return;

	spin_lock_bh(&im->mca_lock);
	spin_lock_init(&pmc->mca_lock);
	pmc->idev = im->idev;
	in6_dev_hold(idev);
	pmc->mca_addr = im->mca_addr;
	pmc->mca_crcount = idev->mc_qrv;
	pmc->mca_sfmode = im->mca_sfmode;
	if (pmc->mca_sfmode == MCAST_INCLUDE) {
		struct ip6_sf_list *psf;

		/* steal the source lists from @im; they will be reported
		 * from the tomb record instead
		 */
		pmc->mca_tomb = im->mca_tomb;
		pmc->mca_sources = im->mca_sources;
		im->mca_tomb = im->mca_sources = NULL;
		for (psf=pmc->mca_sources; psf; psf=psf->sf_next)
			psf->sf_crcount = pmc->mca_crcount;
	}
	spin_unlock_bh(&im->mca_lock);

	spin_lock_bh(&idev->mc_lock);
	pmc->next = idev->mc_tomb;
	idev->mc_tomb = pmc;
	spin_unlock_bh(&idev->mc_lock);
}

/* Remove and free the tomb record matching @pmca, if one exists. */
static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
{
	struct ifmcaddr6 *pmc, *pmc_prev;
	struct ip6_sf_list *psf, *psf_next;

	spin_lock_bh(&idev->mc_lock);
	pmc_prev = NULL;
	for (pmc=idev->mc_tomb; pmc; pmc=pmc->next) {
		if (ipv6_addr_equal(&pmc->mca_addr, pmca))
			break;
		pmc_prev = pmc;
	}
	if (pmc) {
		if (pmc_prev)
			pmc_prev->next = pmc->next;
		else
			idev->mc_tomb =
pmc->next;
	}
	spin_unlock_bh(&idev->mc_lock);

	if (pmc) {
		for (psf=pmc->mca_tomb; psf; psf=psf_next) {
			psf_next = psf->sf_next;
			kfree(psf);
		}
		in6_dev_put(pmc->idev);
		kfree(pmc);
	}
}

/* Free every tomb record on @idev, plus tombstoned sources of live groups. */
static void mld_clear_delrec(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc, *nextpmc;

	spin_lock_bh(&idev->mc_lock);
	pmc = idev->mc_tomb;
	idev->mc_tomb = NULL;
	spin_unlock_bh(&idev->mc_lock);

	for (; pmc; pmc = nextpmc) {
		nextpmc = pmc->next;
		ip6_mc_clear_src(pmc);
		in6_dev_put(pmc->idev);
		kfree(pmc);
	}

	/* clear dead sources, too */
	read_lock_bh(&idev->lock);
	for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
		struct ip6_sf_list *psf, *psf_next;

		spin_lock_bh(&pmc->mca_lock);
		psf = pmc->mca_tomb;
		pmc->mca_tomb = NULL;
		spin_unlock_bh(&pmc->mca_lock);
		for (; psf; psf=psf_next) {
			psf_next = psf->sf_next;
			kfree(psf);
		}
	}
	read_unlock_bh(&idev->lock);
}

/*
 * device multicast group inc (add if not found)
 *
 * Join @addr on @dev: bump the user count if the group already exists,
 * otherwise allocate a new ifmcaddr6 in (EXCLUDE, empty) mode and report
 * it. Returns 0 or a negative errno.
 */
int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
{
	struct ifmcaddr6 *mc;
	struct inet6_dev *idev;

	/* we need to take a reference on idev */
	idev = in6_dev_get(dev);

	if (idev == NULL)
		return -EINVAL;

	write_lock_bh(&idev->lock);
	if (idev->dead) {
		write_unlock_bh(&idev->lock);
		in6_dev_put(idev);
		return -ENODEV;
	}

	for (mc = idev->mc_list; mc; mc = mc->next) {
		if (ipv6_addr_equal(&mc->mca_addr, addr)) {
			mc->mca_users++;
			write_unlock_bh(&idev->lock);
			ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0,
				NULL, 0);
			in6_dev_put(idev);
			return 0;
		}
	}

	/*
	 * not found: create a new one.
	 */

	mc = kzalloc(sizeof(struct ifmcaddr6), GFP_ATOMIC);

	if (mc == NULL) {
		write_unlock_bh(&idev->lock);
		in6_dev_put(idev);
		return -ENOMEM;
	}

	setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);

	mc->mca_addr = *addr;
	mc->idev = idev; /* (reference taken) */
	mc->mca_users = 1;
	/* mca_stamp should be updated upon changes */
	mc->mca_cstamp = mc->mca_tstamp = jiffies;
	/* one ref for the list, one for this function (dropped by ma_put) */
	atomic_set(&mc->mca_refcnt, 2);
	spin_lock_init(&mc->mca_lock);

	/* initial mode is (EX, empty) */
	mc->mca_sfmode = MCAST_EXCLUDE;
	mc->mca_sfcount[MCAST_EXCLUDE] = 1;

	if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		mc->mca_flags |= MAF_NOREPORT;

	mc->next = idev->mc_list;
	idev->mc_list = mc;
	write_unlock_bh(&idev->lock);

	mld_del_delrec(idev, &mc->mca_addr);
	igmp6_group_added(mc);
	ma_put(mc);
	return 0;
}

/*
 * device multicast group del
 *
 * Drop one user of @addr on @idev; the group is removed and reported
 * as dropped when the last user leaves. Returns 0 or -ENOENT.
 */
int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
{
	struct ifmcaddr6 *ma, **map;

	write_lock_bh(&idev->lock);
	for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) {
		if (ipv6_addr_equal(&ma->mca_addr, addr)) {
			if (--ma->mca_users == 0) {
				*map = ma->next;
				write_unlock_bh(&idev->lock);

				igmp6_group_dropped(ma);

				ma_put(ma);
				return 0;
			}
			write_unlock_bh(&idev->lock);
			return 0;
		}
	}
	write_unlock_bh(&idev->lock);

	return -ENOENT;
}

/* Resolve @dev to its inet6_dev under RCU and delegate to
 * __ipv6_dev_mc_dec().
 */
int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
{
	struct inet6_dev *idev;
	int err;

	rcu_read_lock();

	idev = __in6_dev_get(dev);
	if (!idev)
		err = -ENODEV;
	else
		err = __ipv6_dev_mc_dec(idev, addr);

	rcu_read_unlock();
	return err;
}

/*
 * check if the interface/address pair is valid
 */
bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
			 const struct in6_addr *src_addr)
{
	struct inet6_dev *idev;
	struct ifmcaddr6 *mc;
	bool rv = false;

	rcu_read_lock();
	idev = __in6_dev_get(dev);
	if (idev) {
		read_lock_bh(&idev->lock);
		for (mc = idev->mc_list; mc; mc=mc->next) {
			if (ipv6_addr_equal(&mc->mca_addr, group))
break;
		}
		if (mc) {
			if (src_addr && !ipv6_addr_any(src_addr)) {
				struct ip6_sf_list *psf;

				spin_lock_bh(&mc->mca_lock);
				for (psf=mc->mca_sources;psf;psf=psf->sf_next) {
					if (ipv6_addr_equal(&psf->sf_addr, src_addr))
						break;
				}
				/* source is accepted if it is INCLUDEd, or
				 * present but not EXCLUDEd by every filter
				 */
				if (psf)
					rv = psf->sf_count[MCAST_INCLUDE] ||
						psf->sf_count[MCAST_EXCLUDE] !=
						mc->mca_sfcount[MCAST_EXCLUDE];
				else
					rv = mc->mca_sfcount[MCAST_EXCLUDE] !=0;
				spin_unlock_bh(&mc->mca_lock);
			} else
				rv = true; /* don't filter unspecified source */
		}
		read_unlock_bh(&idev->lock);
	}
	rcu_read_unlock();
	return rv;
}

/* Arm the general-query response timer with a random delay below
 * mc_maxdelay; take an idev ref when the timer was not already pending.
 */
static void mld_gq_start_timer(struct inet6_dev *idev)
{
	unsigned long tv = prandom_u32() % idev->mc_maxdelay;

	idev->mc_gq_running = 1;
	if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))
		in6_dev_hold(idev);
}

/* Cancel the general-query timer, dropping the ref it held if pending. */
static void mld_gq_stop_timer(struct inet6_dev *idev)
{
	idev->mc_gq_running = 0;
	if (del_timer(&idev->mc_gq_timer))
		__in6_dev_put(idev);
}

/* Arm the interface-change report timer with a random delay below @delay. */
static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay)
{
	unsigned long tv = prandom_u32() % delay;

	if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))
		in6_dev_hold(idev);
}

/* Cancel the interface-change timer and clear the pending report count. */
static void mld_ifc_stop_timer(struct inet6_dev *idev)
{
	idev->mc_ifc_count = 0;
	if (del_timer(&idev->mc_ifc_timer))
		__in6_dev_put(idev);
}

/* Arm the DAD-complete report timer with a random delay below @delay. */
static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay)
{
	unsigned long tv = prandom_u32() % delay;

	if (!mod_timer(&idev->mc_dad_timer, jiffies+tv+2))
		in6_dev_hold(idev);
}

/* Cancel the DAD report timer, dropping the ref it held if pending. */
static void mld_dad_stop_timer(struct inet6_dev *idev)
{
	if (del_timer(&idev->mc_dad_timer))
		__in6_dev_put(idev);
}

/*
 * IGMP handling (alias multicast ICMPv6 messages)
 */

/* Schedule a report for @ma within @resptime in response to a query;
 * an already-pending earlier timer is kept (its remaining delay wins).
 * Called with ma->mca_lock held.
 */
static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
{
	unsigned long delay = resptime;

	/* Do not start timer for these addresses */
	if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	if (del_timer(&ma->mca_timer)) {
		atomic_dec(&ma->mca_refcnt);
		delay = ma->mca_timer.expires - jiffies;
	}

	if (delay >= resptime)
		delay =
 Older Version Querier Present Timeout
	 *    (the [Query Interval] in the last Query received)
	 */
	unsigned long mc_qqi;

	if (mlh2->mld2q_qqic < 128) {
		mc_qqi = mlh2->mld2q_qqic;
	} else {
		unsigned long mc_man, mc_exp;

		/* QQIC >= 128: floating-point encoding, mantissa | exponent */
		mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
		mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);

		mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
	}

	idev->mc_qi = mc_qqi * HZ;
}

/* Update the Query Response Interval from an MLDv2 query. */
static void mld_update_qri(struct inet6_dev *idev,
			   const struct mld2_query *mlh2)
{
	/* RFC3810, relevant sections:
	 *  - 5.1.3. Maximum Response Code
	 *  - 9.3. Query Response Interval
	 */
	idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
}

/* Handle an MLDv1 query: switch to v1 compatibility mode and compute
 * *max_delay. Returns 0, or -EINVAL when v2 mode is being enforced.
 */
static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
			  unsigned long *max_delay)
{
	unsigned long mldv1_md;

	/* Ignore v1 queries */
	if (mld_in_v2_mode_only(idev))
		return -EINVAL;

	/* MLDv1 router present */
	mldv1_md = ntohs(mld->mld_maxdelay);
	*max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);

	mld_set_v1_mode(idev);

	/* cancel MLDv2 report timer */
	mld_gq_stop_timer(idev);
	/* cancel the interface change timer */
	mld_ifc_stop_timer(idev);
	/* clear deleted report items */
	mld_clear_delrec(idev);

	return 0;
}

/* Handle an MLDv2 query: refresh QRV/QI/QRI from the querier and compute
 * *max_delay. Returns 0, or -EINVAL when stuck in v1 compatibility mode.
 */
static int mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
			  unsigned long *max_delay)
{
	/* hosts need to stay in MLDv1 mode, discard MLDv2 queries */
	if (mld_in_v1_mode(idev))
		return -EINVAL;

	*max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);

	mld_update_qrv(idev, mld);
	mld_update_qi(idev, mld);
	mld_update_qri(idev, mld);

	idev->mc_maxdelay = *max_delay;

	return 0;
}

/* called with rcu_read_lock() */
int igmp6_event_query(struct sk_buff *skb)
{
	struct mld2_query *mlh2 = NULL;
	struct ifmcaddr6 *ma;
	const struct in6_addr *group;
	unsigned long max_delay;
	struct inet6_dev *idev;
	struct mld_msg *mld;
	int group_type;
	int mark = 0;
	int len, err;

	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
		return -EINVAL;

	/* compute payload length excluding extension headers */
	len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
	len -=
 skb_network_header_len(skb);

	/* Drop queries with not link local source */
	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
		return -EINVAL;

	idev = __in6_dev_get(skb->dev);
	if (idev == NULL)
		return 0;

	mld = (struct mld_msg *)icmp6_hdr(skb);
	group = &mld->mld_mca;
	group_type = ipv6_addr_type(group);

	if (group_type != IPV6_ADDR_ANY &&
	    !(group_type&IPV6_ADDR_MULTICAST))
		return -EINVAL;

	/* the ICMPv6 payload length distinguishes v1 from v2 queries */
	if (len == MLD_V1_QUERY_LEN) {
		err = mld_process_v1(idev, mld, &max_delay);
		if (err < 0)
			return err;
	} else if (len >= MLD_V2_QUERY_LEN_MIN) {
		int srcs_offset = sizeof(struct mld2_query) -
				  sizeof(struct icmp6hdr);

		if (!pskb_may_pull(skb, srcs_offset))
			return -EINVAL;

		mlh2 = (struct mld2_query *)skb_transport_header(skb);

		err = mld_process_v2(idev, mlh2, &max_delay);
		if (err < 0)
			return err;

		if (group_type == IPV6_ADDR_ANY) { /* general query */
			if (mlh2->mld2q_nsrcs)
				return -EINVAL; /* no sources allowed */

			mld_gq_start_timer(idev);
			return 0;
		}
		/* mark sources to include, if group & source-specific */
		if (mlh2->mld2q_nsrcs != 0) {
			if (!pskb_may_pull(skb, srcs_offset +
			    ntohs(mlh2->mld2q_nsrcs) *
			    sizeof(struct in6_addr)))
				return -EINVAL;

			/* re-fetch: pskb_may_pull may have moved the data */
			mlh2 = (struct mld2_query *)skb_transport_header(skb);
			mark = 1;
		}
	} else
		return -EINVAL;

	read_lock_bh(&idev->lock);
	if (group_type == IPV6_ADDR_ANY) {
		/* general query: schedule a response for every group */
		for (ma = idev->mc_list; ma; ma=ma->next) {
			spin_lock_bh(&ma->mca_lock);
			igmp6_group_queried(ma, max_delay);
			spin_unlock_bh(&ma->mca_lock);
		}
	} else {
		for (ma = idev->mc_list; ma; ma=ma->next) {
			if (!ipv6_addr_equal(group, &ma->mca_addr))
				continue;
			spin_lock_bh(&ma->mca_lock);
			if (ma->mca_flags & MAF_TIMER_RUNNING) {
				/* gsquery <- gsquery && mark */
				if (!mark)
					ma->mca_flags &= ~MAF_GSQUERY;
			} else {
				/* gsquery <- mark */
				if (mark)
					ma->mca_flags |= MAF_GSQUERY;
				else
					ma->mca_flags &= ~MAF_GSQUERY;
			}
			if (!(ma->mca_flags & MAF_GSQUERY) ||
			    mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
				igmp6_group_queried(ma, max_delay);
			spin_unlock_bh(&ma->mca_lock);
			break;
		}
	}
read_unlock_bh(&idev->lock);

	return 0;
}

/* called with rcu_read_lock() */
int igmp6_event_report(struct sk_buff *skb)
{
	struct ifmcaddr6 *ma;
	struct inet6_dev *idev;
	struct mld_msg *mld;
	int addr_type;

	/* Our own report looped back. Ignore it. */
	if (skb->pkt_type == PACKET_LOOPBACK)
		return 0;

	/* send our report if the MC router may not have heard this report */
	if (skb->pkt_type != PACKET_MULTICAST &&
	    skb->pkt_type != PACKET_BROADCAST)
		return 0;

	if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
		return -EINVAL;

	mld = (struct mld_msg *)icmp6_hdr(skb);

	/* Drop reports with not link local source */
	addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
	if (addr_type != IPV6_ADDR_ANY &&
	    !(addr_type&IPV6_ADDR_LINKLOCAL))
		return -EINVAL;

	idev = __in6_dev_get(skb->dev);
	if (idev == NULL)
		return -ENODEV;

	/*
	 *	Cancel the timer for this group
	 */

	read_lock_bh(&idev->lock);
	for (ma = idev->mc_list; ma; ma=ma->next) {
		if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
			spin_lock(&ma->mca_lock);
			if (del_timer(&ma->mca_timer))
				atomic_dec(&ma->mca_refcnt);
			/* another host reported; we need not report again */
			ma->mca_flags &= ~(MAF_LAST_REPORTER|MAF_TIMER_RUNNING);
			spin_unlock(&ma->mca_lock);
			break;
		}
	}
	read_unlock_bh(&idev->lock);
	return 0;
}

/* Decide whether source record @psf belongs in an MLDv2 record of @type.
 * @gdeleted/@sdeleted flag tombstoned groups/sources.
 */
static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
		  int gdeleted, int sdeleted)
{
	switch (type) {
	case MLD2_MODE_IS_INCLUDE:
	case MLD2_MODE_IS_EXCLUDE:
		if (gdeleted || sdeleted)
			return false;
		if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
			if (pmc->mca_sfmode == MCAST_INCLUDE)
				return true;
			/* don't include if this source is excluded
			 * in all filters
			 */
			if (psf->sf_count[MCAST_INCLUDE])
				return type == MLD2_MODE_IS_INCLUDE;
			return pmc->mca_sfcount[MCAST_EXCLUDE] ==
				psf->sf_count[MCAST_EXCLUDE];
		}
		return false;
	case MLD2_CHANGE_TO_INCLUDE:
		if (gdeleted || sdeleted)
			return false;
		return psf->sf_count[MCAST_INCLUDE] != 0;
	case MLD2_CHANGE_TO_EXCLUDE:
		if (gdeleted || sdeleted)
			return false;
		if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
psf->sf_count[MCAST_INCLUDE])
			return false;
		return pmc->mca_sfcount[MCAST_EXCLUDE] ==
			psf->sf_count[MCAST_EXCLUDE];
	case MLD2_ALLOW_NEW_SOURCES:
		if (gdeleted || !psf->sf_crcount)
			return false;
		return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
	case MLD2_BLOCK_OLD_SOURCES:
		if (pmc->mca_sfmode == MCAST_INCLUDE)
			return gdeleted || (psf->sf_crcount && sdeleted);
		return psf->sf_crcount && !gdeleted && !sdeleted;
	}
	return false;
}

/* Count the sources of @pmc that would appear in a record of @type. */
static int
mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
{
	struct ip6_sf_list *psf;
	int scount = 0;

	for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
		if (!is_in(pmc, psf, type, gdeleted, sdeleted))
			continue;
		scount++;
	}
	return scount;
}

/* Build the IPv6 header for an outgoing MLD packet on @skb. */
static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
		       struct net_device *dev,
		       const struct in6_addr *saddr,
		       const struct in6_addr *daddr,
		       int proto, int len)
{
	struct ipv6hdr *hdr;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	skb_reset_network_header(skb);
	skb_put(skb, sizeof(struct ipv6hdr));
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, 0, 0);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = inet6_sk(sk)->hop_limit;

	hdr->saddr = *saddr;
	hdr->daddr = *daddr;
}

/* Allocate and initialize a new MLDv2 report skb of up to @size bytes:
 * IPv6 header + hop-by-hop router-alert option + empty mld2_report.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
{
	struct net_device *dev = idev->dev;
	struct net *net = dev_net(dev);
	struct sock *sk = net->ipv6.igmp_sk;
	struct sk_buff *skb;
	struct mld2_report *pmr;
	struct in6_addr addr_buf;
	const struct in6_addr *saddr;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	int err;
	/* hop-by-hop extension header carrying the router alert option */
	u8 ra[8] = { IPPROTO_ICMPV6, 0,
		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
		     IPV6_TLV_PADN, 0 };

	/* we assume size > sizeof(ra) here */
	size += hlen + tlen;
	/* limit our allocations to order-0 page */
	size = min_t(int, size, SKB_MAX_ORDER(0, 0));
	skb = sock_alloc_send_skb(sk, size, 1, &err);
	if (!skb)
		return NULL;

	skb->priority = TC_PRIO_CONTROL;
	skb_reserve(skb, hlen);

	if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
		/*
 <draft-ietf-magma-mld-source-05.txt>:
		 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
		saddr = &in6addr_any;
	} else
		saddr = &addr_buf;

	ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);

	memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));

	skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
	skb_put(skb, sizeof(*pmr));
	pmr = (struct mld2_report *)skb_transport_header(skb);
	pmr->mld2r_type = ICMPV6_MLD2_REPORT;
	pmr->mld2r_resv1 = 0;
	pmr->mld2r_cksum = 0;
	pmr->mld2r_resv2 = 0;
	pmr->mld2r_ngrec = 0;
	return skb;
}

/* Finalize (payload length, ICMPv6 checksum) and transmit an MLDv2
 * report built by mld_newpack()/add_grec(); updates SNMP counters.
 */
static void mld_sendpack(struct sk_buff *skb)
{
	struct ipv6hdr *pip6 = ipv6_hdr(skb);
	struct mld2_report *pmr =
			      (struct mld2_report *)skb_transport_header(skb);
	int payload_len, mldlen;
	struct inet6_dev *idev;
	struct net *net = dev_net(skb->dev);
	int err;
	struct flowi6 fl6;
	struct dst_entry *dst;

	rcu_read_lock();
	idev = __in6_dev_get(skb->dev);
	IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);

	payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
		sizeof(*pip6);
	mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
	pip6->payload_len = htons(payload_len);

	pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
					   IPPROTO_ICMPV6,
					   csum_partial(skb_transport_header(skb),
							mldlen, 0));

	icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			 skb->dev->ifindex);
	dst = icmp6_dst_alloc(skb->dev, &fl6);

	err = 0;
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	if (err)
		goto err_out;

	payload_len = skb->len;

	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
		      dst_output);
out:
	if (!err) {
		ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT);
		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
		IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
	} else
		IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);

	rcu_read_unlock();
	return;

err_out:
	kfree_skb(skb);
goto out;
}

/* Size in bytes of a group record of @type for @pmc (16 bytes/source). */
static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
{
	return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel);
}

/* Append an empty group record header of @type for @pmc to @skb,
 * allocating a fresh packet if @skb is NULL; *ppgr points at the new
 * record so callers can fill in grec_nsrcs later.
 */
static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
	int type, struct mld2_grec **ppgr)
{
	struct net_device *dev = pmc->idev->dev;
	struct mld2_report *pmr;
	struct mld2_grec *pgr;

	if (!skb)
		skb = mld_newpack(pmc->idev, dev->mtu);
	if (!skb)
		return NULL;
	pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
	pgr->grec_type = type;
	pgr->grec_auxwords = 0;
	pgr->grec_nsrcs = 0;
	pgr->grec_mca = pmc->mca_addr;	/* structure copy */
	pmr = (struct mld2_report *)skb_transport_header(skb);
	pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
	*ppgr = pgr;
	return skb;
}

/* Room left in @skb: up to the device MTU once a dev is attached,
 * otherwise whatever tailroom remains.
 */
#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \
	skb_tailroom(skb)) : 0)

/* Append a group record of @type for @pmc (and its qualifying sources)
 * to @skb, sending and reallocating packets as they fill up.
 * Returns the (possibly new) skb, or NULL on allocation failure.
 */
static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
	int type, int gdeleted, int sdeleted, int crsend)
{
	struct inet6_dev *idev = pmc->idev;
	struct net_device *dev = idev->dev;
	struct mld2_report *pmr;
	struct mld2_grec *pgr = NULL;
	struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
	int scount, stotal, first, isquery, truncate;

	if (pmc->mca_flags & MAF_NOREPORT)
		return skb;

	isquery = type == MLD2_MODE_IS_INCLUDE ||
		  type == MLD2_MODE_IS_EXCLUDE;
	truncate = type == MLD2_MODE_IS_EXCLUDE ||
		   type == MLD2_CHANGE_TO_EXCLUDE;

	stotal = scount = 0;

	psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;

	if (!*psf_list)
		goto empty_source;

	pmr = skb ?
(struct mld2_report *)skb_transport_header(skb) : NULL;

	/* EX and TO_EX get a fresh packet, if needed */
	if (truncate) {
		if (pmr && pmr->mld2r_ngrec &&
		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
			if (skb)
				mld_sendpack(skb);
			skb = mld_newpack(idev, dev->mtu);
		}
	}
	first = 1;
	psf_prev = NULL;
	for (psf=*psf_list; psf; psf=psf_next) {
		struct in6_addr *psrc;

		psf_next = psf->sf_next;

		if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
			psf_prev = psf;
			continue;
		}

		/* clear marks on query responses */
		if (isquery)
			psf->sf_gsresp = 0;

		if (AVAILABLE(skb) < sizeof(*psrc) +
		    first*sizeof(struct mld2_grec)) {
			if (truncate && !first)
				break;	 /* truncate these */
			if (pgr)
				pgr->grec_nsrcs = htons(scount);
			if (skb)
				mld_sendpack(skb);
			skb = mld_newpack(idev, dev->mtu);
			first = 1;
			scount = 0;
		}
		if (first) {
			skb = add_grhead(skb, pmc, type, &pgr);
			first = 0;
		}
		if (!skb)
			return NULL;
		psrc = (struct in6_addr *)skb_put(skb, sizeof(*psrc));
		*psrc = psf->sf_addr;
		scount++; stotal++;
		/* decrement retransmit counts; free exhausted tombstones */
		if ((type == MLD2_ALLOW_NEW_SOURCES ||
		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
			psf->sf_crcount--;
			if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
				if (psf_prev)
					psf_prev->sf_next = psf->sf_next;
				else
					*psf_list = psf->sf_next;
				kfree(psf);
				continue;
			}
		}
		psf_prev = psf;
	}

empty_source:
	if (!stotal) {
		if (type == MLD2_ALLOW_NEW_SOURCES ||
		    type == MLD2_BLOCK_OLD_SOURCES)
			return skb;
		if (pmc->mca_crcount || isquery || crsend) {
			/* make sure we have room for group header */
			if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
				mld_sendpack(skb);
				skb = NULL; /* add_grhead will get a new one */
			}
			skb = add_grhead(skb, pmc, type, &pgr);
		}
	}
	if (pgr)
		pgr->grec_nsrcs = htons(scount);

	if (isquery)
		pmc->mca_flags &= ~MAF_GSQUERY;	/* clear query state */
	return skb;
}

/* Send an MLDv2 current-state report for @pmc, or for every reportable
 * group on @idev when @pmc is NULL.
 */
static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
{
	struct sk_buff *skb = NULL;
	int type;

	read_lock_bh(&idev->lock);
	if (!pmc) {
		for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
			if (pmc->mca_flags &
 MAF_NOREPORT)
				continue;
			spin_lock_bh(&pmc->mca_lock);
			if (pmc->mca_sfcount[MCAST_EXCLUDE])
				type = MLD2_MODE_IS_EXCLUDE;
			else
				type = MLD2_MODE_IS_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0, 0);
			spin_unlock_bh(&pmc->mca_lock);
		}
	} else {
		spin_lock_bh(&pmc->mca_lock);
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			type = MLD2_MODE_IS_EXCLUDE;
		else
			type = MLD2_MODE_IS_INCLUDE;
		skb = add_grec(skb, pmc, type, 0, 0, 0);
		spin_unlock_bh(&pmc->mca_lock);
	}
	read_unlock_bh(&idev->lock);
	if (skb)
		mld_sendpack(skb);
}

/*
 * remove zero-count source records from a source filter list
 */
static void mld_clear_zeros(struct ip6_sf_list **ppsf)
{
	struct ip6_sf_list *psf_prev, *psf_next, *psf;

	psf_prev = NULL;
	for (psf=*ppsf; psf; psf = psf_next) {
		psf_next = psf->sf_next;
		if (psf->sf_crcount == 0) {
			if (psf_prev)
				psf_prev->sf_next = psf->sf_next;
			else
				*ppsf = psf->sf_next;
			kfree(psf);
		} else
			psf_prev = psf;
	}
}

/* Send MLDv2 state-change reports: first for tombstoned (deleted)
 * groups, then for live groups, reaping records whose retransmit
 * count (crcount) has run out.
 */
static void mld_send_cr(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
	struct sk_buff *skb = NULL;
	int type, dtype;

	read_lock_bh(&idev->lock);
	spin_lock(&idev->mc_lock);

	/* deleted MCA's */
	pmc_prev = NULL;
	for (pmc=idev->mc_tomb; pmc; pmc=pmc_next) {
		pmc_next = pmc->next;
		if (pmc->mca_sfmode == MCAST_INCLUDE) {
			type = MLD2_BLOCK_OLD_SOURCES;
			dtype = MLD2_BLOCK_OLD_SOURCES;
			skb = add_grec(skb, pmc, type, 1, 0, 0);
			skb = add_grec(skb, pmc, dtype, 1, 1, 0);
		}
		if (pmc->mca_crcount) {
			if (pmc->mca_sfmode == MCAST_EXCLUDE) {
				type = MLD2_CHANGE_TO_INCLUDE;
				skb = add_grec(skb, pmc, type, 1, 0, 0);
			}
			pmc->mca_crcount--;
			if (pmc->mca_crcount == 0) {
				mld_clear_zeros(&pmc->mca_tomb);
				mld_clear_zeros(&pmc->mca_sources);
			}
		}
		/* fully reported: unlink and free the tomb record */
		if (pmc->mca_crcount == 0 && !pmc->mca_tomb &&
		    !pmc->mca_sources) {
			if (pmc_prev)
				pmc_prev->next = pmc_next;
			else
				idev->mc_tomb = pmc_next;
			in6_dev_put(pmc->idev);
			kfree(pmc);
		} else
			pmc_prev = pmc;
	}
	spin_unlock(&idev->mc_lock);

	/* change recs */
	for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
		spin_lock_bh(&pmc->mca_lock);
		if
 (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			type = MLD2_BLOCK_OLD_SOURCES;
			dtype = MLD2_ALLOW_NEW_SOURCES;
		} else {
			type = MLD2_ALLOW_NEW_SOURCES;
			dtype = MLD2_BLOCK_OLD_SOURCES;
		}
		skb = add_grec(skb, pmc, type, 0, 0, 0);
		skb = add_grec(skb, pmc, dtype, 0, 1, 0);	/* deleted sources */

		/* filter mode changes */
		if (pmc->mca_crcount) {
			if (pmc->mca_sfmode == MCAST_EXCLUDE)
				type = MLD2_CHANGE_TO_EXCLUDE;
			else
				type = MLD2_CHANGE_TO_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0, 0);
			pmc->mca_crcount--;
		}
		spin_unlock_bh(&pmc->mca_lock);
	}
	read_unlock_bh(&idev->lock);
	if (!skb)
		return;
	(void) mld_sendpack(skb);
}

/* Build and send an MLDv1 message (@type is ICMPV6_MGM_REPORT or
 * ICMPV6_MGM_REDUCTION) for group @addr on @dev; reductions go to
 * the all-routers link-local address.
 */
static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
{
	struct net *net = dev_net(dev);
	struct sock *sk = net->ipv6.igmp_sk;
	struct inet6_dev *idev;
	struct sk_buff *skb;
	struct mld_msg *hdr;
	const struct in6_addr *snd_addr, *saddr;
	struct in6_addr addr_buf;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	int err, len, payload_len, full_len;
	/* hop-by-hop extension header carrying the router alert option */
	u8 ra[8] = { IPPROTO_ICMPV6, 0,
		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
		     IPV6_TLV_PADN, 0 };
	struct flowi6 fl6;
	struct dst_entry *dst;

	if (type == ICMPV6_MGM_REDUCTION)
		snd_addr = &in6addr_linklocal_allrouters;
	else
		snd_addr = addr;

	len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
	payload_len = len + sizeof(ra);
	full_len = sizeof(struct ipv6hdr) + payload_len;

	rcu_read_lock();
	IP6_UPD_PO_STATS(net, __in6_dev_get(dev),
		      IPSTATS_MIB_OUT, full_len);
	rcu_read_unlock();

	skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);

	if (skb == NULL) {
		rcu_read_lock();
		IP6_INC_STATS(net, __in6_dev_get(dev),
			      IPSTATS_MIB_OUTDISCARDS);
		rcu_read_unlock();
		return;
	}
	skb->priority = TC_PRIO_CONTROL;
	skb_reserve(skb, hlen);

	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
		/* <draft-ietf-magma-mld-source-05.txt>:
		 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
		saddr = &in6addr_any;
	} else
		saddr = &addr_buf;

	ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);

	memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));

	hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg));
	memset(hdr, 0, sizeof(struct mld_msg));
	hdr->mld_type = type;
	hdr->mld_mca = *addr;

	hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
					 IPPROTO_ICMPV6,
					 csum_partial(hdr, len, 0));

	rcu_read_lock();
	idev = __in6_dev_get(skb->dev);

	icmpv6_flow_init(sk, &fl6, type,
			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			 skb->dev->ifindex);
	dst = icmp6_dst_alloc(skb->dev, &fl6);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto err_out;
	}

	skb_dst_set(skb, dst);
	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
		      dst_output);
out:
	if (!err) {
		ICMP6MSGOUT_INC_STATS(net, idev, type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
	} else
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);

	rcu_read_unlock();
	return;

err_out:
	kfree_skb(skb);
	goto out;
}

/* Send unsolicited MLDv2 TO_EX/TO_IN reports for every group on @idev
 * (used after DAD completes); no-op in MLDv1 compatibility mode.
 */
static void mld_send_initial_cr(struct inet6_dev *idev)
{
	struct sk_buff *skb;
	struct ifmcaddr6 *pmc;
	int type;

	if (mld_in_v1_mode(idev))
		return;

	skb = NULL;
	read_lock_bh(&idev->lock);
	for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
		spin_lock_bh(&pmc->mca_lock);
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			type = MLD2_CHANGE_TO_EXCLUDE;
		else
			type = MLD2_CHANGE_TO_INCLUDE;
		skb = add_grec(skb, pmc, type, 0, 0, 1);
		spin_unlock_bh(&pmc->mca_lock);
	}
	read_unlock_bh(&idev->lock);
	if (skb)
		mld_sendpack(skb);
}

/* DAD finished on @idev: send the first unsolicited report and schedule
 * mc_qrv-1 retransmissions via the DAD timer.
 */
void ipv6_mc_dad_complete(struct inet6_dev *idev)
{
	idev->mc_dad_count = idev->mc_qrv;
	if (idev->mc_dad_count) {
		mld_send_initial_cr(idev);
		idev->mc_dad_count--;
		if (idev->mc_dad_count)
			mld_dad_start_timer(idev, idev->mc_maxdelay);
	}
}

/* DAD report timer: retransmit the initial report, rearming while
 * retransmissions remain; drops the timer's idev reference.
 */
static void mld_dad_timer_expire(unsigned long data)
{
	struct inet6_dev *idev = (struct inet6_dev *)data;

	mld_send_initial_cr(idev);
	if (idev->mc_dad_count) {
		idev->mc_dad_count--;
		if (idev->mc_dad_count)
mld_dad_start_timer(idev, idev->mc_maxdelay);
	}
	in6_dev_put(idev);
}

/* Drop one @sfmode reference on source @psfsrc of @pmc. When the source
 * has no references left it is unlinked and either tombstoned for
 * reporting (returns 1) or freed. Returns -ESRCH if not found.
 */
static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
	const struct in6_addr *psfsrc)
{
	struct ip6_sf_list *psf, *psf_prev;
	int rv = 0;

	psf_prev = NULL;
	for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
			break;
		psf_prev = psf;
	}
	if (!psf || psf->sf_count[sfmode] == 0) {
		/* source filter not found, or count wrong =>  bug */
		return -ESRCH;
	}
	psf->sf_count[sfmode]--;
	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
		struct inet6_dev *idev = pmc->idev;

		/* no more filters for this source */
		if (psf_prev)
			psf_prev->sf_next = psf->sf_next;
		else
			pmc->mca_sources = psf->sf_next;
		if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
		    !mld_in_v1_mode(idev)) {
			psf->sf_crcount = idev->mc_qrv;
			psf->sf_next = pmc->mca_tomb;
			pmc->mca_tomb = psf;
			rv = 1;
		} else
			kfree(psf);
	}
	return rv;
}

/* Remove @sfcount sources in @psfsrc[] (filter mode @sfmode) from group
 * @pmca on @idev, emitting state-change reports as needed. @delta
 * nonzero means the per-group mode count is left untouched.
 */
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta)
{
	struct ifmcaddr6 *pmc;
	int	changerec = 0;
	int	i, err;

	if (!idev)
		return -ENODEV;
	read_lock_bh(&idev->lock);
	for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
			break;
	}
	if (!pmc) {
		/* MCA not found??
 bug */
		read_unlock_bh(&idev->lock);
		return -ESRCH;
	}
	spin_lock_bh(&pmc->mca_lock);
	sf_markstate(pmc);
	if (!delta) {
		if (!pmc->mca_sfcount[sfmode]) {
			spin_unlock_bh(&pmc->mca_lock);
			read_unlock_bh(&idev->lock);
			return -EINVAL;
		}
		pmc->mca_sfcount[sfmode]--;
	}
	err = 0;
	for (i=0; i<sfcount; i++) {
		int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);

		changerec |= rv > 0;
		if (!err && rv < 0)
			err = rv;
	}
	if (pmc->mca_sfmode == MCAST_EXCLUDE &&
	    pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
	    pmc->mca_sfcount[MCAST_INCLUDE]) {
		struct ip6_sf_list *psf;

		/* filter mode change */
		pmc->mca_sfmode = MCAST_INCLUDE;
		pmc->mca_crcount = idev->mc_qrv;
		idev->mc_ifc_count = pmc->mca_crcount;
		for (psf=pmc->mca_sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		mld_ifc_event(pmc->idev);
	} else if (sf_setstate(pmc) || changerec)
		mld_ifc_event(pmc->idev);
	spin_unlock_bh(&pmc->mca_lock);
	read_unlock_bh(&idev->lock);
	return err;
}

/*
 * Add multicast single-source filter to the interface list
 */
static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
	const struct in6_addr *psfsrc)
{
	struct ip6_sf_list *psf, *psf_prev;

	psf_prev = NULL;
	for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
			break;
		psf_prev = psf;
	}
	if (!psf) {
		psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
		if (!psf)
			return -ENOBUFS;

		psf->sf_addr = *psfsrc;
		if (psf_prev) {
			psf_prev->sf_next = psf;
		} else
			pmc->mca_sources = psf;
	}
	psf->sf_count[sfmode]++;
	return 0;
}

/* Snapshot each source's active/inactive state into sf_oldin, so that
 * sf_setstate() can detect transitions after a filter update.
 */
static void sf_markstate(struct ifmcaddr6 *pmc)
{
	struct ip6_sf_list *psf;
	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];

	for (psf=pmc->mca_sources; psf; psf=psf->sf_next)
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			psf->sf_oldin = mca_xcount ==
				psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
}

/* Compare each source's new state with the sf_markstate() snapshot and
 * schedule retransmissions / tomb records for the ones that changed.
 * Returns the number of changed sources.
 */
static int sf_setstate(struct ifmcaddr6 *pmc)
{
	struct ip6_sf_list *psf, *dpsf;
	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
	int qrv = pmc->idev->mc_qrv;
	int new_in,
rv;

	rv = 0;
	for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
		if (new_in) {
			if (!psf->sf_oldin) {
				/* newly active: drop any stale tomb record */
				struct ip6_sf_list *prev = NULL;

				for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next) {
					if (ipv6_addr_equal(&dpsf->sf_addr,
					    &psf->sf_addr))
						break;
					prev = dpsf;
				}
				if (dpsf) {
					if (prev)
						prev->sf_next = dpsf->sf_next;
					else
						pmc->mca_tomb = dpsf->sf_next;
					kfree(dpsf);
				}
				psf->sf_crcount = qrv;
				rv++;
			}
		} else if (psf->sf_oldin) {
			psf->sf_crcount = 0;
			/*
			 * add or update "delete" records if an active filter
			 * is now inactive
			 */
			for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next)
				if (ipv6_addr_equal(&dpsf->sf_addr,
				    &psf->sf_addr))
					break;
			if (!dpsf) {
				dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
				if (!dpsf)
					continue;
				*dpsf = *psf;
				/* pmc->mca_lock held by callers */
				dpsf->sf_next = pmc->mca_tomb;
				pmc->mca_tomb = dpsf;
			}
			dpsf->sf_crcount = qrv;
			rv++;
		}
	}
	return rv;
}

/*
 * Add multicast source filter list to the interface list
 *
 * Add @sfcount sources in @psfsrc[] (filter mode @sfmode) to group
 * @pmca on @idev; rolls the additions back on partial failure and
 * triggers interface-change reports when the state changed.
 */
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta)
{
	struct ifmcaddr6 *pmc;
	int	isexclude;
	int	i, err;

	if (!idev)
		return -ENODEV;
	read_lock_bh(&idev->lock);
	for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
			break;
	}
	if (!pmc) {
		/* MCA not found??
 bug */
		read_unlock_bh(&idev->lock);
		return -ESRCH;
	}
	spin_lock_bh(&pmc->mca_lock);

	sf_markstate(pmc);
	isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
	if (!delta)
		pmc->mca_sfcount[sfmode]++;
	err = 0;
	for (i=0; i<sfcount; i++) {
		err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
		if (err)
			break;
	}
	if (err) {
		int j;

		/* undo the sources added so far */
		if (!delta)
			pmc->mca_sfcount[sfmode]--;
		for (j=0; j<i; j++)
			ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
	} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
		struct ip6_sf_list *psf;

		/* filter mode change */
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			pmc->mca_sfmode = MCAST_EXCLUDE;
		else if (pmc->mca_sfcount[MCAST_INCLUDE])
			pmc->mca_sfmode = MCAST_INCLUDE;
		/* else no filters; keep old mode for reports */

		pmc->mca_crcount = idev->mc_qrv;
		idev->mc_ifc_count = pmc->mca_crcount;
		for (psf=pmc->mca_sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		mld_ifc_event(idev);
	} else if (sf_setstate(pmc))
		mld_ifc_event(idev);
	spin_unlock_bh(&pmc->mca_lock);
	read_unlock_bh(&idev->lock);
	return err;
}

/* Free all source and tomb records of @pmc and reset it to the initial
 * (EXCLUDE, empty) filter state.
 */
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
{
	struct ip6_sf_list *psf, *nextpsf;

	for (psf=pmc->mca_tomb; psf; psf=nextpsf) {
		nextpsf = psf->sf_next;
		kfree(psf);
	}
	pmc->mca_tomb = NULL;
	for (psf=pmc->mca_sources; psf; psf=nextpsf) {
		nextpsf = psf->sf_next;
		kfree(psf);
	}
	pmc->mca_sources = NULL;
	pmc->mca_sfmode = MCAST_EXCLUDE;
	pmc->mca_sfcount[MCAST_INCLUDE] = 0;
	pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
}

/* Send the initial join report for @ma and schedule an unsolicited
 * retransmission within the unsolicited report interval.
 */
static void igmp6_join_group(struct ifmcaddr6 *ma)
{
	unsigned long delay;

	if (ma->mca_flags & MAF_NOREPORT)
		return;

	igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);

	delay = prandom_u32() % unsolicited_report_interval(ma->idev);

	spin_lock_bh(&ma->mca_lock);
	if (del_timer(&ma->mca_timer)) {
		atomic_dec(&ma->mca_refcnt);
		delay = ma->mca_timer.expires - jiffies;
	}

	if (!mod_timer(&ma->mca_timer, jiffies + delay))
		atomic_inc(&ma->mca_refcnt);
	ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
	spin_unlock_bh(&ma->mca_lock);
}

/* Remove a socket's source filter list for @iml from @idev and free it. */
static int ip6_mc_leave_src(struct
 sock *sk, struct ipv6_mc_socklist *iml,
			    struct inet6_dev *idev)
{
	int err;

	/* callers have the socket lock and a write lock on ipv6_sk_mc_lock,
	 * so no other readers or writers of iml or its sflist
	 */
	if (!iml->sflist) {
		/* any-source empty exclude case */
		return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
	}
	err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
		iml->sflist->sl_count, iml->sflist->sl_addr, 0);
	sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
	iml->sflist = NULL;
	return err;
}

/* Announce departure from group @ma: MLDv1 done message (if we were the
 * last reporter), or an MLDv2 tomb record plus interface-change event.
 */
static void igmp6_leave_group(struct ifmcaddr6 *ma)
{
	if (mld_in_v1_mode(ma->idev)) {
		if (ma->mca_flags & MAF_LAST_REPORTER)
			igmp6_send(&ma->mca_addr, ma->idev->dev,
				ICMPV6_MGM_REDUCTION);
	} else {
		mld_add_delrec(ma->idev, ma);
		mld_ifc_event(ma->idev);
	}
}

/* General-query timer: send a full current-state report. */
static void mld_gq_timer_expire(unsigned long data)
{
	struct inet6_dev *idev = (struct inet6_dev *)data;

	idev->mc_gq_running = 0;
	mld_send_report(idev, NULL);
	in6_dev_put(idev);
}

/* Interface-change timer: send state-change reports and rearm while
 * retransmissions remain.
 */
static void mld_ifc_timer_expire(unsigned long data)
{
	struct inet6_dev *idev = (struct inet6_dev *)data;

	mld_send_cr(idev);
	if (idev->mc_ifc_count) {
		idev->mc_ifc_count--;
		if (idev->mc_ifc_count)
			mld_ifc_start_timer(idev, idev->mc_maxdelay);
	}
	in6_dev_put(idev);
}

/* Kick off mc_qrv state-change report retransmissions (MLDv2 only). */
static void mld_ifc_event(struct inet6_dev *idev)
{
	if (mld_in_v1_mode(idev))
		return;
	idev->mc_ifc_count = idev->mc_qrv;
	mld_ifc_start_timer(idev, 1);
}


/* Per-group report timer: emit a (v1 or v2) report for @ma. */
static void igmp6_timer_handler(unsigned long data)
{
	struct ifmcaddr6 *ma = (struct ifmcaddr6 *) data;

	if (mld_in_v1_mode(ma->idev))
		igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
	else
		mld_send_report(ma->idev, ma);

	spin_lock(&ma->mca_lock);
	ma->mca_flags |=  MAF_LAST_REPORTER;
	ma->mca_flags &= ~MAF_TIMER_RUNNING;
	spin_unlock(&ma->mca_lock);
	ma_put(ma);
}

/* Device changing type */

void ipv6_mc_unmap(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Withdraw the whole multicast list (the device is changing type) */

	read_lock_bh(&idev->lock);
	for (i = idev->mc_list; i; i = i->next)
		igmp6_group_dropped(i);
	read_unlock_bh(&idev->lock);
}

/* Re-install all groups after a device type change. */
void ipv6_mc_remap(struct inet6_dev *idev)
{
	ipv6_mc_up(idev);
}

/* Device going down */

void ipv6_mc_down(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Withdraw multicast list */

	read_lock_bh(&idev->lock);
	mld_ifc_stop_timer(idev);
	mld_gq_stop_timer(idev);
	mld_dad_stop_timer(idev);

	for (i = idev->mc_list; i; i=i->next)
		igmp6_group_dropped(i);
	read_unlock_bh(&idev->lock);

	mld_clear_delrec(idev);
}

/* Device going up */

void ipv6_mc_up(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Install multicast list, except for all-nodes (already installed) */

	read_lock_bh(&idev->lock);
	for (i = idev->mc_list; i; i=i->next)
		igmp6_group_added(i);
	read_unlock_bh(&idev->lock);
}

/* IPv6 device initialization. */

void ipv6_mc_init_dev(struct inet6_dev *idev)
{
	write_lock_bh(&idev->lock);
	spin_lock_init(&idev->mc_lock);
	idev->mc_gq_running = 0;
	setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire,
			(unsigned long)idev);
	idev->mc_tomb = NULL;
	idev->mc_ifc_count = 0;
	setup_timer(&idev->mc_ifc_timer, mld_ifc_timer_expire,
			(unsigned long)idev);
	setup_timer(&idev->mc_dad_timer, mld_dad_timer_expire,
		    (unsigned long)idev);
	idev->mc_qrv = MLD_QRV_DEFAULT;
	idev->mc_qi = MLD_QI_DEFAULT;
	idev->mc_qri = MLD_QRI_DEFAULT;
	idev->mc_maxdelay = unsolicited_report_interval(idev);
	idev->mc_v1_seen = 0;
	write_unlock_bh(&idev->lock);
}

/*
 *	Device is about to be destroyed: clean up.
 */

void ipv6_mc_destroy_dev(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Deactivate timers */
	ipv6_mc_down(idev);

	/* Delete all-nodes address. */
	/* We cannot call ipv6_dev_mc_dec() directly, our caller in
	 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
	 * fail.
*/ __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes); if (idev->cnf.forwarding) __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters); write_lock_bh(&idev->lock); while ((i = idev->mc_list) != NULL) { idev->mc_list = i->next; write_unlock_bh(&idev->lock); igmp6_group_dropped(i); ma_put(i); write_lock_bh(&idev->lock); } write_unlock_bh(&idev->lock); } #ifdef CONFIG_PROC_FS struct igmp6_mc_iter_state { struct seq_net_private p; struct net_device *dev; struct inet6_dev *idev; }; #define igmp6_mc_seq_private(seq) ((struct igmp6_mc_iter_state *)(seq)->private) static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq) { struct ifmcaddr6 *im = NULL; struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); struct net *net = seq_file_net(seq); state->idev = NULL; for_each_netdev_rcu(net, state->dev) { struct inet6_dev *idev; idev = __in6_dev_get(state->dev); if (!idev) continue; read_lock_bh(&idev->lock); im = idev->mc_list; if (im) { state->idev = idev; break; } read_unlock_bh(&idev->lock); } return im; } static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im) { struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); im = im->next; while (!im) { if (likely(state->idev != NULL)) read_unlock_bh(&state->idev->lock); state->dev = next_net_device_rcu(state->dev); if (!state->dev) { state->idev = NULL; break; } state->idev = __in6_dev_get(state->dev); if (!state->idev) continue; read_lock_bh(&state->idev->lock); im = state->idev->mc_list; } return im; } static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos) { struct ifmcaddr6 *im = igmp6_mc_get_first(seq); if (im) while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL) --pos; return pos ? 
NULL : im; } static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { rcu_read_lock(); return igmp6_mc_get_idx(seq, *pos); } static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v); ++*pos; return im; } static void igmp6_mc_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); if (likely(state->idev != NULL)) { read_unlock_bh(&state->idev->lock); state->idev = NULL; } state->dev = NULL; rcu_read_unlock(); } static int igmp6_mc_seq_show(struct seq_file *seq, void *v) { struct ifmcaddr6 *im = (struct ifmcaddr6 *)v; struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); seq_printf(seq, "%-4d %-15s %pi6 %5d %08X %ld\n", state->dev->ifindex, state->dev->name, &im->mca_addr, im->mca_users, im->mca_flags, (im->mca_flags&MAF_TIMER_RUNNING) ? jiffies_to_clock_t(im->mca_timer.expires-jiffies) : 0); return 0; } static const struct seq_operations igmp6_mc_seq_ops = { .start = igmp6_mc_seq_start, .next = igmp6_mc_seq_next, .stop = igmp6_mc_seq_stop, .show = igmp6_mc_seq_show, }; static int igmp6_mc_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &igmp6_mc_seq_ops, sizeof(struct igmp6_mc_iter_state)); } static const struct file_operations igmp6_mc_seq_fops = { .owner = THIS_MODULE, .open = igmp6_mc_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; struct igmp6_mcf_iter_state { struct seq_net_private p; struct net_device *dev; struct inet6_dev *idev; struct ifmcaddr6 *im; }; #define igmp6_mcf_seq_private(seq) ((struct igmp6_mcf_iter_state *)(seq)->private) static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq) { struct ip6_sf_list *psf = NULL; struct ifmcaddr6 *im = NULL; struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); struct net *net = seq_file_net(seq); state->idev = NULL; state->im = NULL; 
for_each_netdev_rcu(net, state->dev) { struct inet6_dev *idev; idev = __in6_dev_get(state->dev); if (unlikely(idev == NULL)) continue; read_lock_bh(&idev->lock); im = idev->mc_list; if (likely(im != NULL)) { spin_lock_bh(&im->mca_lock); psf = im->mca_sources; if (likely(psf != NULL)) { state->im = im; state->idev = idev; break; } spin_unlock_bh(&im->mca_lock); } read_unlock_bh(&idev->lock); } return psf; } static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf) { struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); psf = psf->sf_next; while (!psf) { spin_unlock_bh(&state->im->mca_lock); state->im = state->im->next; while (!state->im) { if (likely(state->idev != NULL)) read_unlock_bh(&state->idev->lock); state->dev = next_net_device_rcu(state->dev); if (!state->dev) { state->idev = NULL; goto out; } state->idev = __in6_dev_get(state->dev); if (!state->idev) continue; read_lock_bh(&state->idev->lock); state->im = state->idev->mc_list; } if (!state->im) break; spin_lock_bh(&state->im->mca_lock); psf = state->im->mca_sources; } out: return psf; } static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos) { struct ip6_sf_list *psf = igmp6_mcf_get_first(seq); if (psf) while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL) --pos; return pos ? NULL : psf; } static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { rcu_read_lock(); return *pos ? 
igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; } static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ip6_sf_list *psf; if (v == SEQ_START_TOKEN) psf = igmp6_mcf_get_first(seq); else psf = igmp6_mcf_get_next(seq, v); ++*pos; return psf; } static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); if (likely(state->im != NULL)) { spin_unlock_bh(&state->im->mca_lock); state->im = NULL; } if (likely(state->idev != NULL)) { read_unlock_bh(&state->idev->lock); state->idev = NULL; } state->dev = NULL; rcu_read_unlock(); } static int igmp6_mcf_seq_show(struct seq_file *seq, void *v) { struct ip6_sf_list *psf = (struct ip6_sf_list *)v; struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); if (v == SEQ_START_TOKEN) { seq_printf(seq, "%3s %6s " "%32s %32s %6s %6s\n", "Idx", "Device", "Multicast Address", "Source Address", "INC", "EXC"); } else { seq_printf(seq, "%3d %6.6s %pi6 %pi6 %6lu %6lu\n", state->dev->ifindex, state->dev->name, &state->im->mca_addr, &psf->sf_addr, psf->sf_count[MCAST_INCLUDE], psf->sf_count[MCAST_EXCLUDE]); } return 0; } static const struct seq_operations igmp6_mcf_seq_ops = { .start = igmp6_mcf_seq_start, .next = igmp6_mcf_seq_next, .stop = igmp6_mcf_seq_stop, .show = igmp6_mcf_seq_show, }; static int igmp6_mcf_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &igmp6_mcf_seq_ops, sizeof(struct igmp6_mcf_iter_state)); } static const struct file_operations igmp6_mcf_seq_fops = { .owner = THIS_MODULE, .open = igmp6_mcf_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; static int __net_init igmp6_proc_init(struct net *net) { int err; err = -ENOMEM; if (!proc_create("igmp6", S_IRUGO, net->proc_net, &igmp6_mc_seq_fops)) goto out; if (!proc_create("mcfilter6", S_IRUGO, net->proc_net, &igmp6_mcf_seq_fops)) goto out_proc_net_igmp6; err = 0; out: return err; 
out_proc_net_igmp6: remove_proc_entry("igmp6", net->proc_net); goto out; } static void __net_exit igmp6_proc_exit(struct net *net) { remove_proc_entry("mcfilter6", net->proc_net); remove_proc_entry("igmp6", net->proc_net); } #else static inline int igmp6_proc_init(struct net *net) { return 0; } static inline void igmp6_proc_exit(struct net *net) { } #endif static int __net_init igmp6_net_init(struct net *net) { int err; err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, net); if (err < 0) { pr_err("Failed to initialize the IGMP6 control socket (err %d)\n", err); goto out; } inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1; err = igmp6_proc_init(net); if (err) goto out_sock_create; out: return err; out_sock_create: inet_ctl_sock_destroy(net->ipv6.igmp_sk); goto out; } static void __net_exit igmp6_net_exit(struct net *net) { inet_ctl_sock_destroy(net->ipv6.igmp_sk); igmp6_proc_exit(net); } static struct pernet_operations igmp6_net_ops = { .init = igmp6_net_init, .exit = igmp6_net_exit, }; int __init igmp6_init(void) { return register_pernet_subsys(&igmp6_net_ops); } void igmp6_cleanup(void) { unregister_pernet_subsys(&igmp6_net_ops); }
gpl-2.0
onetechgenius/XBMCast2TV
lib/enca/lib/lang_lv.c
71
3313
/* @(#) $Id: lang_lv.c,v 1.13 2005/12/01 10:08:53 yeti Exp $ encoding data and routines dependent on language; latvian Copyright (C) 2003 David Necas (Yeti) <yeti@physics.muni.cz> This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ #ifdef HAVE_CONFIG_H # include "config.h" #endif /* HAVE_CONFIG_H */ #include "enca.h" #include "internal.h" #include "data/latvian/latvian.h" /* Local prototypes. */ static int hook(EncaAnalyserState *analyser); static int hook_iso13win(EncaAnalyserState *analyser); static int hook_baltwin(EncaAnalyserState *analyser); /** * ENCA_LANGUAGE_LV: * * Latvian language. * * Everything the world out there needs to know about this language. **/ const EncaLanguageInfo ENCA_LANGUAGE_LV = { "lv", "latvian", NCHARSETS, CHARSET_NAMES, CHARSET_WEIGHTS, SIGNIFICANT, CHARSET_LETTERS, CHARSET_PAIRS, WEIGHT_SUM, &hook, NULL, NULL, NULL }; /** * hook: * @analyser: Analyser state whose charset ratings are to be modified. * * Launches language specific hooks for language "lv". * * Returns: Nonzero if charset ratigns have been actually modified, zero * otherwise. **/ static int hook(EncaAnalyserState *analyser) { int chg = 0; /* we may want to run both, and in this order */ chg += hook_baltwin(analyser); chg += hook_iso13win(analyser); return chg; } /** * hook_baltwin: * @analyser: Analyser state whose charset ratings are to be modified. 
* * Decides between cp1257, iso8859-13 and baltic charsets for language "lv". * * Returns: Nonzero if charset ratigns have been actually modified, zero * otherwise. **/ static int hook_baltwin(EncaAnalyserState *analyser) { static const unsigned char list_cp1257[] = { 0xe7, 0xf0, 0xf2, 0xfe, 0xd0, 0xde }; static const unsigned char list_iso885913[] = { 0xe7, 0xf0, 0xf2, 0xfe, 0xd0, 0xde }; static const unsigned char list_baltic[] = { 0xec, 0xf9, 0xf1, 0xea, 0xd9, 0xcc }; static EncaLanguageHookData1CS hookdata[] = { MAKE_HOOK_LINE(cp1257), MAKE_HOOK_LINE(iso885913), MAKE_HOOK_LINE(baltic), }; return enca_language_hook_ncs(analyser, ELEMENTS(hookdata), hookdata); } /** * hook_iso13win: * @analyser: Analyser state whose charset ratings are to be modified. * * Decides between iso8859-13 and cp1257 charsets for language "lv". * * Returns: Nonzero if charset ratigns have been actually modified, zero * otherwise. **/ static int hook_iso13win(EncaAnalyserState *analyser) { static EncaLanguageHookDataEOL hookdata[] = { { "iso885913", ENCA_SURFACE_EOL_LF, (size_t)-1 }, { "cp1257", ENCA_SURFACE_MASK_EOL, (size_t)-1 }, }; return enca_language_hook_eol(analyser, ELEMENTS(hookdata), hookdata); } /* vim: ts=2 */
gpl-2.0
shakalaca/ASUS_ZenFone_A450CG
external/valgrind/main/none/tests/amd64/amd64locked.c
71
30313
/* Head of a regression test for LOCK-prefixed amd64 instructions.
   This part provides a CRC32 checksum of the test output (so expected
   results stay small), the table of 64-bit test values, and macro
   generators that stamp out one exhaustive driver function per
   "lock OP reg,mem" and "lock OP imm,mem" instruction form. */

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

/* Nonzero: print every generated result line, not just the checksums. */
#define VERBOSE 0

/* Local fixed-meaning scalar type names (valgrind house style). */
typedef  unsigned int            UInt;
typedef  unsigned char           UChar;
typedef  unsigned long long int  ULong;
typedef  signed long long int    Long;
typedef  signed int              Int;
typedef  unsigned short          UShort;
typedef  unsigned long           UWord;
typedef  char                    HChar;

/////////////////////////////////////////////////////////////////
//                    BEGIN crc32 stuff                        //
/////////////////////////////////////////////////////////////////

/* Table-driven CRC32 (MSB-first variant; see UPDATE_CRC below). */
static const UInt crc32Table[256] = {
   /*-- Ugly, innit? --*/
   0x00000000L, 0x04c11db7L, 0x09823b6eL, 0x0d4326d9L,
   0x130476dcL, 0x17c56b6bL, 0x1a864db2L, 0x1e475005L,
   0x2608edb8L, 0x22c9f00fL, 0x2f8ad6d6L, 0x2b4bcb61L,
   0x350c9b64L, 0x31cd86d3L, 0x3c8ea00aL, 0x384fbdbdL,
   0x4c11db70L, 0x48d0c6c7L, 0x4593e01eL, 0x4152fda9L,
   0x5f15adacL, 0x5bd4b01bL, 0x569796c2L, 0x52568b75L,
   0x6a1936c8L, 0x6ed82b7fL, 0x639b0da6L, 0x675a1011L,
   0x791d4014L, 0x7ddc5da3L, 0x709f7b7aL, 0x745e66cdL,
   0x9823b6e0L, 0x9ce2ab57L, 0x91a18d8eL, 0x95609039L,
   0x8b27c03cL, 0x8fe6dd8bL, 0x82a5fb52L, 0x8664e6e5L,
   0xbe2b5b58L, 0xbaea46efL, 0xb7a96036L, 0xb3687d81L,
   0xad2f2d84L, 0xa9ee3033L, 0xa4ad16eaL, 0xa06c0b5dL,
   0xd4326d90L, 0xd0f37027L, 0xddb056feL, 0xd9714b49L,
   0xc7361b4cL, 0xc3f706fbL, 0xceb42022L, 0xca753d95L,
   0xf23a8028L, 0xf6fb9d9fL, 0xfbb8bb46L, 0xff79a6f1L,
   0xe13ef6f4L, 0xe5ffeb43L, 0xe8bccd9aL, 0xec7dd02dL,
   0x34867077L, 0x30476dc0L, 0x3d044b19L, 0x39c556aeL,
   0x278206abL, 0x23431b1cL, 0x2e003dc5L, 0x2ac12072L,
   0x128e9dcfL, 0x164f8078L, 0x1b0ca6a1L, 0x1fcdbb16L,
   0x018aeb13L, 0x054bf6a4L, 0x0808d07dL, 0x0cc9cdcaL,
   0x7897ab07L, 0x7c56b6b0L, 0x71159069L, 0x75d48ddeL,
   0x6b93dddbL, 0x6f52c06cL, 0x6211e6b5L, 0x66d0fb02L,
   0x5e9f46bfL, 0x5a5e5b08L, 0x571d7dd1L, 0x53dc6066L,
   0x4d9b3063L, 0x495a2dd4L, 0x44190b0dL, 0x40d816baL,
   0xaca5c697L, 0xa864db20L, 0xa527fdf9L, 0xa1e6e04eL,
   0xbfa1b04bL, 0xbb60adfcL, 0xb6238b25L, 0xb2e29692L,
   0x8aad2b2fL, 0x8e6c3698L, 0x832f1041L, 0x87ee0df6L,
   0x99a95df3L, 0x9d684044L, 0x902b669dL, 0x94ea7b2aL,
   0xe0b41de7L, 0xe4750050L, 0xe9362689L, 0xedf73b3eL,
   0xf3b06b3bL, 0xf771768cL, 0xfa325055L, 0xfef34de2L,
   0xc6bcf05fL, 0xc27dede8L, 0xcf3ecb31L, 0xcbffd686L,
   0xd5b88683L, 0xd1799b34L, 0xdc3abdedL, 0xd8fba05aL,
   0x690ce0eeL, 0x6dcdfd59L, 0x608edb80L, 0x644fc637L,
   0x7a089632L, 0x7ec98b85L, 0x738aad5cL, 0x774bb0ebL,
   0x4f040d56L, 0x4bc510e1L, 0x46863638L, 0x42472b8fL,
   0x5c007b8aL, 0x58c1663dL, 0x558240e4L, 0x51435d53L,
   0x251d3b9eL, 0x21dc2629L, 0x2c9f00f0L, 0x285e1d47L,
   0x36194d42L, 0x32d850f5L, 0x3f9b762cL, 0x3b5a6b9bL,
   0x0315d626L, 0x07d4cb91L, 0x0a97ed48L, 0x0e56f0ffL,
   0x1011a0faL, 0x14d0bd4dL, 0x19939b94L, 0x1d528623L,
   0xf12f560eL, 0xf5ee4bb9L, 0xf8ad6d60L, 0xfc6c70d7L,
   0xe22b20d2L, 0xe6ea3d65L, 0xeba91bbcL, 0xef68060bL,
   0xd727bbb6L, 0xd3e6a601L, 0xdea580d8L, 0xda649d6fL,
   0xc423cd6aL, 0xc0e2d0ddL, 0xcda1f604L, 0xc960ebb3L,
   0xbd3e8d7eL, 0xb9ff90c9L, 0xb4bcb610L, 0xb07daba7L,
   0xae3afba2L, 0xaafbe615L, 0xa7b8c0ccL, 0xa379dd7bL,
   0x9b3660c6L, 0x9ff77d71L, 0x92b45ba8L, 0x9675461fL,
   0x8832161aL, 0x8cf30badL, 0x81b02d74L, 0x857130c3L,
   0x5d8a9099L, 0x594b8d2eL, 0x5408abf7L, 0x50c9b640L,
   0x4e8ee645L, 0x4a4ffbf2L, 0x470cdd2bL, 0x43cdc09cL,
   0x7b827d21L, 0x7f436096L, 0x7200464fL, 0x76c15bf8L,
   0x68860bfdL, 0x6c47164aL, 0x61043093L, 0x65c52d24L,
   0x119b4be9L, 0x155a565eL, 0x18197087L, 0x1cd86d30L,
   0x029f3d35L, 0x065e2082L, 0x0b1d065bL, 0x0fdc1becL,
   0x3793a651L, 0x3352bbe6L, 0x3e119d3fL, 0x3ad08088L,
   0x2497d08dL, 0x2056cd3aL, 0x2d15ebe3L, 0x29d4f654L,
   0xc5a92679L, 0xc1683bceL, 0xcc2b1d17L, 0xc8ea00a0L,
   0xd6ad50a5L, 0xd26c4d12L, 0xdf2f6bcbL, 0xdbee767cL,
   0xe3a1cbc1L, 0xe760d676L, 0xea23f0afL, 0xeee2ed18L,
   0xf0a5bd1dL, 0xf464a0aaL, 0xf9278673L, 0xfde69bc4L,
   0x89b8fd09L, 0x8d79e0beL, 0x803ac667L, 0x84fbdbd0L,
   0x9abc8bd5L, 0x9e7d9662L, 0x933eb0bbL, 0x97ffad0cL,
   0xafb010b1L, 0xab710d06L, 0xa6322bdfL, 0xa2f33668L,
   0xbcb4666dL, 0xb8757bdaL, 0xb5365d03L, 0xb1f740b4L
};

/* Fold one byte into the running CRC (top-byte-first variant). */
#define UPDATE_CRC(crcVar,cha)                         \
   {                                                   \
      crcVar = (crcVar << 8) ^                         \
               crc32Table[(crcVar >> 24) ^             \
                          ((UChar)cha)];               \
   }

/* CRC nBytes bytes starting at 'bytes' into crcIn, returning the new CRC.
   The 4-at-a-time first loop is just a mild unroll. */
static UInt crcBytes ( UChar* bytes, UWord nBytes, UInt crcIn )
{
   UInt crc = crcIn;
   while (nBytes >= 4) {
      UPDATE_CRC(crc, bytes[0]);
      UPDATE_CRC(crc, bytes[1]);
      UPDATE_CRC(crc, bytes[2]);
      UPDATE_CRC(crc, bytes[3]);
      bytes += 4;
      nBytes -= 4;
   }
   while (nBytes >= 1) {
      UPDATE_CRC(crc, bytes[0]);
      bytes += 1;
      nBytes -= 1;
   }
   return crc;
}

/* Final inversion step of the CRC computation. */
static UInt crcFinalise ( UInt crc ) { return ~crc; }

////////

/* Running checksum of all output produced so far, and the staging
   buffer each test formats its result line into before send(). */
static UInt theCRC = 0xFFFFFFFF;

static HChar outBuf[1024];

// take output that's in outBuf, length as specified, and
// update the running crc.
static void send ( int nbytes )
{
   assert( ((unsigned int)nbytes) < sizeof(outBuf)-1);
   assert(outBuf[nbytes] == 0);
   theCRC = crcBytes( (UChar*)&outBuf[0], nbytes, theCRC );
   if (VERBOSE) printf("SEND %08x %s", theCRC, outBuf);
}

/////////////////////////////////////////////////////////////////
//                     END crc32 stuff                         //
/////////////////////////////////////////////////////////////////

/* Operand values fed to every tested instruction: interesting points
   around sign/carry boundaries at each operand width. */
#if 0
// full version
#define NVALS 76
static ULong val[NVALS] = {
   0x00ULL, 0x01ULL, 0x02ULL, 0x03ULL,
   0x3FULL, 0x40ULL, 0x41ULL,
   0x7EULL, 0x7FULL, 0x80ULL, 0x81ULL, 0x82ULL,
   0xBFULL, 0xC0ULL, 0xC1ULL,
   0xFCULL, 0xFDULL, 0xFEULL, 0xFFULL,
   0xFF00ULL, 0xFF01ULL, 0xFF02ULL, 0xFF03ULL,
   0xFF3FULL, 0xFF40ULL, 0xFF41ULL,
   0xFF7EULL, 0xFF7FULL, 0xFF80ULL, 0xFF81ULL, 0xFF82ULL,
   0xFFBFULL, 0xFFC0ULL, 0xFFC1ULL,
   0xFFFCULL, 0xFFFDULL, 0xFFFEULL, 0xFFFFULL,
   0xFFFFFF00ULL, 0xFFFFFF01ULL, 0xFFFFFF02ULL, 0xFFFFFF03ULL,
   0xFFFFFF3FULL, 0xFFFFFF40ULL, 0xFFFFFF41ULL,
   0xFFFFFF7EULL, 0xFFFFFF7FULL, 0xFFFFFF80ULL, 0xFFFFFF81ULL,
   0xFFFFFF82ULL,
   0xFFFFFFBFULL, 0xFFFFFFC0ULL, 0xFFFFFFC1ULL,
   0xFFFFFFFCULL, 0xFFFFFFFDULL, 0xFFFFFFFEULL, 0xFFFFFFFFULL,
   0xFFFFFFFFFFFFFF00ULL, 0xFFFFFFFFFFFFFF01ULL,
   0xFFFFFFFFFFFFFF02ULL, 0xFFFFFFFFFFFFFF03ULL,
   0xFFFFFFFFFFFFFF3FULL, 0xFFFFFFFFFFFFFF40ULL,
   0xFFFFFFFFFFFFFF41ULL,
   0xFFFFFFFFFFFFFF7EULL, 0xFFFFFFFFFFFFFF7FULL,
   0xFFFFFFFFFFFFFF80ULL, 0xFFFFFFFFFFFFFF81ULL,
   0xFFFFFFFFFFFFFF82ULL,
   0xFFFFFFFFFFFFFFBFULL, 0xFFFFFFFFFFFFFFC0ULL,
   0xFFFFFFFFFFFFFFC1ULL,
   0xFFFFFFFFFFFFFFFCULL, 0xFFFFFFFFFFFFFFFDULL,
   0xFFFFFFFFFFFFFFFEULL, 0xFFFFFFFFFFFFFFFFULL
};
#else
// shortened version, for use as valgrind regtest
#define NVALS 36
static ULong val[NVALS] = {
   0x00ULL, 0x01ULL, 0x3FULL, 0x40ULL,
   0x7FULL, 0x80ULL, 0xBFULL, 0xC0ULL, 0xFFULL,
   0xFF00ULL, 0xFF01ULL, 0xFF3FULL, 0xFF40ULL,
   0xFF7FULL, 0xFF80ULL, 0xFFBFULL, 0xFFC0ULL, 0xFFFFULL,
   0xFFFFFF00ULL, 0xFFFFFF01ULL, 0xFFFFFF3FULL, 0xFFFFFF40ULL,
   0xFFFFFF7EULL, 0xFFFFFF7FULL, 0xFFFFFFBFULL, 0xFFFFFFC0ULL,
   0xFFFFFFFFULL,
   0xFFFFFFFFFFFFFF00ULL, 0xFFFFFFFFFFFFFF01ULL,
   0xFFFFFFFFFFFFFF3FULL, 0xFFFFFFFFFFFFFF40ULL,
   0xFFFFFFFFFFFFFF7FULL, 0xFFFFFFFFFFFFFF80ULL,
   0xFFFFFFFFFFFFFFBFULL, 0xFFFFFFFFFFFFFFC0ULL,
   0xFFFFFFFFFFFFFFFFULL
};
#endif

/////////////////////////////////////

/* amd64 RFLAGS condition-code bit positions (carry, parity, adjust,
   zero, sign, overflow), and a mask selecting just those bits. */
#define CC_C    0x0001
#define CC_P    0x0004
#define CC_A    0x0010
#define CC_Z    0x0040
#define CC_S    0x0080
#define CC_O    0x0800

#define CC_MASK (CC_C | CC_P | CC_A | CC_Z | CC_S | CC_O)

/* Generate a driver do_locked_G_E_<op> exercising "lock <op> reg,(mem)".
   For every pair of test values and every combination of input flag
   bits, it loads the flags via popfq, runs the locked instruction, and
   checksums the resulting memory value and output flags via send(). */
#define GEN_do_locked_G_E(_name,_eax)                              \
                                                                   \
__attribute__((noinline)) void do_locked_G_E_##_name ( void )      \
{                                                                  \
   volatile Long e_val, g_val, e_val_before;                       \
   Long o, s, z, a, c, p, v1, v2, flags_in;                        \
   Long block[4];                                                  \
                                                                   \
   for (v1 = 0; v1 < NVALS; v1++) {                                \
   for (v2 = 0; v2 < NVALS; v2++) {                                \
                                                                   \
   for (o = 0; o < 2; o++) {                                       \
   for (s = 0; s < 2; s++) {                                       \
   for (z = 0; z < 2; z++) {                                       \
   for (a = 0; a < 2; a++) {                                       \
   for (c = 0; c < 2; c++) {                                       \
   for (p = 0; p < 2; p++) {                                       \
                                                                   \
   flags_in = (o ? CC_O : 0)                                       \
            | (s ? CC_S : 0)                                       \
            | (z ? CC_Z : 0)                                       \
            | (a ? CC_A : 0)                                       \
            | (c ? CC_C : 0)                                       \
            | (p ? CC_P : 0);                                      \
                                                                   \
   g_val        = val[v1];                                         \
   e_val        = val[v2];                                         \
   e_val_before = e_val;                                           \
                                                                   \
   block[0] = flags_in;                                            \
   block[1] = g_val;                                               \
   block[2] = (long)&e_val;                                        \
   block[3] = 0;                                                   \
   __asm__ __volatile__(                                           \
      "movq 0(%0), %%rax\n\t"                                      \
      "pushq %%rax\n\t"                                            \
      "popfq\n\t"                                                  \
      "movq 8(%0), %%rax\n\t"                                      \
      "movq 16(%0), %%rbx\n\t"                                     \
      "lock; " #_name " %%" #_eax ",(%%rbx)\n\t"                   \
      "pushfq\n\t"                                                 \
      "popq %%rax\n\t"                                             \
      "movq %%rax, 24(%0)\n\t"                                     \
      : : "r"(&block[0]) : "rax","rbx","cc","memory"               \
   );                                                              \
                                                                   \
   send(                                                           \
      sprintf(outBuf,                                              \
        "%s G=%016llx E=%016llx CCIN=%08llx -> E=%016llx CCOUT=%08llx\n", \
        #_name, g_val, e_val_before, flags_in,                     \
        e_val, block[3] & CC_MASK));                               \
                                                                   \
   }}}}}}                                                          \
                                                                   \
   }}                                                              \
}

/* One driver per width of each lockable reg->mem ALU op. */
GEN_do_locked_G_E(addb,al)
GEN_do_locked_G_E(addw,ax)
GEN_do_locked_G_E(addl,eax)
GEN_do_locked_G_E(addq,rax)

GEN_do_locked_G_E(orb, al)
GEN_do_locked_G_E(orw, ax)
GEN_do_locked_G_E(orl, eax)
GEN_do_locked_G_E(orq, rax)

GEN_do_locked_G_E(adcb,al)
GEN_do_locked_G_E(adcw,ax)
GEN_do_locked_G_E(adcl,eax)
GEN_do_locked_G_E(adcq,rax)

GEN_do_locked_G_E(sbbb,al)
GEN_do_locked_G_E(sbbw,ax)
GEN_do_locked_G_E(sbbl,eax)
GEN_do_locked_G_E(sbbq,rax)

GEN_do_locked_G_E(andb,al)
GEN_do_locked_G_E(andw,ax)
GEN_do_locked_G_E(andl,eax)
GEN_do_locked_G_E(andq,rax)

GEN_do_locked_G_E(subb,al)
GEN_do_locked_G_E(subw,ax)
GEN_do_locked_G_E(subl,eax)
GEN_do_locked_G_E(subq,rax)

GEN_do_locked_G_E(xorb,al)
GEN_do_locked_G_E(xorw,ax)
GEN_do_locked_G_E(xorl,eax)
GEN_do_locked_G_E(xorq,rax)

/* Same as GEN_do_locked_G_E, but for the immediate->memory form
   "lock <op> $imm,(mem)"; only one value loop is needed. */
#define GEN_do_locked_imm_E(_name,_eax,_imm)                       \
                                                                   \
__attribute__((noinline)) void do_locked_imm_E_##_name##_##_imm ( void ) \
{                                                                  \
   volatile Long e_val, e_val_before;                              \
   Long o, s, z, a, c, p, v2, flags_in;                            \
   Long block[3];                                                  \
                                                                   \
   for (v2 = 0; v2 < NVALS; v2++) {                                \
                                                                   \
   for (o = 0; o < 2; o++) {                                       \
   for (s = 0; s < 2; s++) {                                       \
   for (z = 0; z < 2; z++) {                                       \
   for (a = 0; a < 2; a++) {                                       \
   for (c = 0; c < 2; c++) {                                       \
   for (p = 0; p < 2; p++) {                                       \
                                                                   \
   flags_in = (o ? CC_O : 0)                                       \
            | (s ? CC_S : 0)                                       \
            | (z ? CC_Z : 0)                                       \
            | (a ? CC_A : 0)                                       \
            | (c ? CC_C : 0)                                       \
            | (p ? CC_P : 0);                                      \
                                                                   \
   e_val        = val[v2];                                         \
   e_val_before = e_val;                                           \
                                                                   \
   block[0] = flags_in;                                            \
   block[1] = (long)&e_val;                                        \
   block[2] = 0;                                                   \
   __asm__ __volatile__(                                           \
      "movq 0(%0), %%rax\n\t"                                      \
      "pushq %%rax\n\t"                                            \
      "popfq\n\t"                                                  \
      "movq 8(%0), %%rbx\n\t"                                      \
      "lock; " #_name " $" #_imm ",(%%rbx)\n\t"                    \
      "pushfq\n\t"                                                 \
      "popq %%rax\n\t"                                             \
      "movq %%rax, 16(%0)\n\t"                                     \
      : : "r"(&block[0]) : "rax","rbx","cc","memory"               \
   );                                                              \
                                                                   \
   send(                                                           \
      sprintf(outBuf,                                              \
        "%s I=%s E=%016llx CCIN=%08llx -> E=%016llx CCOUT=%08llx\n", \
        #_name, #_imm, e_val_before, flags_in,                     \
        e_val, block[2] & CC_MASK));                               \
                                                                   \
   }}}}}}                                                          \
                                                                   \
   }                                                               \
}

/* For each op/width: one small immediate and one immediate that does
   not fit in a sign-extended byte at that width. */
GEN_do_locked_imm_E(addb,al,0x7F)
GEN_do_locked_imm_E(addb,al,0xF1)
GEN_do_locked_imm_E(addw,ax,0x7E)
GEN_do_locked_imm_E(addw,ax,0x9325)
GEN_do_locked_imm_E(addl,eax,0x7D)
GEN_do_locked_imm_E(addl,eax,0x31415927)
GEN_do_locked_imm_E(addq,rax,0x7D)
GEN_do_locked_imm_E(addq,rax,0x31415927)

GEN_do_locked_imm_E(orb,al,0x7F)
GEN_do_locked_imm_E(orb,al,0xF1)
GEN_do_locked_imm_E(orw,ax,0x7E)
GEN_do_locked_imm_E(orw,ax,0x9325)
GEN_do_locked_imm_E(orl,eax,0x7D)
GEN_do_locked_imm_E(orl,eax,0x31415927)
GEN_do_locked_imm_E(orq,rax,0x7D)
GEN_do_locked_imm_E(orq,rax,0x31415927)

GEN_do_locked_imm_E(adcb,al,0x7F)
GEN_do_locked_imm_E(adcb,al,0xF1)
GEN_do_locked_imm_E(adcw,ax,0x7E)
GEN_do_locked_imm_E(adcw,ax,0x9325)
GEN_do_locked_imm_E(adcl,eax,0x7D)
GEN_do_locked_imm_E(adcl,eax,0x31415927)
GEN_do_locked_imm_E(adcq,rax,0x7D)
GEN_do_locked_imm_E(adcq,rax,0x31415927)

GEN_do_locked_imm_E(sbbb,al,0x7F)
GEN_do_locked_imm_E(sbbb,al,0xF1)
GEN_do_locked_imm_E(sbbw,ax,0x7E)
GEN_do_locked_imm_E(sbbw,ax,0x9325)
GEN_do_locked_imm_E(sbbl,eax,0x7D)
GEN_do_locked_imm_E(sbbl,eax,0x31415927)
GEN_do_locked_imm_E(sbbq,rax,0x7D)
GEN_do_locked_imm_E(sbbq,rax,0x31415927)

GEN_do_locked_imm_E(andb,al,0x7F)
GEN_do_locked_imm_E(andb,al,0xF1)
GEN_do_locked_imm_E(andw,ax,0x7E)
GEN_do_locked_imm_E(andw,ax,0x9325)
GEN_do_locked_imm_E(andl,eax,0x7D)
GEN_do_locked_imm_E(andl,eax,0x31415927)
GEN_do_locked_imm_E(andq,rax,0x7D)
GEN_do_locked_imm_E(andq,rax,0x31415927) GEN_do_locked_imm_E(subb,al,0x7F) GEN_do_locked_imm_E(subb,al,0xF1) GEN_do_locked_imm_E(subw,ax,0x7E) GEN_do_locked_imm_E(subw,ax,0x9325) GEN_do_locked_imm_E(subl,eax,0x7D) GEN_do_locked_imm_E(subl,eax,0x31415927) GEN_do_locked_imm_E(subq,rax,0x7D) GEN_do_locked_imm_E(subq,rax,0x31415927) GEN_do_locked_imm_E(xorb,al,0x7F) GEN_do_locked_imm_E(xorb,al,0xF1) GEN_do_locked_imm_E(xorw,ax,0x7E) GEN_do_locked_imm_E(xorw,ax,0x9325) GEN_do_locked_imm_E(xorl,eax,0x7D) GEN_do_locked_imm_E(xorl,eax,0x31415927) GEN_do_locked_imm_E(xorq,rax,0x7D) GEN_do_locked_imm_E(xorq,rax,0x31415927) #define GEN_do_locked_unary_E(_name,_eax) \ \ __attribute__((noinline)) void do_locked_unary_E_##_name ( void ) \ { \ volatile Long e_val, e_val_before; \ Long o, s, z, a, c, p, v2, flags_in; \ Long block[3]; \ \ for (v2 = 0; v2 < NVALS; v2++) { \ \ for (o = 0; o < 2; o++) { \ for (s = 0; s < 2; s++) { \ for (z = 0; z < 2; z++) { \ for (a = 0; a < 2; a++) { \ for (c = 0; c < 2; c++) { \ for (p = 0; p < 2; p++) { \ \ flags_in = (o ? CC_O : 0) \ | (s ? CC_S : 0) \ | (z ? CC_Z : 0) \ | (a ? CC_A : 0) \ | (c ? CC_C : 0) \ | (p ? 
CC_P : 0); \ \ e_val = val[v2]; \ e_val_before = e_val; \ \ block[0] = flags_in; \ block[1] = (long)&e_val; \ block[2] = 0; \ __asm__ __volatile__( \ "movq 0(%0), %%rax\n\t" \ "pushq %%rax\n\t" \ "popfq\n\t" \ "movq 8(%0), %%rbx\n\t" \ "lock; " #_name " (%%rbx)\n\t" \ "pushfq\n\t" \ "popq %%rax\n\t" \ "movq %%rax, 16(%0)\n\t" \ : : "r"(&block[0]) : "rax","rbx","cc","memory" \ ); \ \ send( \ sprintf(outBuf, \ "%s E=%016llx CCIN=%08llx -> E=%016llx CCOUT=%08llx\n", \ #_name, e_val_before, flags_in, \ e_val, block[2] & CC_MASK)); \ \ }}}}}} \ \ } \ } GEN_do_locked_unary_E(decb,al) GEN_do_locked_unary_E(decw,ax) GEN_do_locked_unary_E(decl,eax) GEN_do_locked_unary_E(decq,rax) GEN_do_locked_unary_E(incb,al) GEN_do_locked_unary_E(incw,ax) GEN_do_locked_unary_E(incl,eax) GEN_do_locked_unary_E(incq,rax) GEN_do_locked_unary_E(negb,al) GEN_do_locked_unary_E(negw,ax) GEN_do_locked_unary_E(negl,eax) GEN_do_locked_unary_E(negq,rax) GEN_do_locked_unary_E(notb,al) GEN_do_locked_unary_E(notw,ax) GEN_do_locked_unary_E(notl,eax) GEN_do_locked_unary_E(notq,rax) ///////////////////////////////////////////////////////////////// ULong btsq_mem ( UChar* base, int bitno ) { ULong res; __asm__ __volatile__("lock; btsq\t%2, %0\n\t" "setc %%dl\n\t" "movzbq %%dl,%1\n" : "=m" (*base), "=r" (res) : "r" ((ULong)bitno) : "rdx","cc","memory" ); /* Pretty meaningless to dereference base here, but that's what you have to do to get a btsl insn which refers to memory starting at base. 
*/ return res; } ULong btsl_mem ( UChar* base, int bitno ) { ULong res; __asm__ __volatile__("lock; btsl\t%2, %0\n\t" "setc %%dl\n\t" "movzbq %%dl,%1\n" : "=m" (*base), "=r" (res) : "r" ((UInt)bitno)); return res; } ULong btsw_mem ( UChar* base, int bitno ) { ULong res; __asm__ __volatile__("lock; btsw\t%w2, %0\n\t" "setc %%dl\n\t" "movzbq %%dl,%1\n" : "=m" (*base), "=r" (res) : "r" ((ULong)bitno)); return res; } ULong btrq_mem ( UChar* base, int bitno ) { ULong res; __asm__ __volatile__("lock; btrq\t%2, %0\n\t" "setc %%dl\n\t" "movzbq %%dl,%1\n" : "=m" (*base), "=r" (res) : "r" ((ULong)bitno)); return res; } ULong btrl_mem ( UChar* base, int bitno ) { ULong res; __asm__ __volatile__("lock; btrl\t%2, %0\n\t" "setc %%dl\n\t" "movzbq %%dl,%1\n" : "=m" (*base), "=r" (res) : "r" ((UInt)bitno)); return res; } ULong btrw_mem ( UChar* base, int bitno ) { ULong res; __asm__ __volatile__("lock; btrw\t%w2, %0\n\t" "setc %%dl\n\t" "movzbq %%dl,%1\n" : "=m" (*base), "=r" (res) : "r" ((ULong)bitno)); return res; } ULong btcq_mem ( UChar* base, int bitno ) { ULong res; __asm__ __volatile__("lock; btcq\t%2, %0\n\t" "setc %%dl\n\t" "movzbq %%dl,%1\n" : "=m" (*base), "=r" (res) : "r" ((ULong)bitno)); return res; } ULong btcl_mem ( UChar* base, int bitno ) { ULong res; __asm__ __volatile__("lock; btcl\t%2, %0\n\t" "setc %%dl\n\t" "movzbq %%dl,%1\n" : "=m" (*base), "=r" (res) : "r" ((UInt)bitno)); return res; } ULong btcw_mem ( UChar* base, int bitno ) { ULong res; __asm__ __volatile__("lock; btcw\t%w2, %0\n\t" "setc %%dl\n\t" "movzbq %%dl,%1\n" : "=m" (*base), "=r" (res) : "r" ((ULong)bitno)); return res; } ULong btq_mem ( UChar* base, int bitno ) { ULong res; __asm__ __volatile__("btq\t%2, %0\n\t" "setc %%dl\n\t" "movzbq %%dl,%1\n" : "=m" (*base), "=r" (res) : "r" ((ULong)bitno) : "cc", "memory"); return res; } ULong btl_mem ( UChar* base, int bitno ) { ULong res; __asm__ __volatile__("btl\t%2, %0\n\t" "setc %%dl\n\t" "movzbq %%dl,%1\n" : "=m" (*base), "=r" (res) : "r" 
((UInt)bitno) : "cc", "memory"); return res; } ULong btw_mem ( UChar* base, int bitno ) { ULong res; __asm__ __volatile__("btw\t%w2, %0\n\t" "setc %%dl\n\t" "movzbq %%dl,%1\n" : "=m" (*base), "=r" (res) : "r" ((ULong)bitno)); return res; } ULong rol1 ( ULong x ) { return (x << 1) | (x >> 63); } void do_bt_G_E_tests ( void ) { ULong n, bitoff, op; ULong c; UChar* block; ULong carrydep, res;; /*------------------------ MEM-Q -----------------------*/ carrydep = 0; block = calloc(200,1); block += 100; /* Valid bit offsets are -800 .. 799 inclusive. */ for (n = 0; n < 10000; n++) { bitoff = (random() % 1600) - 800; op = random() % 4; c = 2; switch (op) { case 0: c = btsq_mem(block, bitoff); break; case 1: c = btrq_mem(block, bitoff); break; case 2: c = btcq_mem(block, bitoff); break; case 3: c = btq_mem(block, bitoff); break; } c &= 255; assert(c == 0 || c == 1); carrydep = c ? (rol1(carrydep) ^ (Long)bitoff) : carrydep; } /* Compute final result */ block -= 100; res = 0; for (n = 0; n < 200; n++) { UChar ch = block[n]; /* printf("%d ", (int)block[n]); */ res = rol1(res) ^ (ULong)ch; } send( sprintf(outBuf, "bt{s,r,c}q: final res 0x%llx, carrydep 0x%llx\n", res, carrydep)); free(block); /*------------------------ MEM-L -----------------------*/ carrydep = 0; block = calloc(200,1); block += 100; /* Valid bit offsets are -800 .. 799 inclusive. */ for (n = 0; n < 10000; n++) { bitoff = (random() % 1600) - 800; op = random() % 4; c = 2; switch (op) { case 0: c = btsl_mem(block, bitoff); break; case 1: c = btrl_mem(block, bitoff); break; case 2: c = btcl_mem(block, bitoff); break; case 3: c = btl_mem(block, bitoff); break; } c &= 255; assert(c == 0 || c == 1); carrydep = c ? 
(rol1(carrydep) ^ (Long)(Int)bitoff) : carrydep; } /* Compute final result */ block -= 100; res = 0; for (n = 0; n < 200; n++) { UChar ch = block[n]; /* printf("%d ", (int)block[n]); */ res = rol1(res) ^ (ULong)ch; } send( sprintf(outBuf, "bt{s,r,c}l: final res 0x%llx, carrydep 0x%llx\n", res, carrydep)); free(block); /*------------------------ MEM-W -----------------------*/ carrydep = 0; block = calloc(200,1); block += 100; /* Valid bit offsets are -800 .. 799 inclusive. */ for (n = 0; n < 10000; n++) { bitoff = (random() % 1600) - 800; op = random() % 4; c = 2; switch (op) { case 0: c = btsw_mem(block, bitoff); break; case 1: c = btrw_mem(block, bitoff); break; case 2: c = btcw_mem(block, bitoff); break; case 3: c = btw_mem(block, bitoff); break; } c &= 255; assert(c == 0 || c == 1); carrydep = c ? (rol1(carrydep) ^ (Long)(Int)bitoff) : carrydep; } /* Compute final result */ block -= 100; res = 0; for (n = 0; n < 200; n++) { UChar ch = block[n]; /* printf("%d ", (int)block[n]); */ res = rol1(res) ^ (ULong)ch; } send(sprintf(outBuf, "bt{s,r,c}w: final res 0x%llx, carrydep 0x%llx\n", res, carrydep)); free(block); } ///////////////////////////////////////////////////////////////// /* Given a word, do bt/bts/btr/btc on bits 0, 1, 2 and 3 of it, and also reconstruct the original bits 0, 1, 2, 3 by looking at the carry flag. Returned result has mashed bits 0-3 at the bottom and the reconstructed original bits 0-3 as 4-7. 
*/ ULong mash_mem_Q ( ULong* origp ) { ULong reconstructed, mashed; __asm__ __volatile__ ( "movq %2, %%rdx\n\t" "" "movq $0, %%rax\n\t" "\n\t" "btq $0, (%%rdx)\n\t" "setb %%cl\n\t" "movzbq %%cl, %%rcx\n\t" "orq %%rcx, %%rax\n\t" "\n\t" "lock; btsq $1, (%%rdx)\n\t" "setb %%cl\n\t" "movzbq %%cl, %%rcx\n\t" "shlq $1, %%rcx\n\t" "orq %%rcx, %%rax\n\t" "\n\t" "lock; btrq $2, (%%rdx)\n\t" "setb %%cl\n\t" "movzbq %%cl, %%rcx\n\t" "shlq $2, %%rcx\n\t" "orq %%rcx, %%rax\n\t" "\n\t" "lock; btcq $3, (%%rdx)\n\t" "setb %%cl\n\t" "movzbq %%cl, %%rcx\n\t" "shlq $3, %%rcx\n\t" "orq %%rcx, %%rax\n\t" "\n\t" "movq %%rax, %0\n\t" "movq (%%rdx), %1" : "=r" (reconstructed), "=r" (mashed) : "r" (origp) : "rax", "rcx", "rdx", "cc"); return (mashed & 0xF) | ((reconstructed & 0xF) << 4); } ULong mash_mem_L ( UInt* origp ) { ULong reconstructed; UInt mashed; __asm__ __volatile__ ( "movq %2, %%rdx\n\t" "" "movq $0, %%rax\n\t" "\n\t" "btl $0, (%%rdx)\n\t" "setb %%cl\n\t" "movzbq %%cl, %%rcx\n\t" "orq %%rcx, %%rax\n\t" "\n\t" "lock; btsl $1, (%%rdx)\n\t" "setb %%cl\n\t" "movzbq %%cl, %%rcx\n\t" "shlq $1, %%rcx\n\t" "orq %%rcx, %%rax\n\t" "\n\t" "lock; btrl $2, (%%rdx)\n\t" "setb %%cl\n\t" "movzbq %%cl, %%rcx\n\t" "shlq $2, %%rcx\n\t" "orq %%rcx, %%rax\n\t" "\n\t" "lock; btcl $3, (%%rdx)\n\t" "setb %%cl\n\t" "movzbq %%cl, %%rcx\n\t" "shlq $3, %%rcx\n\t" "orq %%rcx, %%rax\n\t" "\n\t" "movq %%rax, %0\n\t" "movl (%%rdx), %1" : "=r" (reconstructed), "=r" (mashed) : "r" (origp) : "rax", "rcx", "rdx", "cc"); return (mashed & 0xF) | ((reconstructed & 0xF) << 4); } ULong mash_mem_W ( UShort* origp ) { ULong reconstructed, mashed; __asm__ __volatile__ ( "movq %2, %%rdx\n\t" "" "movq $0, %%rax\n\t" "\n\t" "btw $0, (%%rdx)\n\t" "setb %%cl\n\t" "movzbq %%cl, %%rcx\n\t" "orq %%rcx, %%rax\n\t" "\n\t" "lock; btsw $1, (%%rdx)\n\t" "setb %%cl\n\t" "movzbq %%cl, %%rcx\n\t" "shlq $1, %%rcx\n\t" "orq %%rcx, %%rax\n\t" "\n\t" "lock; btrw $2, (%%rdx)\n\t" "setb %%cl\n\t" "movzbq %%cl, %%rcx\n\t" "shlq $2, 
%%rcx\n\t" "orq %%rcx, %%rax\n\t" "\n\t" "lock; btcw $3, (%%rdx)\n\t" "setb %%cl\n\t" "movzbq %%cl, %%rcx\n\t" "shlq $3, %%rcx\n\t" "orq %%rcx, %%rax\n\t" "\n\t" "movq %%rax, %0\n\t" "movzwq (%%rdx), %1" : "=r" (reconstructed), "=r" (mashed) : "r" (origp) : "rax", "rcx", "rdx", "cc"); return (mashed & 0xF) | ((reconstructed & 0xF) << 4); } void do_bt_imm_E_tests( void ) { ULong i; ULong* iiq = malloc(sizeof(ULong)); UInt* iil = malloc(sizeof(UInt)); UShort* iiw = malloc(sizeof(UShort)); for (i = 0; i < 0x10; i++) { *iiq = i; *iil = i; *iiw = i; send(sprintf(outBuf,"0x%llx -> 0x%02llx 0x%02llx 0x%02llx\n", i, mash_mem_Q(iiq), mash_mem_L(iil), mash_mem_W(iiw))); } free(iiq); free(iil); free(iiw); } ///////////////////////////////////////////////////////////////// int main ( void ) { do_locked_G_E_addb(); do_locked_G_E_addw(); do_locked_G_E_addl(); do_locked_G_E_addq(); do_locked_G_E_orb(); do_locked_G_E_orw(); do_locked_G_E_orl(); do_locked_G_E_orq(); do_locked_G_E_adcb(); do_locked_G_E_adcw(); do_locked_G_E_adcl(); do_locked_G_E_adcq(); do_locked_G_E_sbbb(); do_locked_G_E_sbbw(); do_locked_G_E_sbbl(); do_locked_G_E_sbbq(); do_locked_G_E_andb(); do_locked_G_E_andw(); do_locked_G_E_andl(); do_locked_G_E_andq(); do_locked_G_E_subb(); do_locked_G_E_subw(); do_locked_G_E_subl(); do_locked_G_E_subq(); do_locked_G_E_xorb(); do_locked_G_E_xorw(); do_locked_G_E_xorl(); do_locked_G_E_xorq(); // 4 * 7 do_locked_imm_E_addb_0x7F(); do_locked_imm_E_addb_0xF1(); do_locked_imm_E_addw_0x7E(); do_locked_imm_E_addw_0x9325(); do_locked_imm_E_addl_0x7D(); do_locked_imm_E_addl_0x31415927(); do_locked_imm_E_addq_0x7D(); do_locked_imm_E_addq_0x31415927(); do_locked_imm_E_orb_0x7F(); do_locked_imm_E_orb_0xF1(); do_locked_imm_E_orw_0x7E(); do_locked_imm_E_orw_0x9325(); do_locked_imm_E_orl_0x7D(); do_locked_imm_E_orl_0x31415927(); do_locked_imm_E_orq_0x7D(); do_locked_imm_E_orq_0x31415927(); do_locked_imm_E_adcb_0x7F(); do_locked_imm_E_adcb_0xF1(); do_locked_imm_E_adcw_0x7E(); 
do_locked_imm_E_adcw_0x9325(); do_locked_imm_E_adcl_0x7D(); do_locked_imm_E_adcl_0x31415927(); do_locked_imm_E_adcq_0x7D(); do_locked_imm_E_adcq_0x31415927(); do_locked_imm_E_sbbb_0x7F(); do_locked_imm_E_sbbb_0xF1(); do_locked_imm_E_sbbw_0x7E(); do_locked_imm_E_sbbw_0x9325(); do_locked_imm_E_sbbl_0x7D(); do_locked_imm_E_sbbl_0x31415927(); do_locked_imm_E_sbbq_0x7D(); do_locked_imm_E_sbbq_0x31415927(); do_locked_imm_E_andb_0x7F(); do_locked_imm_E_andb_0xF1(); do_locked_imm_E_andw_0x7E(); do_locked_imm_E_andw_0x9325(); do_locked_imm_E_andl_0x7D(); do_locked_imm_E_andl_0x31415927(); do_locked_imm_E_andq_0x7D(); do_locked_imm_E_andq_0x31415927(); do_locked_imm_E_subb_0x7F(); do_locked_imm_E_subb_0xF1(); do_locked_imm_E_subw_0x7E(); do_locked_imm_E_subw_0x9325(); do_locked_imm_E_subl_0x7D(); do_locked_imm_E_subl_0x31415927(); do_locked_imm_E_subq_0x7D(); do_locked_imm_E_subq_0x31415927(); do_locked_imm_E_xorb_0x7F(); do_locked_imm_E_xorb_0xF1(); do_locked_imm_E_xorw_0x7E(); do_locked_imm_E_xorw_0x9325(); do_locked_imm_E_xorl_0x7D(); do_locked_imm_E_xorl_0x31415927(); do_locked_imm_E_xorq_0x7D(); do_locked_imm_E_xorq_0x31415927(); // 4 * 7 + 8 * 7 == 84 do_locked_unary_E_decb(); do_locked_unary_E_decw(); do_locked_unary_E_decl(); do_locked_unary_E_decq(); do_locked_unary_E_incb(); do_locked_unary_E_incw(); do_locked_unary_E_incl(); do_locked_unary_E_incq(); do_locked_unary_E_negb(); do_locked_unary_E_negw(); do_locked_unary_E_negl(); do_locked_unary_E_negq(); do_locked_unary_E_notb(); do_locked_unary_E_notw(); do_locked_unary_E_notl(); do_locked_unary_E_notq(); // 100 do_bt_G_E_tests(); // 109 do_bt_imm_E_tests(); // 118 // So there should be 118 lock-prefixed instructions in the // disassembly of this compilation unit. 
// confirm with // objdump -d ./amd64locked | grep lock | grep -v do_lock | grep -v elf64 | wc { UInt crcExpd = 0x1F677629; theCRC = crcFinalise( theCRC ); if (theCRC == crcExpd) { printf("amd64locked: PASS: CRCs actual 0x%08X expected 0x%08X\n", theCRC, crcExpd); } else { printf("amd64locked: FAIL: CRCs actual 0x%08X expected 0x%08X\n", theCRC, crcExpd); printf("amd64locked: set #define VERBOSE 1 to diagnose\n"); } } return 0; }
gpl-2.0
JonnyXDA/android_kernel_ulefone_metal
drivers/media/pci/cx88/cx88-video.c
327
54846
/* * * device driver for Conexant 2388x based TV cards * video4linux video interface * * (c) 2003-04 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs] * * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org> * - Multituner support * - video_ioctl2 conversion * - PAL/M fixes * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/kmod.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/kthread.h> #include <asm/div64.h> #include "cx88.h" #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-event.h> #include <media/wm8775.h> MODULE_DESCRIPTION("v4l2 driver module for cx2388x based TV cards"); MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]"); MODULE_LICENSE("GPL"); MODULE_VERSION(CX88_VERSION); /* ------------------------------------------------------------------ */ static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; static unsigned int radio_nr[] = {[0 ... 
(CX88_MAXBOARDS - 1)] = UNSET }; module_param_array(video_nr, int, NULL, 0444); module_param_array(vbi_nr, int, NULL, 0444); module_param_array(radio_nr, int, NULL, 0444); MODULE_PARM_DESC(video_nr,"video device numbers"); MODULE_PARM_DESC(vbi_nr,"vbi device numbers"); MODULE_PARM_DESC(radio_nr,"radio device numbers"); static unsigned int video_debug; module_param(video_debug,int,0644); MODULE_PARM_DESC(video_debug,"enable debug messages [video]"); static unsigned int irq_debug; module_param(irq_debug,int,0644); MODULE_PARM_DESC(irq_debug,"enable debug messages [IRQ handler]"); static unsigned int vid_limit = 16; module_param(vid_limit,int,0644); MODULE_PARM_DESC(vid_limit,"capture memory limit in megabytes"); #define dprintk(level,fmt, arg...) if (video_debug >= level) \ printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg) /* ------------------------------------------------------------------- */ /* static data */ static const struct cx8800_fmt formats[] = { { .name = "8 bpp, gray", .fourcc = V4L2_PIX_FMT_GREY, .cxformat = ColorFormatY8, .depth = 8, .flags = FORMAT_FLAGS_PACKED, },{ .name = "15 bpp RGB, le", .fourcc = V4L2_PIX_FMT_RGB555, .cxformat = ColorFormatRGB15, .depth = 16, .flags = FORMAT_FLAGS_PACKED, },{ .name = "15 bpp RGB, be", .fourcc = V4L2_PIX_FMT_RGB555X, .cxformat = ColorFormatRGB15 | ColorFormatBSWAP, .depth = 16, .flags = FORMAT_FLAGS_PACKED, },{ .name = "16 bpp RGB, le", .fourcc = V4L2_PIX_FMT_RGB565, .cxformat = ColorFormatRGB16, .depth = 16, .flags = FORMAT_FLAGS_PACKED, },{ .name = "16 bpp RGB, be", .fourcc = V4L2_PIX_FMT_RGB565X, .cxformat = ColorFormatRGB16 | ColorFormatBSWAP, .depth = 16, .flags = FORMAT_FLAGS_PACKED, },{ .name = "24 bpp RGB, le", .fourcc = V4L2_PIX_FMT_BGR24, .cxformat = ColorFormatRGB24, .depth = 24, .flags = FORMAT_FLAGS_PACKED, },{ .name = "32 bpp RGB, le", .fourcc = V4L2_PIX_FMT_BGR32, .cxformat = ColorFormatRGB32, .depth = 32, .flags = FORMAT_FLAGS_PACKED, },{ .name = "32 bpp RGB, be", .fourcc = V4L2_PIX_FMT_RGB32, 
.cxformat = ColorFormatRGB32 | ColorFormatBSWAP | ColorFormatWSWAP, .depth = 32, .flags = FORMAT_FLAGS_PACKED, },{ .name = "4:2:2, packed, YUYV", .fourcc = V4L2_PIX_FMT_YUYV, .cxformat = ColorFormatYUY2, .depth = 16, .flags = FORMAT_FLAGS_PACKED, },{ .name = "4:2:2, packed, UYVY", .fourcc = V4L2_PIX_FMT_UYVY, .cxformat = ColorFormatYUY2 | ColorFormatBSWAP, .depth = 16, .flags = FORMAT_FLAGS_PACKED, }, }; static const struct cx8800_fmt* format_by_fourcc(unsigned int fourcc) { unsigned int i; for (i = 0; i < ARRAY_SIZE(formats); i++) if (formats[i].fourcc == fourcc) return formats+i; return NULL; } /* ------------------------------------------------------------------- */ struct cx88_ctrl { /* control information */ u32 id; s32 minimum; s32 maximum; u32 step; s32 default_value; /* control register information */ u32 off; u32 reg; u32 sreg; u32 mask; u32 shift; }; static const struct cx88_ctrl cx8800_vid_ctls[] = { /* --- video --- */ { .id = V4L2_CID_BRIGHTNESS, .minimum = 0x00, .maximum = 0xff, .step = 1, .default_value = 0x7f, .off = 128, .reg = MO_CONTR_BRIGHT, .mask = 0x00ff, .shift = 0, },{ .id = V4L2_CID_CONTRAST, .minimum = 0, .maximum = 0xff, .step = 1, .default_value = 0x3f, .off = 0, .reg = MO_CONTR_BRIGHT, .mask = 0xff00, .shift = 8, },{ .id = V4L2_CID_HUE, .minimum = 0, .maximum = 0xff, .step = 1, .default_value = 0x7f, .off = 128, .reg = MO_HUE, .mask = 0x00ff, .shift = 0, },{ /* strictly, this only describes only U saturation. * V saturation is handled specially through code. 
*/ .id = V4L2_CID_SATURATION, .minimum = 0, .maximum = 0xff, .step = 1, .default_value = 0x7f, .off = 0, .reg = MO_UV_SATURATION, .mask = 0x00ff, .shift = 0, }, { .id = V4L2_CID_SHARPNESS, .minimum = 0, .maximum = 4, .step = 1, .default_value = 0x0, .off = 0, /* NOTE: the value is converted and written to both even and odd registers in the code */ .reg = MO_FILTER_ODD, .mask = 7 << 7, .shift = 7, }, { .id = V4L2_CID_CHROMA_AGC, .minimum = 0, .maximum = 1, .default_value = 0x1, .reg = MO_INPUT_FORMAT, .mask = 1 << 10, .shift = 10, }, { .id = V4L2_CID_COLOR_KILLER, .minimum = 0, .maximum = 1, .default_value = 0x1, .reg = MO_INPUT_FORMAT, .mask = 1 << 9, .shift = 9, }, { .id = V4L2_CID_BAND_STOP_FILTER, .minimum = 0, .maximum = 1, .step = 1, .default_value = 0x0, .off = 0, .reg = MO_HTOTAL, .mask = 3 << 11, .shift = 11, } }; static const struct cx88_ctrl cx8800_aud_ctls[] = { { /* --- audio --- */ .id = V4L2_CID_AUDIO_MUTE, .minimum = 0, .maximum = 1, .default_value = 1, .reg = AUD_VOL_CTL, .sreg = SHADOW_AUD_VOL_CTL, .mask = (1 << 6), .shift = 6, },{ .id = V4L2_CID_AUDIO_VOLUME, .minimum = 0, .maximum = 0x3f, .step = 1, .default_value = 0x3f, .reg = AUD_VOL_CTL, .sreg = SHADOW_AUD_VOL_CTL, .mask = 0x3f, .shift = 0, },{ .id = V4L2_CID_AUDIO_BALANCE, .minimum = 0, .maximum = 0x7f, .step = 1, .default_value = 0x40, .reg = AUD_BAL_CTL, .sreg = SHADOW_AUD_BAL_CTL, .mask = 0x7f, .shift = 0, } }; enum { CX8800_VID_CTLS = ARRAY_SIZE(cx8800_vid_ctls), CX8800_AUD_CTLS = ARRAY_SIZE(cx8800_aud_ctls), }; /* ------------------------------------------------------------------- */ /* resource management */ static int res_get(struct cx8800_dev *dev, struct cx8800_fh *fh, unsigned int bit) { struct cx88_core *core = dev->core; if (fh->resources & bit) /* have it already allocated */ return 1; /* is it free? 
*/ mutex_lock(&core->lock); if (dev->resources & bit) { /* no, someone else uses it */ mutex_unlock(&core->lock); return 0; } /* it's free, grab it */ fh->resources |= bit; dev->resources |= bit; dprintk(1,"res: get %d\n",bit); mutex_unlock(&core->lock); return 1; } static int res_check(struct cx8800_fh *fh, unsigned int bit) { return (fh->resources & bit); } static int res_locked(struct cx8800_dev *dev, unsigned int bit) { return (dev->resources & bit); } static void res_free(struct cx8800_dev *dev, struct cx8800_fh *fh, unsigned int bits) { struct cx88_core *core = dev->core; BUG_ON((fh->resources & bits) != bits); mutex_lock(&core->lock); fh->resources &= ~bits; dev->resources &= ~bits; dprintk(1,"res: put %d\n",bits); mutex_unlock(&core->lock); } /* ------------------------------------------------------------------ */ int cx88_video_mux(struct cx88_core *core, unsigned int input) { /* struct cx88_core *core = dev->core; */ dprintk(1,"video_mux: %d [vmux=%d,gpio=0x%x,0x%x,0x%x,0x%x]\n", input, INPUT(input).vmux, INPUT(input).gpio0,INPUT(input).gpio1, INPUT(input).gpio2,INPUT(input).gpio3); core->input = input; cx_andor(MO_INPUT_FORMAT, 0x03 << 14, INPUT(input).vmux << 14); cx_write(MO_GP3_IO, INPUT(input).gpio3); cx_write(MO_GP0_IO, INPUT(input).gpio0); cx_write(MO_GP1_IO, INPUT(input).gpio1); cx_write(MO_GP2_IO, INPUT(input).gpio2); switch (INPUT(input).type) { case CX88_VMUX_SVIDEO: cx_set(MO_AFECFG_IO, 0x00000001); cx_set(MO_INPUT_FORMAT, 0x00010010); cx_set(MO_FILTER_EVEN, 0x00002020); cx_set(MO_FILTER_ODD, 0x00002020); break; default: cx_clear(MO_AFECFG_IO, 0x00000001); cx_clear(MO_INPUT_FORMAT, 0x00010010); cx_clear(MO_FILTER_EVEN, 0x00002020); cx_clear(MO_FILTER_ODD, 0x00002020); break; } /* if there are audioroutes defined, we have an external ADC to deal with audio */ if (INPUT(input).audioroute) { /* The wm8775 module has the "2" route hardwired into the initialization. Some boards may use different routes for different inputs. 
HVR-1300 surely does */ if (core->sd_wm8775) { call_all(core, audio, s_routing, INPUT(input).audioroute, 0, 0); } /* cx2388's C-ADC is connected to the tuner only. When used with S-Video, that ADC is busy dealing with chroma, so an external must be used for baseband audio */ if (INPUT(input).type != CX88_VMUX_TELEVISION && INPUT(input).type != CX88_VMUX_CABLE) { /* "I2S ADC mode" */ core->tvaudio = WW_I2SADC; cx88_set_tvaudio(core); } else { /* Normal mode */ cx_write(AUD_I2SCNTL, 0x0); cx_clear(AUD_CTL, EN_I2SIN_ENABLE); } } return 0; } EXPORT_SYMBOL(cx88_video_mux); /* ------------------------------------------------------------------ */ static int start_video_dma(struct cx8800_dev *dev, struct cx88_dmaqueue *q, struct cx88_buffer *buf) { struct cx88_core *core = dev->core; /* setup fifo + format */ cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH21], buf->bpl, buf->risc.dma); cx88_set_scale(core, buf->vb.width, buf->vb.height, buf->vb.field); cx_write(MO_COLOR_CTRL, buf->fmt->cxformat | ColorFormatGamma); /* reset counter */ cx_write(MO_VIDY_GPCNTRL,GP_COUNT_CONTROL_RESET); q->count = 1; /* enable irqs */ cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_VIDINT); /* Enables corresponding bits at PCI_INT_STAT: bits 0 to 4: video, audio, transport stream, VIP, Host bit 7: timer bits 8 and 9: DMA complete for: SRC, DST bits 10 and 11: BERR signal asserted for RISC: RD, WR bits 12 to 15: BERR signal asserted for: BRDG, SRC, DST, IPB */ cx_set(MO_VID_INTMSK, 0x0f0011); /* enable capture */ cx_set(VID_CAPTURE_CONTROL,0x06); /* start dma */ cx_set(MO_DEV_CNTRL2, (1<<5)); cx_set(MO_VID_DMACNTRL, 0x11); /* Planar Y and packed FIFO and RISC enable */ return 0; } #ifdef CONFIG_PM static int stop_video_dma(struct cx8800_dev *dev) { struct cx88_core *core = dev->core; /* stop dma */ cx_clear(MO_VID_DMACNTRL, 0x11); /* disable capture */ cx_clear(VID_CAPTURE_CONTROL,0x06); /* disable irqs */ cx_clear(MO_PCI_INTMSK, PCI_INT_VIDINT); cx_clear(MO_VID_INTMSK, 0x0f0011); 
return 0; } #endif static int restart_video_queue(struct cx8800_dev *dev, struct cx88_dmaqueue *q) { struct cx88_core *core = dev->core; struct cx88_buffer *buf, *prev; if (!list_empty(&q->active)) { buf = list_entry(q->active.next, struct cx88_buffer, vb.queue); dprintk(2,"restart_queue [%p/%d]: restart dma\n", buf, buf->vb.i); start_video_dma(dev, q, buf); list_for_each_entry(buf, &q->active, vb.queue) buf->count = q->count++; mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT); return 0; } prev = NULL; for (;;) { if (list_empty(&q->queued)) return 0; buf = list_entry(q->queued.next, struct cx88_buffer, vb.queue); if (NULL == prev) { list_move_tail(&buf->vb.queue, &q->active); start_video_dma(dev, q, buf); buf->vb.state = VIDEOBUF_ACTIVE; buf->count = q->count++; mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT); dprintk(2,"[%p/%d] restart_queue - first active\n", buf,buf->vb.i); } else if (prev->vb.width == buf->vb.width && prev->vb.height == buf->vb.height && prev->fmt == buf->fmt) { list_move_tail(&buf->vb.queue, &q->active); buf->vb.state = VIDEOBUF_ACTIVE; buf->count = q->count++; prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma); dprintk(2,"[%p/%d] restart_queue - move to active\n", buf,buf->vb.i); } else { return 0; } prev = buf; } } /* ------------------------------------------------------------------ */ static int buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size) { struct cx8800_fh *fh = q->priv_data; struct cx8800_dev *dev = fh->dev; *size = dev->fmt->depth * dev->width * dev->height >> 3; if (0 == *count) *count = 32; if (*size * *count > vid_limit * 1024 * 1024) *count = (vid_limit * 1024 * 1024) / *size; return 0; } static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb, enum v4l2_field field) { struct cx8800_fh *fh = q->priv_data; struct cx8800_dev *dev = fh->dev; struct cx88_core *core = dev->core; struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb); struct videobuf_dmabuf 
*dma=videobuf_to_dma(&buf->vb); int rc, init_buffer = 0; BUG_ON(NULL == dev->fmt); if (dev->width < 48 || dev->width > norm_maxw(core->tvnorm) || dev->height < 32 || dev->height > norm_maxh(core->tvnorm)) return -EINVAL; buf->vb.size = (dev->width * dev->height * dev->fmt->depth) >> 3; if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size) return -EINVAL; if (buf->fmt != dev->fmt || buf->vb.width != dev->width || buf->vb.height != dev->height || buf->vb.field != field) { buf->fmt = dev->fmt; buf->vb.width = dev->width; buf->vb.height = dev->height; buf->vb.field = field; init_buffer = 1; } if (VIDEOBUF_NEEDS_INIT == buf->vb.state) { init_buffer = 1; if (0 != (rc = videobuf_iolock(q,&buf->vb,NULL))) goto fail; } if (init_buffer) { buf->bpl = buf->vb.width * buf->fmt->depth >> 3; switch (buf->vb.field) { case V4L2_FIELD_TOP: cx88_risc_buffer(dev->pci, &buf->risc, dma->sglist, 0, UNSET, buf->bpl, 0, buf->vb.height); break; case V4L2_FIELD_BOTTOM: cx88_risc_buffer(dev->pci, &buf->risc, dma->sglist, UNSET, 0, buf->bpl, 0, buf->vb.height); break; case V4L2_FIELD_INTERLACED: cx88_risc_buffer(dev->pci, &buf->risc, dma->sglist, 0, buf->bpl, buf->bpl, buf->bpl, buf->vb.height >> 1); break; case V4L2_FIELD_SEQ_TB: cx88_risc_buffer(dev->pci, &buf->risc, dma->sglist, 0, buf->bpl * (buf->vb.height >> 1), buf->bpl, 0, buf->vb.height >> 1); break; case V4L2_FIELD_SEQ_BT: cx88_risc_buffer(dev->pci, &buf->risc, dma->sglist, buf->bpl * (buf->vb.height >> 1), 0, buf->bpl, 0, buf->vb.height >> 1); break; default: BUG(); } } dprintk(2,"[%p/%d] buffer_prepare - %dx%d %dbpp \"%s\" - dma=0x%08lx\n", buf, buf->vb.i, dev->width, dev->height, dev->fmt->depth, dev->fmt->name, (unsigned long)buf->risc.dma); buf->vb.state = VIDEOBUF_PREPARED; return 0; fail: cx88_free_buffer(q,buf); return rc; } static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb); struct cx88_buffer *prev; struct cx8800_fh *fh = 
vq->priv_data; struct cx8800_dev *dev = fh->dev; struct cx88_core *core = dev->core; struct cx88_dmaqueue *q = &dev->vidq; /* add jump to stopper */ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC); buf->risc.jmp[1] = cpu_to_le32(q->stopper.dma); if (!list_empty(&q->queued)) { list_add_tail(&buf->vb.queue,&q->queued); buf->vb.state = VIDEOBUF_QUEUED; dprintk(2,"[%p/%d] buffer_queue - append to queued\n", buf, buf->vb.i); } else if (list_empty(&q->active)) { list_add_tail(&buf->vb.queue,&q->active); start_video_dma(dev, q, buf); buf->vb.state = VIDEOBUF_ACTIVE; buf->count = q->count++; mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT); dprintk(2,"[%p/%d] buffer_queue - first active\n", buf, buf->vb.i); } else { prev = list_entry(q->active.prev, struct cx88_buffer, vb.queue); if (prev->vb.width == buf->vb.width && prev->vb.height == buf->vb.height && prev->fmt == buf->fmt) { list_add_tail(&buf->vb.queue,&q->active); buf->vb.state = VIDEOBUF_ACTIVE; buf->count = q->count++; prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma); dprintk(2,"[%p/%d] buffer_queue - append to active\n", buf, buf->vb.i); } else { list_add_tail(&buf->vb.queue,&q->queued); buf->vb.state = VIDEOBUF_QUEUED; dprintk(2,"[%p/%d] buffer_queue - first queued\n", buf, buf->vb.i); } } } static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb) { struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb); cx88_free_buffer(q,buf); } static const struct videobuf_queue_ops cx8800_video_qops = { .buf_setup = buffer_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, .buf_release = buffer_release, }; /* ------------------------------------------------------------------ */ /* ------------------------------------------------------------------ */ static struct videobuf_queue *get_queue(struct file *file) { struct video_device *vdev = video_devdata(file); struct cx8800_fh *fh = file->private_data; switch (vdev->vfl_type) { case VFL_TYPE_GRABBER: return 
&fh->vidq; case VFL_TYPE_VBI: return &fh->vbiq; default: BUG(); } } static int get_resource(struct file *file) { struct video_device *vdev = video_devdata(file); switch (vdev->vfl_type) { case VFL_TYPE_GRABBER: return RESOURCE_VIDEO; case VFL_TYPE_VBI: return RESOURCE_VBI; default: BUG(); } } static int video_open(struct file *file) { struct video_device *vdev = video_devdata(file); struct cx8800_dev *dev = video_drvdata(file); struct cx88_core *core = dev->core; struct cx8800_fh *fh; enum v4l2_buf_type type = 0; int radio = 0; switch (vdev->vfl_type) { case VFL_TYPE_GRABBER: type = V4L2_BUF_TYPE_VIDEO_CAPTURE; break; case VFL_TYPE_VBI: type = V4L2_BUF_TYPE_VBI_CAPTURE; break; case VFL_TYPE_RADIO: radio = 1; break; } dprintk(1, "open dev=%s radio=%d type=%s\n", video_device_node_name(vdev), radio, v4l2_type_names[type]); /* allocate + initialize per filehandle data */ fh = kzalloc(sizeof(*fh),GFP_KERNEL); if (unlikely(!fh)) return -ENOMEM; v4l2_fh_init(&fh->fh, vdev); file->private_data = fh; fh->dev = dev; mutex_lock(&core->lock); videobuf_queue_sg_init(&fh->vidq, &cx8800_video_qops, &dev->pci->dev, &dev->slock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_INTERLACED, sizeof(struct cx88_buffer), fh, NULL); videobuf_queue_sg_init(&fh->vbiq, &cx8800_vbi_qops, &dev->pci->dev, &dev->slock, V4L2_BUF_TYPE_VBI_CAPTURE, V4L2_FIELD_SEQ_TB, sizeof(struct cx88_buffer), fh, NULL); if (vdev->vfl_type == VFL_TYPE_RADIO) { dprintk(1,"video_open: setting radio device\n"); cx_write(MO_GP3_IO, core->board.radio.gpio3); cx_write(MO_GP0_IO, core->board.radio.gpio0); cx_write(MO_GP1_IO, core->board.radio.gpio1); cx_write(MO_GP2_IO, core->board.radio.gpio2); if (core->board.radio.audioroute) { if (core->sd_wm8775) { call_all(core, audio, s_routing, core->board.radio.audioroute, 0, 0); } /* "I2S ADC mode" */ core->tvaudio = WW_I2SADC; cx88_set_tvaudio(core); } else { /* FM Mode */ core->tvaudio = WW_FM; cx88_set_tvaudio(core); cx88_set_stereo(core,V4L2_TUNER_MODE_STEREO,1); } call_all(core, 
tuner, s_radio); } core->users++; mutex_unlock(&core->lock); v4l2_fh_add(&fh->fh); return 0; } static ssize_t video_read(struct file *file, char __user *data, size_t count, loff_t *ppos) { struct video_device *vdev = video_devdata(file); struct cx8800_fh *fh = file->private_data; switch (vdev->vfl_type) { case VFL_TYPE_GRABBER: if (res_locked(fh->dev,RESOURCE_VIDEO)) return -EBUSY; return videobuf_read_one(&fh->vidq, data, count, ppos, file->f_flags & O_NONBLOCK); case VFL_TYPE_VBI: if (!res_get(fh->dev,fh,RESOURCE_VBI)) return -EBUSY; return videobuf_read_stream(&fh->vbiq, data, count, ppos, 1, file->f_flags & O_NONBLOCK); default: BUG(); } } static unsigned int video_poll(struct file *file, struct poll_table_struct *wait) { struct video_device *vdev = video_devdata(file); struct cx8800_fh *fh = file->private_data; struct cx88_buffer *buf; unsigned int rc = v4l2_ctrl_poll(file, wait); if (vdev->vfl_type == VFL_TYPE_VBI) { if (!res_get(fh->dev,fh,RESOURCE_VBI)) return rc | POLLERR; return rc | videobuf_poll_stream(file, &fh->vbiq, wait); } mutex_lock(&fh->vidq.vb_lock); if (res_check(fh,RESOURCE_VIDEO)) { /* streaming capture */ if (list_empty(&fh->vidq.stream)) goto done; buf = list_entry(fh->vidq.stream.next,struct cx88_buffer,vb.stream); } else { /* read() capture */ buf = (struct cx88_buffer*)fh->vidq.read_buf; if (NULL == buf) goto done; } poll_wait(file, &buf->vb.done, wait); if (buf->vb.state == VIDEOBUF_DONE || buf->vb.state == VIDEOBUF_ERROR) rc |= POLLIN|POLLRDNORM; done: mutex_unlock(&fh->vidq.vb_lock); return rc; } static int video_release(struct file *file) { struct cx8800_fh *fh = file->private_data; struct cx8800_dev *dev = fh->dev; /* turn off overlay */ if (res_check(fh, RESOURCE_OVERLAY)) { /* FIXME */ res_free(dev,fh,RESOURCE_OVERLAY); } /* stop video capture */ if (res_check(fh, RESOURCE_VIDEO)) { videobuf_queue_cancel(&fh->vidq); res_free(dev,fh,RESOURCE_VIDEO); } if (fh->vidq.read_buf) { buffer_release(&fh->vidq,fh->vidq.read_buf); 
kfree(fh->vidq.read_buf); } /* stop vbi capture */ if (res_check(fh, RESOURCE_VBI)) { videobuf_stop(&fh->vbiq); res_free(dev,fh,RESOURCE_VBI); } videobuf_mmap_free(&fh->vidq); videobuf_mmap_free(&fh->vbiq); mutex_lock(&dev->core->lock); v4l2_fh_del(&fh->fh); v4l2_fh_exit(&fh->fh); file->private_data = NULL; kfree(fh); dev->core->users--; if (!dev->core->users) call_all(dev->core, core, s_power, 0); mutex_unlock(&dev->core->lock); return 0; } static int video_mmap(struct file *file, struct vm_area_struct * vma) { return videobuf_mmap_mapper(get_queue(file), vma); } /* ------------------------------------------------------------------ */ /* VIDEO CTRL IOCTLS */ static int cx8800_s_vid_ctrl(struct v4l2_ctrl *ctrl) { struct cx88_core *core = container_of(ctrl->handler, struct cx88_core, video_hdl); const struct cx88_ctrl *cc = ctrl->priv; u32 value, mask; mask = cc->mask; switch (ctrl->id) { case V4L2_CID_SATURATION: /* special v_sat handling */ value = ((ctrl->val - cc->off) << cc->shift) & cc->mask; if (core->tvnorm & V4L2_STD_SECAM) { /* For SECAM, both U and V sat should be equal */ value = value << 8 | value; } else { /* Keeps U Saturation proportional to V Sat */ value = (value * 0x5a) / 0x7f << 8 | value; } mask = 0xffff; break; case V4L2_CID_SHARPNESS: /* 0b000, 0b100, 0b101, 0b110, or 0b111 */ value = (ctrl->val < 1 ? 0 : ((ctrl->val + 3) << 7)); /* needs to be set for both fields */ cx_andor(MO_FILTER_EVEN, mask, value); break; case V4L2_CID_CHROMA_AGC: value = ((ctrl->val - cc->off) << cc->shift) & cc->mask; break; default: value = ((ctrl->val - cc->off) << cc->shift) & cc->mask; break; } dprintk(1, "set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n", ctrl->id, ctrl->name, ctrl->val, cc->reg, value, mask, cc->sreg ? 
" [shadowed]" : ""); if (cc->sreg) cx_sandor(cc->sreg, cc->reg, mask, value); else cx_andor(cc->reg, mask, value); return 0; } static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl) { struct cx88_core *core = container_of(ctrl->handler, struct cx88_core, audio_hdl); const struct cx88_ctrl *cc = ctrl->priv; u32 value,mask; /* Pass changes onto any WM8775 */ if (core->sd_wm8775) { switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: wm8775_s_ctrl(core, ctrl->id, ctrl->val); break; case V4L2_CID_AUDIO_VOLUME: wm8775_s_ctrl(core, ctrl->id, (ctrl->val) ? (0x90 + ctrl->val) << 8 : 0); break; case V4L2_CID_AUDIO_BALANCE: wm8775_s_ctrl(core, ctrl->id, ctrl->val << 9); break; default: break; } } mask = cc->mask; switch (ctrl->id) { case V4L2_CID_AUDIO_BALANCE: value = (ctrl->val < 0x40) ? (0x7f - ctrl->val) : (ctrl->val - 0x40); break; case V4L2_CID_AUDIO_VOLUME: value = 0x3f - (ctrl->val & 0x3f); break; default: value = ((ctrl->val - cc->off) << cc->shift) & cc->mask; break; } dprintk(1,"set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n", ctrl->id, ctrl->name, ctrl->val, cc->reg, value, mask, cc->sreg ? 
" [shadowed]" : ""); if (cc->sreg) cx_sandor(cc->sreg, cc->reg, mask, value); else cx_andor(cc->reg, mask, value); return 0; } /* ------------------------------------------------------------------ */ /* VIDEO IOCTLS */ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct cx8800_fh *fh = priv; struct cx8800_dev *dev = fh->dev; f->fmt.pix.width = dev->width; f->fmt.pix.height = dev->height; f->fmt.pix.field = fh->vidq.field; f->fmt.pix.pixelformat = dev->fmt->fourcc; f->fmt.pix.bytesperline = (f->fmt.pix.width * dev->fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; return 0; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; const struct cx8800_fmt *fmt; enum v4l2_field field; unsigned int maxw, maxh; fmt = format_by_fourcc(f->fmt.pix.pixelformat); if (NULL == fmt) return -EINVAL; field = f->fmt.pix.field; maxw = norm_maxw(core->tvnorm); maxh = norm_maxh(core->tvnorm); if (V4L2_FIELD_ANY == field) { field = (f->fmt.pix.height > maxh/2) ? 
V4L2_FIELD_INTERLACED : V4L2_FIELD_BOTTOM; } switch (field) { case V4L2_FIELD_TOP: case V4L2_FIELD_BOTTOM: maxh = maxh / 2; break; case V4L2_FIELD_INTERLACED: break; default: return -EINVAL; } f->fmt.pix.field = field; v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2, &f->fmt.pix.height, 32, maxh, 0, 0); f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; return 0; } static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct cx8800_fh *fh = priv; struct cx8800_dev *dev = fh->dev; int err = vidioc_try_fmt_vid_cap (file,priv,f); if (0 != err) return err; dev->fmt = format_by_fourcc(f->fmt.pix.pixelformat); dev->width = f->fmt.pix.width; dev->height = f->fmt.pix.height; fh->vidq.field = f->fmt.pix.field; return 0; } void cx88_querycap(struct file *file, struct cx88_core *core, struct v4l2_capability *cap) { struct video_device *vdev = video_devdata(file); strlcpy(cap->card, core->board.name, sizeof(cap->card)); cap->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; if (UNSET != core->board.tuner_type) cap->device_caps |= V4L2_CAP_TUNER; switch (vdev->vfl_type) { case VFL_TYPE_RADIO: cap->device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER; break; case VFL_TYPE_GRABBER: cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE; break; case VFL_TYPE_VBI: cap->device_caps |= V4L2_CAP_VBI_CAPTURE; break; } cap->capabilities = cap->device_caps | V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VBI_CAPTURE | V4L2_CAP_DEVICE_CAPS; if (core->board.radio.type == CX88_RADIO) cap->capabilities |= V4L2_CAP_RADIO; } EXPORT_SYMBOL(cx88_querycap); static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct cx8800_dev *dev = ((struct cx8800_fh *)priv)->dev; struct cx88_core *core = dev->core; strcpy(cap->driver, "cx8800"); sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci)); cx88_querycap(file, core, cap); return 0; } static int vidioc_enum_fmt_vid_cap (struct 
file *file, void *priv, struct v4l2_fmtdesc *f) { if (unlikely(f->index >= ARRAY_SIZE(formats))) return -EINVAL; strlcpy(f->description,formats[f->index].name,sizeof(f->description)); f->pixelformat = formats[f->index].fourcc; return 0; } static int vidioc_reqbufs (struct file *file, void *priv, struct v4l2_requestbuffers *p) { return videobuf_reqbufs(get_queue(file), p); } static int vidioc_querybuf (struct file *file, void *priv, struct v4l2_buffer *p) { return videobuf_querybuf(get_queue(file), p); } static int vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *p) { return videobuf_qbuf(get_queue(file), p); } static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *p) { return videobuf_dqbuf(get_queue(file), p, file->f_flags & O_NONBLOCK); } static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i) { struct video_device *vdev = video_devdata(file); struct cx8800_fh *fh = priv; struct cx8800_dev *dev = fh->dev; if ((vdev->vfl_type == VFL_TYPE_GRABBER && i != V4L2_BUF_TYPE_VIDEO_CAPTURE) || (vdev->vfl_type == VFL_TYPE_VBI && i != V4L2_BUF_TYPE_VBI_CAPTURE)) return -EINVAL; if (unlikely(!res_get(dev, fh, get_resource(file)))) return -EBUSY; return videobuf_streamon(get_queue(file)); } static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i) { struct video_device *vdev = video_devdata(file); struct cx8800_fh *fh = priv; struct cx8800_dev *dev = fh->dev; int err, res; if ((vdev->vfl_type == VFL_TYPE_GRABBER && i != V4L2_BUF_TYPE_VIDEO_CAPTURE) || (vdev->vfl_type == VFL_TYPE_VBI && i != V4L2_BUF_TYPE_VBI_CAPTURE)) return -EINVAL; res = get_resource(file); err = videobuf_streamoff(get_queue(file)); if (err < 0) return err; res_free(dev,fh,res); return 0; } static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; *tvnorm = core->tvnorm; return 0; } static int vidioc_s_std(struct file *file, void *priv, 
v4l2_std_id tvnorms) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; mutex_lock(&core->lock); cx88_set_tvnorm(core, tvnorms); mutex_unlock(&core->lock); return 0; } /* only one input in this sample driver */ int cx88_enum_input (struct cx88_core *core,struct v4l2_input *i) { static const char * const iname[] = { [ CX88_VMUX_COMPOSITE1 ] = "Composite1", [ CX88_VMUX_COMPOSITE2 ] = "Composite2", [ CX88_VMUX_COMPOSITE3 ] = "Composite3", [ CX88_VMUX_COMPOSITE4 ] = "Composite4", [ CX88_VMUX_SVIDEO ] = "S-Video", [ CX88_VMUX_TELEVISION ] = "Television", [ CX88_VMUX_CABLE ] = "Cable TV", [ CX88_VMUX_DVB ] = "DVB", [ CX88_VMUX_DEBUG ] = "for debug only", }; unsigned int n = i->index; if (n >= 4) return -EINVAL; if (0 == INPUT(n).type) return -EINVAL; i->type = V4L2_INPUT_TYPE_CAMERA; strcpy(i->name,iname[INPUT(n).type]); if ((CX88_VMUX_TELEVISION == INPUT(n).type) || (CX88_VMUX_CABLE == INPUT(n).type)) { i->type = V4L2_INPUT_TYPE_TUNER; } i->std = CX88_NORMS; return 0; } EXPORT_SYMBOL(cx88_enum_input); static int vidioc_enum_input (struct file *file, void *priv, struct v4l2_input *i) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; return cx88_enum_input (core,i); } static int vidioc_g_input (struct file *file, void *priv, unsigned int *i) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; *i = core->input; return 0; } static int vidioc_s_input (struct file *file, void *priv, unsigned int i) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; if (i >= 4) return -EINVAL; if (0 == INPUT(i).type) return -EINVAL; mutex_lock(&core->lock); cx88_newstation(core); cx88_video_mux(core,i); mutex_unlock(&core->lock); return 0; } static int vidioc_g_tuner (struct file *file, void *priv, struct v4l2_tuner *t) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; u32 reg; if (unlikely(UNSET == core->board.tuner_type)) return -EINVAL; if (0 != t->index) return -EINVAL; strcpy(t->name, "Television"); t->capability 
= V4L2_TUNER_CAP_NORM; t->rangehigh = 0xffffffffUL; call_all(core, tuner, g_tuner, t); cx88_get_stereo(core ,t); reg = cx_read(MO_DEVICE_STATUS); t->signal = (reg & (1<<5)) ? 0xffff : 0x0000; return 0; } static int vidioc_s_tuner (struct file *file, void *priv, const struct v4l2_tuner *t) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; if (UNSET == core->board.tuner_type) return -EINVAL; if (0 != t->index) return -EINVAL; cx88_set_stereo(core, t->audmode, 1); return 0; } static int vidioc_g_frequency (struct file *file, void *priv, struct v4l2_frequency *f) { struct cx8800_fh *fh = priv; struct cx88_core *core = fh->dev->core; if (unlikely(UNSET == core->board.tuner_type)) return -EINVAL; if (f->tuner) return -EINVAL; f->frequency = core->freq; call_all(core, tuner, g_frequency, f); return 0; } int cx88_set_freq (struct cx88_core *core, const struct v4l2_frequency *f) { struct v4l2_frequency new_freq = *f; if (unlikely(UNSET == core->board.tuner_type)) return -EINVAL; if (unlikely(f->tuner != 0)) return -EINVAL; mutex_lock(&core->lock); cx88_newstation(core); call_all(core, tuner, s_frequency, f); call_all(core, tuner, g_frequency, &new_freq); core->freq = new_freq.frequency; /* When changing channels it is required to reset TVAUDIO */ msleep (10); cx88_set_tvaudio(core); mutex_unlock(&core->lock); return 0; } EXPORT_SYMBOL(cx88_set_freq); static int vidioc_s_frequency (struct file *file, void *priv, const struct v4l2_frequency *f) { struct cx8800_fh *fh = priv; struct cx88_core *core = fh->dev->core; return cx88_set_freq(core, f); } #ifdef CONFIG_VIDEO_ADV_DEBUG static int vidioc_g_register (struct file *file, void *fh, struct v4l2_dbg_register *reg) { struct cx88_core *core = ((struct cx8800_fh*)fh)->dev->core; /* cx2388x has a 24-bit register space */ reg->val = cx_read(reg->reg & 0xfffffc); reg->size = 4; return 0; } static int vidioc_s_register (struct file *file, void *fh, const struct v4l2_dbg_register *reg) { struct cx88_core *core = 
((struct cx8800_fh*)fh)->dev->core; cx_write(reg->reg & 0xfffffc, reg->val); return 0; } #endif /* ----------------------------------------------------------- */ /* RADIO ESPECIFIC IOCTLS */ /* ----------------------------------------------------------- */ static int radio_g_tuner (struct file *file, void *priv, struct v4l2_tuner *t) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; if (unlikely(t->index > 0)) return -EINVAL; strcpy(t->name, "Radio"); call_all(core, tuner, g_tuner, t); return 0; } static int radio_s_tuner (struct file *file, void *priv, const struct v4l2_tuner *t) { struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; if (0 != t->index) return -EINVAL; call_all(core, tuner, s_tuner, t); return 0; } /* ----------------------------------------------------------- */ static void cx8800_vid_timeout(unsigned long data) { struct cx8800_dev *dev = (struct cx8800_dev*)data; struct cx88_core *core = dev->core; struct cx88_dmaqueue *q = &dev->vidq; struct cx88_buffer *buf; unsigned long flags; cx88_sram_channel_dump(core, &cx88_sram_channels[SRAM_CH21]); cx_clear(MO_VID_DMACNTRL, 0x11); cx_clear(VID_CAPTURE_CONTROL, 0x06); spin_lock_irqsave(&dev->slock,flags); while (!list_empty(&q->active)) { buf = list_entry(q->active.next, struct cx88_buffer, vb.queue); list_del(&buf->vb.queue); buf->vb.state = VIDEOBUF_ERROR; wake_up(&buf->vb.done); printk("%s/0: [%p/%d] timeout - dma=0x%08lx\n", core->name, buf, buf->vb.i, (unsigned long)buf->risc.dma); } restart_video_queue(dev,q); spin_unlock_irqrestore(&dev->slock,flags); } static const char *cx88_vid_irqs[32] = { "y_risci1", "u_risci1", "v_risci1", "vbi_risc1", "y_risci2", "u_risci2", "v_risci2", "vbi_risc2", "y_oflow", "u_oflow", "v_oflow", "vbi_oflow", "y_sync", "u_sync", "v_sync", "vbi_sync", "opc_err", "par_err", "rip_err", "pci_abort", }; static void cx8800_vid_irq(struct cx8800_dev *dev) { struct cx88_core *core = dev->core; u32 status, mask, count; status = cx_read(MO_VID_INTSTAT); 
mask = cx_read(MO_VID_INTMSK); if (0 == (status & mask)) return; cx_write(MO_VID_INTSTAT, status); if (irq_debug || (status & mask & ~0xff)) cx88_print_irqbits(core->name, "irq vid", cx88_vid_irqs, ARRAY_SIZE(cx88_vid_irqs), status, mask); /* risc op code error */ if (status & (1 << 16)) { printk(KERN_WARNING "%s/0: video risc op code error\n",core->name); cx_clear(MO_VID_DMACNTRL, 0x11); cx_clear(VID_CAPTURE_CONTROL, 0x06); cx88_sram_channel_dump(core, &cx88_sram_channels[SRAM_CH21]); } /* risc1 y */ if (status & 0x01) { spin_lock(&dev->slock); count = cx_read(MO_VIDY_GPCNT); cx88_wakeup(core, &dev->vidq, count); spin_unlock(&dev->slock); } /* risc1 vbi */ if (status & 0x08) { spin_lock(&dev->slock); count = cx_read(MO_VBI_GPCNT); cx88_wakeup(core, &dev->vbiq, count); spin_unlock(&dev->slock); } /* risc2 y */ if (status & 0x10) { dprintk(2,"stopper video\n"); spin_lock(&dev->slock); restart_video_queue(dev,&dev->vidq); spin_unlock(&dev->slock); } /* risc2 vbi */ if (status & 0x80) { dprintk(2,"stopper vbi\n"); spin_lock(&dev->slock); cx8800_restart_vbi_queue(dev,&dev->vbiq); spin_unlock(&dev->slock); } } static irqreturn_t cx8800_irq(int irq, void *dev_id) { struct cx8800_dev *dev = dev_id; struct cx88_core *core = dev->core; u32 status; int loop, handled = 0; for (loop = 0; loop < 10; loop++) { status = cx_read(MO_PCI_INTSTAT) & (core->pci_irqmask | PCI_INT_VIDINT); if (0 == status) goto out; cx_write(MO_PCI_INTSTAT, status); handled = 1; if (status & core->pci_irqmask) cx88_core_irq(core,status); if (status & PCI_INT_VIDINT) cx8800_vid_irq(dev); } if (10 == loop) { printk(KERN_WARNING "%s/0: irq loop -- clearing mask\n", core->name); cx_write(MO_PCI_INTMSK,0); } out: return IRQ_RETVAL(handled); } /* ----------------------------------------------------------- */ /* exported stuff */ static const struct v4l2_file_operations video_fops = { .owner = THIS_MODULE, .open = video_open, .release = video_release, .read = video_read, .poll = video_poll, .mmap = video_mmap, 
.unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops video_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_g_std = vidioc_g_std, .vidioc_s_std = vidioc_s_std, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = vidioc_g_register, .vidioc_s_register = vidioc_s_register, #endif }; static const struct video_device cx8800_video_template = { .name = "cx8800-video", .fops = &video_fops, .ioctl_ops = &video_ioctl_ops, .tvnorms = CX88_NORMS, }; static const struct v4l2_ioctl_ops vbi_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_g_fmt_vbi_cap = cx8800_vbi_fmt, .vidioc_try_fmt_vbi_cap = cx8800_vbi_fmt, .vidioc_s_fmt_vbi_cap = cx8800_vbi_fmt, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_g_std = vidioc_g_std, .vidioc_s_std = vidioc_s_std, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, #ifdef CONFIG_VIDEO_ADV_DEBUG 
.vidioc_g_register = vidioc_g_register, .vidioc_s_register = vidioc_s_register, #endif }; static const struct video_device cx8800_vbi_template = { .name = "cx8800-vbi", .fops = &video_fops, .ioctl_ops = &vbi_ioctl_ops, .tvnorms = CX88_NORMS, }; static const struct v4l2_file_operations radio_fops = { .owner = THIS_MODULE, .open = video_open, .poll = v4l2_ctrl_poll, .release = video_release, .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops radio_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_g_tuner = radio_g_tuner, .vidioc_s_tuner = radio_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = vidioc_g_register, .vidioc_s_register = vidioc_s_register, #endif }; static const struct video_device cx8800_radio_template = { .name = "cx8800-radio", .fops = &radio_fops, .ioctl_ops = &radio_ioctl_ops, }; static const struct v4l2_ctrl_ops cx8800_ctrl_vid_ops = { .s_ctrl = cx8800_s_vid_ctrl, }; static const struct v4l2_ctrl_ops cx8800_ctrl_aud_ops = { .s_ctrl = cx8800_s_aud_ctrl, }; /* ----------------------------------------------------------- */ static void cx8800_unregister_video(struct cx8800_dev *dev) { if (dev->radio_dev) { if (video_is_registered(dev->radio_dev)) video_unregister_device(dev->radio_dev); else video_device_release(dev->radio_dev); dev->radio_dev = NULL; } if (dev->vbi_dev) { if (video_is_registered(dev->vbi_dev)) video_unregister_device(dev->vbi_dev); else video_device_release(dev->vbi_dev); dev->vbi_dev = NULL; } if (dev->video_dev) { if (video_is_registered(dev->video_dev)) video_unregister_device(dev->video_dev); else video_device_release(dev->video_dev); dev->video_dev = NULL; } } static int cx8800_initdev(struct pci_dev *pci_dev, const struct pci_device_id *pci_id) { struct cx8800_dev *dev; struct cx88_core *core; int err; int 
i; dev = kzalloc(sizeof(*dev),GFP_KERNEL); if (NULL == dev) return -ENOMEM; /* pci init */ dev->pci = pci_dev; if (pci_enable_device(pci_dev)) { err = -EIO; goto fail_free; } core = cx88_core_get(dev->pci); if (NULL == core) { err = -EINVAL; goto fail_free; } dev->core = core; /* print pci info */ dev->pci_rev = pci_dev->revision; pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat); printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, " "latency: %d, mmio: 0x%llx\n", core->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq, dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); pci_set_master(pci_dev); if (!pci_dma_supported(pci_dev,DMA_BIT_MASK(32))) { printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name); err = -EIO; goto fail_core; } /* initialize driver struct */ spin_lock_init(&dev->slock); core->tvnorm = V4L2_STD_NTSC_M; /* init video dma queues */ INIT_LIST_HEAD(&dev->vidq.active); INIT_LIST_HEAD(&dev->vidq.queued); dev->vidq.timeout.function = cx8800_vid_timeout; dev->vidq.timeout.data = (unsigned long)dev; init_timer(&dev->vidq.timeout); cx88_risc_stopper(dev->pci,&dev->vidq.stopper, MO_VID_DMACNTRL,0x11,0x00); /* init vbi dma queues */ INIT_LIST_HEAD(&dev->vbiq.active); INIT_LIST_HEAD(&dev->vbiq.queued); dev->vbiq.timeout.function = cx8800_vbi_timeout; dev->vbiq.timeout.data = (unsigned long)dev; init_timer(&dev->vbiq.timeout); cx88_risc_stopper(dev->pci,&dev->vbiq.stopper, MO_VID_DMACNTRL,0x88,0x00); /* get irq */ err = request_irq(pci_dev->irq, cx8800_irq, IRQF_SHARED, core->name, dev); if (err < 0) { printk(KERN_ERR "%s/0: can't get IRQ %d\n", core->name,pci_dev->irq); goto fail_core; } cx_set(MO_PCI_INTMSK, core->pci_irqmask); for (i = 0; i < CX8800_AUD_CTLS; i++) { const struct cx88_ctrl *cc = &cx8800_aud_ctls[i]; struct v4l2_ctrl *vc; vc = v4l2_ctrl_new_std(&core->audio_hdl, &cx8800_ctrl_aud_ops, cc->id, cc->minimum, cc->maximum, cc->step, cc->default_value); if (vc == NULL) { err = core->audio_hdl.error; goto fail_core; } 
vc->priv = (void *)cc; } for (i = 0; i < CX8800_VID_CTLS; i++) { const struct cx88_ctrl *cc = &cx8800_vid_ctls[i]; struct v4l2_ctrl *vc; vc = v4l2_ctrl_new_std(&core->video_hdl, &cx8800_ctrl_vid_ops, cc->id, cc->minimum, cc->maximum, cc->step, cc->default_value); if (vc == NULL) { err = core->video_hdl.error; goto fail_core; } vc->priv = (void *)cc; if (vc->id == V4L2_CID_CHROMA_AGC) core->chroma_agc = vc; } v4l2_ctrl_add_handler(&core->video_hdl, &core->audio_hdl, NULL); /* load and configure helper modules */ if (core->board.audio_chip == CX88_AUDIO_WM8775) { struct i2c_board_info wm8775_info = { .type = "wm8775", .addr = 0x36 >> 1, .platform_data = &core->wm8775_data, }; struct v4l2_subdev *sd; if (core->boardnr == CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1) core->wm8775_data.is_nova_s = true; else core->wm8775_data.is_nova_s = false; sd = v4l2_i2c_new_subdev_board(&core->v4l2_dev, &core->i2c_adap, &wm8775_info, NULL); if (sd != NULL) { core->sd_wm8775 = sd; sd->grp_id = WM8775_GID; } } if (core->board.audio_chip == CX88_AUDIO_TVAUDIO) { /* This probes for a tda9874 as is used on some Pixelview Ultra boards. 
*/ v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap, "tvaudio", 0, I2C_ADDRS(0xb0 >> 1)); } switch (core->boardnr) { case CX88_BOARD_DVICO_FUSIONHDTV_5_GOLD: case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD: { static const struct i2c_board_info rtc_info = { I2C_BOARD_INFO("isl1208", 0x6f) }; request_module("rtc-isl1208"); core->i2c_rtc = i2c_new_device(&core->i2c_adap, &rtc_info); } /* break intentionally omitted */ case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO: request_module("ir-kbd-i2c"); } /* Sets device info at pci_dev */ pci_set_drvdata(pci_dev, dev); dev->width = 320; dev->height = 240; dev->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24); /* initial device configuration */ mutex_lock(&core->lock); cx88_set_tvnorm(core, core->tvnorm); v4l2_ctrl_handler_setup(&core->video_hdl); v4l2_ctrl_handler_setup(&core->audio_hdl); cx88_video_mux(core, 0); /* register v4l devices */ dev->video_dev = cx88_vdev_init(core,dev->pci, &cx8800_video_template,"video"); video_set_drvdata(dev->video_dev, dev); dev->video_dev->ctrl_handler = &core->video_hdl; err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER, video_nr[core->nr]); if (err < 0) { printk(KERN_ERR "%s/0: can't register video device\n", core->name); goto fail_unreg; } printk(KERN_INFO "%s/0: registered device %s [v4l2]\n", core->name, video_device_node_name(dev->video_dev)); dev->vbi_dev = cx88_vdev_init(core,dev->pci,&cx8800_vbi_template,"vbi"); video_set_drvdata(dev->vbi_dev, dev); err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI, vbi_nr[core->nr]); if (err < 0) { printk(KERN_ERR "%s/0: can't register vbi device\n", core->name); goto fail_unreg; } printk(KERN_INFO "%s/0: registered device %s\n", core->name, video_device_node_name(dev->vbi_dev)); if (core->board.radio.type == CX88_RADIO) { dev->radio_dev = cx88_vdev_init(core,dev->pci, &cx8800_radio_template,"radio"); video_set_drvdata(dev->radio_dev, dev); dev->radio_dev->ctrl_handler = &core->audio_hdl; err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO, 
radio_nr[core->nr]); if (err < 0) { printk(KERN_ERR "%s/0: can't register radio device\n", core->name); goto fail_unreg; } printk(KERN_INFO "%s/0: registered device %s\n", core->name, video_device_node_name(dev->radio_dev)); } /* start tvaudio thread */ if (core->board.tuner_type != TUNER_ABSENT) { core->kthread = kthread_run(cx88_audio_thread, core, "cx88 tvaudio"); if (IS_ERR(core->kthread)) { err = PTR_ERR(core->kthread); printk(KERN_ERR "%s/0: failed to create cx88 audio thread, err=%d\n", core->name, err); } } mutex_unlock(&core->lock); return 0; fail_unreg: cx8800_unregister_video(dev); free_irq(pci_dev->irq, dev); mutex_unlock(&core->lock); fail_core: cx88_core_put(core,dev->pci); fail_free: kfree(dev); return err; } static void cx8800_finidev(struct pci_dev *pci_dev) { struct cx8800_dev *dev = pci_get_drvdata(pci_dev); struct cx88_core *core = dev->core; /* stop thread */ if (core->kthread) { kthread_stop(core->kthread); core->kthread = NULL; } if (core->ir) cx88_ir_stop(core); cx88_shutdown(core); /* FIXME */ pci_disable_device(pci_dev); /* unregister stuff */ free_irq(pci_dev->irq, dev); cx8800_unregister_video(dev); /* free memory */ btcx_riscmem_free(dev->pci,&dev->vidq.stopper); cx88_core_put(core,dev->pci); kfree(dev); } #ifdef CONFIG_PM static int cx8800_suspend(struct pci_dev *pci_dev, pm_message_t state) { struct cx8800_dev *dev = pci_get_drvdata(pci_dev); struct cx88_core *core = dev->core; unsigned long flags; /* stop video+vbi capture */ spin_lock_irqsave(&dev->slock, flags); if (!list_empty(&dev->vidq.active)) { printk("%s/0: suspend video\n", core->name); stop_video_dma(dev); del_timer(&dev->vidq.timeout); } if (!list_empty(&dev->vbiq.active)) { printk("%s/0: suspend vbi\n", core->name); cx8800_stop_vbi_dma(dev); del_timer(&dev->vbiq.timeout); } spin_unlock_irqrestore(&dev->slock, flags); if (core->ir) cx88_ir_stop(core); /* FIXME -- shutdown device */ cx88_shutdown(core); pci_save_state(pci_dev); if (0 != pci_set_power_state(pci_dev, 
pci_choose_state(pci_dev, state))) { pci_disable_device(pci_dev); dev->state.disabled = 1; } return 0; } static int cx8800_resume(struct pci_dev *pci_dev) { struct cx8800_dev *dev = pci_get_drvdata(pci_dev); struct cx88_core *core = dev->core; unsigned long flags; int err; if (dev->state.disabled) { err=pci_enable_device(pci_dev); if (err) { printk(KERN_ERR "%s/0: can't enable device\n", core->name); return err; } dev->state.disabled = 0; } err= pci_set_power_state(pci_dev, PCI_D0); if (err) { printk(KERN_ERR "%s/0: can't set power state\n", core->name); pci_disable_device(pci_dev); dev->state.disabled = 1; return err; } pci_restore_state(pci_dev); /* FIXME: re-initialize hardware */ cx88_reset(core); if (core->ir) cx88_ir_start(core); cx_set(MO_PCI_INTMSK, core->pci_irqmask); /* restart video+vbi capture */ spin_lock_irqsave(&dev->slock, flags); if (!list_empty(&dev->vidq.active)) { printk("%s/0: resume video\n", core->name); restart_video_queue(dev,&dev->vidq); } if (!list_empty(&dev->vbiq.active)) { printk("%s/0: resume vbi\n", core->name); cx8800_restart_vbi_queue(dev,&dev->vbiq); } spin_unlock_irqrestore(&dev->slock, flags); return 0; } #endif /* ----------------------------------------------------------- */ static const struct pci_device_id cx8800_pci_tbl[] = { { .vendor = 0x14f1, .device = 0x8800, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, },{ /* --- end of list --- */ } }; MODULE_DEVICE_TABLE(pci, cx8800_pci_tbl); static struct pci_driver cx8800_pci_driver = { .name = "cx8800", .id_table = cx8800_pci_tbl, .probe = cx8800_initdev, .remove = cx8800_finidev, #ifdef CONFIG_PM .suspend = cx8800_suspend, .resume = cx8800_resume, #endif }; module_pci_driver(cx8800_pci_driver);
gpl-2.0
synapse-wireless/linux-at91
drivers/base/devcoredump.c
327
7161
/* * This file is provided under the GPLv2 license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * Author: Johannes Berg <johannes@sipsolutions.net> */ #include <linux/module.h> #include <linux/device.h> #include <linux/devcoredump.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/workqueue.h> /* if data isn't read by userspace after 5 minutes then delete it */ #define DEVCD_TIMEOUT (HZ * 60 * 5) struct devcd_entry { struct device devcd_dev; const void *data; size_t datalen; struct module *owner; ssize_t (*read)(char *buffer, loff_t offset, size_t count, const void *data, size_t datalen); void (*free)(const void *data); struct delayed_work del_wk; struct device *failing_dev; }; static struct devcd_entry *dev_to_devcd(struct device *dev) { return container_of(dev, struct devcd_entry, devcd_dev); } static void devcd_dev_release(struct device *dev) { struct devcd_entry *devcd = dev_to_devcd(dev); devcd->free(devcd->data); module_put(devcd->owner); /* * this seems racy, but I don't see a notifier or such on * a struct device to know when it goes away? 
*/ if (devcd->failing_dev->kobj.sd) sysfs_delete_link(&devcd->failing_dev->kobj, &dev->kobj, "devcoredump"); put_device(devcd->failing_dev); kfree(devcd); } static void devcd_del(struct work_struct *wk) { struct devcd_entry *devcd; devcd = container_of(wk, struct devcd_entry, del_wk.work); device_del(&devcd->devcd_dev); put_device(&devcd->devcd_dev); } static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = kobj_to_dev(kobj); struct devcd_entry *devcd = dev_to_devcd(dev); return devcd->read(buffer, offset, count, devcd->data, devcd->datalen); } static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = kobj_to_dev(kobj); struct devcd_entry *devcd = dev_to_devcd(dev); mod_delayed_work(system_wq, &devcd->del_wk, 0); return count; } static struct bin_attribute devcd_attr_data = { .attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, }, .size = 0, .read = devcd_data_read, .write = devcd_data_write, }; static struct bin_attribute *devcd_dev_bin_attrs[] = { &devcd_attr_data, NULL, }; static const struct attribute_group devcd_dev_group = { .bin_attrs = devcd_dev_bin_attrs, }; static const struct attribute_group *devcd_dev_groups[] = { &devcd_dev_group, NULL, }; static struct class devcd_class = { .name = "devcoredump", .owner = THIS_MODULE, .dev_release = devcd_dev_release, .dev_groups = devcd_dev_groups, }; static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count, const void *data, size_t datalen) { if (offset > datalen) return -EINVAL; if (offset + count > datalen) count = datalen - offset; if (count) memcpy(buffer, ((u8 *)data) + offset, count); return count; } /** * dev_coredumpv - create device coredump with vmalloc data * @dev: the struct device for the crashed device * @data: vmalloc data containing the device coredump * @datalen: 
length of the data * @gfp: allocation flags * * This function takes ownership of the vmalloc'ed data and will free * it when it is no longer used. See dev_coredumpm() for more information. */ void dev_coredumpv(struct device *dev, const void *data, size_t datalen, gfp_t gfp) { dev_coredumpm(dev, NULL, data, datalen, gfp, devcd_readv, vfree); } EXPORT_SYMBOL_GPL(dev_coredumpv); static int devcd_match_failing(struct device *dev, const void *failing) { struct devcd_entry *devcd = dev_to_devcd(dev); return devcd->failing_dev == failing; } /** * dev_coredumpm - create device coredump with read/free methods * @dev: the struct device for the crashed device * @owner: the module that contains the read/free functions, use %THIS_MODULE * @data: data cookie for the @read/@free functions * @datalen: length of the data * @gfp: allocation flags * @read: function to read from the given buffer * @free: function to free the given buffer * * Creates a new device coredump for the given device. If a previous one hasn't * been read yet, the new coredump is discarded. The data lifetime is determined * by the device coredump framework and when it is no longer needed the @free * function will be called to free the data. 
*/ void dev_coredumpm(struct device *dev, struct module *owner, const void *data, size_t datalen, gfp_t gfp, ssize_t (*read)(char *buffer, loff_t offset, size_t count, const void *data, size_t datalen), void (*free)(const void *data)) { static atomic_t devcd_count = ATOMIC_INIT(0); struct devcd_entry *devcd; struct device *existing; existing = class_find_device(&devcd_class, NULL, dev, devcd_match_failing); if (existing) { put_device(existing); goto free; } if (!try_module_get(owner)) goto free; devcd = kzalloc(sizeof(*devcd), gfp); if (!devcd) goto put_module; devcd->owner = owner; devcd->data = data; devcd->datalen = datalen; devcd->read = read; devcd->free = free; devcd->failing_dev = get_device(dev); device_initialize(&devcd->devcd_dev); dev_set_name(&devcd->devcd_dev, "devcd%d", atomic_inc_return(&devcd_count)); devcd->devcd_dev.class = &devcd_class; if (device_add(&devcd->devcd_dev)) goto put_device; if (sysfs_create_link(&devcd->devcd_dev.kobj, &dev->kobj, "failing_device")) /* nothing - symlink will be missing */; if (sysfs_create_link(&dev->kobj, &devcd->devcd_dev.kobj, "devcoredump")) /* nothing - symlink will be missing */; INIT_DELAYED_WORK(&devcd->del_wk, devcd_del); schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT); return; put_device: put_device(&devcd->devcd_dev); put_module: module_put(owner); free: free(data); } EXPORT_SYMBOL_GPL(dev_coredumpm); static int __init devcoredump_init(void) { return class_register(&devcd_class); } __initcall(devcoredump_init); static int devcd_free(struct device *dev, void *data) { struct devcd_entry *devcd = dev_to_devcd(dev); flush_delayed_work(&devcd->del_wk); return 0; } static void __exit devcoredump_exit(void) { class_for_each_device(&devcd_class, NULL, NULL, devcd_free); class_unregister(&devcd_class); } __exitcall(devcoredump_exit);
gpl-2.0
rettigs/linux-yocto-3.14
drivers/scsi/bfa/bfa_fcs.c
583
41916
/* * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * * Linux driver for Brocade Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ /* * bfa_fcs.c BFA FCS main */ #include "bfad_drv.h" #include "bfad_im.h" #include "bfa_fcs.h" #include "bfa_fcbuild.h" BFA_TRC_FILE(FCS, FCS); /* * FCS sub-modules */ struct bfa_fcs_mod_s { void (*attach) (struct bfa_fcs_s *fcs); void (*modinit) (struct bfa_fcs_s *fcs); void (*modexit) (struct bfa_fcs_s *fcs); }; #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit } static struct bfa_fcs_mod_s fcs_modules[] = { { bfa_fcs_port_attach, NULL, NULL }, { bfa_fcs_uf_attach, NULL, NULL }, { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit, bfa_fcs_fabric_modexit }, }; /* * fcs_api BFA FCS API */ static void bfa_fcs_exit_comp(void *fcs_cbarg) { struct bfa_fcs_s *fcs = fcs_cbarg; struct bfad_s *bfad = fcs->bfad; complete(&bfad->comp); } /* * fcs_api BFA FCS API */ /* * fcs attach -- called once to initialize data structures at driver attach time */ void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, bfa_boolean_t min_cfg) { int i; struct bfa_fcs_mod_s *mod; fcs->bfa = bfa; fcs->bfad = bfad; fcs->min_cfg = min_cfg; fcs->num_rport_logins = 0; bfa->fcs = BFA_TRUE; fcbuild_init(); for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { mod = &fcs_modules[i]; if (mod->attach) mod->attach(fcs); } } /* * fcs initialization, called once after bfa initialization is complete */ void bfa_fcs_init(struct bfa_fcs_s *fcs) { int i; struct 
bfa_fcs_mod_s *mod; for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { mod = &fcs_modules[i]; if (mod->modinit) mod->modinit(fcs); } } /* * FCS update cfg - reset the pwwn/nwwn of fabric base logical port * with values learned during bfa_init firmware GETATTR REQ. */ void bfa_fcs_update_cfg(struct bfa_fcs_s *fcs) { struct bfa_fcs_fabric_s *fabric = &fcs->fabric; struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; struct bfa_ioc_s *ioc = &fabric->fcs->bfa->ioc; port_cfg->nwwn = ioc->attr->nwwn; port_cfg->pwwn = ioc->attr->pwwn; } /* * Stop FCS operations. */ void bfa_fcs_stop(struct bfa_fcs_s *fcs) { bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs); bfa_wc_up(&fcs->wc); bfa_fcs_fabric_modstop(fcs); bfa_wc_wait(&fcs->wc); } /* * fcs pbc vport initialization */ void bfa_fcs_pbc_vport_init(struct bfa_fcs_s *fcs) { int i, npbc_vports; struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS]; /* Initialize pbc vports */ if (!fcs->min_cfg) { npbc_vports = bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports); for (i = 0; i < npbc_vports; i++) bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]); } } /* * brief * FCS driver details initialization. * * param[in] fcs FCS instance * param[in] driver_info Driver Details * * return None */ void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, struct bfa_fcs_driver_info_s *driver_info) { fcs->driver_info = *driver_info; bfa_fcs_fabric_psymb_init(&fcs->fabric); bfa_fcs_fabric_nsymb_init(&fcs->fabric); } /* * brief * FCS instance cleanup and exit. * * param[in] fcs FCS instance * return None */ void bfa_fcs_exit(struct bfa_fcs_s *fcs) { struct bfa_fcs_mod_s *mod; int nmods, i; bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs); nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]); for (i = 0; i < nmods; i++) { mod = &fcs_modules[i]; if (mod->modexit) { bfa_wc_up(&fcs->wc); mod->modexit(fcs); } } bfa_wc_wait(&fcs->wc); } /* * Fabric module implementation. 
*/ #define BFA_FCS_FABRIC_RETRY_DELAY (2000) /* Milliseconds */ #define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */ #define bfa_fcs_fabric_set_opertype(__fabric) do { \ if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \ == BFA_PORT_TOPOLOGY_P2P) { \ if (fabric->fab_type == BFA_FCS_FABRIC_SWITCHED) \ (__fabric)->oper_type = BFA_PORT_TYPE_NPORT; \ else \ (__fabric)->oper_type = BFA_PORT_TYPE_P2P; \ } else \ (__fabric)->oper_type = BFA_PORT_TYPE_NLPORT; \ } while (0) /* * forward declarations */ static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_delay(void *cbarg); static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_delete_comp(void *cbarg); static void bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_stop_comp(void *cbarg); static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len); static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len); static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t status, u32 rsp_len, u32 resid_len, struct fchs_s *rspfchs); static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_flogi_retry(struct 
bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); /* * Beginning state before fabric creation. */ static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_CREATE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); bfa_fcs_fabric_init(fabric); bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg); break; case BFA_FCS_FABRIC_SM_LINK_UP: case BFA_FCS_FABRIC_SM_LINK_DOWN: break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Beginning state before fabric creation. 
*/ static void bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { struct bfa_s *bfa = fabric->fcs->bfa; bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_START: if (!bfa_fcport_is_linkup(fabric->fcs->bfa)) { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); break; } if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) { fabric->fab_type = BFA_FCS_FABRIC_LOOP; fabric->bport.pid = bfa_fcport_get_myalpa(bfa); fabric->bport.pid = bfa_hton3b(fabric->bport.pid); bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); bfa_fcs_fabric_set_opertype(fabric); bfa_fcs_lport_online(&fabric->bport); } else { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); bfa_fcs_fabric_login(fabric); } break; case BFA_FCS_FABRIC_SM_LINK_UP: case BFA_FCS_FABRIC_SM_LINK_DOWN: break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Link is down, awaiting LINK UP event from port. This is also the * first state at fabric creation. 
*/ static void bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { struct bfa_s *bfa = fabric->fcs->bfa; bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_UP: if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP) { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); bfa_fcs_fabric_login(fabric); break; } fabric->fab_type = BFA_FCS_FABRIC_LOOP; fabric->bport.pid = bfa_fcport_get_myalpa(bfa); fabric->bport.pid = bfa_hton3b(fabric->bport.pid); bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); bfa_fcs_fabric_set_opertype(fabric); bfa_fcs_lport_online(&fabric->bport); break; case BFA_FCS_FABRIC_SM_RETRY_OP: case BFA_FCS_FABRIC_SM_LOOPBACK: break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; case BFA_FCS_FABRIC_SM_STOP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); bfa_fcs_fabric_stop(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * FLOGI is in progress, awaiting FLOGI reply. 
*/ static void bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_CONT_OP: bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); fabric->fab_type = BFA_FCS_FABRIC_SWITCHED; if (fabric->auth_reqd && fabric->is_auth) { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth); bfa_trc(fabric->fcs, event); } else { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); bfa_fcs_fabric_notify_online(fabric); } break; case BFA_FCS_FABRIC_SM_RETRY_OP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi_retry); bfa_timer_start(fabric->fcs->bfa, &fabric->delay_timer, bfa_fcs_fabric_delay, fabric, BFA_FCS_FABRIC_RETRY_DELAY); break; case BFA_FCS_FABRIC_SM_LOOPBACK: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_fabric_set_opertype(fabric); break; case BFA_FCS_FABRIC_SM_NO_FABRIC: fabric->fab_type = BFA_FCS_FABRIC_N2N; bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); bfa_fcs_fabric_notify_online(fabric); bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric); break; case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_DELAYED: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); bfa_fcs_fabric_login(fabric); break; case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); 
bfa_timer_stop(&fabric->delay_timer); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_timer_stop(&fabric->delay_timer); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Authentication is in progress, awaiting authentication results. */ static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_AUTH_FAILED: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); bfa_fcs_fabric_notify_online(fabric); break; case BFA_FCS_FABRIC_SM_PERF_EVFP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp); break; case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Authentication failed */ void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_fcs_fabric_notify_offline(fabric); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Port is in loopback mode. 
*/ void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_fcs_fabric_notify_offline(fabric); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * There is no attached fabric - private loop or NPort-to-NPort topology. */ static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_fabric_notify_offline(fabric); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; case BFA_FCS_FABRIC_SM_NO_FABRIC: bfa_trc(fabric->fcs, fabric->bb_credit); bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); break; case BFA_FCS_FABRIC_SM_RETRY_OP: break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Fabric is online - normal operating state. 
*/ void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { struct bfa_s *bfa = fabric->fcs->bfa; bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) { bfa_fcs_lport_offline(&fabric->bport); } else { bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_fabric_notify_offline(fabric); } break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; case BFA_FCS_FABRIC_SM_STOP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_stopping); bfa_fcs_fabric_stop(fabric); break; case BFA_FCS_FABRIC_SM_AUTH_FAILED: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Exchanging virtual fabric parameters. */ static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_CONT_OP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp_done); break; case BFA_FCS_FABRIC_SM_ISOLATE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_isolated); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * EVFP exchange complete and VFT tagging is enabled. */ static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); } /* * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F). 
*/ static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad; char pwwn_ptr[BFA_STRING_32]; bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); wwn2str(pwwn_ptr, fabric->bport.port_cfg.pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Port is isolated due to VF_ID mismatch. " "PWWN: %s Port VF_ID: %04x switch port VF_ID: %04x.", pwwn_ptr, fabric->fcs->port_vfid, fabric->event_arg.swp_vfid); } /* * Fabric is being deleted, awaiting vport delete completions. */ static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_DELCOMP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); bfa_wc_down(&fabric->fcs->wc); break; case BFA_FCS_FABRIC_SM_LINK_UP: break; case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_fcs_fabric_notify_offline(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Fabric is being stopped, awaiting vport stop completions. 
*/ static void bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { struct bfa_s *bfa = fabric->fcs->bfa; bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_STOPCOMP: if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); } else { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_LOGOUT); } break; case BFA_FCS_FABRIC_SM_LINK_UP: break; case BFA_FCS_FABRIC_SM_LINK_DOWN: if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); else bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Fabric is being stopped, cleanup without FLOGO */ static void bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_STOPCOMP: case BFA_FCS_FABRIC_SM_LOGOCOMP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); bfa_wc_down(&(fabric->fcs)->wc); break; case BFA_FCS_FABRIC_SM_LINK_DOWN: /* * Ignore - can get this event if we get notified about IOC down * before the fabric completion callbk is done. */ break; default: bfa_sm_fault(fabric->fcs, event); } } /* * fcs_fabric_private fabric private functions */ static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric) { struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; port_cfg->roles = BFA_LPORT_ROLE_FCP_IM; port_cfg->nwwn = fabric->fcs->bfa->ioc.attr->nwwn; port_cfg->pwwn = fabric->fcs->bfa->ioc.attr->pwwn; } /* * Port Symbolic Name Creation for base port. 
*/ void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric) { struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0}; struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info; bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model); /* Model name/number */ strncpy((char *)&port_cfg->sym_name, model, BFA_FCS_PORT_SYMBNAME_MODEL_SZ); strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); /* Driver Version */ strncat((char *)&port_cfg->sym_name, (char *)driver_info->version, BFA_FCS_PORT_SYMBNAME_VERSION_SZ); strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); /* Host machine name */ strncat((char *)&port_cfg->sym_name, (char *)driver_info->host_machine_name, BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ); strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); /* * Host OS Info : * If OS Patch Info is not there, do not truncate any bytes from the * OS name string and instead copy the entire OS info string (64 bytes). 
*/ if (driver_info->host_os_patch[0] == '\0') { strncat((char *)&port_cfg->sym_name, (char *)driver_info->host_os_name, BFA_FCS_OS_STR_LEN); strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); } else { strncat((char *)&port_cfg->sym_name, (char *)driver_info->host_os_name, BFA_FCS_PORT_SYMBNAME_OSINFO_SZ); strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); /* Append host OS Patch Info */ strncat((char *)&port_cfg->sym_name, (char *)driver_info->host_os_patch, BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ); } /* null terminate */ port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0; } /* * Node Symbolic Name Creation for base port and all vports */ void bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric) { struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0}; struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info; bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model); /* Model name/number */ strncpy((char *)&port_cfg->node_sym_name, model, BFA_FCS_PORT_SYMBNAME_MODEL_SZ); strncat((char *)&port_cfg->node_sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); /* Driver Version */ strncat((char *)&port_cfg->node_sym_name, (char *)driver_info->version, BFA_FCS_PORT_SYMBNAME_VERSION_SZ); strncat((char *)&port_cfg->node_sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); /* Host machine name */ strncat((char *)&port_cfg->node_sym_name, (char *)driver_info->host_machine_name, BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ); strncat((char *)&port_cfg->node_sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); /* null terminate */ port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0; } /* * bfa lps login completion callback */ void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status) { 
struct bfa_fcs_fabric_s *fabric = uarg; bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, status); switch (status) { case BFA_STATUS_OK: fabric->stats.flogi_accepts++; break; case BFA_STATUS_INVALID_MAC: /* Only for CNA */ fabric->stats.flogi_acc_err++; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); return; case BFA_STATUS_EPROTOCOL: switch (fabric->lps->ext_status) { case BFA_EPROTO_BAD_ACCEPT: fabric->stats.flogi_acc_err++; break; case BFA_EPROTO_UNKNOWN_RSP: fabric->stats.flogi_unknown_rsp++; break; default: break; } bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); return; case BFA_STATUS_FABRIC_RJT: fabric->stats.flogi_rejects++; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); return; default: fabric->stats.flogi_rsp_err++; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); return; } fabric->bb_credit = fabric->lps->pr_bbcred; bfa_trc(fabric->fcs, fabric->bb_credit); if (!(fabric->lps->brcd_switch)) fabric->fabric_name = fabric->lps->pr_nwwn; /* * Check port type. It should be 1 = F-port. */ if (fabric->lps->fport) { fabric->bport.pid = fabric->lps->lp_pid; fabric->is_npiv = fabric->lps->npiv_en; fabric->is_auth = fabric->lps->auth_req; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP); } else { /* * Nport-2-Nport direct attached */ fabric->bport.port_topo.pn2n.rem_port_wwn = fabric->lps->pr_pwwn; fabric->fab_type = BFA_FCS_FABRIC_N2N; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC); } bfa_trc(fabric->fcs, fabric->bport.pid); bfa_trc(fabric->fcs, fabric->is_npiv); bfa_trc(fabric->fcs, fabric->is_auth); } /* * Allocate and send FLOGI. 
*/ static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric) { struct bfa_s *bfa = fabric->fcs->bfa; struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; u8 alpa = 0; bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa), pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd); fabric->stats.flogi_sent++; } static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric) { struct bfa_fcs_vport_s *vport; struct list_head *qe, *qen; bfa_trc(fabric->fcs, fabric->fabric_name); bfa_fcs_fabric_set_opertype(fabric); fabric->stats.fabric_onlines++; /* * notify online event to base and then virtual ports */ bfa_fcs_lport_online(&fabric->bport); list_for_each_safe(qe, qen, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; bfa_fcs_vport_online(vport); } } static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric) { struct bfa_fcs_vport_s *vport; struct list_head *qe, *qen; bfa_trc(fabric->fcs, fabric->fabric_name); fabric->stats.fabric_offlines++; /* * notify offline event first to vports and then base port. */ list_for_each_safe(qe, qen, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; bfa_fcs_vport_offline(vport); } bfa_fcs_lport_offline(&fabric->bport); fabric->fabric_name = 0; fabric->fabric_ip_addr[0] = 0; } static void bfa_fcs_fabric_delay(void *cbarg) { struct bfa_fcs_fabric_s *fabric = cbarg; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED); } /* * Stop all vports and wait for vport stop completions. */ static void bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric) { struct bfa_fcs_vport_s *vport; struct list_head *qe, *qen; bfa_wc_init(&fabric->stop_wc, bfa_fcs_fabric_stop_comp, fabric); list_for_each_safe(qe, qen, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; bfa_wc_up(&fabric->stop_wc); bfa_fcs_vport_fcs_stop(vport); } bfa_wc_up(&fabric->stop_wc); bfa_fcs_lport_stop(&fabric->bport); bfa_wc_wait(&fabric->stop_wc); } /* * Delete all vports and wait for vport delete completions. 
*/ static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric) { struct bfa_fcs_vport_s *vport; struct list_head *qe, *qen; list_for_each_safe(qe, qen, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; bfa_fcs_vport_fcs_delete(vport); } bfa_fcs_lport_delete(&fabric->bport); bfa_wc_wait(&fabric->wc); } static void bfa_fcs_fabric_delete_comp(void *cbarg) { struct bfa_fcs_fabric_s *fabric = cbarg; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP); } static void bfa_fcs_fabric_stop_comp(void *cbarg) { struct bfa_fcs_fabric_s *fabric = cbarg; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_STOPCOMP); } /* * fcs_fabric_public fabric public functions */ /* * Attach time initialization. */ void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs) { struct bfa_fcs_fabric_s *fabric; fabric = &fcs->fabric; memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s)); /* * Initialize base fabric. */ fabric->fcs = fcs; INIT_LIST_HEAD(&fabric->vport_q); INIT_LIST_HEAD(&fabric->vf_q); fabric->lps = bfa_lps_alloc(fcs->bfa); WARN_ON(!fabric->lps); /* * Initialize fabric delete completion handler. Fabric deletion is * complete when the last vport delete is complete. */ bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric); bfa_wc_up(&fabric->wc); /* For the base port */ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL); } void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs) { bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE); bfa_trc(fcs, 0); } /* * Module cleanup */ void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs) { struct bfa_fcs_fabric_s *fabric; bfa_trc(fcs, 0); /* * Cleanup base fabric. 
*/ fabric = &fcs->fabric; bfa_lps_delete(fabric->lps); bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE); } /* * Fabric module stop -- stop FCS actions */ void bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs) { struct bfa_fcs_fabric_s *fabric; bfa_trc(fcs, 0); fabric = &fcs->fabric; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_STOP); } /* * Fabric module start -- kick starts FCS actions */ void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs) { struct bfa_fcs_fabric_s *fabric; bfa_trc(fcs, 0); fabric = &fcs->fabric; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START); } /* * Link up notification from BFA physical port module. */ void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP); } /* * Link down notification from BFA physical port module. */ void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN); } /* * A child vport is being created in the fabric. * * Call from vport module at vport creation. A list of base port and vports * belonging to a fabric is maintained to propagate link events. * * param[in] fabric - Fabric instance. This can be a base fabric or vf. * param[in] vport - Vport being created. * * @return None (always succeeds) */ void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, struct bfa_fcs_vport_s *vport) { /* * - add vport to fabric's vport_q */ bfa_trc(fabric->fcs, fabric->vf_id); list_add_tail(&vport->qe, &fabric->vport_q); fabric->num_vports++; bfa_wc_up(&fabric->wc); } /* * A child vport is being deleted from fabric. * * Vport is being deleted. 
*/ void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric, struct bfa_fcs_vport_s *vport) { list_del(&vport->qe); fabric->num_vports--; bfa_wc_down(&fabric->wc); } /* * Lookup for a vport within a fabric given its pwwn */ struct bfa_fcs_vport_s * bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn) { struct bfa_fcs_vport_s *vport; struct list_head *qe; list_for_each(qe, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; if (bfa_fcs_lport_get_pwwn(&vport->lport) == pwwn) return vport; } return NULL; } /* * Get OUI of the attached switch. * * Note : Use of this function should be avoided as much as possible. * This function should be used only if there is any requirement * to check for FOS version below 6.3. * To check if the attached fabric is a brocade fabric, use * bfa_lps_is_brcd_fabric() which works for FOS versions 6.3 * or above only. */ u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric) { wwn_t fab_nwwn; u8 *tmp; u16 oui; fab_nwwn = fabric->lps->pr_nwwn; tmp = (u8 *)&fab_nwwn; oui = (tmp[3] << 8) | tmp[4]; return oui; } /* * Unsolicited frame receive handling. */ void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len) { u32 pid = fchs->d_id; struct bfa_fcs_vport_s *vport; struct list_head *qe; struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); struct fc_logi_s *flogi = (struct fc_logi_s *) els_cmd; bfa_trc(fabric->fcs, len); bfa_trc(fabric->fcs, pid); /* * Look for our own FLOGI frames being looped back. This means an * external loopback cable is in place. Our own FLOGI frames are * sometimes looped back when switch port gets temporarily bypassed. */ if ((pid == bfa_ntoh3b(FC_FABRIC_PORT)) && (els_cmd->els_code == FC_ELS_FLOGI) && (flogi->port_name == bfa_fcs_lport_get_pwwn(&fabric->bport))) { bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK); return; } /* * FLOGI/EVFP exchanges should be consumed by base fabric. 
*/ if (fchs->d_id == bfa_hton3b(FC_FABRIC_PORT)) { bfa_trc(fabric->fcs, pid); bfa_fcs_fabric_process_uf(fabric, fchs, len); return; } if (fabric->bport.pid == pid) { /* * All authentication frames should be routed to auth */ bfa_trc(fabric->fcs, els_cmd->els_code); if (els_cmd->els_code == FC_ELS_AUTH) { bfa_trc(fabric->fcs, els_cmd->els_code); return; } bfa_trc(fabric->fcs, *(u8 *) ((u8 *) fchs)); bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len); return; } /* * look for a matching local port ID */ list_for_each(qe, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; if (vport->lport.pid == pid) { bfa_fcs_lport_uf_recv(&vport->lport, fchs, len); return; } } if (!bfa_fcs_fabric_is_switched(fabric)) bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len); bfa_trc(fabric->fcs, fchs->type); } /* * Unsolicited frames to be processed by fabric. */ static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len) { struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); bfa_trc(fabric->fcs, els_cmd->els_code); switch (els_cmd->els_code) { case FC_ELS_FLOGI: bfa_fcs_fabric_process_flogi(fabric, fchs, len); break; default: /* * need to generate a LS_RJT */ break; } } /* * Process incoming FLOGI */ static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len) { struct fc_logi_s *flogi = (struct fc_logi_s *) (fchs + 1); struct bfa_fcs_lport_s *bport = &fabric->bport; bfa_trc(fabric->fcs, fchs->s_id); fabric->stats.flogi_rcvd++; /* * Check port type. It should be 0 = n-port. 
*/ if (flogi->csp.port_type) { /* * @todo: may need to send a LS_RJT */ bfa_trc(fabric->fcs, flogi->port_name); fabric->stats.flogi_rejected++; return; } fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred); bport->port_topo.pn2n.rem_port_wwn = flogi->port_name; bport->port_topo.pn2n.reply_oxid = fchs->ox_id; /* * Send a Flogi Acc */ bfa_fcs_fabric_send_flogi_acc(fabric); bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC); } static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric) { struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; struct bfa_fcs_lport_n2n_s *n2n_port = &fabric->bport.port_topo.pn2n; struct bfa_s *bfa = fabric->fcs->bfa; struct bfa_fcxp_s *fcxp; u16 reqlen; struct fchs_s fchs; fcxp = bfa_fcs_fcxp_alloc(fabric->fcs, BFA_FALSE); /* * Do not expect this failure -- expect remote node to retry */ if (!fcxp) return; reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_hton3b(FC_FABRIC_PORT), n2n_port->reply_oxid, pcfg->pwwn, pcfg->nwwn, bfa_fcport_get_maxfrsize(bfa), bfa_fcport_get_rx_bbcredit(bfa), 0); bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->bfa_tag, BFA_FALSE, FC_CLASS_3, reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric, FC_MAX_PDUSZ, 0); } /* * Flogi Acc completion callback. 
*/ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t status, u32 rsp_len, u32 resid_len, struct fchs_s *rspfchs) { struct bfa_fcs_fabric_s *fabric = cbarg; bfa_trc(fabric->fcs, status); } /* * Send AEN notification */ static void bfa_fcs_fabric_aen_post(struct bfa_fcs_lport_s *port, enum bfa_port_aen_event event) { struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad; struct bfa_aen_entry_s *aen_entry; bfad_get_aen_entry(bfad, aen_entry); if (!aen_entry) return; aen_entry->aen_data.port.pwwn = bfa_fcs_lport_get_pwwn(port); aen_entry->aen_data.port.fwwn = bfa_fcs_lport_get_fabric_name(port); /* Send the AEN notification */ bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq, BFA_AEN_CAT_PORT, event); } /* * * @param[in] fabric - fabric * @param[in] wwn_t - new fabric name * * @return - none */ void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric, wwn_t fabric_name) { struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad; char pwwn_ptr[BFA_STRING_32]; char fwwn_ptr[BFA_STRING_32]; bfa_trc(fabric->fcs, fabric_name); if (fabric->fabric_name == 0) { /* * With BRCD switches, we don't get Fabric Name in FLOGI. * Don't generate a fabric name change event in this case. */ fabric->fabric_name = fabric_name; } else { fabric->fabric_name = fabric_name; wwn2str(pwwn_ptr, bfa_fcs_lport_get_pwwn(&fabric->bport)); wwn2str(fwwn_ptr, bfa_fcs_lport_get_fabric_name(&fabric->bport)); BFA_LOG(KERN_WARNING, bfad, bfa_log_level, "Base port WWN = %s Fabric WWN = %s\n", pwwn_ptr, fwwn_ptr); bfa_fcs_fabric_aen_post(&fabric->bport, BFA_PORT_AEN_FABRIC_NAME_CHANGE); } } void bfa_cb_lps_flogo_comp(void *bfad, void *uarg) { struct bfa_fcs_fabric_s *fabric = uarg; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOGOCOMP); } /* * Returns FCS vf structure for a given vf_id. 
* * param[in] vf_id - VF_ID * * return * If lookup succeeds, retuns fcs vf object, otherwise returns NULL */ bfa_fcs_vf_t * bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id) { bfa_trc(fcs, vf_id); if (vf_id == FC_VF_ID_NULL) return &fcs->fabric; return NULL; } /* * Return the list of local logical ports present in the given VF. * * @param[in] vf vf for which logical ports are returned * @param[out] lpwwn returned logical port wwn list * @param[in,out] nlports in:size of lpwwn list; * out:total elements present, * actual elements returned is limited by the size */ void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports) { struct list_head *qe; struct bfa_fcs_vport_s *vport; int i = 0; struct bfa_fcs_s *fcs; if (vf == NULL || lpwwn == NULL || *nlports == 0) return; fcs = vf->fcs; bfa_trc(fcs, vf->vf_id); bfa_trc(fcs, (uint32_t) *nlports); lpwwn[i++] = vf->bport.port_cfg.pwwn; list_for_each(qe, &vf->vport_q) { if (i >= *nlports) break; vport = (struct bfa_fcs_vport_s *) qe; lpwwn[i++] = vport->lport.port_cfg.pwwn; } bfa_trc(fcs, i); *nlports = i; } /* * BFA FCS PPORT ( physical port) */ static void bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event) { struct bfa_fcs_s *fcs = cbarg; bfa_trc(fcs, event); switch (event) { case BFA_PORT_LINKUP: bfa_fcs_fabric_link_up(&fcs->fabric); break; case BFA_PORT_LINKDOWN: bfa_fcs_fabric_link_down(&fcs->fabric); break; default: WARN_ON(1); } } void bfa_fcs_port_attach(struct bfa_fcs_s *fcs) { bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs); } /* * BFA FCS UF ( Unsolicited Frames) */ /* * BFA callback for unsolicited frame receive handler. 
* * @param[in] cbarg callback arg for receive handler * @param[in] uf unsolicited frame descriptor * * @return None */ static void bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf) { struct bfa_fcs_s *fcs = (struct bfa_fcs_s *) cbarg; struct fchs_s *fchs = bfa_uf_get_frmbuf(uf); u16 len = bfa_uf_get_frmlen(uf); struct fc_vft_s *vft; struct bfa_fcs_fabric_s *fabric; /* * check for VFT header */ if (fchs->routing == FC_RTG_EXT_HDR && fchs->cat_info == FC_CAT_VFT_HDR) { bfa_stats(fcs, uf.tagged); vft = bfa_uf_get_frmbuf(uf); if (fcs->port_vfid == vft->vf_id) fabric = &fcs->fabric; else fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id); /* * drop frame if vfid is unknown */ if (!fabric) { WARN_ON(1); bfa_stats(fcs, uf.vfid_unknown); bfa_uf_free(uf); return; } /* * skip vft header */ fchs = (struct fchs_s *) (vft + 1); len -= sizeof(struct fc_vft_s); bfa_trc(fcs, vft->vf_id); } else { bfa_stats(fcs, uf.untagged); fabric = &fcs->fabric; } bfa_trc(fcs, ((u32 *) fchs)[0]); bfa_trc(fcs, ((u32 *) fchs)[1]); bfa_trc(fcs, ((u32 *) fchs)[2]); bfa_trc(fcs, ((u32 *) fchs)[3]); bfa_trc(fcs, ((u32 *) fchs)[4]); bfa_trc(fcs, ((u32 *) fchs)[5]); bfa_trc(fcs, len); bfa_fcs_fabric_uf_recv(fabric, fchs, len); bfa_uf_free(uf); } void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs) { bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs); }
gpl-2.0
kyasu/android_kernel_samsung_ks01lte
arch/arm/mvp/commkm/comm_transp_mvp.c
839
21446
/* * Linux 2.6.32 and later Kernel module for VMware MVP Guest Communications * * Copyright (C) 2010-2013 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; see the file COPYING. If not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #line 5 /** * @file * * @brief Generic shared memory transport API. */ #include <linux/wait.h> #include "comm_os.h" #include "comm_transp_impl.h" #include "mvp_types.h" #include "qp.h" /* * Opaque CommTransp structure. 
See comm_transp.h */ struct CommTranspPriv { QPHandle *qp; CommTranspEvent event; unsigned int peerEvID; unsigned int writeSize; unsigned int readSize; uint32 backRef; CommOSWork work; CommOSAtomic raiseInline; }; /* * Transport table object accounting */ typedef struct TranspTableEntry { CommOSAtomic holds; CommTransp transp; CommOSWaitQueue wq; } TranspTableEntry; TranspTableEntry transpTable[QP_MAX_QUEUE_PAIRS]; static CommOSSpinlock_Define(transpTableLock); /** * @brief Destroy the transport object * @param transp transport object to destroy * @sideeffects detaches from queue pair */ static void DestroyTransp(CommTransp transp) { CommTranspID transpID; int32 rc; if (!transp) { CommOS_Debug(("Failed to close channel: Bad handle\n")); return; } CommOS_Log(("%s: Detaching channel [%u:%u]\n", __func__, transp->qp->id.context, transp->qp->id.resource)); transpID.d32[0] = transp->qp->id.context; transpID.d32[1] = transp->qp->id.resource; #if !defined(COMM_BUILDING_SERVER) /* * Tell the host to detach, will block in the host * until the host has unmapped memory. Once the * host has unmapped, it is safe to free. */ CommTranspEvent_Raise(transp->peerEvID, &transpID, COMM_TRANSP_IO_DETACH); #endif rc = QP_Detach(transp->qp); #if defined(COMM_BUILDING_SERVER) /* * Wake up waiters now that unmapping is complete */ CommOS_WakeUp(&transpTable[transp->backRef].wq); #endif CommOS_Kfree(transp); if (rc != QP_SUCCESS) CommOS_Log(("%s: Failed to detach. 
rc: %d\n", __func__, rc)); else CommOS_Log(("%s: Channel detached.\n", __func__)); } /** * @brief Initialize the transport object table */ static void TranspTableInit(void) { uint32 i; CommOS_SpinLock(&transpTableLock); for (i = 0; i < QP_MAX_QUEUE_PAIRS; i++) { CommOS_WriteAtomic(&transpTable[i].holds, -1); transpTable[i].transp = NULL; } CommOS_SpinUnlock(&transpTableLock); } /** * @brief Add a transport object into the table * @param transp handle to the transport object * @return 0 on success, -1 otherwise * @sideeffects increments entry refcount */ static inline int32 TranspTableAdd(CommTransp transp) { uint32 i; if (!transp) return -1; CommOS_SpinLock(&transpTableLock); for (i = 0; i < QP_MAX_QUEUE_PAIRS; i++) { if ((transpTable[i].transp) == NULL) { transpTable[i].transp = transp; CommOS_WriteAtomic(&transpTable[i].holds, 1); CommOS_WaitQueueInit(&transpTable[i].wq); transp->backRef = i; break; } } CommOS_SpinUnlock(&transpTableLock); return 0; } /** * @brief retrieve a transport object and increment its ref count * @param id transport id to retrieve * @return transport object, or NULL if not found * @sideeffects increments entry ref count */ static inline CommTransp TranspTableGet(CommTranspID *id) { CommTransp transp; uint32 i; if (!id) return NULL; for (i = 0; i < QP_MAX_QUEUE_PAIRS; i++) { transp = transpTable[i].transp; if (transp && (transp->qp->id.context == id->d32[0]) && (transp->qp->id.resource == id->d32[1])) { CommOS_AddReturnAtomic(&transpTable[i].holds, 1); return transp; } } CommOS_Debug(("%s: couldn't find transport object\n", __func__)); return NULL; } /** * @brief Puts back a previously TranspGet-ed transport object. * @param transp the transport object. * @sideeffects decrements the transport reference count. 
* frees object if refcount now zero */ static inline void TranspTablePut(CommTransp transp) { int32 holds; int32 backRef; if (!transp) return; backRef = transp->backRef; BUG_ON(backRef >= QP_MAX_QUEUE_PAIRS); holds = CommOS_SubReturnAtomic(&transpTable[backRef].holds, 1); if (holds > 0) return; BUG_ON(holds < 0); CommOS_SpinLock(&transpTableLock); CommOS_WriteAtomic(&transpTable[backRef].holds, -1); transpTable[backRef].transp = NULL; CommOS_SpinUnlock(&transpTableLock); DestroyTransp(transp); } /** * @brief Puts back a previously TranspGet-ed transport object. * @param transp the transport object. * @sideeffects decrements the transport reference count. * asserts that remaining count > 0 */ static inline void TranspTablePutNF(CommTransp transp) { int32 holds; int32 backRef; if (!transp) return; backRef = transp->backRef; BUG_ON(backRef >= QP_MAX_QUEUE_PAIRS); holds = CommOS_SubReturnAtomic(&transpTable[backRef].holds, 1); BUG_ON(holds <= 0); } /** * @brief Raises INOUT event in-line or out-of-band. Note that this function * expects the transport object to be held prior to being called. * @param arg work item of transport object. */ static void RaiseEvent(CommOSWork *arg) { #if !defined(__linux__) #error "RaiseEvent() is only supported on linux. Port 'container_of'!" #endif CommTransp transp = container_of(arg, struct CommTranspPriv, work); CommTranspID transpID = { { .d32 = { [0] = transp->qp->id.context, [1] = transp->qp->id.resource } } }; CommTranspEvent_Raise(transp->peerEvID, &transpID, COMM_TRANSP_IO_INOUT); TranspTablePut(transp); } /** * @brief Requests events be posted in-line after the function completes. * @param transp transport object. * @return current number of requests for inline event posting. * @sideeffects posts an event on the first transition to in-line processing. 
*/ unsigned int CommTransp_RequestInlineEvents(CommTransp transp) { unsigned int res = CommOS_AddReturnAtomic(&transp->raiseInline, 1); if (res == 1) { /* * On the first (effective) transition, make sure an * event is raised. */ CommOS_AddReturnAtomic(&transpTable[transp->backRef].holds, 1); RaiseEvent(&transp->work); } return res; } /** * @brief Requests events be posted out-of-band after the function completes. * @param transp transport object. * @return current number of requests for inline event posting. */ unsigned int CommTransp_ReleaseInlineEvents(CommTransp transp) { return CommOS_SubReturnAtomic(&transp->raiseInline, 1); } /* * Comm Offload server callbacks. */ #if defined(COMM_BUILDING_SERVER) #define COMM_MAX_LISTENERS QP_MAX_LISTENERS static int32 NotifyCB(const QPInitArgs *args); static void DetachCB(void *data); static CommOSSpinlock_Define(listenersLock); static CommTranspListener listeners[COMM_MAX_LISTENERS]; static uint32 numListeners; /** * @brief Notify callback when guests attach to queue pairs. Notifies any * registered listeners (e.g. Comm layer). * @param args Initialization arguments used by the guest to initialize * its queue pair * @return 0 on success, <0 otherwise. see qp.h for error codes. */ static int32 NotifyCB(const QPInitArgs *args) { CommTranspInitArgs transpArgs; uint32 i; int32 rc = -1; if (!args) return QP_ERROR_INVALID_ARGS; transpArgs.id.d32[0] = args->id.context; transpArgs.id.d32[1] = args->id.resource; transpArgs.capacity = args->capacity; transpArgs.type = args->type; CommOS_SpinLock(&listenersLock); for (i = 0; i < COMM_MAX_LISTENERS; i++) { if (listeners[i].probe && (listeners[i].probe(&transpArgs, listeners[i].probeData) == 0)) { CommOS_Debug(("%s: Delivered notify event to " \ "listener %u\n", __func__, i)); rc = 0; break; } } CommOS_SpinUnlock(&listenersLock); return rc; } /** * @brief Detach callback when guests detach from queue pairs. Notifies * any registered listeners (e.g. CommComm layer). 
* @param data Transport object passed when the callback was registered */ static void DetachCB(void *data) { CommTransp transp = data; if (!transp || !(transp->event.ioEvent)) return; CommOS_Debug(("%s: Guest detached from [%u:%u]\n", __func__, transp->qp->id.context, transp->qp->id.resource)); transp->event.ioEvent(transp, COMM_TRANSP_IO_DETACH, transp->event.ioEventData); } #endif /** * @brief Performs one-time initialization of mvp transport provider. * @return 0 on success, < 0 otherwise. */ int CommTransp_Init(void) { int32 rc; TranspTableInit(); rc = CommTranspEvent_Init(); #if defined(COMM_BUILDING_SERVER) if (!rc) QP_RegisterListener(NotifyCB); #endif return rc; } /** * @brief Performs clean-up of mvp transport provider. */ void CommTransp_Exit(void) { CommTranspEvent_Exit(); #if defined(COMM_BUILDING_SERVER) QP_UnregisterListener(NotifyCB); #endif } #if defined(COMM_BUILDING_SERVER) /** * @brief Checks for a successful detach from Comm * @param arg1 back reference index for channel in transport table * @param arg2 ignored * @return 1 if detach completed, 0 otherwise */ static int DetachCondition(void *arg1, void *arg2) { uint32 backRef = (uint32)arg1; return (CommOS_ReadAtomic(&transpTable[backRef].holds) == -1); } #endif /** * @brief Processes a raised signal event. This is a callback function called * from a comm_transp_ev plugin when a signal is received. Delivers an event * to one or more channels. If id->d32[1] == COMM_TRANSP_ID_32_ANY, the event * will be delivered to all registered channels associated with vmID * id->d32[0]. * @param id identifies a transport object to signal. * @param event type of event. * @return 0 if delivered to at least one channel, -1 on failure. */ int CommTranspEvent_Process(CommTranspID *id, CommTranspIOEvent event) { int rc = 0; unsigned int delivered = 0; unsigned int backRef; int i = 0; CommTransp transp; uint32 raiseOnAllChannels = (id->d32[1] == COMM_TRANSP_ID_32_ANY); uint32 channels = raiseOnAllChannels ? 
QP_MAX_QUEUE_PAIRS : 1; while (channels--) { if (raiseOnAllChannels) id->d32[1] = i++; transp = TranspTableGet(id); if (transp) { if (transp->event.ioEvent) transp->event.ioEvent(transp, event, transp->event.ioEventData); backRef = transp->backRef; TranspTablePut(transp); #if defined(COMM_BUILDING_SERVER) /* * Wait for unmap on IO_DETACH, return to monitor. */ if (event == COMM_TRANSP_IO_DETACH) { unsigned long long timeout = 30000; rc = CommOS_Wait(&transpTable[backRef].wq, DetachCondition, (void *)backRef, NULL, &timeout); switch (rc) { case 1: /* Memory successfully unmapped */ rc = 0; break; default: /* Timed out or other error. */ return -1; } } #endif delivered++; } } rc = (delivered > 0) ? 0 : -1; return rc; } /** * @brief Register a listener to be notified when guests attach to the Comm * offload server * @param listener the listener to be notified * @return 0 on success, -1 on failure */ int CommTransp_Register(const CommTranspListener *listener) { int32 rc = -1; #if defined(COMM_BUILDING_SERVER) uint32 i; if (!listener) return -1; CommOS_SpinLock(&listenersLock); for (i = 0; i < COMM_MAX_LISTENERS; i++) { if ((listeners[i].probe == NULL) && (listeners[i].probeData == NULL)) { listeners[i] = *listener; numListeners++; rc = 0; CommOS_Debug(("%s: Registered listener %u\n", __func__, i)); break; } } CommOS_SpinUnlock(&listenersLock); #endif return rc; } /** * @brief Unregisters a listener from the transport event notification system * @param listener listener to unregister * @return 0 on success */ void CommTransp_Unregister(const CommTranspListener *listener) { #if defined(COMM_BUILDING_SERVER) uint32 i; if (!listener || !listener->probe) return; CommOS_SpinLock(&listenersLock); for (i = 0; i < COMM_MAX_LISTENERS; i++) { if ((listeners[i].probe == listener->probe) && (listeners[i].probeData == listener->probeData)) { listeners[i].probe = NULL; listeners[i].probeData = NULL; numListeners--; CommOS_Debug(("%s: Unregistered listener %u\n", __func__, i)); } } 
CommOS_SpinUnlock(&listenersLock); #endif } /** * @brief Allocates and initializes a transport object * @param[in,out] transp handle to the transport to allocate and initialize * @param transpArgs initialization arguments (see pvtcpTransp.h) * @param transpEvent event callback to be delivered when events occur (e.g. * detach events) * @return 0 on success, <0 otherwise. See qp.h for error codes. * @sideeffects Allocates memory */ int CommTransp_Open(CommTransp *transp, CommTranspInitArgs *transpArgs, CommTranspEvent *transpEvent) { int32 rc = -1; QPHandle *qp = NULL; CommTransp transpOut = NULL; QPInitArgs qpInitArgs; if (!transp || !transpArgs) return -1; CommOS_Log(("%s: Attaching to [%u:%u]. Capacity: %u\n", __func__, transpArgs->id.d32[1], transpArgs->id.d32[0], transpArgs->capacity)); qpInitArgs.id.context = transpArgs->id.d32[0]; qpInitArgs.id.resource = transpArgs->id.d32[1]; qpInitArgs.capacity = transpArgs->capacity; qpInitArgs.type = transpArgs->type; transpOut = CommOS_Kmalloc(sizeof(*transpOut)); if (!transpOut) { rc = -1; goto out; } /* * Attach to the queue pair. 
*/ rc = QP_Attach(&qpInitArgs, &qp); if (rc < 0) { rc = -1; goto out; } transpOut->qp = qp; /* * Reassign ID so Comm knows what ID was actually given */ transpArgs->id.d32[0] = qp->id.context; transpArgs->id.d32[1] = qp->id.resource; if (transpEvent) { transpOut->event = *transpEvent; } else { transpOut->event.ioEvent = NULL; transpOut->event.ioEventData = NULL; } #if defined(COMM_BUILDING_SERVER) CommOS_Debug(("%s: Registering detach CB on id %u...\n", __func__, transpArgs->id.d32[1])); QP_RegisterDetachCB(transpOut->qp, DetachCB, transpOut); #endif transpOut->peerEvID = COMM_TRANSP_ID_32_ANY; transpOut->writeSize = 0; transpOut->readSize = 0; CommOS_InitWork(&transpOut->work, RaiseEvent); CommOS_WriteAtomic(&transpOut->raiseInline, 0); if (TranspTableAdd(transpOut)) { CommOS_Log(("%s: Exceeded max limit of transport objects!\n", __func__)); DestroyTransp(transpOut); rc = -1; goto out; } *transp = transpOut; rc = 0; CommOS_Log(("%s: Channel attached.\n", __func__)); out: if (rc && transpOut) { CommOS_Log(("%s: Failed to attach: %d\n", __func__, rc)); CommOS_Kfree(transpOut); } return rc; } /** * @brief Tear down the transport channel, destroy the object if the refcount * drops to zero * @param transp handle to the transport channel * @sideeffects decrements the entry's refcount */ void CommTransp_Close(CommTransp transp) { if (!transp) return; CommOS_FlushAIOWork(&transp->work); TranspTablePut(transp); } /** * @brief Returns available space for enqueue, in bytes * @param transp handle to the transport object * @return available space in the queue for enqueue operations, <0 * on error conditions. see qp.h for error codes. */ int CommTransp_EnqueueSpace(CommTransp transp) { if (!transp) return -1; return QP_EnqueueSpace(transp->qp); } /** * @brief Discards any pending enqueues * @param transp handle to the transport object * @return 0 on success, <0 otherwise. 
see qp.h for error codes */ int CommTransp_EnqueueReset(CommTransp transp) { if (!transp) return -1; transp->writeSize = 0; return QP_EnqueueReset(transp->qp); } /** * @brief Enqueues a segment of data into the transport object * @param transp handle to the transport object * @param buf data to enqueue * @param bufLen number of bytes to enqueue * @param kern != 0 if copying kernel data * @return number of bytes enqueued on success, <0 otherwise. see qp.h * for error codes */ int CommTransp_EnqueueSegment(CommTransp transp, const void *buf, unsigned int bufLen, int kern) { int rc; if (!transp) return -1; rc = QP_EnqueueSegment(transp->qp, (void *)buf, bufLen, kern); if (rc >= 0) transp->writeSize += (unsigned int)rc; else transp->writeSize = 0; return rc; } /** * @brief Commits any previous EnqueueSegment operations to the transport * object. * @param transp handle to the transport object. * @return 0 on success, < 0 otherwise. */ int CommTransp_EnqueueCommit(CommTransp transp) { int rc; if (!transp) return -1; rc = QP_EnqueueCommit(transp->qp); if (rc >= 0) { const unsigned int fudge = 4; int writable = CommTransp_EnqueueSpace(transp); if ((writable >= 0) && ((transp->writeSize + (unsigned int)writable + fudge) >= transp->qp->queueSize)) { /* * If bytes written since last commit + writable space * 'almost' equal write queue size, then signal. * The 'almost' fudge factor accounts for a possibly * inaccurate CommTransp_EnqueueSpace() return value. * Most of the time, this is inconsequential. In rare, * borderline occasions, it results in a few extra * signals. * The scheme essentially means this: if this is the * first packet to be write-committed, we signal. * Otherwise, the remote end is supposed to keep * going for as long as it can read. 
*/ BUG_ON(transp->backRef >= QP_MAX_QUEUE_PAIRS); CommOS_AddReturnAtomic( &transpTable[transp->backRef].holds, 1); if (CommOS_ReadAtomic(&transp->raiseInline)) RaiseEvent(&transp->work); else if (CommOS_ScheduleAIOWork(&transp->work)) TranspTablePutNF(transp); } rc = 0; } else { rc = -1; } transp->writeSize = 0; return rc; } /** * @brief Returns any available bytes for dequeue * @param transp handle to the transport object * @return available bytes for dequeue, <0 otherwise. see qp.h for error codes */ int CommTransp_DequeueSpace(CommTransp transp) { if (!transp) return -1; return QP_DequeueSpace(transp->qp); } /** * @brief Discards any pending dequeues * @param transp handle to the transport object * @return 0 on success, <0 otherwise, see qp.h for error codes */ int CommTransp_DequeueReset(CommTransp transp) { if (!transp) return -1; transp->readSize = 0; return QP_DequeueReset(transp->qp); } /** * @brief Dequeues a segment of data from the consumer queue into * a buffer * @param transp handle to the transport object * @param[out] buf buffer to copy to * @param bufLen number of bytes to dequeue * @param kern != 0 if copying kernel data * @return number of bytes dequeued on success, <0 otherwise, * see qp.h for error codes */ int CommTransp_DequeueSegment(CommTransp transp, void *buf, unsigned bufLen, int kern) { int rc; if (!transp) return -1; rc = QP_DequeueSegment(transp->qp, buf, bufLen, kern); if (rc >= 0) transp->readSize += (unsigned int)rc; else transp->readSize = 0; return rc; } /** * @brief Commits any previous DequeueSegment operations to the * transport object. * @param transp handle to the transport object. * @return 0 on success, < 0 otherwise. 
*/ int CommTransp_DequeueCommit(CommTransp transp) { int rc; if (!transp) return -1; rc = QP_DequeueCommit(transp->qp); if (rc >= 0) { int readable = CommTransp_DequeueSpace(transp); const unsigned int limit = transp->qp->queueSize / 2; if ((readable >= 0) && (transp->readSize + (unsigned int)readable >= limit) && ((unsigned int)readable < limit)) { /* * Minimize the number of likely 'peer write OK' * signalling: only do it, if reading crossed * half-way down. */ BUG_ON(transp->backRef >= QP_MAX_QUEUE_PAIRS); CommOS_AddReturnAtomic( &transpTable[transp->backRef].holds, 1); if (CommOS_ReadAtomic(&transp->raiseInline)) RaiseEvent(&transp->work); else if (CommOS_ScheduleAIOWork(&transp->work)) TranspTablePut(transp); } rc = 0; } else { rc = -1; } /* coverity[deref_after_free] */ transp->readSize = 0; return rc; } /** * @brief Notify any registered listeners for the given queue pair * @param notificationCenterID noop, unused on MVP * @param transpArgs initialization arguments used by the guest for this * channel * @sideeffects the host may attach to the queue pair */ int CommTransp_Notify(const CommTranspID *notificationCenterID, CommTranspInitArgs *transpArgs) { QPInitArgs args; args.id.context = transpArgs->id.d32[0]; args.id.resource = transpArgs->id.d32[1]; args.capacity = transpArgs->capacity; args.type = transpArgs->type; CommOS_Debug(("%s: d32[0]: %u d32[1]: %u\n", __func__, transpArgs->id.d32[0], transpArgs->id.d32[1])); QP_Notify(&args); return 0; }
gpl-2.0
sjkoon/SJKernel-gn2
kernel/relay.c
1095
33536
/* * Public API and common code for kernel->userspace relay file support. * * See Documentation/filesystems/relay.txt for an overview. * * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com) * * Moved to kernel/relay.c by Paul Mundt, 2006. * November 2006 - CPU hotplug support by Mathieu Desnoyers * (mathieu.desnoyers@polymtl.ca) * * This file is released under the GPL. */ #include <linux/errno.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/string.h> #include <linux/relay.h> #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/cpu.h> #include <linux/splice.h> /* list of open channels, for cpu hotplug */ static DEFINE_MUTEX(relay_channels_mutex); static LIST_HEAD(relay_channels); /* * close() vm_op implementation for relay file mapping. */ static void relay_file_mmap_close(struct vm_area_struct *vma) { struct rchan_buf *buf = vma->vm_private_data; buf->chan->cb->buf_unmapped(buf, vma->vm_file); } /* * fault() vm_op implementation for relay file mapping. */ static int relay_buf_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page; struct rchan_buf *buf = vma->vm_private_data; pgoff_t pgoff = vmf->pgoff; if (!buf) return VM_FAULT_OOM; page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT)); if (!page) return VM_FAULT_SIGBUS; get_page(page); vmf->page = page; return 0; } /* * vm_ops for relay file mappings. 
*/ static const struct vm_operations_struct relay_file_mmap_ops = { .fault = relay_buf_fault, .close = relay_file_mmap_close, }; /* * allocate an array of pointers of struct page */ static struct page **relay_alloc_page_array(unsigned int n_pages) { const size_t pa_size = n_pages * sizeof(struct page *); if (pa_size > PAGE_SIZE) return vzalloc(pa_size); return kzalloc(pa_size, GFP_KERNEL); } /* * free an array of pointers of struct page */ static void relay_free_page_array(struct page **array) { if (is_vmalloc_addr(array)) vfree(array); else kfree(array); } /** * relay_mmap_buf: - mmap channel buffer to process address space * @buf: relay channel buffer * @vma: vm_area_struct describing memory to be mapped * * Returns 0 if ok, negative on error * * Caller should already have grabbed mmap_sem. */ static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma) { unsigned long length = vma->vm_end - vma->vm_start; struct file *filp = vma->vm_file; if (!buf) return -EBADF; if (length != (unsigned long)buf->chan->alloc_size) return -EINVAL; vma->vm_ops = &relay_file_mmap_ops; vma->vm_flags |= VM_DONTEXPAND; vma->vm_private_data = buf; buf->chan->cb->buf_mapped(buf, filp); return 0; } /** * relay_alloc_buf - allocate a channel buffer * @buf: the buffer struct * @size: total size of the buffer * * Returns a pointer to the resulting buffer, %NULL if unsuccessful. The * passed in size will get page aligned, if it isn't already. 
*/ static void *relay_alloc_buf(struct rchan_buf *buf, size_t *size) { void *mem; unsigned int i, j, n_pages; *size = PAGE_ALIGN(*size); n_pages = *size >> PAGE_SHIFT; buf->page_array = relay_alloc_page_array(n_pages); if (!buf->page_array) return NULL; for (i = 0; i < n_pages; i++) { buf->page_array[i] = alloc_page(GFP_KERNEL); if (unlikely(!buf->page_array[i])) goto depopulate; set_page_private(buf->page_array[i], (unsigned long)buf); } mem = vmap(buf->page_array, n_pages, VM_MAP, PAGE_KERNEL); if (!mem) goto depopulate; memset(mem, 0, *size); buf->page_count = n_pages; return mem; depopulate: for (j = 0; j < i; j++) __free_page(buf->page_array[j]); relay_free_page_array(buf->page_array); return NULL; } /** * relay_create_buf - allocate and initialize a channel buffer * @chan: the relay channel * * Returns channel buffer if successful, %NULL otherwise. */ static struct rchan_buf *relay_create_buf(struct rchan *chan) { struct rchan_buf *buf; if (chan->n_subbufs > UINT_MAX / sizeof(size_t *)) return NULL; buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL); if (!buf) return NULL; buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL); if (!buf->padding) goto free_buf; buf->start = relay_alloc_buf(buf, &chan->alloc_size); if (!buf->start) goto free_buf; buf->chan = chan; kref_get(&buf->chan->kref); return buf; free_buf: kfree(buf->padding); kfree(buf); return NULL; } /** * relay_destroy_channel - free the channel struct * @kref: target kernel reference that contains the relay channel * * Should only be called from kref_put(). 
*/ static void relay_destroy_channel(struct kref *kref) { struct rchan *chan = container_of(kref, struct rchan, kref); kfree(chan); } /** * relay_destroy_buf - destroy an rchan_buf struct and associated buffer * @buf: the buffer struct */ static void relay_destroy_buf(struct rchan_buf *buf) { struct rchan *chan = buf->chan; unsigned int i; if (likely(buf->start)) { vunmap(buf->start); for (i = 0; i < buf->page_count; i++) __free_page(buf->page_array[i]); relay_free_page_array(buf->page_array); } chan->buf[buf->cpu] = NULL; kfree(buf->padding); kfree(buf); kref_put(&chan->kref, relay_destroy_channel); } /** * relay_remove_buf - remove a channel buffer * @kref: target kernel reference that contains the relay buffer * * Removes the file from the fileystem, which also frees the * rchan_buf_struct and the channel buffer. Should only be called from * kref_put(). */ static void relay_remove_buf(struct kref *kref) { struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref); buf->chan->cb->remove_buf_file(buf->dentry); relay_destroy_buf(buf); } /** * relay_buf_empty - boolean, is the channel buffer empty? * @buf: channel buffer * * Returns 1 if the buffer is empty, 0 otherwise. */ static int relay_buf_empty(struct rchan_buf *buf) { return (buf->subbufs_produced - buf->subbufs_consumed) ? 0 : 1; } /** * relay_buf_full - boolean, is the channel buffer full? * @buf: channel buffer * * Returns 1 if the buffer is full, 0 otherwise. */ int relay_buf_full(struct rchan_buf *buf) { size_t ready = buf->subbufs_produced - buf->subbufs_consumed; return (ready >= buf->chan->n_subbufs) ? 1 : 0; } EXPORT_SYMBOL_GPL(relay_buf_full); /* * High-level relay kernel API and associated functions. */ /* * rchan_callback implementations defining default channel behavior. Used * in place of corresponding NULL values in client callback struct. */ /* * subbuf_start() default callback. Does nothing. 
*/ static int subbuf_start_default_callback (struct rchan_buf *buf, void *subbuf, void *prev_subbuf, size_t prev_padding) { if (relay_buf_full(buf)) return 0; return 1; } /* * buf_mapped() default callback. Does nothing. */ static void buf_mapped_default_callback(struct rchan_buf *buf, struct file *filp) { } /* * buf_unmapped() default callback. Does nothing. */ static void buf_unmapped_default_callback(struct rchan_buf *buf, struct file *filp) { } /* * create_buf_file_create() default callback. Does nothing. */ static struct dentry *create_buf_file_default_callback(const char *filename, struct dentry *parent, int mode, struct rchan_buf *buf, int *is_global) { return NULL; } /* * remove_buf_file() default callback. Does nothing. */ static int remove_buf_file_default_callback(struct dentry *dentry) { return -EINVAL; } /* relay channel default callbacks */ static struct rchan_callbacks default_channel_callbacks = { .subbuf_start = subbuf_start_default_callback, .buf_mapped = buf_mapped_default_callback, .buf_unmapped = buf_unmapped_default_callback, .create_buf_file = create_buf_file_default_callback, .remove_buf_file = remove_buf_file_default_callback, }; /** * wakeup_readers - wake up readers waiting on a channel * @data: contains the channel buffer * * This is the timer function used to defer reader waking. */ static void wakeup_readers(unsigned long data) { struct rchan_buf *buf = (struct rchan_buf *)data; wake_up_interruptible(&buf->read_wait); } /** * __relay_reset - reset a channel buffer * @buf: the channel buffer * @init: 1 if this is a first-time initialization * * See relay_reset() for description of effect. 
*/ static void __relay_reset(struct rchan_buf *buf, unsigned int init) { size_t i; if (init) { init_waitqueue_head(&buf->read_wait); kref_init(&buf->kref); setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf); } else del_timer_sync(&buf->timer); buf->subbufs_produced = 0; buf->subbufs_consumed = 0; buf->bytes_consumed = 0; buf->finalized = 0; buf->data = buf->start; buf->offset = 0; for (i = 0; i < buf->chan->n_subbufs; i++) buf->padding[i] = 0; buf->chan->cb->subbuf_start(buf, buf->data, NULL, 0); } /** * relay_reset - reset the channel * @chan: the channel * * This has the effect of erasing all data from all channel buffers * and restarting the channel in its initial state. The buffers * are not freed, so any mappings are still in effect. * * NOTE. Care should be taken that the channel isn't actually * being used by anything when this call is made. */ void relay_reset(struct rchan *chan) { unsigned int i; if (!chan) return; if (chan->is_global && chan->buf[0]) { __relay_reset(chan->buf[0], 0); return; } mutex_lock(&relay_channels_mutex); for_each_possible_cpu(i) if (chan->buf[i]) __relay_reset(chan->buf[i], 0); mutex_unlock(&relay_channels_mutex); } EXPORT_SYMBOL_GPL(relay_reset); static inline void relay_set_buf_dentry(struct rchan_buf *buf, struct dentry *dentry) { buf->dentry = dentry; buf->dentry->d_inode->i_size = buf->early_bytes; } static struct dentry *relay_create_buf_file(struct rchan *chan, struct rchan_buf *buf, unsigned int cpu) { struct dentry *dentry; char *tmpname; tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL); if (!tmpname) return NULL; snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu); /* Create file in fs */ dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR, buf, &chan->is_global); kfree(tmpname); return dentry; } /* * relay_open_buf - create a new relay channel buffer * * used by relay_open() and CPU hotplug. 
*/ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu) { struct rchan_buf *buf = NULL; struct dentry *dentry; if (chan->is_global) return chan->buf[0]; buf = relay_create_buf(chan); if (!buf) return NULL; if (chan->has_base_filename) { dentry = relay_create_buf_file(chan, buf, cpu); if (!dentry) goto free_buf; relay_set_buf_dentry(buf, dentry); } buf->cpu = cpu; __relay_reset(buf, 1); if(chan->is_global) { chan->buf[0] = buf; buf->cpu = 0; } return buf; free_buf: relay_destroy_buf(buf); return NULL; } /** * relay_close_buf - close a channel buffer * @buf: channel buffer * * Marks the buffer finalized and restores the default callbacks. * The channel buffer and channel buffer data structure are then freed * automatically when the last reference is given up. */ static void relay_close_buf(struct rchan_buf *buf) { buf->finalized = 1; del_timer_sync(&buf->timer); kref_put(&buf->kref, relay_remove_buf); } static void setup_callbacks(struct rchan *chan, struct rchan_callbacks *cb) { if (!cb) { chan->cb = &default_channel_callbacks; return; } if (!cb->subbuf_start) cb->subbuf_start = subbuf_start_default_callback; if (!cb->buf_mapped) cb->buf_mapped = buf_mapped_default_callback; if (!cb->buf_unmapped) cb->buf_unmapped = buf_unmapped_default_callback; if (!cb->create_buf_file) cb->create_buf_file = create_buf_file_default_callback; if (!cb->remove_buf_file) cb->remove_buf_file = remove_buf_file_default_callback; chan->cb = cb; } /** * relay_hotcpu_callback - CPU hotplug callback * @nb: notifier block * @action: hotplug action to take * @hcpu: CPU number * * Returns the success/failure of the operation. 
(%NOTIFY_OK, %NOTIFY_BAD) */ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) { unsigned int hotcpu = (unsigned long)hcpu; struct rchan *chan; switch(action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: mutex_lock(&relay_channels_mutex); list_for_each_entry(chan, &relay_channels, list) { if (chan->buf[hotcpu]) continue; chan->buf[hotcpu] = relay_open_buf(chan, hotcpu); if(!chan->buf[hotcpu]) { printk(KERN_ERR "relay_hotcpu_callback: cpu %d buffer " "creation failed\n", hotcpu); mutex_unlock(&relay_channels_mutex); return notifier_from_errno(-ENOMEM); } } mutex_unlock(&relay_channels_mutex); break; case CPU_DEAD: case CPU_DEAD_FROZEN: /* No need to flush the cpu : will be flushed upon * final relay_flush() call. */ break; } return NOTIFY_OK; } /** * relay_open - create a new relay channel * @base_filename: base name of files to create, %NULL for buffering only * @parent: dentry of parent directory, %NULL for root directory or buffer * @subbuf_size: size of sub-buffers * @n_subbufs: number of sub-buffers * @cb: client callback functions * @private_data: user-defined data * * Returns channel pointer if successful, %NULL otherwise. * * Creates a channel buffer for each cpu using the sizes and * attributes specified. The created channel buffer files * will be named base_filename0...base_filenameN-1. File * permissions will be %S_IRUSR. 
*/ struct rchan *relay_open(const char *base_filename, struct dentry *parent, size_t subbuf_size, size_t n_subbufs, struct rchan_callbacks *cb, void *private_data) { unsigned int i; struct rchan *chan; if (!(subbuf_size && n_subbufs)) return NULL; if (subbuf_size > UINT_MAX / n_subbufs) return NULL; chan = kzalloc(sizeof(struct rchan), GFP_KERNEL); if (!chan) return NULL; chan->version = RELAYFS_CHANNEL_VERSION; chan->n_subbufs = n_subbufs; chan->subbuf_size = subbuf_size; chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs); chan->parent = parent; chan->private_data = private_data; if (base_filename) { chan->has_base_filename = 1; strlcpy(chan->base_filename, base_filename, NAME_MAX); } setup_callbacks(chan, cb); kref_init(&chan->kref); mutex_lock(&relay_channels_mutex); for_each_online_cpu(i) { chan->buf[i] = relay_open_buf(chan, i); if (!chan->buf[i]) goto free_bufs; } list_add(&chan->list, &relay_channels); mutex_unlock(&relay_channels_mutex); return chan; free_bufs: for_each_possible_cpu(i) { if (chan->buf[i]) relay_close_buf(chan->buf[i]); } kref_put(&chan->kref, relay_destroy_channel); mutex_unlock(&relay_channels_mutex); return NULL; } EXPORT_SYMBOL_GPL(relay_open); struct rchan_percpu_buf_dispatcher { struct rchan_buf *buf; struct dentry *dentry; }; /* Called in atomic context. */ static void __relay_set_buf_dentry(void *info) { struct rchan_percpu_buf_dispatcher *p = info; relay_set_buf_dentry(p->buf, p->dentry); } /** * relay_late_setup_files - triggers file creation * @chan: channel to operate on * @base_filename: base name of files to create * @parent: dentry of parent directory, %NULL for root directory * * Returns 0 if successful, non-zero otherwise. * * Use to setup files for a previously buffer-only channel. * Useful to do early tracing in kernel, before VFS is up, for example. 
*/ int relay_late_setup_files(struct rchan *chan, const char *base_filename, struct dentry *parent) { int err = 0; unsigned int i, curr_cpu; unsigned long flags; struct dentry *dentry; struct rchan_percpu_buf_dispatcher disp; if (!chan || !base_filename) return -EINVAL; strlcpy(chan->base_filename, base_filename, NAME_MAX); mutex_lock(&relay_channels_mutex); /* Is chan already set up? */ if (unlikely(chan->has_base_filename)) { mutex_unlock(&relay_channels_mutex); return -EEXIST; } chan->has_base_filename = 1; chan->parent = parent; curr_cpu = get_cpu(); /* * The CPU hotplug notifier ran before us and created buffers with * no files associated. So it's safe to call relay_setup_buf_file() * on all currently online CPUs. */ for_each_online_cpu(i) { if (unlikely(!chan->buf[i])) { WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n"); err = -EINVAL; break; } dentry = relay_create_buf_file(chan, chan->buf[i], i); if (unlikely(!dentry)) { err = -EINVAL; break; } if (curr_cpu == i) { local_irq_save(flags); relay_set_buf_dentry(chan->buf[i], dentry); local_irq_restore(flags); } else { disp.buf = chan->buf[i]; disp.dentry = dentry; smp_mb(); /* relay_channels_mutex must be held, so wait. */ err = smp_call_function_single(i, __relay_set_buf_dentry, &disp, 1); } if (unlikely(err)) break; } put_cpu(); mutex_unlock(&relay_channels_mutex); return err; } /** * relay_switch_subbuf - switch to a new sub-buffer * @buf: channel buffer * @length: size of current event * * Returns either the length passed in or 0 if full. * * Performs sub-buffer-switch tasks such as invoking callbacks, * updating padding counts, waking up readers, etc. 
*/ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length) { void *old, *new; size_t old_subbuf, new_subbuf; if (unlikely(length > buf->chan->subbuf_size)) goto toobig; if (buf->offset != buf->chan->subbuf_size + 1) { buf->prev_padding = buf->chan->subbuf_size - buf->offset; old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs; buf->padding[old_subbuf] = buf->prev_padding; buf->subbufs_produced++; if (buf->dentry) buf->dentry->d_inode->i_size += buf->chan->subbuf_size - buf->padding[old_subbuf]; else buf->early_bytes += buf->chan->subbuf_size - buf->padding[old_subbuf]; smp_mb(); if (waitqueue_active(&buf->read_wait)) /* * Calling wake_up_interruptible() from here * will deadlock if we happen to be logging * from the scheduler (trying to re-grab * rq->lock), so defer it. */ mod_timer(&buf->timer, jiffies + 1); } old = buf->data; new_subbuf = buf->subbufs_produced % buf->chan->n_subbufs; new = buf->start + new_subbuf * buf->chan->subbuf_size; buf->offset = 0; if (!buf->chan->cb->subbuf_start(buf, new, old, buf->prev_padding)) { buf->offset = buf->chan->subbuf_size + 1; return 0; } buf->data = new; buf->padding[new_subbuf] = 0; if (unlikely(length + buf->offset > buf->chan->subbuf_size)) goto toobig; return length; toobig: buf->chan->last_toobig = length; return 0; } EXPORT_SYMBOL_GPL(relay_switch_subbuf); /** * relay_subbufs_consumed - update the buffer's sub-buffers-consumed count * @chan: the channel * @cpu: the cpu associated with the channel buffer to update * @subbufs_consumed: number of sub-buffers to add to current buf's count * * Adds to the channel buffer's consumed sub-buffer count. * subbufs_consumed should be the number of sub-buffers newly consumed, * not the total consumed. * * NOTE. Kernel clients don't need to call this function if the channel * mode is 'overwrite'. 
*/ void relay_subbufs_consumed(struct rchan *chan, unsigned int cpu, size_t subbufs_consumed) { struct rchan_buf *buf; if (!chan) return; if (cpu >= NR_CPUS || !chan->buf[cpu] || subbufs_consumed > chan->n_subbufs) return; buf = chan->buf[cpu]; if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed) buf->subbufs_consumed = buf->subbufs_produced; else buf->subbufs_consumed += subbufs_consumed; } EXPORT_SYMBOL_GPL(relay_subbufs_consumed); /** * relay_close - close the channel * @chan: the channel * * Closes all channel buffers and frees the channel. */ void relay_close(struct rchan *chan) { unsigned int i; if (!chan) return; mutex_lock(&relay_channels_mutex); if (chan->is_global && chan->buf[0]) relay_close_buf(chan->buf[0]); else for_each_possible_cpu(i) if (chan->buf[i]) relay_close_buf(chan->buf[i]); if (chan->last_toobig) printk(KERN_WARNING "relay: one or more items not logged " "[item size (%Zd) > sub-buffer size (%Zd)]\n", chan->last_toobig, chan->subbuf_size); list_del(&chan->list); kref_put(&chan->kref, relay_destroy_channel); mutex_unlock(&relay_channels_mutex); } EXPORT_SYMBOL_GPL(relay_close); /** * relay_flush - close the channel * @chan: the channel * * Flushes all channel buffers, i.e. forces buffer switch. */ void relay_flush(struct rchan *chan) { unsigned int i; if (!chan) return; if (chan->is_global && chan->buf[0]) { relay_switch_subbuf(chan->buf[0], 0); return; } mutex_lock(&relay_channels_mutex); for_each_possible_cpu(i) if (chan->buf[i]) relay_switch_subbuf(chan->buf[i], 0); mutex_unlock(&relay_channels_mutex); } EXPORT_SYMBOL_GPL(relay_flush); /** * relay_file_open - open file op for relay files * @inode: the inode * @filp: the file * * Increments the channel buffer refcount. 
*/ static int relay_file_open(struct inode *inode, struct file *filp) { struct rchan_buf *buf = inode->i_private; kref_get(&buf->kref); filp->private_data = buf; return nonseekable_open(inode, filp); } /** * relay_file_mmap - mmap file op for relay files * @filp: the file * @vma: the vma describing what to map * * Calls upon relay_mmap_buf() to map the file into user space. */ static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma) { struct rchan_buf *buf = filp->private_data; return relay_mmap_buf(buf, vma); } /** * relay_file_poll - poll file op for relay files * @filp: the file * @wait: poll table * * Poll implemention. */ static unsigned int relay_file_poll(struct file *filp, poll_table *wait) { unsigned int mask = 0; struct rchan_buf *buf = filp->private_data; if (buf->finalized) return POLLERR; if (filp->f_mode & FMODE_READ) { poll_wait(filp, &buf->read_wait, wait); if (!relay_buf_empty(buf)) mask |= POLLIN | POLLRDNORM; } return mask; } /** * relay_file_release - release file op for relay files * @inode: the inode * @filp: the file * * Decrements the channel refcount, as the filesystem is * no longer using it. 
*/ static int relay_file_release(struct inode *inode, struct file *filp) { struct rchan_buf *buf = filp->private_data; kref_put(&buf->kref, relay_remove_buf); return 0; } /* * relay_file_read_consume - update the consumed count for the buffer */ static void relay_file_read_consume(struct rchan_buf *buf, size_t read_pos, size_t bytes_consumed) { size_t subbuf_size = buf->chan->subbuf_size; size_t n_subbufs = buf->chan->n_subbufs; size_t read_subbuf; if (buf->subbufs_produced == buf->subbufs_consumed && buf->offset == buf->bytes_consumed) return; if (buf->bytes_consumed + bytes_consumed > subbuf_size) { relay_subbufs_consumed(buf->chan, buf->cpu, 1); buf->bytes_consumed = 0; } buf->bytes_consumed += bytes_consumed; if (!read_pos) read_subbuf = buf->subbufs_consumed % n_subbufs; else read_subbuf = read_pos / buf->chan->subbuf_size; if (buf->bytes_consumed + buf->padding[read_subbuf] == subbuf_size) { if ((read_subbuf == buf->subbufs_produced % n_subbufs) && (buf->offset == subbuf_size)) return; relay_subbufs_consumed(buf->chan, buf->cpu, 1); buf->bytes_consumed = 0; } } /* * relay_file_read_avail - boolean, are there unconsumed bytes available? 
*/ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos) { size_t subbuf_size = buf->chan->subbuf_size; size_t n_subbufs = buf->chan->n_subbufs; size_t produced = buf->subbufs_produced; size_t consumed = buf->subbufs_consumed; relay_file_read_consume(buf, read_pos, 0); consumed = buf->subbufs_consumed; if (unlikely(buf->offset > subbuf_size)) { if (produced == consumed) return 0; return 1; } if (unlikely(produced - consumed >= n_subbufs)) { consumed = produced - n_subbufs + 1; buf->subbufs_consumed = consumed; buf->bytes_consumed = 0; } produced = (produced % n_subbufs) * subbuf_size + buf->offset; consumed = (consumed % n_subbufs) * subbuf_size + buf->bytes_consumed; if (consumed > produced) produced += n_subbufs * subbuf_size; if (consumed == produced) { if (buf->offset == subbuf_size && buf->subbufs_produced > buf->subbufs_consumed) return 1; return 0; } return 1; } /** * relay_file_read_subbuf_avail - return bytes available in sub-buffer * @read_pos: file read position * @buf: relay channel buffer */ static size_t relay_file_read_subbuf_avail(size_t read_pos, struct rchan_buf *buf) { size_t padding, avail = 0; size_t read_subbuf, read_offset, write_subbuf, write_offset; size_t subbuf_size = buf->chan->subbuf_size; write_subbuf = (buf->data - buf->start) / subbuf_size; write_offset = buf->offset > subbuf_size ? subbuf_size : buf->offset; read_subbuf = read_pos / subbuf_size; read_offset = read_pos % subbuf_size; padding = buf->padding[read_subbuf]; if (read_subbuf == write_subbuf) { if (read_offset + padding < write_offset) avail = write_offset - (read_offset + padding); } else avail = (subbuf_size - padding) - read_offset; return avail; } /** * relay_file_read_start_pos - find the first available byte to read * @read_pos: file read position * @buf: relay channel buffer * * If the @read_pos is in the middle of padding, return the * position of the first actually available byte, otherwise * return the original value. 
*/ static size_t relay_file_read_start_pos(size_t read_pos, struct rchan_buf *buf) { size_t read_subbuf, padding, padding_start, padding_end; size_t subbuf_size = buf->chan->subbuf_size; size_t n_subbufs = buf->chan->n_subbufs; size_t consumed = buf->subbufs_consumed % n_subbufs; if (!read_pos) read_pos = consumed * subbuf_size + buf->bytes_consumed; read_subbuf = read_pos / subbuf_size; padding = buf->padding[read_subbuf]; padding_start = (read_subbuf + 1) * subbuf_size - padding; padding_end = (read_subbuf + 1) * subbuf_size; if (read_pos >= padding_start && read_pos < padding_end) { read_subbuf = (read_subbuf + 1) % n_subbufs; read_pos = read_subbuf * subbuf_size; } return read_pos; } /** * relay_file_read_end_pos - return the new read position * @read_pos: file read position * @buf: relay channel buffer * @count: number of bytes to be read */ static size_t relay_file_read_end_pos(struct rchan_buf *buf, size_t read_pos, size_t count) { size_t read_subbuf, padding, end_pos; size_t subbuf_size = buf->chan->subbuf_size; size_t n_subbufs = buf->chan->n_subbufs; read_subbuf = read_pos / subbuf_size; padding = buf->padding[read_subbuf]; if (read_pos % subbuf_size + count + padding == subbuf_size) end_pos = (read_subbuf + 1) * subbuf_size; else end_pos = read_pos + count; if (end_pos >= subbuf_size * n_subbufs) end_pos = 0; return end_pos; } /* * subbuf_read_actor - read up to one subbuf's worth of data */ static int subbuf_read_actor(size_t read_start, struct rchan_buf *buf, size_t avail, read_descriptor_t *desc, read_actor_t actor) { void *from; int ret = 0; from = buf->start + read_start; ret = avail; if (copy_to_user(desc->arg.buf, from, avail)) { desc->error = -EFAULT; ret = 0; } desc->arg.data += ret; desc->written += ret; desc->count -= ret; return ret; } typedef int (*subbuf_actor_t) (size_t read_start, struct rchan_buf *buf, size_t avail, read_descriptor_t *desc, read_actor_t actor); /* * relay_file_read_subbufs - read count bytes, bridging subbuf boundaries 
*/ static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos, subbuf_actor_t subbuf_actor, read_actor_t actor, read_descriptor_t *desc) { struct rchan_buf *buf = filp->private_data; size_t read_start, avail; int ret; if (!desc->count) return 0; mutex_lock(&filp->f_path.dentry->d_inode->i_mutex); do { if (!relay_file_read_avail(buf, *ppos)) break; read_start = relay_file_read_start_pos(*ppos, buf); avail = relay_file_read_subbuf_avail(read_start, buf); if (!avail) break; avail = min(desc->count, avail); ret = subbuf_actor(read_start, buf, avail, desc, actor); if (desc->error < 0) break; if (ret) { relay_file_read_consume(buf, read_start, ret); *ppos = relay_file_read_end_pos(buf, read_start, ret); } } while (desc->count && ret); mutex_unlock(&filp->f_path.dentry->d_inode->i_mutex); return desc->written; } static ssize_t relay_file_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { read_descriptor_t desc; desc.written = 0; desc.count = count; desc.arg.buf = buffer; desc.error = 0; return relay_file_read_subbufs(filp, ppos, subbuf_read_actor, NULL, &desc); } static void relay_consume_bytes(struct rchan_buf *rbuf, int bytes_consumed) { rbuf->bytes_consumed += bytes_consumed; if (rbuf->bytes_consumed >= rbuf->chan->subbuf_size) { relay_subbufs_consumed(rbuf->chan, rbuf->cpu, 1); rbuf->bytes_consumed %= rbuf->chan->subbuf_size; } } static void relay_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct rchan_buf *rbuf; rbuf = (struct rchan_buf *)page_private(buf->page); relay_consume_bytes(rbuf, buf->private); } static const struct pipe_buf_operations relay_pipe_buf_ops = { .can_merge = 0, .map = generic_pipe_buf_map, .unmap = generic_pipe_buf_unmap, .confirm = generic_pipe_buf_confirm, .release = relay_pipe_buf_release, .steal = generic_pipe_buf_steal, .get = generic_pipe_buf_get, }; static void relay_page_release(struct splice_pipe_desc *spd, unsigned int i) { } /* * subbuf_splice_actor - splice up to one 
subbuf's worth of data */ static ssize_t subbuf_splice_actor(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags, int *nonpad_ret) { unsigned int pidx, poff, total_len, subbuf_pages, nr_pages; struct rchan_buf *rbuf = in->private_data; unsigned int subbuf_size = rbuf->chan->subbuf_size; uint64_t pos = (uint64_t) *ppos; uint32_t alloc_size = (uint32_t) rbuf->chan->alloc_size; size_t read_start = (size_t) do_div(pos, alloc_size); size_t read_subbuf = read_start / subbuf_size; size_t padding = rbuf->padding[read_subbuf]; size_t nonpad_end = read_subbuf * subbuf_size + subbuf_size - padding; struct page *pages[PIPE_DEF_BUFFERS]; struct partial_page partial[PIPE_DEF_BUFFERS]; struct splice_pipe_desc spd = { .pages = pages, .nr_pages = 0, .partial = partial, .flags = flags, .ops = &relay_pipe_buf_ops, .spd_release = relay_page_release, }; ssize_t ret; if (rbuf->subbufs_produced == rbuf->subbufs_consumed) return 0; if (splice_grow_spd(pipe, &spd)) return -ENOMEM; /* * Adjust read len, if longer than what is available */ if (len > (subbuf_size - read_start % subbuf_size)) len = subbuf_size - read_start % subbuf_size; subbuf_pages = rbuf->chan->alloc_size >> PAGE_SHIFT; pidx = (read_start / PAGE_SIZE) % subbuf_pages; poff = read_start & ~PAGE_MASK; nr_pages = min_t(unsigned int, subbuf_pages, pipe->buffers); for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) { unsigned int this_len, this_end, private; unsigned int cur_pos = read_start + total_len; if (!len) break; this_len = min_t(unsigned long, len, PAGE_SIZE - poff); private = this_len; spd.pages[spd.nr_pages] = rbuf->page_array[pidx]; spd.partial[spd.nr_pages].offset = poff; this_end = cur_pos + this_len; if (this_end >= nonpad_end) { this_len = nonpad_end - cur_pos; private = this_len + padding; } spd.partial[spd.nr_pages].len = this_len; spd.partial[spd.nr_pages].private = private; len -= this_len; total_len += this_len; poff = 0; pidx = (pidx + 1) % subbuf_pages; if 
(this_end >= nonpad_end) { spd.nr_pages++; break; } } ret = 0; if (!spd.nr_pages) goto out; ret = *nonpad_ret = splice_to_pipe(pipe, &spd); if (ret < 0 || ret < total_len) goto out; if (read_start + ret == nonpad_end) ret += padding; out: splice_shrink_spd(pipe, &spd); return ret; } static ssize_t relay_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { ssize_t spliced; int ret; int nonpad_ret = 0; ret = 0; spliced = 0; while (len && !spliced) { ret = subbuf_splice_actor(in, ppos, pipe, len, flags, &nonpad_ret); if (ret < 0) break; else if (!ret) { if (flags & SPLICE_F_NONBLOCK) ret = -EAGAIN; break; } *ppos += ret; if (ret > len) len = 0; else len -= ret; spliced += nonpad_ret; nonpad_ret = 0; } if (spliced) return spliced; return ret; } const struct file_operations relay_file_operations = { .open = relay_file_open, .poll = relay_file_poll, .mmap = relay_file_mmap, .read = relay_file_read, .llseek = no_llseek, .release = relay_file_release, .splice_read = relay_file_splice_read, }; EXPORT_SYMBOL_GPL(relay_file_operations); static __init int relay_init(void) { hotcpu_notifier(relay_hotcpu_callback, 0); return 0; } early_initcall(relay_init);
gpl-2.0
yseung123/android_kernel_oneplus_msm8994
drivers/target/target_core_configfs.c
1351
82407
/******************************************************************************* * Filename: target_core_configfs.c * * This file contains ConfigFS logic for the Generic Target Engine project. * * (c) Copyright 2008-2012 RisingTide Systems LLC. * * Nicholas A. Bellinger <nab@kernel.org> * * based on configfs Copyright (C) 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. ****************************************************************************/ #include <linux/module.h> #include <linux/moduleparam.h> #include <generated/utsrelease.h> #include <linux/utsname.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/unistd.h> #include <linux/string.h> #include <linux/parser.h> #include <linux/syscalls.h> #include <linux/configfs.h> #include <linux/spinlock.h> #include <target/target_core_base.h> #include <target/target_core_backend.h> #include <target/target_core_fabric.h> #include <target/target_core_fabric_configfs.h> #include <target/target_core_configfs.h> #include <target/configfs_macros.h> #include "target_core_internal.h" #include "target_core_alua.h" #include "target_core_pr.h" #include "target_core_rd.h" extern struct t10_alua_lu_gp *default_lu_gp; static LIST_HEAD(g_tf_list); static DEFINE_MUTEX(g_tf_lock); struct target_core_configfs_attribute { struct configfs_attribute attr; ssize_t (*show)(void *, char *); ssize_t (*store)(void *, const char *, size_t); }; static struct 
config_group target_core_hbagroup; static struct config_group alua_group; static struct config_group alua_lu_gps_group; static inline struct se_hba * item_to_hba(struct config_item *item) { return container_of(to_config_group(item), struct se_hba, hba_group); } /* * Attributes for /sys/kernel/config/target/ */ static ssize_t target_core_attr_show(struct config_item *item, struct configfs_attribute *attr, char *page) { return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s" " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION, utsname()->sysname, utsname()->machine); } static struct configfs_item_operations target_core_fabric_item_ops = { .show_attribute = target_core_attr_show, }; static struct configfs_attribute target_core_item_attr_version = { .ca_owner = THIS_MODULE, .ca_name = "version", .ca_mode = S_IRUGO, }; static struct target_fabric_configfs *target_core_get_fabric( const char *name) { struct target_fabric_configfs *tf; if (!name) return NULL; mutex_lock(&g_tf_lock); list_for_each_entry(tf, &g_tf_list, tf_list) { if (!strcmp(tf->tf_name, name)) { atomic_inc(&tf->tf_access_cnt); mutex_unlock(&g_tf_lock); return tf; } } mutex_unlock(&g_tf_lock); return NULL; } /* * Called from struct target_core_group_ops->make_group() */ static struct config_group *target_core_register_fabric( struct config_group *group, const char *name) { struct target_fabric_configfs *tf; int ret; pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:" " %s\n", group, name); /* * Below are some hardcoded request_module() calls to automatically * local fabric modules when the following is called: * * mkdir -p /sys/kernel/config/target/$MODULE_NAME * * Note that this does not limit which TCM fabric module can be * registered, but simply provids auto loading logic for modules with * mkdir(2) system calls with known TCM fabric modules. 
*/ if (!strncmp(name, "iscsi", 5)) { /* * Automatically load the LIO Target fabric module when the * following is called: * * mkdir -p $CONFIGFS/target/iscsi */ ret = request_module("iscsi_target_mod"); if (ret < 0) { pr_err("request_module() failed for" " iscsi_target_mod.ko: %d\n", ret); return ERR_PTR(-EINVAL); } } else if (!strncmp(name, "loopback", 8)) { /* * Automatically load the tcm_loop fabric module when the * following is called: * * mkdir -p $CONFIGFS/target/loopback */ ret = request_module("tcm_loop"); if (ret < 0) { pr_err("request_module() failed for" " tcm_loop.ko: %d\n", ret); return ERR_PTR(-EINVAL); } } tf = target_core_get_fabric(name); if (!tf) { pr_err("target_core_get_fabric() failed for %s\n", name); return ERR_PTR(-EINVAL); } pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" " %s\n", tf->tf_name); /* * On a successful target_core_get_fabric() look, the returned * struct target_fabric_configfs *tf will contain a usage reference. */ pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", &TF_CIT_TMPL(tf)->tfc_wwn_cit); tf->tf_group.default_groups = tf->tf_default_groups; tf->tf_group.default_groups[0] = &tf->tf_disc_group; tf->tf_group.default_groups[1] = NULL; config_group_init_type_name(&tf->tf_group, name, &TF_CIT_TMPL(tf)->tfc_wwn_cit); config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", &TF_CIT_TMPL(tf)->tfc_discovery_cit); pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" " %s\n", tf->tf_group.cg_item.ci_name); /* * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item() */ tf->tf_ops.tf_subsys = tf->tf_subsys; tf->tf_fabric = &tf->tf_group.cg_item; pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" " for %s\n", name); return &tf->tf_group; } /* * Called from struct target_core_group_ops->drop_item() */ static void target_core_deregister_fabric( struct config_group *group, struct config_item *item) { struct target_fabric_configfs *tf = container_of( 
to_config_group(item), struct target_fabric_configfs, tf_group); struct config_group *tf_group; struct config_item *df_item; int i; pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in" " tf list\n", config_item_name(item)); pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:" " %s\n", tf->tf_name); atomic_dec(&tf->tf_access_cnt); pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing" " tf->tf_fabric for %s\n", tf->tf_name); tf->tf_fabric = NULL; pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci" " %s\n", config_item_name(item)); tf_group = &tf->tf_group; for (i = 0; tf_group->default_groups[i]; i++) { df_item = &tf_group->default_groups[i]->cg_item; tf_group->default_groups[i] = NULL; config_item_put(df_item); } config_item_put(item); } static struct configfs_group_operations target_core_fabric_group_ops = { .make_group = &target_core_register_fabric, .drop_item = &target_core_deregister_fabric, }; /* * All item attributes appearing in /sys/kernel/target/ appear here. 
*/ static struct configfs_attribute *target_core_fabric_item_attrs[] = { &target_core_item_attr_version, NULL, }; /* * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/ */ static struct config_item_type target_core_fabrics_item = { .ct_item_ops = &target_core_fabric_item_ops, .ct_group_ops = &target_core_fabric_group_ops, .ct_attrs = target_core_fabric_item_attrs, .ct_owner = THIS_MODULE, }; static struct configfs_subsystem target_core_fabrics = { .su_group = { .cg_item = { .ci_namebuf = "target", .ci_type = &target_core_fabrics_item, }, }, }; static struct configfs_subsystem *target_core_subsystem[] = { &target_core_fabrics, NULL, }; /*############################################################################## // Start functions called by external Target Fabrics Modules //############################################################################*/ /* * First function called by fabric modules to: * * 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer. * 2) Add struct target_fabric_configfs to g_tf_list * 3) Return struct target_fabric_configfs to fabric module to be passed * into target_fabric_configfs_register(). 
*/ struct target_fabric_configfs *target_fabric_configfs_init( struct module *fabric_mod, const char *name) { struct target_fabric_configfs *tf; if (!(name)) { pr_err("Unable to locate passed fabric name\n"); return ERR_PTR(-EINVAL); } if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) { pr_err("Passed name: %s exceeds TARGET_FABRIC" "_NAME_SIZE\n", name); return ERR_PTR(-EINVAL); } tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); if (!tf) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&tf->tf_list); atomic_set(&tf->tf_access_cnt, 0); /* * Setup the default generic struct config_item_type's (cits) in * struct target_fabric_configfs->tf_cit_tmpl */ tf->tf_module = fabric_mod; target_fabric_setup_cits(tf); tf->tf_subsys = target_core_subsystem[0]; snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name); mutex_lock(&g_tf_lock); list_add_tail(&tf->tf_list, &g_tf_list); mutex_unlock(&g_tf_lock); pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>" ">>>>>>>>>>>>>>\n"); pr_debug("Initialized struct target_fabric_configfs: %p for" " %s\n", tf, tf->tf_name); return tf; } EXPORT_SYMBOL(target_fabric_configfs_init); /* * Called by fabric plugins after FAILED target_fabric_configfs_register() call. */ void target_fabric_configfs_free( struct target_fabric_configfs *tf) { mutex_lock(&g_tf_lock); list_del(&tf->tf_list); mutex_unlock(&g_tf_lock); kfree(tf); } EXPORT_SYMBOL(target_fabric_configfs_free); /* * Perform a sanity check of the passed tf->tf_ops before completing * TCM fabric module registration. 
*/ static int target_fabric_tf_ops_check( struct target_fabric_configfs *tf) { struct target_core_fabric_ops *tfo = &tf->tf_ops; if (!tfo->get_fabric_name) { pr_err("Missing tfo->get_fabric_name()\n"); return -EINVAL; } if (!tfo->get_fabric_proto_ident) { pr_err("Missing tfo->get_fabric_proto_ident()\n"); return -EINVAL; } if (!tfo->tpg_get_wwn) { pr_err("Missing tfo->tpg_get_wwn()\n"); return -EINVAL; } if (!tfo->tpg_get_tag) { pr_err("Missing tfo->tpg_get_tag()\n"); return -EINVAL; } if (!tfo->tpg_get_default_depth) { pr_err("Missing tfo->tpg_get_default_depth()\n"); return -EINVAL; } if (!tfo->tpg_get_pr_transport_id) { pr_err("Missing tfo->tpg_get_pr_transport_id()\n"); return -EINVAL; } if (!tfo->tpg_get_pr_transport_id_len) { pr_err("Missing tfo->tpg_get_pr_transport_id_len()\n"); return -EINVAL; } if (!tfo->tpg_check_demo_mode) { pr_err("Missing tfo->tpg_check_demo_mode()\n"); return -EINVAL; } if (!tfo->tpg_check_demo_mode_cache) { pr_err("Missing tfo->tpg_check_demo_mode_cache()\n"); return -EINVAL; } if (!tfo->tpg_check_demo_mode_write_protect) { pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n"); return -EINVAL; } if (!tfo->tpg_check_prod_mode_write_protect) { pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n"); return -EINVAL; } if (!tfo->tpg_alloc_fabric_acl) { pr_err("Missing tfo->tpg_alloc_fabric_acl()\n"); return -EINVAL; } if (!tfo->tpg_release_fabric_acl) { pr_err("Missing tfo->tpg_release_fabric_acl()\n"); return -EINVAL; } if (!tfo->tpg_get_inst_index) { pr_err("Missing tfo->tpg_get_inst_index()\n"); return -EINVAL; } if (!tfo->release_cmd) { pr_err("Missing tfo->release_cmd()\n"); return -EINVAL; } if (!tfo->shutdown_session) { pr_err("Missing tfo->shutdown_session()\n"); return -EINVAL; } if (!tfo->close_session) { pr_err("Missing tfo->close_session()\n"); return -EINVAL; } if (!tfo->sess_get_index) { pr_err("Missing tfo->sess_get_index()\n"); return -EINVAL; } if (!tfo->write_pending) { pr_err("Missing 
tfo->write_pending()\n"); return -EINVAL; } if (!tfo->write_pending_status) { pr_err("Missing tfo->write_pending_status()\n"); return -EINVAL; } if (!tfo->set_default_node_attributes) { pr_err("Missing tfo->set_default_node_attributes()\n"); return -EINVAL; } if (!tfo->get_task_tag) { pr_err("Missing tfo->get_task_tag()\n"); return -EINVAL; } if (!tfo->get_cmd_state) { pr_err("Missing tfo->get_cmd_state()\n"); return -EINVAL; } if (!tfo->queue_data_in) { pr_err("Missing tfo->queue_data_in()\n"); return -EINVAL; } if (!tfo->queue_status) { pr_err("Missing tfo->queue_status()\n"); return -EINVAL; } if (!tfo->queue_tm_rsp) { pr_err("Missing tfo->queue_tm_rsp()\n"); return -EINVAL; } /* * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in * target_core_fabric_configfs.c WWN+TPG group context code. */ if (!tfo->fabric_make_wwn) { pr_err("Missing tfo->fabric_make_wwn()\n"); return -EINVAL; } if (!tfo->fabric_drop_wwn) { pr_err("Missing tfo->fabric_drop_wwn()\n"); return -EINVAL; } if (!tfo->fabric_make_tpg) { pr_err("Missing tfo->fabric_make_tpg()\n"); return -EINVAL; } if (!tfo->fabric_drop_tpg) { pr_err("Missing tfo->fabric_drop_tpg()\n"); return -EINVAL; } return 0; } /* * Called 2nd from fabric module with returned parameter of * struct target_fabric_configfs * from target_fabric_configfs_init(). * * Upon a successful registration, the new fabric's struct config_item is * return. Also, a pointer to this struct is set in the passed * struct target_fabric_configfs. 
*/ int target_fabric_configfs_register( struct target_fabric_configfs *tf) { int ret; if (!tf) { pr_err("Unable to locate target_fabric_configfs" " pointer\n"); return -EINVAL; } if (!tf->tf_subsys) { pr_err("Unable to target struct config_subsystem" " pointer\n"); return -EINVAL; } ret = target_fabric_tf_ops_check(tf); if (ret < 0) return ret; pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>" ">>>>>>>>>>\n"); return 0; } EXPORT_SYMBOL(target_fabric_configfs_register); void target_fabric_configfs_deregister( struct target_fabric_configfs *tf) { struct configfs_subsystem *su; if (!tf) { pr_err("Unable to locate passed target_fabric_" "configfs\n"); return; } su = tf->tf_subsys; if (!su) { pr_err("Unable to locate passed tf->tf_subsys" " pointer\n"); return; } pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>" ">>>>>>>>>>>>\n"); mutex_lock(&g_tf_lock); if (atomic_read(&tf->tf_access_cnt)) { mutex_unlock(&g_tf_lock); pr_err("Non zero tf->tf_access_cnt for fabric %s\n", tf->tf_name); BUG(); } list_del(&tf->tf_list); mutex_unlock(&g_tf_lock); pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:" " %s\n", tf->tf_name); tf->tf_module = NULL; tf->tf_subsys = NULL; kfree(tf); pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>" ">>>>>\n"); } EXPORT_SYMBOL(target_fabric_configfs_deregister); /*############################################################################## // Stop functions called by external Target Fabrics Modules //############################################################################*/ /* Start functions for struct config_item_type target_core_dev_attrib_cit */ #define DEF_DEV_ATTRIB_SHOW(_name) \ static ssize_t target_core_dev_show_attr_##_name( \ struct se_dev_attrib *da, \ char *page) \ { \ return snprintf(page, PAGE_SIZE, "%u\n", \ (u32)da->da_dev->dev_attrib._name); \ } #define DEF_DEV_ATTRIB_STORE(_name) \ static ssize_t target_core_dev_store_attr_##_name( \ struct se_dev_attrib *da, \ const char 
*page, \ size_t count) \ { \ unsigned long val; \ int ret; \ \ ret = strict_strtoul(page, 0, &val); \ if (ret < 0) { \ pr_err("strict_strtoul() failed with" \ " ret: %d\n", ret); \ return -EINVAL; \ } \ ret = se_dev_set_##_name(da->da_dev, (u32)val); \ \ return (!ret) ? count : -EINVAL; \ } #define DEF_DEV_ATTRIB(_name) \ DEF_DEV_ATTRIB_SHOW(_name); \ DEF_DEV_ATTRIB_STORE(_name); #define DEF_DEV_ATTRIB_RO(_name) \ DEF_DEV_ATTRIB_SHOW(_name); CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib); #define SE_DEV_ATTR(_name, _mode) \ static struct target_core_dev_attrib_attribute \ target_core_dev_attrib_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ target_core_dev_show_attr_##_name, \ target_core_dev_store_attr_##_name); #define SE_DEV_ATTR_RO(_name); \ static struct target_core_dev_attrib_attribute \ target_core_dev_attrib_##_name = \ __CONFIGFS_EATTR_RO(_name, \ target_core_dev_show_attr_##_name); DEF_DEV_ATTRIB(emulate_model_alias); SE_DEV_ATTR(emulate_model_alias, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(emulate_dpo); SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(emulate_fua_write); SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(emulate_fua_read); SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(emulate_write_cache); SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl); SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(emulate_tas); SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(emulate_tpu); SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(emulate_tpws); SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(enforce_pr_isids); SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(is_nonrot); SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(emulate_rest_reord); SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB_RO(hw_block_size); SE_DEV_ATTR_RO(hw_block_size); DEF_DEV_ATTRIB(block_size); 
SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB_RO(hw_max_sectors); SE_DEV_ATTR_RO(hw_max_sectors); DEF_DEV_ATTRIB(fabric_max_sectors); SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(optimal_sectors); SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB_RO(hw_queue_depth); SE_DEV_ATTR_RO(hw_queue_depth); DEF_DEV_ATTRIB(queue_depth); SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(max_unmap_lba_count); SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(max_unmap_block_desc_count); SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(unmap_granularity); SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(unmap_granularity_alignment); SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(max_write_same_len); SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR); CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group); static struct configfs_attribute *target_core_dev_attrib_attrs[] = { &target_core_dev_attrib_emulate_model_alias.attr, &target_core_dev_attrib_emulate_dpo.attr, &target_core_dev_attrib_emulate_fua_write.attr, &target_core_dev_attrib_emulate_fua_read.attr, &target_core_dev_attrib_emulate_write_cache.attr, &target_core_dev_attrib_emulate_ua_intlck_ctrl.attr, &target_core_dev_attrib_emulate_tas.attr, &target_core_dev_attrib_emulate_tpu.attr, &target_core_dev_attrib_emulate_tpws.attr, &target_core_dev_attrib_enforce_pr_isids.attr, &target_core_dev_attrib_is_nonrot.attr, &target_core_dev_attrib_emulate_rest_reord.attr, &target_core_dev_attrib_hw_block_size.attr, &target_core_dev_attrib_block_size.attr, &target_core_dev_attrib_hw_max_sectors.attr, &target_core_dev_attrib_fabric_max_sectors.attr, &target_core_dev_attrib_optimal_sectors.attr, &target_core_dev_attrib_hw_queue_depth.attr, &target_core_dev_attrib_queue_depth.attr, &target_core_dev_attrib_max_unmap_lba_count.attr, 
&target_core_dev_attrib_max_unmap_block_desc_count.attr, &target_core_dev_attrib_unmap_granularity.attr, &target_core_dev_attrib_unmap_granularity_alignment.attr, &target_core_dev_attrib_max_write_same_len.attr, NULL, }; static struct configfs_item_operations target_core_dev_attrib_ops = { .show_attribute = target_core_dev_attrib_attr_show, .store_attribute = target_core_dev_attrib_attr_store, }; static struct config_item_type target_core_dev_attrib_cit = { .ct_item_ops = &target_core_dev_attrib_ops, .ct_attrs = target_core_dev_attrib_attrs, .ct_owner = THIS_MODULE, }; /* End functions for struct config_item_type target_core_dev_attrib_cit */ /* Start functions for struct config_item_type target_core_dev_wwn_cit */ CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn); #define SE_DEV_WWN_ATTR(_name, _mode) \ static struct target_core_dev_wwn_attribute target_core_dev_wwn_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ target_core_dev_wwn_show_attr_##_name, \ target_core_dev_wwn_store_attr_##_name); #define SE_DEV_WWN_ATTR_RO(_name); \ do { \ static struct target_core_dev_wwn_attribute \ target_core_dev_wwn_##_name = \ __CONFIGFS_EATTR_RO(_name, \ target_core_dev_wwn_show_attr_##_name); \ } while (0); /* * VPD page 0x80 Unit serial */ static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial( struct t10_wwn *t10_wwn, char *page) { return sprintf(page, "T10 VPD Unit Serial Number: %s\n", &t10_wwn->unit_serial[0]); } static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial( struct t10_wwn *t10_wwn, const char *page, size_t count) { struct se_device *dev = t10_wwn->t10_dev; unsigned char buf[INQUIRY_VPD_SERIAL_LEN]; /* * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial * from the struct scsi_device level firmware, do not allow * VPD Unit Serial to be emulated. * * Note this struct scsi_device could also be emulating VPD * information from its drivers/scsi LLD. 
But for now we assume * it is doing 'the right thing' wrt a world wide unique * VPD Unit Serial Number that OS dependent multipath can depend on. */ if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) { pr_err("Underlying SCSI device firmware provided VPD" " Unit Serial, ignoring request\n"); return -EOPNOTSUPP; } if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) { pr_err("Emulated VPD Unit Serial exceeds" " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN); return -EOVERFLOW; } /* * Check to see if any active $FABRIC_MOD exports exist. If they * do exist, fail here as changing this information on the fly * (underneath the initiator side OS dependent multipath code) * could cause negative effects. */ if (dev->export_count) { pr_err("Unable to set VPD Unit Serial while" " active %d $FABRIC_MOD exports exist\n", dev->export_count); return -EINVAL; } /* * This currently assumes ASCII encoding for emulated VPD Unit Serial. * * Also, strip any newline added from the userspace * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial */ memset(buf, 0, INQUIRY_VPD_SERIAL_LEN); snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page); snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN, "%s", strstrip(buf)); dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL; pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:" " %s\n", dev->t10_wwn.unit_serial); return count; } SE_DEV_WWN_ATTR(vpd_unit_serial, S_IRUGO | S_IWUSR); /* * VPD page 0x83 Protocol Identifier */ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier( struct t10_wwn *t10_wwn, char *page) { struct t10_vpd *vpd; unsigned char buf[VPD_TMP_BUF_SIZE]; ssize_t len = 0; memset(buf, 0, VPD_TMP_BUF_SIZE); spin_lock(&t10_wwn->t10_vpd_lock); list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { if (!vpd->protocol_identifier_set) continue; transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE); if (len + strlen(buf) >= PAGE_SIZE) break; len += sprintf(page+len, "%s", buf); } 
spin_unlock(&t10_wwn->t10_vpd_lock); return len; } static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier( struct t10_wwn *t10_wwn, const char *page, size_t count) { return -ENOSYS; } SE_DEV_WWN_ATTR(vpd_protocol_identifier, S_IRUGO | S_IWUSR); /* * Generic wrapper for dumping VPD identifiers by association. */ #define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc) \ static ssize_t target_core_dev_wwn_show_attr_##_name( \ struct t10_wwn *t10_wwn, \ char *page) \ { \ struct t10_vpd *vpd; \ unsigned char buf[VPD_TMP_BUF_SIZE]; \ ssize_t len = 0; \ \ spin_lock(&t10_wwn->t10_vpd_lock); \ list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \ if (vpd->association != _assoc) \ continue; \ \ memset(buf, 0, VPD_TMP_BUF_SIZE); \ transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \ if (len + strlen(buf) >= PAGE_SIZE) \ break; \ len += sprintf(page+len, "%s", buf); \ \ memset(buf, 0, VPD_TMP_BUF_SIZE); \ transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \ if (len + strlen(buf) >= PAGE_SIZE) \ break; \ len += sprintf(page+len, "%s", buf); \ \ memset(buf, 0, VPD_TMP_BUF_SIZE); \ transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \ if (len + strlen(buf) >= PAGE_SIZE) \ break; \ len += sprintf(page+len, "%s", buf); \ } \ spin_unlock(&t10_wwn->t10_vpd_lock); \ \ return len; \ } /* * VPD page 0x83 Association: Logical Unit */ DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00); static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit( struct t10_wwn *t10_wwn, const char *page, size_t count) { return -ENOSYS; } SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR); /* * VPD page 0x83 Association: Target Port */ DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10); static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port( struct t10_wwn *t10_wwn, const char *page, size_t count) { return -ENOSYS; } SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR); /* * VPD page 0x83 Association: SCSI Target Device */ 
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20); static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device( struct t10_wwn *t10_wwn, const char *page, size_t count) { return -ENOSYS; } SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR); CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group); static struct configfs_attribute *target_core_dev_wwn_attrs[] = { &target_core_dev_wwn_vpd_unit_serial.attr, &target_core_dev_wwn_vpd_protocol_identifier.attr, &target_core_dev_wwn_vpd_assoc_logical_unit.attr, &target_core_dev_wwn_vpd_assoc_target_port.attr, &target_core_dev_wwn_vpd_assoc_scsi_target_device.attr, NULL, }; static struct configfs_item_operations target_core_dev_wwn_ops = { .show_attribute = target_core_dev_wwn_attr_show, .store_attribute = target_core_dev_wwn_attr_store, }; static struct config_item_type target_core_dev_wwn_cit = { .ct_item_ops = &target_core_dev_wwn_ops, .ct_attrs = target_core_dev_wwn_attrs, .ct_owner = THIS_MODULE, }; /* End functions for struct config_item_type target_core_dev_wwn_cit */ /* Start functions for struct config_item_type target_core_dev_pr_cit */ CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device); #define SE_DEV_PR_ATTR(_name, _mode) \ static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ target_core_dev_pr_show_attr_##_name, \ target_core_dev_pr_store_attr_##_name); #define SE_DEV_PR_ATTR_RO(_name); \ static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \ __CONFIGFS_EATTR_RO(_name, \ target_core_dev_pr_show_attr_##_name); static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev, char *page) { struct se_node_acl *se_nacl; struct t10_pr_registration *pr_reg; char i_buf[PR_REG_ISID_ID_LEN]; int prf_isid; memset(i_buf, 0, PR_REG_ISID_ID_LEN); pr_reg = dev->dev_pr_res_holder; if (!pr_reg) return sprintf(page, "No SPC-3 Reservation holder\n"); se_nacl = pr_reg->pr_reg_nacl; prf_isid = 
core_pr_dump_initiator_port(pr_reg, &i_buf[0], PR_REG_ISID_ID_LEN); return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n", se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); } static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev, char *page) { struct se_node_acl *se_nacl; ssize_t len; se_nacl = dev->dev_reserved_node_acl; if (se_nacl) { len = sprintf(page, "SPC-2 Reservation: %s Initiator: %s\n", se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), se_nacl->initiatorname); } else { len = sprintf(page, "No SPC-2 Reservation holder\n"); } return len; } static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev, char *page) { int ret; if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) return sprintf(page, "Passthrough\n"); spin_lock(&dev->dev_reservation_lock); if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) ret = target_core_dev_pr_show_spc2_res(dev, page); else ret = target_core_dev_pr_show_spc3_res(dev, page); spin_unlock(&dev->dev_reservation_lock); return ret; } SE_DEV_PR_ATTR_RO(res_holder); static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts( struct se_device *dev, char *page) { ssize_t len = 0; spin_lock(&dev->dev_reservation_lock); if (!dev->dev_pr_res_holder) { len = sprintf(page, "No SPC-3 Reservation holder\n"); } else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) { len = sprintf(page, "SPC-3 Reservation: All Target" " Ports registration\n"); } else { len = sprintf(page, "SPC-3 Reservation: Single" " Target Port registration\n"); } spin_unlock(&dev->dev_reservation_lock); return len; } SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts); static ssize_t target_core_dev_pr_show_attr_res_pr_generation( struct se_device *dev, char *page) { return sprintf(page, "0x%08x\n", dev->t10_pr.pr_generation); } SE_DEV_PR_ATTR_RO(res_pr_generation); /* * res_pr_holder_tg_port */ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( struct se_device 
*dev, char *page) { struct se_node_acl *se_nacl; struct se_lun *lun; struct se_portal_group *se_tpg; struct t10_pr_registration *pr_reg; struct target_core_fabric_ops *tfo; ssize_t len = 0; spin_lock(&dev->dev_reservation_lock); pr_reg = dev->dev_pr_res_holder; if (!pr_reg) { len = sprintf(page, "No SPC-3 Reservation holder\n"); goto out_unlock; } se_nacl = pr_reg->pr_reg_nacl; se_tpg = se_nacl->se_tpg; lun = pr_reg->pr_reg_tg_pt_lun; tfo = se_tpg->se_tpg_tfo; len += sprintf(page+len, "SPC-3 Reservation: %s" " Target Node Endpoint: %s\n", tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg)); len += sprintf(page+len, "SPC-3 Reservation: Relative Port" " Identifier Tag: %hu %s Portal Group Tag: %hu" " %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi, tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg), tfo->get_fabric_name(), lun->unpacked_lun); out_unlock: spin_unlock(&dev->dev_reservation_lock); return len; } SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port); static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( struct se_device *dev, char *page) { struct target_core_fabric_ops *tfo; struct t10_pr_registration *pr_reg; unsigned char buf[384]; char i_buf[PR_REG_ISID_ID_LEN]; ssize_t len = 0; int reg_count = 0, prf_isid; len += sprintf(page+len, "SPC-3 PR Registrations:\n"); spin_lock(&dev->t10_pr.registration_lock); list_for_each_entry(pr_reg, &dev->t10_pr.registration_list, pr_reg_list) { memset(buf, 0, 384); memset(i_buf, 0, PR_REG_ISID_ID_LEN); tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], PR_REG_ISID_ID_LEN); sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ? 
&i_buf[0] : "", pr_reg->pr_res_key, pr_reg->pr_res_generation); if (len + strlen(buf) >= PAGE_SIZE) break; len += sprintf(page+len, "%s", buf); reg_count++; } spin_unlock(&dev->t10_pr.registration_lock); if (!reg_count) len += sprintf(page+len, "None\n"); return len; } SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts); static ssize_t target_core_dev_pr_show_attr_res_pr_type( struct se_device *dev, char *page) { struct t10_pr_registration *pr_reg; ssize_t len = 0; spin_lock(&dev->dev_reservation_lock); pr_reg = dev->dev_pr_res_holder; if (pr_reg) { len = sprintf(page, "SPC-3 Reservation Type: %s\n", core_scsi3_pr_dump_type(pr_reg->pr_res_type)); } else { len = sprintf(page, "No SPC-3 Reservation holder\n"); } spin_unlock(&dev->dev_reservation_lock); return len; } SE_DEV_PR_ATTR_RO(res_pr_type); static ssize_t target_core_dev_pr_show_attr_res_type( struct se_device *dev, char *page) { if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) return sprintf(page, "SPC_PASSTHROUGH\n"); else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) return sprintf(page, "SPC2_RESERVATIONS\n"); else return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n"); } SE_DEV_PR_ATTR_RO(res_type); static ssize_t target_core_dev_pr_show_attr_res_aptpl_active( struct se_device *dev, char *page) { if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) return 0; return sprintf(page, "APTPL Bit Status: %s\n", (dev->t10_pr.pr_aptpl_active) ? 
"Activated" : "Disabled"); } SE_DEV_PR_ATTR_RO(res_aptpl_active); /* * res_aptpl_metadata */ static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata( struct se_device *dev, char *page) { if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) return 0; return sprintf(page, "Ready to process PR APTPL metadata..\n"); } enum { Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid, Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope, Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric, Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err }; static match_table_t tokens = { {Opt_initiator_fabric, "initiator_fabric=%s"}, {Opt_initiator_node, "initiator_node=%s"}, {Opt_initiator_sid, "initiator_sid=%s"}, {Opt_sa_res_key, "sa_res_key=%s"}, {Opt_res_holder, "res_holder=%d"}, {Opt_res_type, "res_type=%d"}, {Opt_res_scope, "res_scope=%d"}, {Opt_res_all_tg_pt, "res_all_tg_pt=%d"}, {Opt_mapped_lun, "mapped_lun=%d"}, {Opt_target_fabric, "target_fabric=%s"}, {Opt_target_node, "target_node=%s"}, {Opt_tpgt, "tpgt=%d"}, {Opt_port_rtpi, "port_rtpi=%d"}, {Opt_target_lun, "target_lun=%d"}, {Opt_err, NULL} }; static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( struct se_device *dev, const char *page, size_t count) { unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL; unsigned char *t_fabric = NULL, *t_port = NULL; char *orig, *ptr, *arg_p, *opts; substring_t args[MAX_OPT_ARGS]; unsigned long long tmp_ll; u64 sa_res_key = 0; u32 mapped_lun = 0, target_lun = 0; int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token; u16 port_rpti = 0, tpgt = 0; u8 type = 0, scope; if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) return 0; if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) return 0; if (dev->export_count) { pr_debug("Unable to process APTPL metadata while" " active fabric exports exist\n"); return -EINVAL; } opts = kstrdup(page, GFP_KERNEL); if (!opts) return -ENOMEM; orig = opts; while ((ptr = 
strsep(&opts, ",\n")) != NULL) { if (!*ptr) continue; token = match_token(ptr, tokens, args); switch (token) { case Opt_initiator_fabric: i_fabric = match_strdup(&args[0]); if (!i_fabric) { ret = -ENOMEM; goto out; } break; case Opt_initiator_node: i_port = match_strdup(&args[0]); if (!i_port) { ret = -ENOMEM; goto out; } if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) { pr_err("APTPL metadata initiator_node=" " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", PR_APTPL_MAX_IPORT_LEN); ret = -EINVAL; break; } break; case Opt_initiator_sid: isid = match_strdup(&args[0]); if (!isid) { ret = -ENOMEM; goto out; } if (strlen(isid) >= PR_REG_ISID_LEN) { pr_err("APTPL metadata initiator_isid" "= exceeds PR_REG_ISID_LEN: %d\n", PR_REG_ISID_LEN); ret = -EINVAL; break; } break; case Opt_sa_res_key: arg_p = match_strdup(&args[0]); if (!arg_p) { ret = -ENOMEM; goto out; } ret = strict_strtoull(arg_p, 0, &tmp_ll); if (ret < 0) { pr_err("strict_strtoull() failed for" " sa_res_key=\n"); goto out; } sa_res_key = (u64)tmp_ll; break; /* * PR APTPL Metadata for Reservation */ case Opt_res_holder: match_int(args, &arg); res_holder = arg; break; case Opt_res_type: match_int(args, &arg); type = (u8)arg; break; case Opt_res_scope: match_int(args, &arg); scope = (u8)arg; break; case Opt_res_all_tg_pt: match_int(args, &arg); all_tg_pt = (int)arg; break; case Opt_mapped_lun: match_int(args, &arg); mapped_lun = (u32)arg; break; /* * PR APTPL Metadata for Target Port */ case Opt_target_fabric: t_fabric = match_strdup(&args[0]); if (!t_fabric) { ret = -ENOMEM; goto out; } break; case Opt_target_node: t_port = match_strdup(&args[0]); if (!t_port) { ret = -ENOMEM; goto out; } if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) { pr_err("APTPL metadata target_node=" " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", PR_APTPL_MAX_TPORT_LEN); ret = -EINVAL; break; } break; case Opt_tpgt: match_int(args, &arg); tpgt = (u16)arg; break; case Opt_port_rtpi: match_int(args, &arg); port_rpti = (u16)arg; break; case Opt_target_lun: 
match_int(args, &arg); target_lun = (u32)arg; break; default: break; } } if (!i_port || !t_port || !sa_res_key) { pr_err("Illegal parameters for APTPL registration\n"); ret = -EINVAL; goto out; } if (res_holder && !(type)) { pr_err("Illegal PR type: 0x%02x for reservation" " holder\n", type); ret = -EINVAL; goto out; } ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key, i_port, isid, mapped_lun, t_port, tpgt, target_lun, res_holder, all_tg_pt, type); out: kfree(i_fabric); kfree(i_port); kfree(isid); kfree(t_fabric); kfree(t_port); kfree(orig); return (ret == 0) ? count : ret; } SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR); CONFIGFS_EATTR_OPS(target_core_dev_pr, se_device, dev_pr_group); static struct configfs_attribute *target_core_dev_pr_attrs[] = { &target_core_dev_pr_res_holder.attr, &target_core_dev_pr_res_pr_all_tgt_pts.attr, &target_core_dev_pr_res_pr_generation.attr, &target_core_dev_pr_res_pr_holder_tg_port.attr, &target_core_dev_pr_res_pr_registered_i_pts.attr, &target_core_dev_pr_res_pr_type.attr, &target_core_dev_pr_res_type.attr, &target_core_dev_pr_res_aptpl_active.attr, &target_core_dev_pr_res_aptpl_metadata.attr, NULL, }; static struct configfs_item_operations target_core_dev_pr_ops = { .show_attribute = target_core_dev_pr_attr_show, .store_attribute = target_core_dev_pr_attr_store, }; static struct config_item_type target_core_dev_pr_cit = { .ct_item_ops = &target_core_dev_pr_ops, .ct_attrs = target_core_dev_pr_attrs, .ct_owner = THIS_MODULE, }; /* End functions for struct config_item_type target_core_dev_pr_cit */ /* Start functions for struct config_item_type target_core_dev_cit */ static ssize_t target_core_show_dev_info(void *p, char *page) { struct se_device *dev = p; struct se_subsystem_api *t = dev->transport; int bl = 0; ssize_t read_bytes = 0; transport_dump_dev_state(dev, page, &bl); read_bytes += bl; read_bytes += t->show_configfs_dev_params(dev, page+read_bytes); return read_bytes; } static struct 
target_core_configfs_attribute target_core_attr_dev_info = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "info", .ca_mode = S_IRUGO }, .show = target_core_show_dev_info, .store = NULL, }; static ssize_t target_core_store_dev_control( void *p, const char *page, size_t count) { struct se_device *dev = p; struct se_subsystem_api *t = dev->transport; return t->set_configfs_dev_params(dev, page, count); } static struct target_core_configfs_attribute target_core_attr_dev_control = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "control", .ca_mode = S_IWUSR }, .show = NULL, .store = target_core_store_dev_control, }; static ssize_t target_core_show_dev_alias(void *p, char *page) { struct se_device *dev = p; if (!(dev->dev_flags & DF_USING_ALIAS)) return 0; return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias); } static ssize_t target_core_store_dev_alias( void *p, const char *page, size_t count) { struct se_device *dev = p; struct se_hba *hba = dev->se_hba; ssize_t read_bytes; if (count > (SE_DEV_ALIAS_LEN-1)) { pr_err("alias count: %d exceeds" " SE_DEV_ALIAS_LEN-1: %u\n", (int)count, SE_DEV_ALIAS_LEN-1); return -EINVAL; } read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page); if (!read_bytes) return -EINVAL; if (dev->dev_alias[read_bytes - 1] == '\n') dev->dev_alias[read_bytes - 1] = '\0'; dev->dev_flags |= DF_USING_ALIAS; pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n", config_item_name(&hba->hba_group.cg_item), config_item_name(&dev->dev_group.cg_item), dev->dev_alias); return read_bytes; } static struct target_core_configfs_attribute target_core_attr_dev_alias = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "alias", .ca_mode = S_IRUGO | S_IWUSR }, .show = target_core_show_dev_alias, .store = target_core_store_dev_alias, }; static ssize_t target_core_show_dev_udev_path(void *p, char *page) { struct se_device *dev = p; if (!(dev->dev_flags & DF_USING_UDEV_PATH)) return 0; return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path); } static 
ssize_t target_core_store_dev_udev_path( void *p, const char *page, size_t count) { struct se_device *dev = p; struct se_hba *hba = dev->se_hba; ssize_t read_bytes; if (count > (SE_UDEV_PATH_LEN-1)) { pr_err("udev_path count: %d exceeds" " SE_UDEV_PATH_LEN-1: %u\n", (int)count, SE_UDEV_PATH_LEN-1); return -EINVAL; } read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN, "%s", page); if (!read_bytes) return -EINVAL; if (dev->udev_path[read_bytes - 1] == '\n') dev->udev_path[read_bytes - 1] = '\0'; dev->dev_flags |= DF_USING_UDEV_PATH; pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n", config_item_name(&hba->hba_group.cg_item), config_item_name(&dev->dev_group.cg_item), dev->udev_path); return read_bytes; } static struct target_core_configfs_attribute target_core_attr_dev_udev_path = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "udev_path", .ca_mode = S_IRUGO | S_IWUSR }, .show = target_core_show_dev_udev_path, .store = target_core_store_dev_udev_path, }; static ssize_t target_core_show_dev_enable(void *p, char *page) { struct se_device *dev = p; return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED)); } static ssize_t target_core_store_dev_enable( void *p, const char *page, size_t count) { struct se_device *dev = p; char *ptr; int ret; ptr = strstr(page, "1"); if (!ptr) { pr_err("For dev_enable ops, only valid value" " is \"1\"\n"); return -EINVAL; } ret = target_configure_device(dev); if (ret) return ret; return count; } static struct target_core_configfs_attribute target_core_attr_dev_enable = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "enable", .ca_mode = S_IRUGO | S_IWUSR }, .show = target_core_show_dev_enable, .store = target_core_store_dev_enable, }; static ssize_t target_core_show_alua_lu_gp(void *p, char *page) { struct se_device *dev = p; struct config_item *lu_ci; struct t10_alua_lu_gp *lu_gp; struct t10_alua_lu_gp_member *lu_gp_mem; ssize_t len = 0; lu_gp_mem = dev->dev_alua_lu_gp_mem; if (!lu_gp_mem) return 0; 
spin_lock(&lu_gp_mem->lu_gp_mem_lock); lu_gp = lu_gp_mem->lu_gp; if (lu_gp) { lu_ci = &lu_gp->lu_gp_group.cg_item; len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n", config_item_name(lu_ci), lu_gp->lu_gp_id); } spin_unlock(&lu_gp_mem->lu_gp_mem_lock); return len; } static ssize_t target_core_store_alua_lu_gp( void *p, const char *page, size_t count) { struct se_device *dev = p; struct se_hba *hba = dev->se_hba; struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL; struct t10_alua_lu_gp_member *lu_gp_mem; unsigned char buf[LU_GROUP_NAME_BUF]; int move = 0; lu_gp_mem = dev->dev_alua_lu_gp_mem; if (!lu_gp_mem) return 0; if (count > LU_GROUP_NAME_BUF) { pr_err("ALUA LU Group Alias too large!\n"); return -EINVAL; } memset(buf, 0, LU_GROUP_NAME_BUF); memcpy(buf, page, count); /* * Any ALUA logical unit alias besides "NULL" means we will be * making a new group association. */ if (strcmp(strstrip(buf), "NULL")) { /* * core_alua_get_lu_gp_by_name() will increment reference to * struct t10_alua_lu_gp. This reference is released with * core_alua_get_lu_gp_by_name below(). */ lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf)); if (!lu_gp_new) return -ENODEV; } spin_lock(&lu_gp_mem->lu_gp_mem_lock); lu_gp = lu_gp_mem->lu_gp; if (lu_gp) { /* * Clearing an existing lu_gp association, and replacing * with NULL */ if (!lu_gp_new) { pr_debug("Target_Core_ConfigFS: Releasing %s/%s" " from ALUA LU Group: core/alua/lu_gps/%s, ID:" " %hu\n", config_item_name(&hba->hba_group.cg_item), config_item_name(&dev->dev_group.cg_item), config_item_name(&lu_gp->lu_gp_group.cg_item), lu_gp->lu_gp_id); __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp); spin_unlock(&lu_gp_mem->lu_gp_mem_lock); return count; } /* * Removing existing association of lu_gp_mem with lu_gp */ __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp); move = 1; } /* * Associate lu_gp_mem with lu_gp_new. 
*/ __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new); spin_unlock(&lu_gp_mem->lu_gp_mem_lock); pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:" " core/alua/lu_gps/%s, ID: %hu\n", (move) ? "Moving" : "Adding", config_item_name(&hba->hba_group.cg_item), config_item_name(&dev->dev_group.cg_item), config_item_name(&lu_gp_new->lu_gp_group.cg_item), lu_gp_new->lu_gp_id); core_alua_put_lu_gp_from_name(lu_gp_new); return count; } static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "alua_lu_gp", .ca_mode = S_IRUGO | S_IWUSR }, .show = target_core_show_alua_lu_gp, .store = target_core_store_alua_lu_gp, }; static struct configfs_attribute *lio_core_dev_attrs[] = { &target_core_attr_dev_info.attr, &target_core_attr_dev_control.attr, &target_core_attr_dev_alias.attr, &target_core_attr_dev_udev_path.attr, &target_core_attr_dev_enable.attr, &target_core_attr_dev_alua_lu_gp.attr, NULL, }; static void target_core_dev_release(struct config_item *item) { struct config_group *dev_cg = to_config_group(item); struct se_device *dev = container_of(dev_cg, struct se_device, dev_group); kfree(dev_cg->default_groups); target_free_device(dev); } static ssize_t target_core_dev_show(struct config_item *item, struct configfs_attribute *attr, char *page) { struct config_group *dev_cg = to_config_group(item); struct se_device *dev = container_of(dev_cg, struct se_device, dev_group); struct target_core_configfs_attribute *tc_attr = container_of( attr, struct target_core_configfs_attribute, attr); if (!tc_attr->show) return -EINVAL; return tc_attr->show(dev, page); } static ssize_t target_core_dev_store(struct config_item *item, struct configfs_attribute *attr, const char *page, size_t count) { struct config_group *dev_cg = to_config_group(item); struct se_device *dev = container_of(dev_cg, struct se_device, dev_group); struct target_core_configfs_attribute *tc_attr = container_of( attr, struct 
target_core_configfs_attribute, attr); if (!tc_attr->store) return -EINVAL; return tc_attr->store(dev, page, count); } static struct configfs_item_operations target_core_dev_item_ops = { .release = target_core_dev_release, .show_attribute = target_core_dev_show, .store_attribute = target_core_dev_store, }; static struct config_item_type target_core_dev_cit = { .ct_item_ops = &target_core_dev_item_ops, .ct_attrs = lio_core_dev_attrs, .ct_owner = THIS_MODULE, }; /* End functions for struct config_item_type target_core_dev_cit */ /* Start functions for struct config_item_type target_core_alua_lu_gp_cit */ CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp); #define SE_DEV_ALUA_LU_ATTR(_name, _mode) \ static struct target_core_alua_lu_gp_attribute \ target_core_alua_lu_gp_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ target_core_alua_lu_gp_show_attr_##_name, \ target_core_alua_lu_gp_store_attr_##_name); #define SE_DEV_ALUA_LU_ATTR_RO(_name) \ static struct target_core_alua_lu_gp_attribute \ target_core_alua_lu_gp_##_name = \ __CONFIGFS_EATTR_RO(_name, \ target_core_alua_lu_gp_show_attr_##_name); /* * lu_gp_id */ static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id( struct t10_alua_lu_gp *lu_gp, char *page) { if (!lu_gp->lu_gp_valid_id) return 0; return sprintf(page, "%hu\n", lu_gp->lu_gp_id); } static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id( struct t10_alua_lu_gp *lu_gp, const char *page, size_t count) { struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group; unsigned long lu_gp_id; int ret; ret = strict_strtoul(page, 0, &lu_gp_id); if (ret < 0) { pr_err("strict_strtoul() returned %d for" " lu_gp_id\n", ret); return -EINVAL; } if (lu_gp_id > 0x0000ffff) { pr_err("ALUA lu_gp_id: %lu exceeds maximum:" " 0x0000ffff\n", lu_gp_id); return -EINVAL; } ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id); if (ret < 0) return -EINVAL; pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit" " Group: core/alua/lu_gps/%s to ID: %hu\n", 
config_item_name(&alua_lu_gp_cg->cg_item), lu_gp->lu_gp_id); return count; } SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR); /* * members */ static ssize_t target_core_alua_lu_gp_show_attr_members( struct t10_alua_lu_gp *lu_gp, char *page) { struct se_device *dev; struct se_hba *hba; struct t10_alua_lu_gp_member *lu_gp_mem; ssize_t len = 0, cur_len; unsigned char buf[LU_GROUP_NAME_BUF]; memset(buf, 0, LU_GROUP_NAME_BUF); spin_lock(&lu_gp->lu_gp_lock); list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { dev = lu_gp_mem->lu_gp_mem_dev; hba = dev->se_hba; cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n", config_item_name(&hba->hba_group.cg_item), config_item_name(&dev->dev_group.cg_item)); cur_len++; /* Extra byte for NULL terminator */ if ((cur_len + len) > PAGE_SIZE) { pr_warn("Ran out of lu_gp_show_attr" "_members buffer\n"); break; } memcpy(page+len, buf, cur_len); len += cur_len; } spin_unlock(&lu_gp->lu_gp_lock); return len; } SE_DEV_ALUA_LU_ATTR_RO(members); CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group); static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = { &target_core_alua_lu_gp_lu_gp_id.attr, &target_core_alua_lu_gp_members.attr, NULL, }; static void target_core_alua_lu_gp_release(struct config_item *item) { struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), struct t10_alua_lu_gp, lu_gp_group); core_alua_free_lu_gp(lu_gp); } static struct configfs_item_operations target_core_alua_lu_gp_ops = { .release = target_core_alua_lu_gp_release, .show_attribute = target_core_alua_lu_gp_attr_show, .store_attribute = target_core_alua_lu_gp_attr_store, }; static struct config_item_type target_core_alua_lu_gp_cit = { .ct_item_ops = &target_core_alua_lu_gp_ops, .ct_attrs = target_core_alua_lu_gp_attrs, .ct_owner = THIS_MODULE, }; /* End functions for struct config_item_type target_core_alua_lu_gp_cit */ /* Start functions for struct config_item_type target_core_alua_lu_gps_cit */ static 
struct config_group *target_core_alua_create_lu_gp( struct config_group *group, const char *name) { struct t10_alua_lu_gp *lu_gp; struct config_group *alua_lu_gp_cg = NULL; struct config_item *alua_lu_gp_ci = NULL; lu_gp = core_alua_allocate_lu_gp(name, 0); if (IS_ERR(lu_gp)) return NULL; alua_lu_gp_cg = &lu_gp->lu_gp_group; alua_lu_gp_ci = &alua_lu_gp_cg->cg_item; config_group_init_type_name(alua_lu_gp_cg, name, &target_core_alua_lu_gp_cit); pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit" " Group: core/alua/lu_gps/%s\n", config_item_name(alua_lu_gp_ci)); return alua_lu_gp_cg; } static void target_core_alua_drop_lu_gp( struct config_group *group, struct config_item *item) { struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), struct t10_alua_lu_gp, lu_gp_group); pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit" " Group: core/alua/lu_gps/%s, ID: %hu\n", config_item_name(item), lu_gp->lu_gp_id); /* * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release() * -> target_core_alua_lu_gp_release() */ config_item_put(item); } static struct configfs_group_operations target_core_alua_lu_gps_group_ops = { .make_group = &target_core_alua_create_lu_gp, .drop_item = &target_core_alua_drop_lu_gp, }; static struct config_item_type target_core_alua_lu_gps_cit = { .ct_item_ops = NULL, .ct_group_ops = &target_core_alua_lu_gps_group_ops, .ct_owner = THIS_MODULE, }; /* End functions for struct config_item_type target_core_alua_lu_gps_cit */ /* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp); #define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode) \ static struct target_core_alua_tg_pt_gp_attribute \ target_core_alua_tg_pt_gp_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ target_core_alua_tg_pt_gp_show_attr_##_name, \ target_core_alua_tg_pt_gp_store_attr_##_name); #define SE_DEV_ALUA_TG_PT_ATTR_RO(_name) \ static struct 
target_core_alua_tg_pt_gp_attribute \ target_core_alua_tg_pt_gp_##_name = \ __CONFIGFS_EATTR_RO(_name, \ target_core_alua_tg_pt_gp_show_attr_##_name); /* * alua_access_state */ static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { return sprintf(page, "%d\n", atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state)); } static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, size_t count) { struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; unsigned long tmp; int new_state, ret; if (!tg_pt_gp->tg_pt_gp_valid_id) { pr_err("Unable to do implict ALUA on non valid" " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); return -EINVAL; } if (!(dev->dev_flags & DF_CONFIGURED)) { pr_err("Unable to set alua_access_state while device is" " not configured\n"); return -ENODEV; } ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { pr_err("Unable to extract new ALUA access state from" " %s\n", page); return -EINVAL; } new_state = (int)tmp; if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) { pr_err("Unable to process implict configfs ALUA" " transition while TPGS_IMPLICT_ALUA is disabled\n"); return -EINVAL; } ret = core_alua_do_port_transition(tg_pt_gp, dev, NULL, NULL, new_state, 0); return (!ret) ? 
count : -EINVAL; } SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR); /* * alua_access_status */ static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { return sprintf(page, "%s\n", core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status)); } static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, size_t count) { unsigned long tmp; int new_status, ret; if (!tg_pt_gp->tg_pt_gp_valid_id) { pr_err("Unable to do set ALUA access status on non" " valid tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); return -EINVAL; } ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { pr_err("Unable to extract new ALUA access status" " from %s\n", page); return -EINVAL; } new_status = (int)tmp; if ((new_status != ALUA_STATUS_NONE) && (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { pr_err("Illegal ALUA access status: 0x%02x\n", new_status); return -EINVAL; } tg_pt_gp->tg_pt_gp_alua_access_status = new_status; return count; } SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR); /* * alua_access_type */ static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { return core_alua_show_access_type(tg_pt_gp, page); } static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, size_t count) { return core_alua_store_access_type(tg_pt_gp, page, count); } SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR); /* * alua_write_metadata */ static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata); } static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, 
size_t count) { unsigned long tmp; int ret; ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { pr_err("Unable to extract alua_write_metadata\n"); return -EINVAL; } if ((tmp != 0) && (tmp != 1)) { pr_err("Illegal value for alua_write_metadata:" " %lu\n", tmp); return -EINVAL; } tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp; return count; } SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR); /* * nonop_delay_msecs */ static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { return core_alua_show_nonop_delay_msecs(tg_pt_gp, page); } static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, size_t count) { return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count); } SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR); /* * trans_delay_msecs */ static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { return core_alua_show_trans_delay_msecs(tg_pt_gp, page); } static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, size_t count) { return core_alua_store_trans_delay_msecs(tg_pt_gp, page, count); } SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR); /* * implict_trans_secs */ static ssize_t target_core_alua_tg_pt_gp_show_attr_implict_trans_secs( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { return core_alua_show_implict_trans_secs(tg_pt_gp, page); } static ssize_t target_core_alua_tg_pt_gp_store_attr_implict_trans_secs( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, size_t count) { return core_alua_store_implict_trans_secs(tg_pt_gp, page, count); } SE_DEV_ALUA_TG_PT_ATTR(implict_trans_secs, S_IRUGO | S_IWUSR); /* * preferred */ static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { return 
core_alua_show_preferred_bit(tg_pt_gp, page); } static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, size_t count) { return core_alua_store_preferred_bit(tg_pt_gp, page, count); } SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR); /* * tg_pt_gp_id */ static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { if (!tg_pt_gp->tg_pt_gp_valid_id) return 0; return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id); } static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, size_t count) { struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; unsigned long tg_pt_gp_id; int ret; ret = strict_strtoul(page, 0, &tg_pt_gp_id); if (ret < 0) { pr_err("strict_strtoul() returned %d for" " tg_pt_gp_id\n", ret); return -EINVAL; } if (tg_pt_gp_id > 0x0000ffff) { pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:" " 0x0000ffff\n", tg_pt_gp_id); return -EINVAL; } ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id); if (ret < 0) return -EINVAL; pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: " "core/alua/tg_pt_gps/%s to ID: %hu\n", config_item_name(&alua_tg_pt_gp_cg->cg_item), tg_pt_gp->tg_pt_gp_id); return count; } SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR); /* * members */ static ssize_t target_core_alua_tg_pt_gp_show_attr_members( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { struct se_port *port; struct se_portal_group *tpg; struct se_lun *lun; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; ssize_t len = 0, cur_len; unsigned char buf[TG_PT_GROUP_NAME_BUF]; memset(buf, 0, TG_PT_GROUP_NAME_BUF); spin_lock(&tg_pt_gp->tg_pt_gp_lock); list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) { port = tg_pt_gp_mem->tg_pt; tpg = port->sep_tpg; lun = port->sep_lun; cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu" "/%s\n", 
tpg->se_tpg_tfo->get_fabric_name(), tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpg->se_tpg_tfo->tpg_get_tag(tpg), config_item_name(&lun->lun_group.cg_item)); cur_len++; /* Extra byte for NULL terminator */ if ((cur_len + len) > PAGE_SIZE) { pr_warn("Ran out of lu_gp_show_attr" "_members buffer\n"); break; } memcpy(page+len, buf, cur_len); len += cur_len; } spin_unlock(&tg_pt_gp->tg_pt_gp_lock); return len; } SE_DEV_ALUA_TG_PT_ATTR_RO(members); CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp, tg_pt_gp_group); static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = { &target_core_alua_tg_pt_gp_alua_access_state.attr, &target_core_alua_tg_pt_gp_alua_access_status.attr, &target_core_alua_tg_pt_gp_alua_access_type.attr, &target_core_alua_tg_pt_gp_alua_write_metadata.attr, &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr, &target_core_alua_tg_pt_gp_trans_delay_msecs.attr, &target_core_alua_tg_pt_gp_implict_trans_secs.attr, &target_core_alua_tg_pt_gp_preferred.attr, &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr, &target_core_alua_tg_pt_gp_members.attr, NULL, }; static void target_core_alua_tg_pt_gp_release(struct config_item *item) { struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), struct t10_alua_tg_pt_gp, tg_pt_gp_group); core_alua_free_tg_pt_gp(tg_pt_gp); } static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { .release = target_core_alua_tg_pt_gp_release, .show_attribute = target_core_alua_tg_pt_gp_attr_show, .store_attribute = target_core_alua_tg_pt_gp_attr_store, }; static struct config_item_type target_core_alua_tg_pt_gp_cit = { .ct_item_ops = &target_core_alua_tg_pt_gp_ops, .ct_attrs = target_core_alua_tg_pt_gp_attrs, .ct_owner = THIS_MODULE, }; /* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ /* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */ static struct config_group *target_core_alua_create_tg_pt_gp( struct config_group *group, const char 
*name) { struct t10_alua *alua = container_of(group, struct t10_alua, alua_tg_pt_gps_group); struct t10_alua_tg_pt_gp *tg_pt_gp; struct config_group *alua_tg_pt_gp_cg = NULL; struct config_item *alua_tg_pt_gp_ci = NULL; tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0); if (!tg_pt_gp) return NULL; alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item; config_group_init_type_name(alua_tg_pt_gp_cg, name, &target_core_alua_tg_pt_gp_cit); pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port" " Group: alua/tg_pt_gps/%s\n", config_item_name(alua_tg_pt_gp_ci)); return alua_tg_pt_gp_cg; } static void target_core_alua_drop_tg_pt_gp( struct config_group *group, struct config_item *item) { struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), struct t10_alua_tg_pt_gp, tg_pt_gp_group); pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port" " Group: alua/tg_pt_gps/%s, ID: %hu\n", config_item_name(item), tg_pt_gp->tg_pt_gp_id); /* * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release() * -> target_core_alua_tg_pt_gp_release(). */ config_item_put(item); } static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { .make_group = &target_core_alua_create_tg_pt_gp, .drop_item = &target_core_alua_drop_tg_pt_gp, }; static struct config_item_type target_core_alua_tg_pt_gps_cit = { .ct_group_ops = &target_core_alua_tg_pt_gps_group_ops, .ct_owner = THIS_MODULE, }; /* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */ /* Start functions for struct config_item_type target_core_alua_cit */ /* * target_core_alua_cit is a ConfigFS group that lives under * /sys/kernel/config/target/core/alua. There are default groups * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to * target_core_alua_cit in target_core_init_configfs() below. 
*/ static struct config_item_type target_core_alua_cit = { .ct_item_ops = NULL, .ct_attrs = NULL, .ct_owner = THIS_MODULE, }; /* End functions for struct config_item_type target_core_alua_cit */ /* Start functions for struct config_item_type target_core_stat_cit */ static struct config_group *target_core_stat_mkdir( struct config_group *group, const char *name) { return ERR_PTR(-ENOSYS); } static void target_core_stat_rmdir( struct config_group *group, struct config_item *item) { return; } static struct configfs_group_operations target_core_stat_group_ops = { .make_group = &target_core_stat_mkdir, .drop_item = &target_core_stat_rmdir, }; static struct config_item_type target_core_stat_cit = { .ct_group_ops = &target_core_stat_group_ops, .ct_owner = THIS_MODULE, }; /* End functions for struct config_item_type target_core_stat_cit */ /* Start functions for struct config_item_type target_core_hba_cit */ static struct config_group *target_core_make_subdev( struct config_group *group, const char *name) { struct t10_alua_tg_pt_gp *tg_pt_gp; struct se_subsystem_api *t; struct config_item *hba_ci = &group->cg_item; struct se_hba *hba = item_to_hba(hba_ci); struct se_device *dev; struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL; struct config_group *dev_stat_grp = NULL; int errno = -ENOMEM, ret; ret = mutex_lock_interruptible(&hba->hba_access_mutex); if (ret) return ERR_PTR(ret); /* * Locate the struct se_subsystem_api from parent's struct se_hba. 
*/ t = hba->transport; dev = target_alloc_device(hba, name); if (!dev) goto out_unlock; dev_cg = &dev->dev_group; dev_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6, GFP_KERNEL); if (!dev_cg->default_groups) goto out_free_device; config_group_init_type_name(dev_cg, name, &target_core_dev_cit); config_group_init_type_name(&dev->dev_attrib.da_group, "attrib", &target_core_dev_attrib_cit); config_group_init_type_name(&dev->dev_pr_group, "pr", &target_core_dev_pr_cit); config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn", &target_core_dev_wwn_cit); config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group, "alua", &target_core_alua_tg_pt_gps_cit); config_group_init_type_name(&dev->dev_stat_grps.stat_group, "statistics", &target_core_stat_cit); dev_cg->default_groups[0] = &dev->dev_attrib.da_group; dev_cg->default_groups[1] = &dev->dev_pr_group; dev_cg->default_groups[2] = &dev->t10_wwn.t10_wwn_group; dev_cg->default_groups[3] = &dev->t10_alua.alua_tg_pt_gps_group; dev_cg->default_groups[4] = &dev->dev_stat_grps.stat_group; dev_cg->default_groups[5] = NULL; /* * Add core/$HBA/$DEV/alua/default_tg_pt_gp */ tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1); if (!tg_pt_gp) goto out_free_dev_cg_default_groups; dev->t10_alua.default_tg_pt_gp = tg_pt_gp; tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group; tg_pt_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2, GFP_KERNEL); if (!tg_pt_gp_cg->default_groups) { pr_err("Unable to allocate tg_pt_gp_cg->" "default_groups\n"); goto out_free_tg_pt_gp; } config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group, "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit); tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group; tg_pt_gp_cg->default_groups[1] = NULL; /* * Add core/$HBA/$DEV/statistics/ default groups */ dev_stat_grp = &dev->dev_stat_grps.stat_group; dev_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 4, GFP_KERNEL); if 
(!dev_stat_grp->default_groups) { pr_err("Unable to allocate dev_stat_grp->default_groups\n"); goto out_free_tg_pt_gp_cg_default_groups; } target_stat_setup_dev_default_groups(dev); mutex_unlock(&hba->hba_access_mutex); return dev_cg; out_free_tg_pt_gp_cg_default_groups: kfree(tg_pt_gp_cg->default_groups); out_free_tg_pt_gp: core_alua_free_tg_pt_gp(tg_pt_gp); out_free_dev_cg_default_groups: kfree(dev_cg->default_groups); out_free_device: target_free_device(dev); out_unlock: mutex_unlock(&hba->hba_access_mutex); return ERR_PTR(errno); } static void target_core_drop_subdev( struct config_group *group, struct config_item *item) { struct config_group *dev_cg = to_config_group(item); struct se_device *dev = container_of(dev_cg, struct se_device, dev_group); struct se_hba *hba; struct config_item *df_item; struct config_group *tg_pt_gp_cg, *dev_stat_grp; int i; hba = item_to_hba(&dev->se_hba->hba_group.cg_item); mutex_lock(&hba->hba_access_mutex); dev_stat_grp = &dev->dev_stat_grps.stat_group; for (i = 0; dev_stat_grp->default_groups[i]; i++) { df_item = &dev_stat_grp->default_groups[i]->cg_item; dev_stat_grp->default_groups[i] = NULL; config_item_put(df_item); } kfree(dev_stat_grp->default_groups); tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group; for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) { df_item = &tg_pt_gp_cg->default_groups[i]->cg_item; tg_pt_gp_cg->default_groups[i] = NULL; config_item_put(df_item); } kfree(tg_pt_gp_cg->default_groups); /* * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp * directly from target_core_alua_tg_pt_gp_release(). 
*/ dev->t10_alua.default_tg_pt_gp = NULL; for (i = 0; dev_cg->default_groups[i]; i++) { df_item = &dev_cg->default_groups[i]->cg_item; dev_cg->default_groups[i] = NULL; config_item_put(df_item); } /* * se_dev is released from target_core_dev_item_ops->release() */ config_item_put(item); mutex_unlock(&hba->hba_access_mutex); } static struct configfs_group_operations target_core_hba_group_ops = { .make_group = target_core_make_subdev, .drop_item = target_core_drop_subdev, }; CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba); #define SE_HBA_ATTR(_name, _mode) \ static struct target_core_hba_attribute \ target_core_hba_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ target_core_hba_show_attr_##_name, \ target_core_hba_store_attr_##_name); #define SE_HBA_ATTR_RO(_name) \ static struct target_core_hba_attribute \ target_core_hba_##_name = \ __CONFIGFS_EATTR_RO(_name, \ target_core_hba_show_attr_##_name); static ssize_t target_core_hba_show_attr_hba_info( struct se_hba *hba, char *page) { return sprintf(page, "HBA Index: %d plugin: %s version: %s\n", hba->hba_id, hba->transport->name, TARGET_CORE_CONFIGFS_VERSION); } SE_HBA_ATTR_RO(hba_info); static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba, char *page) { int hba_mode = 0; if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE) hba_mode = 1; return sprintf(page, "%d\n", hba_mode); } static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba, const char *page, size_t count) { struct se_subsystem_api *transport = hba->transport; unsigned long mode_flag; int ret; if (transport->pmode_enable_hba == NULL) return -EINVAL; ret = strict_strtoul(page, 0, &mode_flag); if (ret < 0) { pr_err("Unable to extract hba mode flag: %d\n", ret); return -EINVAL; } if (hba->dev_count) { pr_err("Unable to set hba_mode with active devices\n"); return -EINVAL; } ret = transport->pmode_enable_hba(hba, mode_flag); if (ret < 0) return -EINVAL; if (ret > 0) hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; else if (ret == 0) hba->hba_flags &= 
~HBA_FLAGS_PSCSI_MODE; return count; } SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR); CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group); static void target_core_hba_release(struct config_item *item) { struct se_hba *hba = container_of(to_config_group(item), struct se_hba, hba_group); core_delete_hba(hba); } static struct configfs_attribute *target_core_hba_attrs[] = { &target_core_hba_hba_info.attr, &target_core_hba_hba_mode.attr, NULL, }; static struct configfs_item_operations target_core_hba_item_ops = { .release = target_core_hba_release, .show_attribute = target_core_hba_attr_show, .store_attribute = target_core_hba_attr_store, }; static struct config_item_type target_core_hba_cit = { .ct_item_ops = &target_core_hba_item_ops, .ct_group_ops = &target_core_hba_group_ops, .ct_attrs = target_core_hba_attrs, .ct_owner = THIS_MODULE, }; static struct config_group *target_core_call_addhbatotarget( struct config_group *group, const char *name) { char *se_plugin_str, *str, *str2; struct se_hba *hba; char buf[TARGET_CORE_NAME_MAX_LEN]; unsigned long plugin_dep_id = 0; int ret; memset(buf, 0, TARGET_CORE_NAME_MAX_LEN); if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) { pr_err("Passed *name strlen(): %d exceeds" " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name), TARGET_CORE_NAME_MAX_LEN); return ERR_PTR(-ENAMETOOLONG); } snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name); str = strstr(buf, "_"); if (!str) { pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n"); return ERR_PTR(-EINVAL); } se_plugin_str = buf; /* * Special case for subsystem plugins that have "_" in their names. * Namely rd_direct and rd_mcp.. 
*/ str2 = strstr(str+1, "_"); if (str2) { *str2 = '\0'; /* Terminate for *se_plugin_str */ str2++; /* Skip to start of plugin dependent ID */ str = str2; } else { *str = '\0'; /* Terminate for *se_plugin_str */ str++; /* Skip to start of plugin dependent ID */ } ret = strict_strtoul(str, 0, &plugin_dep_id); if (ret < 0) { pr_err("strict_strtoul() returned %d for" " plugin_dep_id\n", ret); return ERR_PTR(-EINVAL); } /* * Load up TCM subsystem plugins if they have not already been loaded. */ transport_subsystem_check_init(); hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0); if (IS_ERR(hba)) return ERR_CAST(hba); config_group_init_type_name(&hba->hba_group, name, &target_core_hba_cit); return &hba->hba_group; } static void target_core_call_delhbafromtarget( struct config_group *group, struct config_item *item) { /* * core_delete_hba() is called from target_core_hba_item_ops->release() * -> target_core_hba_release() */ config_item_put(item); } static struct configfs_group_operations target_core_group_ops = { .make_group = target_core_call_addhbatotarget, .drop_item = target_core_call_delhbafromtarget, }; static struct config_item_type target_core_cit = { .ct_item_ops = NULL, .ct_group_ops = &target_core_group_ops, .ct_attrs = NULL, .ct_owner = THIS_MODULE, }; /* Stop functions for struct config_item_type target_core_hba_cit */ static int __init target_core_init_configfs(void) { struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; struct config_group *lu_gp_cg = NULL; struct configfs_subsystem *subsys; struct t10_alua_lu_gp *lu_gp; int ret; pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage" " Engine: %s on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); subsys = target_core_subsystem[0]; config_group_init(&subsys->su_group); mutex_init(&subsys->su_mutex); ret = init_se_kmem_caches(); if (ret < 0) return ret; /* * Create $CONFIGFS/target/core default group for HBA <-> Storage Object * and ALUA Logical Unit 
Group and Target Port Group infrastructure. */ target_cg = &subsys->su_group; target_cg->default_groups = kmalloc(sizeof(struct config_group) * 2, GFP_KERNEL); if (!target_cg->default_groups) { pr_err("Unable to allocate target_cg->default_groups\n"); ret = -ENOMEM; goto out_global; } config_group_init_type_name(&target_core_hbagroup, "core", &target_core_cit); target_cg->default_groups[0] = &target_core_hbagroup; target_cg->default_groups[1] = NULL; /* * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/ */ hba_cg = &target_core_hbagroup; hba_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2, GFP_KERNEL); if (!hba_cg->default_groups) { pr_err("Unable to allocate hba_cg->default_groups\n"); ret = -ENOMEM; goto out_global; } config_group_init_type_name(&alua_group, "alua", &target_core_alua_cit); hba_cg->default_groups[0] = &alua_group; hba_cg->default_groups[1] = NULL; /* * Add ALUA Logical Unit Group and Target Port Group ConfigFS * groups under /sys/kernel/config/target/core/alua/ */ alua_cg = &alua_group; alua_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2, GFP_KERNEL); if (!alua_cg->default_groups) { pr_err("Unable to allocate alua_cg->default_groups\n"); ret = -ENOMEM; goto out_global; } config_group_init_type_name(&alua_lu_gps_group, "lu_gps", &target_core_alua_lu_gps_cit); alua_cg->default_groups[0] = &alua_lu_gps_group; alua_cg->default_groups[1] = NULL; /* * Add core/alua/lu_gps/default_lu_gp */ lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1); if (IS_ERR(lu_gp)) { ret = -ENOMEM; goto out_global; } lu_gp_cg = &alua_lu_gps_group; lu_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2, GFP_KERNEL); if (!lu_gp_cg->default_groups) { pr_err("Unable to allocate lu_gp_cg->default_groups\n"); ret = -ENOMEM; goto out_global; } config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp", &target_core_alua_lu_gp_cit); lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group; lu_gp_cg->default_groups[1] = 
NULL; default_lu_gp = lu_gp; /* * Register the target_core_mod subsystem with configfs. */ ret = configfs_register_subsystem(subsys); if (ret < 0) { pr_err("Error %d while registering subsystem %s\n", ret, subsys->su_group.cg_item.ci_namebuf); goto out_global; } pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric" " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s" " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine); /* * Register built-in RAMDISK subsystem logic for virtual LUN 0 */ ret = rd_module_init(); if (ret < 0) goto out; ret = core_dev_setup_virtual_lun0(); if (ret < 0) goto out; return 0; out: configfs_unregister_subsystem(subsys); core_dev_release_virtual_lun0(); rd_module_exit(); out_global: if (default_lu_gp) { core_alua_free_lu_gp(default_lu_gp); default_lu_gp = NULL; } if (lu_gp_cg) kfree(lu_gp_cg->default_groups); if (alua_cg) kfree(alua_cg->default_groups); if (hba_cg) kfree(hba_cg->default_groups); kfree(target_cg->default_groups); release_se_kmem_caches(); return ret; } static void __exit target_core_exit_configfs(void) { struct configfs_subsystem *subsys; struct config_group *hba_cg, *alua_cg, *lu_gp_cg; struct config_item *item; int i; subsys = target_core_subsystem[0]; lu_gp_cg = &alua_lu_gps_group; for (i = 0; lu_gp_cg->default_groups[i]; i++) { item = &lu_gp_cg->default_groups[i]->cg_item; lu_gp_cg->default_groups[i] = NULL; config_item_put(item); } kfree(lu_gp_cg->default_groups); lu_gp_cg->default_groups = NULL; alua_cg = &alua_group; for (i = 0; alua_cg->default_groups[i]; i++) { item = &alua_cg->default_groups[i]->cg_item; alua_cg->default_groups[i] = NULL; config_item_put(item); } kfree(alua_cg->default_groups); alua_cg->default_groups = NULL; hba_cg = &target_core_hbagroup; for (i = 0; hba_cg->default_groups[i]; i++) { item = &hba_cg->default_groups[i]->cg_item; hba_cg->default_groups[i] = NULL; config_item_put(item); } kfree(hba_cg->default_groups); hba_cg->default_groups = NULL; /* * We expect 
subsys->su_group.default_groups to be released * by configfs subsystem provider logic.. */ configfs_unregister_subsystem(subsys); kfree(subsys->su_group.default_groups); core_alua_free_lu_gp(default_lu_gp); default_lu_gp = NULL; pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric" " Infrastructure\n"); core_dev_release_virtual_lun0(); rd_module_exit(); release_se_kmem_caches(); } MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS"); MODULE_AUTHOR("nab@Linux-iSCSI.org"); MODULE_LICENSE("GPL"); module_init(target_core_init_configfs); module_exit(target_core_exit_configfs);
gpl-2.0
sunnyden/ubuntu_kernel
drivers/hsi/clients/hsi_char.c
1863
19854
/* * HSI character device driver, implements the character device * interface. * * Copyright (C) 2010 Nokia Corporation. All rights reserved. * * Contact: Andras Domokos <andras.domokos@nokia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/errno.h> #include <linux/types.h> #include <linux/atomic.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/kmemleak.h> #include <linux/ioctl.h> #include <linux/wait.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/device.h> #include <linux/cdev.h> #include <linux/uaccess.h> #include <linux/scatterlist.h> #include <linux/stat.h> #include <linux/hsi/hsi.h> #include <linux/hsi/hsi_char.h> #define HSC_DEVS 16 /* Num of channels */ #define HSC_MSGS 4 #define HSC_RXBREAK 0 #define HSC_ID_BITS 6 #define HSC_PORT_ID_BITS 4 #define HSC_ID_MASK 3 #define HSC_PORT_ID_MASK 3 #define HSC_CH_MASK 0xf /* * We support up to 4 controllers that can have up to 4 * ports, which should currently be more than enough. 
*/ #define HSC_BASEMINOR(id, port_id) \ ((((id) & HSC_ID_MASK) << HSC_ID_BITS) | \ (((port_id) & HSC_PORT_ID_MASK) << HSC_PORT_ID_BITS)) enum { HSC_CH_OPEN, HSC_CH_READ, HSC_CH_WRITE, HSC_CH_WLINE, }; enum { HSC_RX, HSC_TX, }; struct hsc_client_data; /** * struct hsc_channel - hsi_char internal channel data * @ch: channel number * @flags: Keeps state of the channel (open/close, reading, writing) * @free_msgs_list: List of free HSI messages/requests * @rx_msgs_queue: List of pending RX requests * @tx_msgs_queue: List of pending TX requests * @lock: Serialize access to the lists * @cl: reference to the associated hsi_client * @cl_data: reference to the client data that this channels belongs to * @rx_wait: RX requests wait queue * @tx_wait: TX requests wait queue */ struct hsc_channel { unsigned int ch; unsigned long flags; struct list_head free_msgs_list; struct list_head rx_msgs_queue; struct list_head tx_msgs_queue; spinlock_t lock; struct hsi_client *cl; struct hsc_client_data *cl_data; wait_queue_head_t rx_wait; wait_queue_head_t tx_wait; }; /** * struct hsc_client_data - hsi_char internal client data * @cdev: Characther device associated to the hsi_client * @lock: Lock to serialize open/close access * @flags: Keeps track of port state (rx hwbreak armed) * @usecnt: Use count for claiming the HSI port (mutex protected) * @cl: Referece to the HSI client * @channels: Array of channels accessible by the client */ struct hsc_client_data { struct cdev cdev; struct mutex lock; unsigned long flags; unsigned int usecnt; struct hsi_client *cl; struct hsc_channel channels[HSC_DEVS]; }; /* Stores the major number dynamically allocated for hsi_char */ static unsigned int hsc_major; /* Maximum buffer size that hsi_char will accept from userspace */ static unsigned int max_data_size = 0x1000; module_param(max_data_size, uint, 0); MODULE_PARM_DESC(max_data_size, "max read/write data size [4,8..65536] (^2)"); static void hsc_add_tail(struct hsc_channel *channel, struct hsi_msg 
*msg, struct list_head *queue) { unsigned long flags; spin_lock_irqsave(&channel->lock, flags); list_add_tail(&msg->link, queue); spin_unlock_irqrestore(&channel->lock, flags); } static struct hsi_msg *hsc_get_first_msg(struct hsc_channel *channel, struct list_head *queue) { struct hsi_msg *msg = NULL; unsigned long flags; spin_lock_irqsave(&channel->lock, flags); if (list_empty(queue)) goto out; msg = list_first_entry(queue, struct hsi_msg, link); list_del(&msg->link); out: spin_unlock_irqrestore(&channel->lock, flags); return msg; } static inline void hsc_msg_free(struct hsi_msg *msg) { kfree(sg_virt(msg->sgt.sgl)); hsi_free_msg(msg); } static void hsc_free_list(struct list_head *list) { struct hsi_msg *msg, *tmp; list_for_each_entry_safe(msg, tmp, list, link) { list_del(&msg->link); hsc_msg_free(msg); } } static void hsc_reset_list(struct hsc_channel *channel, struct list_head *l) { unsigned long flags; LIST_HEAD(list); spin_lock_irqsave(&channel->lock, flags); list_splice_init(l, &list); spin_unlock_irqrestore(&channel->lock, flags); hsc_free_list(&list); } static inline struct hsi_msg *hsc_msg_alloc(unsigned int alloc_size) { struct hsi_msg *msg; void *buf; msg = hsi_alloc_msg(1, GFP_KERNEL); if (!msg) goto out; buf = kmalloc(alloc_size, GFP_KERNEL); if (!buf) { hsi_free_msg(msg); goto out; } sg_init_one(msg->sgt.sgl, buf, alloc_size); /* Ignore false positive, due to sg pointer handling */ kmemleak_ignore(buf); return msg; out: return NULL; } static inline int hsc_msgs_alloc(struct hsc_channel *channel) { struct hsi_msg *msg; int i; for (i = 0; i < HSC_MSGS; i++) { msg = hsc_msg_alloc(max_data_size); if (!msg) goto out; msg->channel = channel->ch; list_add_tail(&msg->link, &channel->free_msgs_list); } return 0; out: hsc_free_list(&channel->free_msgs_list); return -ENOMEM; } static inline unsigned int hsc_msg_len_get(struct hsi_msg *msg) { return msg->sgt.sgl->length; } static inline void hsc_msg_len_set(struct hsi_msg *msg, unsigned int len) { 
msg->sgt.sgl->length = len; } static void hsc_rx_completed(struct hsi_msg *msg) { struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl); struct hsc_channel *channel = cl_data->channels + msg->channel; if (test_bit(HSC_CH_READ, &channel->flags)) { hsc_add_tail(channel, msg, &channel->rx_msgs_queue); wake_up(&channel->rx_wait); } else { hsc_add_tail(channel, msg, &channel->free_msgs_list); } } static void hsc_rx_msg_destructor(struct hsi_msg *msg) { msg->status = HSI_STATUS_ERROR; hsc_msg_len_set(msg, 0); hsc_rx_completed(msg); } static void hsc_tx_completed(struct hsi_msg *msg) { struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl); struct hsc_channel *channel = cl_data->channels + msg->channel; if (test_bit(HSC_CH_WRITE, &channel->flags)) { hsc_add_tail(channel, msg, &channel->tx_msgs_queue); wake_up(&channel->tx_wait); } else { hsc_add_tail(channel, msg, &channel->free_msgs_list); } } static void hsc_tx_msg_destructor(struct hsi_msg *msg) { msg->status = HSI_STATUS_ERROR; hsc_msg_len_set(msg, 0); hsc_tx_completed(msg); } static void hsc_break_req_destructor(struct hsi_msg *msg) { struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl); hsi_free_msg(msg); clear_bit(HSC_RXBREAK, &cl_data->flags); } static void hsc_break_received(struct hsi_msg *msg) { struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl); struct hsc_channel *channel = cl_data->channels; int i, ret; /* Broadcast HWBREAK on all channels */ for (i = 0; i < HSC_DEVS; i++, channel++) { struct hsi_msg *msg2; if (!test_bit(HSC_CH_READ, &channel->flags)) continue; msg2 = hsc_get_first_msg(channel, &channel->free_msgs_list); if (!msg2) continue; clear_bit(HSC_CH_READ, &channel->flags); hsc_msg_len_set(msg2, 0); msg2->status = HSI_STATUS_COMPLETED; hsc_add_tail(channel, msg2, &channel->rx_msgs_queue); wake_up(&channel->rx_wait); } hsi_flush(msg->cl); ret = hsi_async_read(msg->cl, msg); if (ret < 0) hsc_break_req_destructor(msg); } static int hsc_break_request(struct hsi_client 
*cl) { struct hsc_client_data *cl_data = hsi_client_drvdata(cl); struct hsi_msg *msg; int ret; if (test_and_set_bit(HSC_RXBREAK, &cl_data->flags)) return -EBUSY; msg = hsi_alloc_msg(0, GFP_KERNEL); if (!msg) { clear_bit(HSC_RXBREAK, &cl_data->flags); return -ENOMEM; } msg->break_frame = 1; msg->complete = hsc_break_received; msg->destructor = hsc_break_req_destructor; ret = hsi_async_read(cl, msg); if (ret < 0) hsc_break_req_destructor(msg); return ret; } static int hsc_break_send(struct hsi_client *cl) { struct hsi_msg *msg; int ret; msg = hsi_alloc_msg(0, GFP_ATOMIC); if (!msg) return -ENOMEM; msg->break_frame = 1; msg->complete = hsi_free_msg; msg->destructor = hsi_free_msg; ret = hsi_async_write(cl, msg); if (ret < 0) hsi_free_msg(msg); return ret; } static int hsc_rx_set(struct hsi_client *cl, struct hsc_rx_config *rxc) { struct hsi_config tmp; int ret; if ((rxc->mode != HSI_MODE_STREAM) && (rxc->mode != HSI_MODE_FRAME)) return -EINVAL; if ((rxc->channels == 0) || (rxc->channels > HSC_DEVS)) return -EINVAL; if (rxc->channels & (rxc->channels - 1)) return -EINVAL; if ((rxc->flow != HSI_FLOW_SYNC) && (rxc->flow != HSI_FLOW_PIPE)) return -EINVAL; tmp = cl->rx_cfg; cl->rx_cfg.mode = rxc->mode; cl->rx_cfg.num_hw_channels = rxc->channels; cl->rx_cfg.flow = rxc->flow; ret = hsi_setup(cl); if (ret < 0) { cl->rx_cfg = tmp; return ret; } if (rxc->mode == HSI_MODE_FRAME) hsc_break_request(cl); return ret; } static inline void hsc_rx_get(struct hsi_client *cl, struct hsc_rx_config *rxc) { rxc->mode = cl->rx_cfg.mode; rxc->channels = cl->rx_cfg.num_hw_channels; rxc->flow = cl->rx_cfg.flow; } static int hsc_tx_set(struct hsi_client *cl, struct hsc_tx_config *txc) { struct hsi_config tmp; int ret; if ((txc->mode != HSI_MODE_STREAM) && (txc->mode != HSI_MODE_FRAME)) return -EINVAL; if ((txc->channels == 0) || (txc->channels > HSC_DEVS)) return -EINVAL; if (txc->channels & (txc->channels - 1)) return -EINVAL; if ((txc->arb_mode != HSI_ARB_RR) && (txc->arb_mode != 
HSI_ARB_PRIO)) return -EINVAL; tmp = cl->tx_cfg; cl->tx_cfg.mode = txc->mode; cl->tx_cfg.num_hw_channels = txc->channels; cl->tx_cfg.speed = txc->speed; cl->tx_cfg.arb_mode = txc->arb_mode; ret = hsi_setup(cl); if (ret < 0) { cl->tx_cfg = tmp; return ret; } return ret; } static inline void hsc_tx_get(struct hsi_client *cl, struct hsc_tx_config *txc) { txc->mode = cl->tx_cfg.mode; txc->channels = cl->tx_cfg.num_hw_channels; txc->speed = cl->tx_cfg.speed; txc->arb_mode = cl->tx_cfg.arb_mode; } static ssize_t hsc_read(struct file *file, char __user *buf, size_t len, loff_t *ppos __maybe_unused) { struct hsc_channel *channel = file->private_data; struct hsi_msg *msg; ssize_t ret; if (len == 0) return 0; if (!IS_ALIGNED(len, sizeof(u32))) return -EINVAL; if (len > max_data_size) len = max_data_size; if (channel->ch >= channel->cl->rx_cfg.num_hw_channels) return -ECHRNG; if (test_and_set_bit(HSC_CH_READ, &channel->flags)) return -EBUSY; msg = hsc_get_first_msg(channel, &channel->free_msgs_list); if (!msg) { ret = -ENOSPC; goto out; } hsc_msg_len_set(msg, len); msg->complete = hsc_rx_completed; msg->destructor = hsc_rx_msg_destructor; ret = hsi_async_read(channel->cl, msg); if (ret < 0) { hsc_add_tail(channel, msg, &channel->free_msgs_list); goto out; } ret = wait_event_interruptible(channel->rx_wait, !list_empty(&channel->rx_msgs_queue)); if (ret < 0) { clear_bit(HSC_CH_READ, &channel->flags); hsi_flush(channel->cl); return -EINTR; } msg = hsc_get_first_msg(channel, &channel->rx_msgs_queue); if (msg) { if (msg->status != HSI_STATUS_ERROR) { ret = copy_to_user((void __user *)buf, sg_virt(msg->sgt.sgl), hsc_msg_len_get(msg)); if (ret) ret = -EFAULT; else ret = hsc_msg_len_get(msg); } else { ret = -EIO; } hsc_add_tail(channel, msg, &channel->free_msgs_list); } out: clear_bit(HSC_CH_READ, &channel->flags); return ret; } static ssize_t hsc_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos __maybe_unused) { struct hsc_channel *channel = 
file->private_data; struct hsi_msg *msg; ssize_t ret; if ((len == 0) || !IS_ALIGNED(len, sizeof(u32))) return -EINVAL; if (len > max_data_size) len = max_data_size; if (channel->ch >= channel->cl->tx_cfg.num_hw_channels) return -ECHRNG; if (test_and_set_bit(HSC_CH_WRITE, &channel->flags)) return -EBUSY; msg = hsc_get_first_msg(channel, &channel->free_msgs_list); if (!msg) { clear_bit(HSC_CH_WRITE, &channel->flags); return -ENOSPC; } if (copy_from_user(sg_virt(msg->sgt.sgl), (void __user *)buf, len)) { ret = -EFAULT; goto out; } hsc_msg_len_set(msg, len); msg->complete = hsc_tx_completed; msg->destructor = hsc_tx_msg_destructor; ret = hsi_async_write(channel->cl, msg); if (ret < 0) goto out; ret = wait_event_interruptible(channel->tx_wait, !list_empty(&channel->tx_msgs_queue)); if (ret < 0) { clear_bit(HSC_CH_WRITE, &channel->flags); hsi_flush(channel->cl); return -EINTR; } msg = hsc_get_first_msg(channel, &channel->tx_msgs_queue); if (msg) { if (msg->status == HSI_STATUS_ERROR) ret = -EIO; else ret = hsc_msg_len_get(msg); hsc_add_tail(channel, msg, &channel->free_msgs_list); } out: clear_bit(HSC_CH_WRITE, &channel->flags); return ret; } static long hsc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct hsc_channel *channel = file->private_data; unsigned int state; struct hsc_rx_config rxc; struct hsc_tx_config txc; long ret = 0; switch (cmd) { case HSC_RESET: hsi_flush(channel->cl); break; case HSC_SET_PM: if (copy_from_user(&state, (void __user *)arg, sizeof(state))) return -EFAULT; if (state == HSC_PM_DISABLE) { if (test_and_set_bit(HSC_CH_WLINE, &channel->flags)) return -EINVAL; ret = hsi_start_tx(channel->cl); } else if (state == HSC_PM_ENABLE) { if (!test_and_clear_bit(HSC_CH_WLINE, &channel->flags)) return -EINVAL; ret = hsi_stop_tx(channel->cl); } else { ret = -EINVAL; } break; case HSC_SEND_BREAK: return hsc_break_send(channel->cl); case HSC_SET_RX: if (copy_from_user(&rxc, (void __user *)arg, sizeof(rxc))) return -EFAULT; return 
hsc_rx_set(channel->cl, &rxc); case HSC_GET_RX: hsc_rx_get(channel->cl, &rxc); if (copy_to_user((void __user *)arg, &rxc, sizeof(rxc))) return -EFAULT; break; case HSC_SET_TX: if (copy_from_user(&txc, (void __user *)arg, sizeof(txc))) return -EFAULT; return hsc_tx_set(channel->cl, &txc); case HSC_GET_TX: hsc_tx_get(channel->cl, &txc); if (copy_to_user((void __user *)arg, &txc, sizeof(txc))) return -EFAULT; break; default: return -ENOIOCTLCMD; } return ret; } static inline void __hsc_port_release(struct hsc_client_data *cl_data) { BUG_ON(cl_data->usecnt == 0); if (--cl_data->usecnt == 0) { hsi_flush(cl_data->cl); hsi_release_port(cl_data->cl); } } static int hsc_open(struct inode *inode, struct file *file) { struct hsc_client_data *cl_data; struct hsc_channel *channel; int ret = 0; pr_debug("open, minor = %d\n", iminor(inode)); cl_data = container_of(inode->i_cdev, struct hsc_client_data, cdev); mutex_lock(&cl_data->lock); channel = cl_data->channels + (iminor(inode) & HSC_CH_MASK); if (test_and_set_bit(HSC_CH_OPEN, &channel->flags)) { ret = -EBUSY; goto out; } /* * Check if we have already claimed the port associated to the HSI * client. 
If not then try to claim it, else increase its refcount */ if (cl_data->usecnt == 0) { ret = hsi_claim_port(cl_data->cl, 0); if (ret < 0) goto out; hsi_setup(cl_data->cl); } cl_data->usecnt++; ret = hsc_msgs_alloc(channel); if (ret < 0) { __hsc_port_release(cl_data); goto out; } file->private_data = channel; mutex_unlock(&cl_data->lock); return ret; out: mutex_unlock(&cl_data->lock); return ret; } static int hsc_release(struct inode *inode __maybe_unused, struct file *file) { struct hsc_channel *channel = file->private_data; struct hsc_client_data *cl_data = channel->cl_data; mutex_lock(&cl_data->lock); file->private_data = NULL; if (test_and_clear_bit(HSC_CH_WLINE, &channel->flags)) hsi_stop_tx(channel->cl); __hsc_port_release(cl_data); hsc_reset_list(channel, &channel->rx_msgs_queue); hsc_reset_list(channel, &channel->tx_msgs_queue); hsc_reset_list(channel, &channel->free_msgs_list); clear_bit(HSC_CH_READ, &channel->flags); clear_bit(HSC_CH_WRITE, &channel->flags); clear_bit(HSC_CH_OPEN, &channel->flags); wake_up(&channel->rx_wait); wake_up(&channel->tx_wait); mutex_unlock(&cl_data->lock); return 0; } static const struct file_operations hsc_fops = { .owner = THIS_MODULE, .read = hsc_read, .write = hsc_write, .unlocked_ioctl = hsc_ioctl, .open = hsc_open, .release = hsc_release, }; static void hsc_channel_init(struct hsc_channel *channel) { init_waitqueue_head(&channel->rx_wait); init_waitqueue_head(&channel->tx_wait); spin_lock_init(&channel->lock); INIT_LIST_HEAD(&channel->free_msgs_list); INIT_LIST_HEAD(&channel->rx_msgs_queue); INIT_LIST_HEAD(&channel->tx_msgs_queue); } static int hsc_probe(struct device *dev) { const char devname[] = "hsi_char"; struct hsc_client_data *cl_data; struct hsc_channel *channel; struct hsi_client *cl = to_hsi_client(dev); unsigned int hsc_baseminor; dev_t hsc_dev; int ret; int i; cl_data = kzalloc(sizeof(*cl_data), GFP_KERNEL); if (!cl_data) { dev_err(dev, "Could not allocate hsc_client_data\n"); return -ENOMEM; } hsc_baseminor = 
HSC_BASEMINOR(hsi_id(cl), hsi_port_id(cl)); if (!hsc_major) { ret = alloc_chrdev_region(&hsc_dev, hsc_baseminor, HSC_DEVS, devname); if (ret == 0) hsc_major = MAJOR(hsc_dev); } else { hsc_dev = MKDEV(hsc_major, hsc_baseminor); ret = register_chrdev_region(hsc_dev, HSC_DEVS, devname); } if (ret < 0) { dev_err(dev, "Device %s allocation failed %d\n", hsc_major ? "minor" : "major", ret); goto out1; } mutex_init(&cl_data->lock); hsi_client_set_drvdata(cl, cl_data); cdev_init(&cl_data->cdev, &hsc_fops); cl_data->cdev.owner = THIS_MODULE; cl_data->cl = cl; for (i = 0, channel = cl_data->channels; i < HSC_DEVS; i++, channel++) { hsc_channel_init(channel); channel->ch = i; channel->cl = cl; channel->cl_data = cl_data; } /* 1 hsi client -> N char devices (one for each channel) */ ret = cdev_add(&cl_data->cdev, hsc_dev, HSC_DEVS); if (ret) { dev_err(dev, "Could not add char device %d\n", ret); goto out2; } return 0; out2: unregister_chrdev_region(hsc_dev, HSC_DEVS); out1: kfree(cl_data); return ret; } static int hsc_remove(struct device *dev) { struct hsi_client *cl = to_hsi_client(dev); struct hsc_client_data *cl_data = hsi_client_drvdata(cl); dev_t hsc_dev = cl_data->cdev.dev; cdev_del(&cl_data->cdev); unregister_chrdev_region(hsc_dev, HSC_DEVS); hsi_client_set_drvdata(cl, NULL); kfree(cl_data); return 0; } static struct hsi_client_driver hsc_driver = { .driver = { .name = "hsi_char", .owner = THIS_MODULE, .probe = hsc_probe, .remove = hsc_remove, }, }; static int __init hsc_init(void) { int ret; if ((max_data_size < 4) || (max_data_size > 0x10000) || (max_data_size & (max_data_size - 1))) { pr_err("Invalid max read/write data size"); return -EINVAL; } ret = hsi_register_client_driver(&hsc_driver); if (ret) { pr_err("Error while registering HSI/SSI driver %d", ret); return ret; } pr_info("HSI/SSI char device loaded\n"); return 0; } module_init(hsc_init); static void __exit hsc_exit(void) { hsi_unregister_client_driver(&hsc_driver); pr_info("HSI char device removed\n"); } 
module_exit(hsc_exit); MODULE_AUTHOR("Andras Domokos <andras.domokos@nokia.com>"); MODULE_ALIAS("hsi:hsi_char"); MODULE_DESCRIPTION("HSI character device"); MODULE_LICENSE("GPL v2");
gpl-2.0
Zex/linux
drivers/media/tuners/fc0012.c
2119
12044
/* * Fitipower FC0012 tuner driver * * Copyright (C) 2012 Hans-Frieder Vogt <hfvogt@gmx.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "fc0012.h" #include "fc0012-priv.h" static int fc0012_writereg(struct fc0012_priv *priv, u8 reg, u8 val) { u8 buf[2] = {reg, val}; struct i2c_msg msg = { .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = 2 }; if (i2c_transfer(priv->i2c, &msg, 1) != 1) { dev_err(&priv->i2c->dev, "%s: I2C write reg failed, reg: %02x, val: %02x\n", KBUILD_MODNAME, reg, val); return -EREMOTEIO; } return 0; } static int fc0012_readreg(struct fc0012_priv *priv, u8 reg, u8 *val) { struct i2c_msg msg[2] = { { .addr = priv->cfg->i2c_address, .flags = 0, .buf = &reg, .len = 1 }, { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .buf = val, .len = 1 }, }; if (i2c_transfer(priv->i2c, msg, 2) != 2) { dev_err(&priv->i2c->dev, "%s: I2C read reg failed, reg: %02x\n", KBUILD_MODNAME, reg); return -EREMOTEIO; } return 0; } static int fc0012_release(struct dvb_frontend *fe) { kfree(fe->tuner_priv); fe->tuner_priv = NULL; return 0; } static int fc0012_init(struct dvb_frontend *fe) { struct fc0012_priv *priv = fe->tuner_priv; int i, ret = 0; unsigned char reg[] = { 0x00, /* dummy reg. 0 */ 0x05, /* reg. 0x01 */ 0x10, /* reg. 0x02 */ 0x00, /* reg. 0x03 */ 0x00, /* reg. 0x04 */ 0x0f, /* reg. 
0x05: may also be 0x0a */ 0x00, /* reg. 0x06: divider 2, VCO slow */ 0x00, /* reg. 0x07: may also be 0x0f */ 0xff, /* reg. 0x08: AGC Clock divide by 256, AGC gain 1/256, Loop Bw 1/8 */ 0x6e, /* reg. 0x09: Disable LoopThrough, Enable LoopThrough: 0x6f */ 0xb8, /* reg. 0x0a: Disable LO Test Buffer */ 0x82, /* reg. 0x0b: Output Clock is same as clock frequency, may also be 0x83 */ 0xfc, /* reg. 0x0c: depending on AGC Up-Down mode, may need 0xf8 */ 0x02, /* reg. 0x0d: AGC Not Forcing & LNA Forcing, 0x02 for DVB-T */ 0x00, /* reg. 0x0e */ 0x00, /* reg. 0x0f */ 0x00, /* reg. 0x10: may also be 0x0d */ 0x00, /* reg. 0x11 */ 0x1f, /* reg. 0x12: Set to maximum gain */ 0x08, /* reg. 0x13: Set to Middle Gain: 0x08, Low Gain: 0x00, High Gain: 0x10, enable IX2: 0x80 */ 0x00, /* reg. 0x14 */ 0x04, /* reg. 0x15: Enable LNA COMPS */ }; switch (priv->cfg->xtal_freq) { case FC_XTAL_27_MHZ: case FC_XTAL_28_8_MHZ: reg[0x07] |= 0x20; break; case FC_XTAL_36_MHZ: default: break; } if (priv->cfg->dual_master) reg[0x0c] |= 0x02; if (priv->cfg->loop_through) reg[0x09] |= 0x01; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */ for (i = 1; i < sizeof(reg); i++) { ret = fc0012_writereg(priv, i, reg[i]); if (ret) break; } if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */ if (ret) dev_err(&priv->i2c->dev, "%s: fc0012_writereg failed: %d\n", KBUILD_MODNAME, ret); return ret; } static int fc0012_set_params(struct dvb_frontend *fe) { struct fc0012_priv *priv = fe->tuner_priv; int i, ret = 0; struct dtv_frontend_properties *p = &fe->dtv_property_cache; u32 freq = p->frequency / 1000; u32 delsys = p->delivery_system; unsigned char reg[7], am, pm, multi, tmp; unsigned long f_vco; unsigned short xtal_freq_khz_2, xin, xdiv; bool vco_select = false; if (fe->callback) { ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER, FC_FE_CALLBACK_VHF_ENABLE, (freq > 300000 ? 
0 : 1)); if (ret) goto exit; } switch (priv->cfg->xtal_freq) { case FC_XTAL_27_MHZ: xtal_freq_khz_2 = 27000 / 2; break; case FC_XTAL_36_MHZ: xtal_freq_khz_2 = 36000 / 2; break; case FC_XTAL_28_8_MHZ: default: xtal_freq_khz_2 = 28800 / 2; break; } /* select frequency divider and the frequency of VCO */ if (freq < 37084) { /* freq * 96 < 3560000 */ multi = 96; reg[5] = 0x82; reg[6] = 0x00; } else if (freq < 55625) { /* freq * 64 < 3560000 */ multi = 64; reg[5] = 0x82; reg[6] = 0x02; } else if (freq < 74167) { /* freq * 48 < 3560000 */ multi = 48; reg[5] = 0x42; reg[6] = 0x00; } else if (freq < 111250) { /* freq * 32 < 3560000 */ multi = 32; reg[5] = 0x42; reg[6] = 0x02; } else if (freq < 148334) { /* freq * 24 < 3560000 */ multi = 24; reg[5] = 0x22; reg[6] = 0x00; } else if (freq < 222500) { /* freq * 16 < 3560000 */ multi = 16; reg[5] = 0x22; reg[6] = 0x02; } else if (freq < 296667) { /* freq * 12 < 3560000 */ multi = 12; reg[5] = 0x12; reg[6] = 0x00; } else if (freq < 445000) { /* freq * 8 < 3560000 */ multi = 8; reg[5] = 0x12; reg[6] = 0x02; } else if (freq < 593334) { /* freq * 6 < 3560000 */ multi = 6; reg[5] = 0x0a; reg[6] = 0x00; } else { multi = 4; reg[5] = 0x0a; reg[6] = 0x02; } f_vco = freq * multi; if (f_vco >= 3060000) { reg[6] |= 0x08; vco_select = true; } if (freq >= 45000) { /* From divided value (XDIV) determined the FA and FP value */ xdiv = (unsigned short)(f_vco / xtal_freq_khz_2); if ((f_vco - xdiv * xtal_freq_khz_2) >= (xtal_freq_khz_2 / 2)) xdiv++; pm = (unsigned char)(xdiv / 8); am = (unsigned char)(xdiv - (8 * pm)); if (am < 2) { reg[1] = am + 8; reg[2] = pm - 1; } else { reg[1] = am; reg[2] = pm; } } else { /* fix for frequency less than 45 MHz */ reg[1] = 0x06; reg[2] = 0x11; } /* fix clock out */ reg[6] |= 0x20; /* From VCO frequency determines the XIN ( fractional part of Delta Sigma PLL) and divided value (XDIV) */ xin = (unsigned short)(f_vco - (f_vco / xtal_freq_khz_2) * xtal_freq_khz_2); xin = (xin << 15) / xtal_freq_khz_2; if (xin >= 
16384) xin += 32768; reg[3] = xin >> 8; /* xin with 9 bit resolution */ reg[4] = xin & 0xff; if (delsys == SYS_DVBT) { reg[6] &= 0x3f; /* bits 6 and 7 describe the bandwidth */ switch (p->bandwidth_hz) { case 6000000: reg[6] |= 0x80; break; case 7000000: reg[6] |= 0x40; break; case 8000000: default: break; } } else { dev_err(&priv->i2c->dev, "%s: modulation type not supported!\n", KBUILD_MODNAME); return -EINVAL; } /* modified for Realtek demod */ reg[5] |= 0x07; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */ for (i = 1; i <= 6; i++) { ret = fc0012_writereg(priv, i, reg[i]); if (ret) goto exit; } /* VCO Calibration */ ret = fc0012_writereg(priv, 0x0e, 0x80); if (!ret) ret = fc0012_writereg(priv, 0x0e, 0x00); /* VCO Re-Calibration if needed */ if (!ret) ret = fc0012_writereg(priv, 0x0e, 0x00); if (!ret) { msleep(10); ret = fc0012_readreg(priv, 0x0e, &tmp); } if (ret) goto exit; /* vco selection */ tmp &= 0x3f; if (vco_select) { if (tmp > 0x3c) { reg[6] &= ~0x08; ret = fc0012_writereg(priv, 0x06, reg[6]); if (!ret) ret = fc0012_writereg(priv, 0x0e, 0x80); if (!ret) ret = fc0012_writereg(priv, 0x0e, 0x00); } } else { if (tmp < 0x02) { reg[6] |= 0x08; ret = fc0012_writereg(priv, 0x06, reg[6]); if (!ret) ret = fc0012_writereg(priv, 0x0e, 0x80); if (!ret) ret = fc0012_writereg(priv, 0x0e, 0x00); } } priv->frequency = p->frequency; priv->bandwidth = p->bandwidth_hz; exit: if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */ if (ret) dev_warn(&priv->i2c->dev, "%s: %s failed: %d\n", KBUILD_MODNAME, __func__, ret); return ret; } static int fc0012_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct fc0012_priv *priv = fe->tuner_priv; *frequency = priv->frequency; return 0; } static int fc0012_get_if_frequency(struct dvb_frontend *fe, u32 *frequency) { *frequency = 0; /* Zero-IF */ return 0; } static int fc0012_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth) { struct fc0012_priv *priv = fe->tuner_priv; 
*bandwidth = priv->bandwidth; return 0; } #define INPUT_ADC_LEVEL -8 static int fc0012_get_rf_strength(struct dvb_frontend *fe, u16 *strength) { struct fc0012_priv *priv = fe->tuner_priv; int ret; unsigned char tmp; int int_temp, lna_gain, int_lna, tot_agc_gain, power; const int fc0012_lna_gain_table[] = { /* low gain */ -63, -58, -99, -73, -63, -65, -54, -60, /* middle gain */ 71, 70, 68, 67, 65, 63, 61, 58, /* high gain */ 197, 191, 188, 186, 184, 182, 181, 179, }; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */ ret = fc0012_writereg(priv, 0x12, 0x00); if (ret) goto err; ret = fc0012_readreg(priv, 0x12, &tmp); if (ret) goto err; int_temp = tmp; ret = fc0012_readreg(priv, 0x13, &tmp); if (ret) goto err; lna_gain = tmp & 0x1f; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */ if (lna_gain < ARRAY_SIZE(fc0012_lna_gain_table)) { int_lna = fc0012_lna_gain_table[lna_gain]; tot_agc_gain = (abs((int_temp >> 5) - 7) - 2 + (int_temp & 0x1f)) * 2; power = INPUT_ADC_LEVEL - tot_agc_gain - int_lna / 10; if (power >= 45) *strength = 255; /* 100% */ else if (power < -95) *strength = 0; else *strength = (power + 95) * 255 / 140; *strength |= *strength << 8; } else { ret = -1; } goto exit; err: if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */ exit: if (ret) dev_warn(&priv->i2c->dev, "%s: %s failed: %d\n", KBUILD_MODNAME, __func__, ret); return ret; } static const struct dvb_tuner_ops fc0012_tuner_ops = { .info = { .name = "Fitipower FC0012", .frequency_min = 37000000, /* estimate */ .frequency_max = 862000000, /* estimate */ .frequency_step = 0, }, .release = fc0012_release, .init = fc0012_init, .set_params = fc0012_set_params, .get_frequency = fc0012_get_frequency, .get_if_frequency = fc0012_get_if_frequency, .get_bandwidth = fc0012_get_bandwidth, .get_rf_strength = fc0012_get_rf_strength, }; struct dvb_frontend *fc0012_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct 
fc0012_config *cfg) { struct fc0012_priv *priv; int ret; u8 chip_id; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); priv = kzalloc(sizeof(struct fc0012_priv), GFP_KERNEL); if (!priv) { ret = -ENOMEM; dev_err(&i2c->dev, "%s: kzalloc() failed\n", KBUILD_MODNAME); goto err; } priv->cfg = cfg; priv->i2c = i2c; /* check if the tuner is there */ ret = fc0012_readreg(priv, 0x00, &chip_id); if (ret < 0) goto err; dev_dbg(&i2c->dev, "%s: chip_id=%02x\n", __func__, chip_id); switch (chip_id) { case 0xa1: break; default: ret = -ENODEV; goto err; } dev_info(&i2c->dev, "%s: Fitipower FC0012 successfully identified\n", KBUILD_MODNAME); if (priv->cfg->loop_through) { ret = fc0012_writereg(priv, 0x09, 0x6f); if (ret < 0) goto err; } /* * TODO: Clock out en or div? * For dual tuner configuration clearing bit [0] is required. */ if (priv->cfg->clock_out) { ret = fc0012_writereg(priv, 0x0b, 0x82); if (ret < 0) goto err; } fe->tuner_priv = priv; memcpy(&fe->ops.tuner_ops, &fc0012_tuner_ops, sizeof(struct dvb_tuner_ops)); err: if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); if (ret) { dev_dbg(&i2c->dev, "%s: failed: %d\n", __func__, ret); kfree(priv); return NULL; } return fe; } EXPORT_SYMBOL(fc0012_attach); MODULE_DESCRIPTION("Fitipower FC0012 silicon tuner driver"); MODULE_AUTHOR("Hans-Frieder Vogt <hfvogt@gmx.net>"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.6");
gpl-2.0
cholokei/msm8660_test_kernel-1
drivers/scsi/lpfc/lpfc_debugfs.c
2375
85507
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2007-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/idr.h> #include <linux/interrupt.h> #include <linux/kthread.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/ctype.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_version.h" #include "lpfc_compat.h" #include "lpfc_debugfs.h" #ifdef CONFIG_SCSI_LPFC_DEBUG_FS /* * debugfs interface * * To access this interface the user should: * # mount -t debugfs none /sys/kernel/debug * * The lpfc debugfs directory hierarchy is: * /sys/kernel/debug/lpfc/fnX/vportY * where X is the lpfc hba 
function unique_id
 * where Y is the vport VPI on that hba
 *
 * Debugging services available per vport:
 * discovery_trace
 *   This is an ASCII readable file that contains a trace of the last
 *   lpfc_debugfs_max_disc_trc events that happened on a specific vport.
 *   See lpfc_debugfs.h for different categories of  discovery events.
 *   To enable the discovery trace, the following module parameters must be set:
 *   lpfc_debugfs_enable=1         Turns on lpfc debugfs filesystem support
 *   lpfc_debugfs_max_disc_trc=X   Where X is the event trace depth for
 *                                 EACH vport. X MUST also be a power of 2.
 *   lpfc_debugfs_mask_disc_trc=Y  Where Y is an event mask as defined in
 *                                 lpfc_debugfs.h .
 *
 * slow_ring_trace
 *   This is an ASCII readable file that contains a trace of the last
 *   lpfc_debugfs_max_slow_ring_trc events that happened on a specific HBA.
 *   To enable the slow ring trace, the following module parameters must be set:
 *   lpfc_debugfs_enable=1         Turns on lpfc debugfs filesystem support
 *   lpfc_debugfs_max_slow_ring_trc=X   Where X is the event trace depth for
 *                                 the HBA. X MUST also be a power of 2.
*/

/* Master switch: all lpfc debugfs services are gated by this parameter */
static int lpfc_debugfs_enable = 1;
module_param(lpfc_debugfs_enable, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services");

/* This MUST be a power of 2 (used as a ring-index mask below) */
static int lpfc_debugfs_max_disc_trc;
module_param(lpfc_debugfs_max_disc_trc, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc,
	"Set debugfs discovery trace depth");

/* This MUST be a power of 2 (used as a ring-index mask below) */
static int lpfc_debugfs_max_slow_ring_trc;
module_param(lpfc_debugfs_max_slow_ring_trc, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc,
	"Set debugfs slow ring trace depth");

/* Bit mask selecting which discovery event classes are recorded */
static int lpfc_debugfs_mask_disc_trc;
module_param(lpfc_debugfs_mask_disc_trc, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
	"Set debugfs discovery trace mask");

#include <linux/debugfs.h>

/* Global sequence number shared by all trace entries (disc + slow ring) */
static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
/* Reference jiffies value used to compute per-entry millisecond offsets */
static unsigned long lpfc_debugfs_start_time = 0L;

/* iDiag */
static struct lpfc_idiag idiag;

/**
 * lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer
 * @vport: The vport to gather the log info from.
 * @buf: The buffer to dump log into.
 * @size: The maximum amount of data to process.
 *
 * Description:
 * This routine gathers the lpfc discovery debugfs data from the @vport and
 * dumps it to @buf up to @size number of bytes. It will start at the next entry
 * in the log and process the log until the end of the buffer. Then it will
 * gather from the beginning of the log and process until the current entry.
 *
 * Notes:
 * Discovery logging will be disabled while this routine dumps the log.
 *
 * Return Value:
 * This routine returns the amount of bytes that were dumped into @buf and will
 * not exceed @size.
**/
static int
lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
{
	int i, index, len, enable;
	uint32_t ms;
	struct lpfc_debugfs_trc *dtp;
	char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE];

	/* Suspend tracing while the ring is walked */
	enable = lpfc_debugfs_enable;
	lpfc_debugfs_enable = 0;

	len = 0;
	/* Oldest entry is one past the most recently written slot */
	index = (atomic_read(&vport->disc_trc_cnt) + 1) &
		(lpfc_debugfs_max_disc_trc - 1);
	for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
		dtp = vport->disc_trc + i;
		if (!dtp->fmt)
			continue;
		ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
		/* Build the per-entry format template first ... */
		snprintf(buffer,
			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
			dtp->seq_cnt, ms, dtp->fmt);
		/*
		 * ... then expand it.  scnprintf (not snprintf) returns the
		 * number of bytes actually written, so len can never exceed
		 * size and size-len cannot underflow into a huge size_t.
		 */
		len += scnprintf(buf+len, size-len, buffer,
			dtp->data1, dtp->data2, dtp->data3);
	}
	/* Wrap around and dump the newer half of the ring */
	for (i = 0; i < index; i++) {
		dtp = vport->disc_trc + i;
		if (!dtp->fmt)
			continue;
		ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
		snprintf(buffer,
			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
			dtp->seq_cnt, ms, dtp->fmt);
		len += scnprintf(buf+len, size-len, buffer,
			dtp->data1, dtp->data2, dtp->data3);
	}

	lpfc_debugfs_enable = enable;
	return len;
}

/**
 * lpfc_debugfs_slow_ring_trc_data - Dump slow ring logging to a buffer
 * @phba: The HBA to gather the log info from.
 * @buf: The buffer to dump log into.
 * @size: The maximum amount of data to process.
 *
 * Description:
 * This routine gathers the lpfc slow ring debugfs data from the @phba and
 * dumps it to @buf up to @size number of bytes. It will start at the next entry
 * in the log and process the log until the end of the buffer. Then it will
 * gather from the beginning of the log and process until the current entry.
 *
 * Notes:
 * Slow ring logging will be disabled while this routine dumps the log.
 *
 * Return Value:
 * This routine returns the amount of bytes that were dumped into @buf and will
 * not exceed @size.
**/
static int
lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
{
	int i, index, len, enable;
	uint32_t ms;
	struct lpfc_debugfs_trc *dtp;
	char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE];

	/* Suspend tracing while the ring is walked */
	enable = lpfc_debugfs_enable;
	lpfc_debugfs_enable = 0;

	len = 0;
	/* Oldest entry is one past the most recently written slot */
	index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
		(lpfc_debugfs_max_slow_ring_trc - 1);
	for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
		dtp = phba->slow_ring_trc + i;
		if (!dtp->fmt)
			continue;
		ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
		snprintf(buffer,
			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
			dtp->seq_cnt, ms, dtp->fmt);
		/*
		 * scnprintf returns bytes actually written, so len stays
		 * bounded by size and size-len cannot underflow.
		 */
		len += scnprintf(buf+len, size-len, buffer,
			dtp->data1, dtp->data2, dtp->data3);
	}
	/* Wrap around and dump the newer half of the ring */
	for (i = 0; i < index; i++) {
		dtp = phba->slow_ring_trc + i;
		if (!dtp->fmt)
			continue;
		ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
		snprintf(buffer,
			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
			dtp->seq_cnt, ms, dtp->fmt);
		len += scnprintf(buf+len, size-len, buffer,
			dtp->data1, dtp->data2, dtp->data3);
	}

	lpfc_debugfs_enable = enable;
	return len;
}

/* Index of the HBQ dumped on the previous hbqinfo read; rotates per call */
static int lpfc_debugfs_last_hbq = -1;

/**
 * lpfc_debugfs_hbqinfo_data - Dump host buffer queue info to a buffer
 * @phba: The HBA to gather host buffer info from.
 * @buf: The buffer to dump log into.
 * @size: The maximum amount of data to process.
 *
 * Description:
 * This routine dumps the host buffer queue info from the @phba to @buf up to
 * @size number of bytes. A header that describes the current hbq state will be
 * dumped to @buf first and then info on each hbq entry will be dumped to @buf
 * until @size bytes have been dumped or all the hbq info has been dumped.
 *
 * Notes:
 * This routine will rotate through each configured HBQ each time called.
 *
 * Return Value:
 * This routine returns the amount of bytes that were dumped into @buf and will
 * not exceed @size.
**/
static int
lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
{
	int len = 0;
	int cnt, i, j, found, posted, low;
	uint32_t phys, raw_index, getidx;
	struct lpfc_hbq_init *hip;
	struct hbq_s *hbqs;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;

	/* HBQs only exist on SLI-3 HBAs */
	if (phba->sli_rev != 3)
		return 0;
	cnt = LPFC_HBQINFO_SIZE;
	spin_lock_irq(&phba->hbalock);

	/* toggle between multiple hbqs, if any */
	i = lpfc_sli_hbq_count();
	if (i > 1) {
		lpfc_debugfs_last_hbq++;
		if (lpfc_debugfs_last_hbq >= i)
			lpfc_debugfs_last_hbq = 0;
	} else
		lpfc_debugfs_last_hbq = 0;

	i = lpfc_debugfs_last_hbq;

	/*
	 * scnprintf throughout: it returns bytes actually written, so len
	 * can never exceed size and size-len cannot underflow (snprintf
	 * returns the would-be length and could overflow the buffer).
	 */
	len += scnprintf(buf+len, size-len, "HBQ %d Info\n", i);

	hbqs = &phba->hbqs[i];
	posted = 0;
	list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list)
		posted++;

	hip = lpfc_hbq_defs[i];
	len += scnprintf(buf+len, size-len,
		"idx:%d prof:%d rn:%d bufcnt:%d icnt:%d acnt:%d posted %d\n",
		hip->hbq_index, hip->profile, hip->rn,
		hip->buffer_count, hip->init_count, hip->add_count, posted);

	raw_index = phba->hbq_get[i];
	getidx = le32_to_cpu(raw_index);
	len += scnprintf(buf+len, size-len,
		"entrys:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
		hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx,
		hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx);

	hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt;
	for (j = 0; j < hbqs->entry_count; j++) {
		len += scnprintf(buf+len, size-len,
			"%03d: %08x %04x %05x ", j,
			le32_to_cpu(hbqe->bde.addrLow),
			le32_to_cpu(hbqe->bde.tus.w),
			le32_to_cpu(hbqe->buffer_tag));
		/* i is reused below as the posted-buffer index */
		i = 0;
		found = 0;

		/* First calculate if slot has an associated posted buffer */
		low = hbqs->hbqPutIdx - posted;
		if (low >= 0) {
			if ((j >= hbqs->hbqPutIdx) || (j < low)) {
				len += scnprintf(buf+len, size-len,
						"Unused\n");
				goto skipit;
			}
		} else {
			if ((j >= hbqs->hbqPutIdx) &&
				(j < (hbqs->entry_count+low))) {
				len += scnprintf(buf+len, size-len,
					"Unused\n");
				goto skipit;
			}
		}

		/* Get the Buffer info for the posted buffer */
		list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list) {
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff);
			if (phys == le32_to_cpu(hbqe->bde.addrLow)) {
				len += scnprintf(buf+len, size-len,
					"Buf%d: %p %06x\n",
					i, hbq_buf->dbuf.virt, hbq_buf->tag);
				found = 1;
				break;
			}
			i++;
		}
		if (!found) {
			len += scnprintf(buf+len, size-len, "No DMAinfo?\n");
		}
skipit:
		hbqe++;
		/* Leave room for one more full entry line */
		if (len > LPFC_HBQINFO_SIZE - 54)
			break;
	}
	spin_unlock_irq(&phba->hbalock);
	return len;
}

/* SLIM window offset carried across reads; advances 1K per call, wraps at 4K */
static int lpfc_debugfs_last_hba_slim_off;

/**
 * lpfc_debugfs_dumpHBASlim_data - Dump HBA SLIM info to a buffer
 * @phba: The HBA to gather SLIM info from.
 * @buf: The buffer to dump log into.
 * @size: The maximum amount of data to process.
 *
 * Description:
 * This routine dumps the current contents of HBA SLIM for the HBA associated
 * with @phba to @buf up to @size bytes of data. This is the raw HBA SLIM data.
 *
 * Notes:
 * This routine will only dump up to 1024 bytes of data each time called and
 * should be called multiple times to dump the entire HBA SLIM.
 *
 * Return Value:
 * This routine returns the amount of bytes that were dumped into @buf and will
 * not exceed @size.
**/
static int
lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
{
	int len = 0;
	int i, off;
	uint32_t *ptr;
	char buffer[1024];

	off = 0;
	spin_lock_irq(&phba->hbalock);

	/*
	 * scnprintf (not snprintf): returns bytes actually written, so len
	 * never exceeds size and size-len cannot underflow.
	 */
	len += scnprintf(buf+len, size-len, "HBA SLIM\n");
	lpfc_memcpy_from_slim(buffer,
		phba->MBslimaddr + lpfc_debugfs_last_hba_slim_off, 1024);

	ptr = (uint32_t *)&buffer[0];
	off = lpfc_debugfs_last_hba_slim_off;

	/* Set it up for the next time */
	lpfc_debugfs_last_hba_slim_off += 1024;
	if (lpfc_debugfs_last_hba_slim_off >= 4096)
		lpfc_debugfs_last_hba_slim_off = 0;

	/* Hex-dump the 1K window, 8 words per line */
	i = 1024;
	while (i > 0) {
		len += scnprintf(buf+len, size-len,
		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
		*(ptr+5), *(ptr+6), *(ptr+7));
		ptr += 8;
		i -= (8 * sizeof(uint32_t));
		off += (8 * sizeof(uint32_t));
	}

	spin_unlock_irq(&phba->hbalock);
	return len;
}

/**
 * lpfc_debugfs_dumpHostSlim_data - Dump host SLIM info to a buffer
 * @phba: The HBA to gather Host SLIM info from.
 * @buf: The buffer to dump log into.
 * @size: The maximum amount of data to process.
 *
 * Description:
 * This routine dumps the current contents of host SLIM for the host associated
 * with @phba to @buf up to @size bytes of data. The dump will contain the
 * Mailbox, PCB, Rings, and Registers that are located in host memory.
 *
 * Return Value:
 * This routine returns the amount of bytes that were dumped into @buf and will
 * not exceed @size.
**/
static int
lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
{
	int len = 0;
	int i, off;
	uint32_t word0, word1, word2, word3;
	uint32_t *ptr;
	struct lpfc_pgp *pgpp;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	off = 0;
	spin_lock_irq(&phba->hbalock);

	/*
	 * scnprintf throughout (not snprintf): returns bytes actually
	 * written, so len never exceeds size and size-len cannot underflow.
	 */
	len += scnprintf(buf+len, size-len, "SLIM Mailbox\n");
	ptr = (uint32_t *)phba->slim2p.virt;
	i = sizeof(MAILBOX_t);
	while (i > 0) {
		len += scnprintf(buf+len, size-len,
		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
		*(ptr+5), *(ptr+6), *(ptr+7));
		ptr += 8;
		i -= (8 * sizeof(uint32_t));
		off += (8 * sizeof(uint32_t));
	}

	len += scnprintf(buf+len, size-len, "SLIM PCB\n");
	ptr = (uint32_t *)phba->pcb;
	i = sizeof(PCB_t);
	while (i > 0) {
		len += scnprintf(buf+len, size-len,
		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
		*(ptr+5), *(ptr+6), *(ptr+7));
		ptr += 8;
		i -= (8 * sizeof(uint32_t));
		off += (8 * sizeof(uint32_t));
	}

	/* Per-ring get/put index snapshot for the four SLI-3 rings */
	for (i = 0; i < 4; i++) {
		pgpp = &phba->port_gp[i];
		pring = &psli->ring[i];
		len += scnprintf(buf+len, size-len,
				 "Ring %d: CMD GetInx:%d (Max:%d Next:%d "
				 "Local:%d flg:x%x) RSP PutInx:%d Max:%d\n",
				 i, pgpp->cmdGetInx, pring->numCiocb,
				 pring->next_cmdidx, pring->local_getidx,
				 pring->flag, pgpp->rspPutInx, pring->numRiocb);
	}

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		word0 = readl(phba->HAregaddr);
		word1 = readl(phba->CAregaddr);
		word2 = readl(phba->HSregaddr);
		word3 = readl(phba->HCregaddr);
		len += scnprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
				 "HC:%08x\n", word0, word1, word2, word3);
	}
	spin_unlock_irq(&phba->hbalock);
	return len;
}

/**
 * lpfc_debugfs_nodelist_data - Dump target node list to a buffer
 * @vport: The vport to gather target node info from.
 * @buf: The buffer to dump log into.
 * @size: The maximum amount of data to process.
 *
 * Description:
 * This routine dumps the current target node list associated with @vport to
 * @buf up to @size bytes of data.
Each node entry in the dump will contain a * node state, DID, WWPN, WWNN, RPI, flags, type, and other useful fields. * * Return Value: * This routine returns the amount of bytes that were dumped into @buf and will * not exceed @size. **/ static int lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) { int len = 0; int cnt; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp; unsigned char *statep, *name; cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (!cnt) { len += snprintf(buf+len, size-len, "Missing Nodelist Entries\n"); break; } cnt--; switch (ndlp->nlp_state) { case NLP_STE_UNUSED_NODE: statep = "UNUSED"; break; case NLP_STE_PLOGI_ISSUE: statep = "PLOGI "; break; case NLP_STE_ADISC_ISSUE: statep = "ADISC "; break; case NLP_STE_REG_LOGIN_ISSUE: statep = "REGLOG"; break; case NLP_STE_PRLI_ISSUE: statep = "PRLI "; break; case NLP_STE_UNMAPPED_NODE: statep = "UNMAP "; break; case NLP_STE_MAPPED_NODE: statep = "MAPPED"; break; case NLP_STE_NPR_NODE: statep = "NPR "; break; default: statep = "UNKNOWN"; } len += snprintf(buf+len, size-len, "%s DID:x%06x ", statep, ndlp->nlp_DID); name = (unsigned char *)&ndlp->nlp_portname; len += snprintf(buf+len, size-len, "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7)); name = (unsigned char *)&ndlp->nlp_nodename; len += snprintf(buf+len, size-len, "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7)); len += snprintf(buf+len, size-len, "RPI:%03d flag:x%08x ", ndlp->nlp_rpi, ndlp->nlp_flag); if (!ndlp->nlp_type) len += snprintf(buf+len, size-len, "UNKNOWN_TYPE "); if (ndlp->nlp_type & NLP_FC_NODE) len += snprintf(buf+len, size-len, "FC_NODE "); if (ndlp->nlp_type & NLP_FABRIC) len += snprintf(buf+len, size-len, "FABRIC 
"); if (ndlp->nlp_type & NLP_FCP_TARGET) len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ", ndlp->nlp_sid); if (ndlp->nlp_type & NLP_FCP_INITIATOR) len += snprintf(buf+len, size-len, "FCP_INITIATOR "); len += snprintf(buf+len, size-len, "usgmap:%x ", ndlp->nlp_usg_map); len += snprintf(buf+len, size-len, "refcnt:%x", atomic_read(&ndlp->kref.refcount)); len += snprintf(buf+len, size-len, "\n"); } spin_unlock_irq(shost->host_lock); return len; } #endif /** * lpfc_debugfs_disc_trc - Store discovery trace log * @vport: The vport to associate this trace string with for retrieval. * @mask: Log entry classification. * @fmt: Format string to be displayed when dumping the log. * @data1: 1st data parameter to be applied to @fmt. * @data2: 2nd data parameter to be applied to @fmt. * @data3: 3rd data parameter to be applied to @fmt. * * Description: * This routine is used by the driver code to add a debugfs log entry to the * discovery trace buffer associated with @vport. Only entries with a @mask that * match the current debugfs discovery mask will be saved. Entries that do not * match will be thrown away. @fmt, @data1, @data2, and @data3 are used like * printf when displaying the log. **/ inline void lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt, uint32_t data1, uint32_t data2, uint32_t data3) { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS struct lpfc_debugfs_trc *dtp; int index; if (!(lpfc_debugfs_mask_disc_trc & mask)) return; if (!lpfc_debugfs_enable || !lpfc_debugfs_max_disc_trc || !vport || !vport->disc_trc) return; index = atomic_inc_return(&vport->disc_trc_cnt) & (lpfc_debugfs_max_disc_trc - 1); dtp = vport->disc_trc + index; dtp->fmt = fmt; dtp->data1 = data1; dtp->data2 = data2; dtp->data3 = data3; dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt); dtp->jif = jiffies; #endif return; } /** * lpfc_debugfs_slow_ring_trc - Store slow ring trace log * @phba: The phba to associate this trace string with for retrieval. 
* @fmt: Format string to be displayed when dumping the log.
 * @data1: 1st data parameter to be applied to @fmt.
 * @data2: 2nd data parameter to be applied to @fmt.
 * @data3: 3rd data parameter to be applied to @fmt.
 *
 * Description:
 * This routine is used by the driver code to add a debugfs log entry to the
 * slow ring trace buffer associated with @phba. @fmt, @data1, @data2, and
 * @data3 are used like printf when displaying the log.
 **/
inline void
lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
	uint32_t data1, uint32_t data2, uint32_t data3)
{
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	struct lpfc_debugfs_trc *dtp;
	int index;

	/* Bail out if tracing is off or the trace ring was never allocated */
	if (!lpfc_debugfs_enable || !lpfc_debugfs_max_slow_ring_trc ||
		!phba || !phba->slow_ring_trc)
		return;

	/* Trace depth is a power of 2; mask wraps the ring index */
	index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
		(lpfc_debugfs_max_slow_ring_trc - 1);
	dtp = phba->slow_ring_trc + index;
	/* The fmt pointer is stored, not copied; callers pass literals */
	dtp->fmt = fmt;
	dtp->data1 = data1;
	dtp->data2 = data2;
	dtp->data3 = data3;
	dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
	dtp->jif = jiffies;
#endif
	return;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
 * lpfc_debugfs_disc_trc_open - Open the discovery trace log
 * @inode: The inode pointer that contains a vport pointer.
 * @file: The file pointer to attach the log output.
 *
 * Description:
 * This routine is the entry point for the debugfs open file operation. It gets
 * the vport from the i_private field in @inode, allocates the necessary buffer
 * for the log, fills the buffer from the in-memory log for this vport, and then
 * returns a pointer to that log in the private_data field in @file.
 *
 * Returns:
 * This function returns zero if successful. On error it will return a negative
 * error value.
**/
static int
lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file)
{
	struct lpfc_vport *vport = inode->i_private;
	struct lpfc_debug *debug;
	int size;
	int rc = -ENOMEM;

	/* Nothing to dump if discovery tracing was never enabled */
	if (!lpfc_debugfs_max_disc_trc) {
		rc = -ENOSPC;
		goto out;
	}

	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
	if (!debug)
		goto out;

	/* Round to page boundary */
	size = (lpfc_debugfs_max_disc_trc * LPFC_DEBUG_TRC_ENTRY_SIZE);
	size = PAGE_ALIGN(size);

	debug->buffer = kmalloc(size, GFP_KERNEL);
	if (!debug->buffer) {
		kfree(debug);
		goto out;
	}

	/* Snapshot the trace ring; freed later in lpfc_debugfs_release() */
	debug->len = lpfc_debugfs_disc_trc_data(vport, debug->buffer, size);
	file->private_data = debug;

	rc = 0;
out:
	return rc;
}

/**
 * lpfc_debugfs_slow_ring_trc_open - Open the Slow Ring trace log
 * @inode: The inode pointer that contains a phba pointer.
 * @file: The file pointer to attach the log output.
 *
 * Description:
 * This routine is the entry point for the debugfs open file operation. It gets
 * the phba from the i_private field in @inode, allocates the necessary buffer
 * for the log, fills the buffer from the in-memory log for this hba, and then
 * returns a pointer to that log in the private_data field in @file.
 *
 * Returns:
 * This function returns zero if successful. On error it will return a negative
 * error value.
**/
static int
lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file)
{
	struct lpfc_hba *phba = inode->i_private;
	struct lpfc_debug *debug;
	int size;
	int rc = -ENOMEM;

	/* Nothing to dump if slow ring tracing was never enabled */
	if (!lpfc_debugfs_max_slow_ring_trc) {
		rc = -ENOSPC;
		goto out;
	}

	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
	if (!debug)
		goto out;

	/* Round to page boundary */
	size = (lpfc_debugfs_max_slow_ring_trc * LPFC_DEBUG_TRC_ENTRY_SIZE);
	size = PAGE_ALIGN(size);

	debug->buffer = kmalloc(size, GFP_KERNEL);
	if (!debug->buffer) {
		kfree(debug);
		goto out;
	}

	/* Snapshot the trace ring; freed later in lpfc_debugfs_release() */
	debug->len = lpfc_debugfs_slow_ring_trc_data(phba, debug->buffer, size);
	file->private_data = debug;

	rc = 0;
out:
	return rc;
}

/**
 * lpfc_debugfs_hbqinfo_open - Open the hbqinfo debugfs buffer
 * @inode: The inode pointer that contains a phba pointer.
 * @file: The file pointer to attach the log output.
 *
 * Description:
 * This routine is the entry point for the debugfs open file operation. It gets
 * the phba from the i_private field in @inode, allocates the necessary buffer
 * for the hbq dump, fills the buffer from the current hbq state, and then
 * returns a pointer to that buffer in the private_data field in @file.
 *
 * Returns:
 * This function returns zero if successful. On error it will return a negative
 * error value.
 **/
static int
lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file)
{
	struct lpfc_hba *phba = inode->i_private;
	struct lpfc_debug *debug;
	int rc = -ENOMEM;

	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
	if (!debug)
		goto out;

	/* Fixed-size dump buffer; freed later in lpfc_debugfs_release() */
	debug->buffer = kmalloc(LPFC_HBQINFO_SIZE, GFP_KERNEL);
	if (!debug->buffer) {
		kfree(debug);
		goto out;
	}

	debug->len = lpfc_debugfs_hbqinfo_data(phba, debug->buffer,
		LPFC_HBQINFO_SIZE);
	file->private_data = debug;

	rc = 0;
out:
	return rc;
}

/**
 * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer
 * @inode: The inode pointer that contains a phba pointer.
 * @file: The file pointer to attach the log output.
*
 * Description:
 * This routine is the entry point for the debugfs open file operation. It gets
 * the phba from the i_private field in @inode, allocates the necessary buffer
 * for the SLIM dump, fills the buffer from the current HBA SLIM window, and
 * then returns a pointer to that buffer in the private_data field in @file.
 *
 * Returns:
 * This function returns zero if successful. On error it will return a negative
 * error value.
 **/
static int
lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file)
{
	struct lpfc_hba *phba = inode->i_private;
	struct lpfc_debug *debug;
	int rc = -ENOMEM;

	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
	if (!debug)
		goto out;

	/* Fixed-size dump buffer; freed later in lpfc_debugfs_release() */
	debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL);
	if (!debug->buffer) {
		kfree(debug);
		goto out;
	}

	debug->len = lpfc_debugfs_dumpHBASlim_data(phba, debug->buffer,
		LPFC_DUMPHBASLIM_SIZE);
	file->private_data = debug;

	rc = 0;
out:
	return rc;
}

/**
 * lpfc_debugfs_dumpHostSlim_open - Open the Dump Host SLIM debugfs buffer
 * @inode: The inode pointer that contains a phba pointer.
 * @file: The file pointer to attach the log output.
 *
 * Description:
 * This routine is the entry point for the debugfs open file operation. It gets
 * the phba from the i_private field in @inode, allocates the necessary buffer
 * for the SLIM dump, fills the buffer from host memory SLIM, and then
 * returns a pointer to that buffer in the private_data field in @file.
 *
 * Returns:
 * This function returns zero if successful. On error it will return a negative
 * error value.
**/
static int
lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file)
{
	struct lpfc_hba *phba = inode->i_private;
	struct lpfc_debug *debug;
	int rc = -ENOMEM;

	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
	if (!debug)
		goto out;

	/* Fixed-size dump buffer; freed later in lpfc_debugfs_release() */
	debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL);
	if (!debug->buffer) {
		kfree(debug);
		goto out;
	}

	debug->len = lpfc_debugfs_dumpHostSlim_data(phba, debug->buffer,
		LPFC_DUMPHOSTSLIM_SIZE);
	file->private_data = debug;

	rc = 0;
out:
	return rc;
}

/*
 * Open the BlockGuard data dump file.  The file exposes the single global
 * _dump_buf_data capture buffer directly (no copy is made), so open fails
 * with -EBUSY until that buffer has been allocated.
 */
static int
lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
{
	struct lpfc_debug *debug;
	int rc = -ENOMEM;

	if (!_dump_buf_data)
		return -EBUSY;

	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
	if (!debug)
		goto out;

	/* Expose the shared capture buffer; released without freeing it in
	 * lpfc_debugfs_dumpDataDif_release() */
	printk(KERN_ERR "9059 BLKGRD: %s: _dump_buf_data=0x%p\n",
		__func__, _dump_buf_data);
	debug->buffer = _dump_buf_data;
	if (!debug->buffer) {
		kfree(debug);
		goto out;
	}

	debug->len = (1 << _dump_buf_data_order) << PAGE_SHIFT;
	file->private_data = debug;

	rc = 0;
out:
	return rc;
}

/*
 * Open the BlockGuard DIF dump file.  Mirrors lpfc_debugfs_dumpData_open()
 * but exposes the global _dump_buf_dif capture buffer.
 */
static int
lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
{
	struct lpfc_debug *debug;
	int rc = -ENOMEM;

	if (!_dump_buf_dif)
		return -EBUSY;

	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
	if (!debug)
		goto out;

	/* Expose the shared capture buffer; released without freeing it in
	 * lpfc_debugfs_dumpDataDif_release() */
	printk(KERN_ERR "9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%s\n",
		__func__, _dump_buf_dif, file->f_dentry->d_name.name);
	debug->buffer = _dump_buf_dif;
	if (!debug->buffer) {
		kfree(debug);
		goto out;
	}

	debug->len = (1 << _dump_buf_dif_order) << PAGE_SHIFT;
	file->private_data = debug;

	rc = 0;
out:
	return rc;
}

static ssize_t
lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
	size_t nbytes, loff_t *ppos)
{
	/*
	 * The Data/DIF buffers only save one failing IO
	 * The write op is used as a reset mechanism after an IO has
	 * already been saved to the next one can be saved
	 */
	spin_lock(&_dump_buf_lock);

	memset((void *)_dump_buf_data, 0,
		((1 << PAGE_SHIFT) << _dump_buf_data_order));
	memset((void *)_dump_buf_dif, 0,
		((1 << PAGE_SHIFT) << _dump_buf_dif_order));

	_dump_buf_done = 0;

	spin_unlock(&_dump_buf_lock);

	return nbytes;
}

/**
 * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file
 * @inode: The inode pointer that contains a vport pointer.
 * @file: The file pointer to attach the log output.
 *
 * Description:
 * This routine is the entry point for the debugfs open file operation. It gets
 * the vport from the i_private field in @inode, allocates the necessary buffer
 * for the node list, fills the buffer from the vport's node list, and then
 * returns a pointer to that buffer in the private_data field in @file.
 *
 * Returns:
 * This function returns zero if successful. On error it will return a negative
 * error value.
 **/
static int
lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file)
{
	struct lpfc_vport *vport = inode->i_private;
	struct lpfc_debug *debug;
	int rc = -ENOMEM;

	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
	if (!debug)
		goto out;

	/* Fixed-size dump buffer; freed later in lpfc_debugfs_release() */
	debug->buffer = kmalloc(LPFC_NODELIST_SIZE, GFP_KERNEL);
	if (!debug->buffer) {
		kfree(debug);
		goto out;
	}

	debug->len = lpfc_debugfs_nodelist_data(vport, debug->buffer,
		LPFC_NODELIST_SIZE);
	file->private_data = debug;

	rc = 0;
out:
	return rc;
}

/**
 * lpfc_debugfs_lseek - Seek through a debugfs file
 * @file: The file pointer to seek through.
 * @off: The offset to seek to or the amount to seek by.
 * @whence: Indicates how to seek.
 *
 * Description:
 * This routine is the entry point for the debugfs lseek file operation. The
 * @whence parameter indicates whether @off is the offset to directly seek to,
 * or if it is a value to seek forward or reverse by. This function figures out
 * what the new offset of the debugfs file will be and assigns that value to the
 * f_pos field of @file.
 *
 * Returns:
 * This function returns the new offset if successful and returns a negative
 * error if unable to process the seek.
**/
static loff_t
lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
{
	struct lpfc_debug *debug;
	loff_t pos = -1;

	debug = file->private_data;

	switch (whence) {
	case 0:		/* SEEK_SET: absolute offset */
		pos = off;
		break;
	case 1:		/* SEEK_CUR: relative to current position */
		pos = file->f_pos + off;
		break;
	case 2:		/* SEEK_END: relative to end of the snapshot */
		pos = debug->len - off;
	}
	/* Reject seeks outside [0, debug->len]; otherwise commit */
	return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
}

/**
 * lpfc_debugfs_read - Read a debugfs file
 * @file: The file pointer to read from.
 * @buf: The buffer to copy the data to.
 * @nbytes: The number of bytes to read.
 * @ppos: The position in the file to start reading from.
 *
 * Description:
 * This routine reads data from from the buffer indicated in the private_data
 * field of @file. It will start reading at @ppos and copy up to @nbytes of
 * data to @buf.
 *
 * Returns:
 * This function returns the amount of data that was read (this could be less
 * than @nbytes if the end of the file was reached) or a negative error value.
 **/
static ssize_t
lpfc_debugfs_read(struct file *file, char __user *buf,
		  size_t nbytes, loff_t *ppos)
{
	struct lpfc_debug *debug = file->private_data;

	/* The snapshot was fully built at open time; just serve it out */
	return simple_read_from_buffer(buf, nbytes, ppos, debug->buffer,
				       debug->len);
}

/**
 * lpfc_debugfs_release - Release the buffer used to store debugfs file data
 * @inode: The inode pointer that contains a vport pointer. (unused)
 * @file: The file pointer that contains the buffer to release.
 *
 * Description:
 * This routine frees the buffer that was allocated when the debugfs file was
 * opened.
 *
 * Returns:
 * This function returns zero.
**/
static int
lpfc_debugfs_release(struct inode *inode, struct file *file)
{
	struct lpfc_debug *debug = file->private_data;

	kfree(debug->buffer);
	kfree(debug);

	return 0;
}

/*
 * Release for the Data/DIF dump files.  Unlike lpfc_debugfs_release(), the
 * buffer is the shared global _dump_buf_data/_dump_buf_dif, so it is only
 * detached here (set to NULL), never freed.
 */
static int
lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
{
	struct lpfc_debug *debug = file->private_data;

	debug->buffer = NULL;
	kfree(debug);

	return 0;
}

/*
 * ---------------------------------
 * iDiag debugfs file access methods
 * ---------------------------------
 *
 * All access methods are through the proper SLI4 PCI function's debugfs
 * iDiag directory:
 *
 * /sys/kernel/debug/lpfc/fn<#>/iDiag
 */

/**
 * lpfc_idiag_cmd_get - Get and parse idiag debugfs comands from user space
 * @buf: The pointer to the user space buffer.
 * @nbytes: The number of bytes in the user space buffer.
 * @idiag_cmd: pointer to the idiag command struct.
 *
 * This routine reads data from debugfs user space buffer and parses the
 * buffer for getting the idiag command and arguments. The while space in
 * between the set of data is used as the parsing separator.
 *
 * This routine returns 0 when successful, it returns proper error code
 * back to the user space in error conditions.
*/ static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes, struct lpfc_idiag_cmd *idiag_cmd) { char mybuf[64]; char *pbuf, *step_str; int bsize, i; /* Protect copy from user */ if (!access_ok(VERIFY_READ, buf, nbytes)) return -EFAULT; memset(mybuf, 0, sizeof(mybuf)); memset(idiag_cmd, 0, sizeof(*idiag_cmd)); bsize = min(nbytes, (sizeof(mybuf)-1)); if (copy_from_user(mybuf, buf, bsize)) return -EFAULT; pbuf = &mybuf[0]; step_str = strsep(&pbuf, "\t "); /* The opcode must present */ if (!step_str) return -EINVAL; idiag_cmd->opcode = simple_strtol(step_str, NULL, 0); if (idiag_cmd->opcode == 0) return -EINVAL; for (i = 0; i < LPFC_IDIAG_CMD_DATA_SIZE; i++) { step_str = strsep(&pbuf, "\t "); if (!step_str) return i; idiag_cmd->data[i] = simple_strtol(step_str, NULL, 0); } return i; } /** * lpfc_idiag_open - idiag open debugfs * @inode: The inode pointer that contains a pointer to phba. * @file: The file pointer to attach the file operation. * * Description: * This routine is the entry point for the debugfs open file operation. It * gets the reference to phba from the i_private field in @inode, it then * allocates buffer for the file operation, performs the necessary PCI config * space read into the allocated buffer according to the idiag user command * setup, and then returns a pointer to buffer in the private_data field in * @file. * * Returns: * This function returns zero if successful. On error it will return an * negative error value. **/ static int lpfc_idiag_open(struct inode *inode, struct file *file) { struct lpfc_debug *debug; debug = kmalloc(sizeof(*debug), GFP_KERNEL); if (!debug) return -ENOMEM; debug->i_private = inode->i_private; debug->buffer = NULL; file->private_data = debug; return 0; } /** * lpfc_idiag_release - Release idiag access file operation * @inode: The inode pointer that contains a vport pointer. (unused) * @file: The file pointer that contains the buffer to release. 
* * Description: * This routine is the generic release routine for the idiag access file * operation, it frees the buffer that was allocated when the debugfs file * was opened. * * Returns: * This function returns zero. **/ static int lpfc_idiag_release(struct inode *inode, struct file *file) { struct lpfc_debug *debug = file->private_data; /* Free the buffers to the file operation */ kfree(debug->buffer); kfree(debug); return 0; } /** * lpfc_idiag_cmd_release - Release idiag cmd access file operation * @inode: The inode pointer that contains a vport pointer. (unused) * @file: The file pointer that contains the buffer to release. * * Description: * This routine frees the buffer that was allocated when the debugfs file * was opened. It also reset the fields in the idiag command struct in the * case of command for write operation. * * Returns: * This function returns zero. **/ static int lpfc_idiag_cmd_release(struct inode *inode, struct file *file) { struct lpfc_debug *debug = file->private_data; if (debug->op == LPFC_IDIAG_OP_WR) { switch (idiag.cmd.opcode) { case LPFC_IDIAG_CMD_PCICFG_WR: case LPFC_IDIAG_CMD_PCICFG_ST: case LPFC_IDIAG_CMD_PCICFG_CL: case LPFC_IDIAG_CMD_QUEACC_WR: case LPFC_IDIAG_CMD_QUEACC_ST: case LPFC_IDIAG_CMD_QUEACC_CL: memset(&idiag, 0, sizeof(idiag)); break; default: break; } } /* Free the buffers to the file operation */ kfree(debug->buffer); kfree(debug); return 0; } /** * lpfc_idiag_pcicfg_read - idiag debugfs read pcicfg * @file: The file pointer to read from. * @buf: The buffer to copy the data to. * @nbytes: The number of bytes to read. * @ppos: The position in the file to start reading from. * * Description: * This routine reads data from the @phba pci config space according to the * idiag command, and copies to user @buf. 
Depending on the PCI config space * read command setup, it does either a single register read of a byte * (8 bits), a word (16 bits), or a dword (32 bits) or browsing through all * registers from the 4K extended PCI config space. * * Returns: * This function returns the amount of data that was read (this could be less * than @nbytes if the end of the file was reached) or a negative error value. **/ static ssize_t lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; int offset_label, offset, len = 0, index = LPFC_PCI_CFG_RD_SIZE; int where, count; char *pbuffer; struct pci_dev *pdev; uint32_t u32val; uint16_t u16val; uint8_t u8val; pdev = phba->pcidev; if (!pdev) return 0; /* This is a user read operation */ debug->op = LPFC_IDIAG_OP_RD; if (!debug->buffer) debug->buffer = kmalloc(LPFC_PCI_CFG_SIZE, GFP_KERNEL); if (!debug->buffer) return 0; pbuffer = debug->buffer; if (*ppos) return 0; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) { where = idiag.cmd.data[0]; count = idiag.cmd.data[1]; } else return 0; /* Read single PCI config space register */ switch (count) { case SIZE_U8: /* byte (8 bits) */ pci_read_config_byte(pdev, where, &u8val); len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "%03x: %02x\n", where, u8val); break; case SIZE_U16: /* word (16 bits) */ pci_read_config_word(pdev, where, &u16val); len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "%03x: %04x\n", where, u16val); break; case SIZE_U32: /* double word (32 bits) */ pci_read_config_dword(pdev, where, &u32val); len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "%03x: %08x\n", where, u32val); break; case LPFC_PCI_CFG_BROWSE: /* browse all */ goto pcicfg_browse; break; default: /* illegal count */ len = 0; break; } return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); pcicfg_browse: /* Browse all PCI config space registers */ offset_label 
= idiag.offset.last_rd; offset = offset_label; /* Read PCI config space */ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "%03x: ", offset_label); while (index > 0) { pci_read_config_dword(pdev, offset, &u32val); len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "%08x ", u32val); offset += sizeof(uint32_t); index -= sizeof(uint32_t); if (!index) len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "\n"); else if (!(index % (8 * sizeof(uint32_t)))) { offset_label += (8 * sizeof(uint32_t)); len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "\n%03x: ", offset_label); } } /* Set up the offset for next portion of pci cfg read */ idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE; if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE) idiag.offset.last_rd = 0; return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); } /** * lpfc_idiag_pcicfg_write - Syntax check and set up idiag pcicfg commands * @file: The file pointer to read from. * @buf: The buffer to copy the user data from. * @nbytes: The number of bytes to get. * @ppos: The position in the file to start reading from. * * This routine get the debugfs idiag command struct from user space and * then perform the syntax check for PCI config space read or write command * accordingly. In the case of PCI config space read command, it sets up * the command in the idiag command struct for the debugfs read operation. * In the case of PCI config space write operation, it executes the write * operation into the PCI config space accordingly. * * It returns the @nbytges passing in from debugfs user space when successful. * In case of error conditions, it returns proper error code back to the user * space. 
*/ static ssize_t lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; uint32_t where, value, count; uint32_t u32val; uint16_t u16val; uint8_t u8val; struct pci_dev *pdev; int rc; pdev = phba->pcidev; if (!pdev) return -EFAULT; /* This is a user write operation */ debug->op = LPFC_IDIAG_OP_WR; rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); if (rc < 0) return rc; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) { /* Sanity check on PCI config read command line arguments */ if (rc != LPFC_PCI_CFG_RD_CMD_ARG) goto error_out; /* Read command from PCI config space, set up command fields */ where = idiag.cmd.data[0]; count = idiag.cmd.data[1]; if (count == LPFC_PCI_CFG_BROWSE) { if (where % sizeof(uint32_t)) goto error_out; /* Starting offset to browse */ idiag.offset.last_rd = where; } else if ((count != sizeof(uint8_t)) && (count != sizeof(uint16_t)) && (count != sizeof(uint32_t))) goto error_out; if (count == sizeof(uint8_t)) { if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t)) goto error_out; if (where % sizeof(uint8_t)) goto error_out; } if (count == sizeof(uint16_t)) { if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t)) goto error_out; if (where % sizeof(uint16_t)) goto error_out; } if (count == sizeof(uint32_t)) { if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t)) goto error_out; if (where % sizeof(uint32_t)) goto error_out; } } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR || idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST || idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { /* Sanity check on PCI config write command line arguments */ if (rc != LPFC_PCI_CFG_WR_CMD_ARG) goto error_out; /* Write command to PCI config space, read-modify-write */ where = idiag.cmd.data[0]; count = idiag.cmd.data[1]; value = idiag.cmd.data[2]; /* Sanity checks */ if ((count != sizeof(uint8_t)) && (count != sizeof(uint16_t)) && 
(count != sizeof(uint32_t))) goto error_out; if (count == sizeof(uint8_t)) { if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t)) goto error_out; if (where % sizeof(uint8_t)) goto error_out; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR) pci_write_config_byte(pdev, where, (uint8_t)value); if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) { rc = pci_read_config_byte(pdev, where, &u8val); if (!rc) { u8val |= (uint8_t)value; pci_write_config_byte(pdev, where, u8val); } } if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { rc = pci_read_config_byte(pdev, where, &u8val); if (!rc) { u8val &= (uint8_t)(~value); pci_write_config_byte(pdev, where, u8val); } } } if (count == sizeof(uint16_t)) { if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t)) goto error_out; if (where % sizeof(uint16_t)) goto error_out; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR) pci_write_config_word(pdev, where, (uint16_t)value); if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) { rc = pci_read_config_word(pdev, where, &u16val); if (!rc) { u16val |= (uint16_t)value; pci_write_config_word(pdev, where, u16val); } } if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { rc = pci_read_config_word(pdev, where, &u16val); if (!rc) { u16val &= (uint16_t)(~value); pci_write_config_word(pdev, where, u16val); } } } if (count == sizeof(uint32_t)) { if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t)) goto error_out; if (where % sizeof(uint32_t)) goto error_out; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR) pci_write_config_dword(pdev, where, value); if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) { rc = pci_read_config_dword(pdev, where, &u32val); if (!rc) { u32val |= value; pci_write_config_dword(pdev, where, u32val); } } if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { rc = pci_read_config_dword(pdev, where, &u32val); if (!rc) { u32val &= ~value; pci_write_config_dword(pdev, where, u32val); } } } } else /* All other opecodes are illegal for now */ goto error_out; return nbytes; error_out: memset(&idiag, 0, 
sizeof(idiag)); return -EINVAL; } /** * lpfc_idiag_queinfo_read - idiag debugfs read queue information * @file: The file pointer to read from. * @buf: The buffer to copy the data to. * @nbytes: The number of bytes to read. * @ppos: The position in the file to start reading from. * * Description: * This routine reads data from the @phba SLI4 PCI function queue information, * and copies to user @buf. * * Returns: * This function returns the amount of data that was read (this could be less * than @nbytes if the end of the file was reached) or a negative error value. **/ static ssize_t lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; int len = 0, fcp_qidx; char *pbuffer; if (!debug->buffer) debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL); if (!debug->buffer) return 0; pbuffer = debug->buffer; if (*ppos) return 0; /* Get slow-path event queue information */ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "Slow-path EQ information:\n"); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\tEQID[%02d], " "QE-COUNT[%04d], QE-SIZE[%04d], " "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", phba->sli4_hba.sp_eq->queue_id, phba->sli4_hba.sp_eq->entry_count, phba->sli4_hba.sp_eq->entry_size, phba->sli4_hba.sp_eq->host_index, phba->sli4_hba.sp_eq->hba_index); /* Get fast-path event queue information */ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "Fast-path EQ information:\n"); for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) { len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\tEQID[%02d], " "QE-COUNT[%04d], QE-SIZE[%04d], " "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", phba->sli4_hba.fp_eq[fcp_qidx]->queue_id, phba->sli4_hba.fp_eq[fcp_qidx]->entry_count, phba->sli4_hba.fp_eq[fcp_qidx]->entry_size, phba->sli4_hba.fp_eq[fcp_qidx]->host_index, 
phba->sli4_hba.fp_eq[fcp_qidx]->hba_index); } len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); /* Get mailbox complete queue information */ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "Slow-path MBX CQ information:\n"); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "Associated EQID[%02d]:\n", phba->sli4_hba.mbx_cq->assoc_qid); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\tCQID[%02d], " "QE-COUNT[%04d], QE-SIZE[%04d], " "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", phba->sli4_hba.mbx_cq->queue_id, phba->sli4_hba.mbx_cq->entry_count, phba->sli4_hba.mbx_cq->entry_size, phba->sli4_hba.mbx_cq->host_index, phba->sli4_hba.mbx_cq->hba_index); /* Get slow-path complete queue information */ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "Slow-path ELS CQ information:\n"); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "Associated EQID[%02d]:\n", phba->sli4_hba.els_cq->assoc_qid); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\tCQID [%02d], " "QE-COUNT[%04d], QE-SIZE[%04d], " "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", phba->sli4_hba.els_cq->queue_id, phba->sli4_hba.els_cq->entry_count, phba->sli4_hba.els_cq->entry_size, phba->sli4_hba.els_cq->host_index, phba->sli4_hba.els_cq->hba_index); /* Get fast-path complete queue information */ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "Fast-path FCP CQ information:\n"); fcp_qidx = 0; do { len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "Associated EQID[%02d]:\n", phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\tCQID[%02d], " "QE-COUNT[%04d], QE-SIZE[%04d], " "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id, phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count, phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size, phba->sli4_hba.fcp_cq[fcp_qidx]->host_index, phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index); } while (++fcp_qidx < 
phba->cfg_fcp_eq_count); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); /* Get mailbox queue information */ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "Slow-path MBX MQ information:\n"); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "Associated CQID[%02d]:\n", phba->sli4_hba.mbx_wq->assoc_qid); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\tWQID[%02d], " "QE-COUNT[%04d], QE-SIZE[%04d], " "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", phba->sli4_hba.mbx_wq->queue_id, phba->sli4_hba.mbx_wq->entry_count, phba->sli4_hba.mbx_wq->entry_size, phba->sli4_hba.mbx_wq->host_index, phba->sli4_hba.mbx_wq->hba_index); /* Get slow-path work queue information */ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "Slow-path ELS WQ information:\n"); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "Associated CQID[%02d]:\n", phba->sli4_hba.els_wq->assoc_qid); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\tWQID[%02d], " "QE-COUNT[%04d], QE-SIZE[%04d], " "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", phba->sli4_hba.els_wq->queue_id, phba->sli4_hba.els_wq->entry_count, phba->sli4_hba.els_wq->entry_size, phba->sli4_hba.els_wq->host_index, phba->sli4_hba.els_wq->hba_index); /* Get fast-path work queue information */ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "Fast-path FCP WQ information:\n"); for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) { len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "Associated CQID[%02d]:\n", phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\tWQID[%02d], " "QE-COUNT[%04d], WQE-SIZE[%04d], " "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id, phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count, phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size, phba->sli4_hba.fcp_wq[fcp_qidx]->host_index, phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index); } len += 
snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); /* Get receive queue information */ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "Slow-path RQ information:\n"); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "Associated CQID[%02d]:\n", phba->sli4_hba.hdr_rq->assoc_qid); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\tHQID[%02d], " "QE-COUNT[%04d], QE-SIZE[%04d], " "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", phba->sli4_hba.hdr_rq->queue_id, phba->sli4_hba.hdr_rq->entry_count, phba->sli4_hba.hdr_rq->entry_size, phba->sli4_hba.hdr_rq->host_index, phba->sli4_hba.hdr_rq->hba_index); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\tDQID[%02d], " "QE-COUNT[%04d], QE-SIZE[%04d], " "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", phba->sli4_hba.dat_rq->queue_id, phba->sli4_hba.dat_rq->entry_count, phba->sli4_hba.dat_rq->entry_size, phba->sli4_hba.dat_rq->host_index, phba->sli4_hba.dat_rq->hba_index); return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); } /** * lpfc_idiag_que_param_check - queue access command parameter sanity check * @q: The pointer to queue structure. * @index: The index into a queue entry. * @count: The number of queue entries to access. * * Description: * The routine performs sanity check on device queue access method commands. * * Returns: * This function returns -EINVAL when fails the sanity check, otherwise, it * returns 0. **/ static int lpfc_idiag_que_param_check(struct lpfc_queue *q, int index, int count) { /* Only support single entry read or browsing */ if ((count != 1) && (count != LPFC_QUE_ACC_BROWSE)) return -EINVAL; if (index > q->entry_count - 1) return -EINVAL; return 0; } /** * lpfc_idiag_queacc_read_qe - read a single entry from the given queue index * @pbuffer: The pointer to buffer to copy the read data into. * @pque: The pointer to the queue to be read. * @index: The index into the queue entry. 
* * Description: * This routine reads out a single entry from the given queue's index location * and copies it into the buffer provided. * * Returns: * This function returns 0 when it fails, otherwise, it returns the length of * the data read into the buffer provided. **/ static int lpfc_idiag_queacc_read_qe(char *pbuffer, int len, struct lpfc_queue *pque, uint32_t index) { int offset, esize; uint32_t *pentry; if (!pbuffer || !pque) return 0; esize = pque->entry_size; len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "QE-INDEX[%04d]:\n", index); offset = 0; pentry = pque->qe[index].address; while (esize > 0) { len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "%08x ", *pentry); pentry++; offset += sizeof(uint32_t); esize -= sizeof(uint32_t); if (esize > 0 && !(offset % (4 * sizeof(uint32_t)))) len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n"); } len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n"); return len; } /** * lpfc_idiag_queacc_read - idiag debugfs read port queue * @file: The file pointer to read from. * @buf: The buffer to copy the data to. * @nbytes: The number of bytes to read. * @ppos: The position in the file to start reading from. * * Description: * This routine reads data from the @phba device queue memory according to the * idiag command, and copies to user @buf. Depending on the queue dump read * command setup, it does either a single queue entry read or browing through * all entries of the queue. * * Returns: * This function returns the amount of data that was read (this could be less * than @nbytes if the end of the file was reached) or a negative error value. 
**/
static ssize_t
lpfc_idiag_queacc_read(struct file *file, char __user *buf, size_t nbytes,
		       loff_t *ppos)
{
	struct lpfc_debug *debug = file->private_data;
	uint32_t last_index, index, count;
	struct lpfc_queue *pque = NULL;
	char *pbuffer;
	int len = 0;

	/* This is a user read operation */
	debug->op = LPFC_IDIAG_OP_RD;

	/* Buffer is allocated lazily on first read and kept until release */
	if (!debug->buffer)
		debug->buffer = kmalloc(LPFC_QUE_ACC_BUF_SIZE, GFP_KERNEL);
	if (!debug->buffer)
		return 0;
	pbuffer = debug->buffer;

	if (*ppos)
		return 0;

	/* The queue to dump was selected by the preceding write command,
	 * which stashed it in the global idiag.ptr_private.
	 */
	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
		index = idiag.cmd.data[2];
		count = idiag.cmd.data[3];
		pque = (struct lpfc_queue *)idiag.ptr_private;
	} else
		return 0;

	/* Browse the queue starting from index */
	if (count == LPFC_QUE_ACC_BROWSE)
		goto que_browse;

	/* Read a single entry from the queue */
	len = lpfc_idiag_queacc_read_qe(pbuffer, len, pque, index);

	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);

que_browse:
	/* Browse all entries from the queue, resuming at the offset
	 * remembered from the previous read.
	 */
	last_index = idiag.offset.last_rd;
	index = last_index;

	/* NOTE(review): the loop bound uses LPFC_QUE_ACC_SIZE while the
	 * buffer was allocated with LPFC_QUE_ACC_BUF_SIZE — confirm the
	 * two constants are sized consistently so this cannot overrun.
	 */
	while (len < LPFC_QUE_ACC_SIZE - pque->entry_size) {
		len = lpfc_idiag_queacc_read_qe(pbuffer, len, pque, index);
		index++;
		if (index > pque->entry_count - 1)
			break;
	}

	/* Set up the offset for next portion of the queue browse; wrap
	 * back to entry 0 once the end of the queue is reached.
	 */
	if (index > pque->entry_count - 1)
		index = 0;
	idiag.offset.last_rd = index;

	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
}

/**
 * lpfc_idiag_queacc_write - Syntax check and set up idiag queacc commands
 * @file: The file pointer to read from.
 * @buf: The buffer to copy the user data from.
 * @nbytes: The number of bytes to get.
 * @ppos: The position in the file to start reading from.
 *
 * This routine get the debugfs idiag command struct from user space and then
 * perform the syntax check for port queue read (dump) or write (set) command
 * accordingly. In the case of port queue read command, it sets up the command
 * in the idiag command struct for the following debugfs read operation.
In
 * the case of port queue write operation, it executes the write operation
 * into the port queue entry accordingly.
 *
 * It returns the @nbytes passed in from debugfs user space when successful.
 * In case of error conditions, it returns proper error code back to the user
 * space.
 **/
static ssize_t
lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
			size_t nbytes, loff_t *ppos)
{
	struct lpfc_debug *debug = file->private_data;
	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
	uint32_t qidx, quetp, queid, index, count, offset, value;
	uint32_t *pentry;
	struct lpfc_queue *pque;
	int rc;

	/* This is a user write operation */
	debug->op = LPFC_IDIAG_OP_WR;

	/* rc holds the number of parsed arguments on success */
	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
	if (rc < 0)
		return rc;

	/* Get and sanity check on command fields */
	quetp = idiag.cmd.data[0];
	queid = idiag.cmd.data[1];
	index = idiag.cmd.data[2];
	count = idiag.cmd.data[3];
	offset = idiag.cmd.data[4];
	value = idiag.cmd.data[5];

	/* Sanity check on command line arguments */
	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR ||
	    idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST ||
	    idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) {
		if (rc != LPFC_QUE_ACC_WR_CMD_ARG)
			goto error_out;
		if (count != 1)
			goto error_out;
	} else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
		if (rc != LPFC_QUE_ACC_RD_CMD_ARG)
			goto error_out;
	} else
		goto error_out;

	/* Locate the queue matching (quetp, queid); on a match, validate
	 * (index, count) and remember the queue in idiag.ptr_private for
	 * the subsequent read or for the write below.
	 */
	switch (quetp) {
	case LPFC_IDIAG_EQ:
		/* Slow-path event queue */
		if (phba->sli4_hba.sp_eq->queue_id == queid) {
			/* Sanity check */
			rc = lpfc_idiag_que_param_check(
					phba->sli4_hba.sp_eq, index, count);
			if (rc)
				goto error_out;
			idiag.ptr_private = phba->sli4_hba.sp_eq;
			goto pass_check;
		}
		/* Fast-path event queue */
		for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
			if (phba->sli4_hba.fp_eq[qidx]->queue_id == queid) {
				/* Sanity check */
				rc = lpfc_idiag_que_param_check(
						phba->sli4_hba.fp_eq[qidx],
						index, count);
				if (rc)
					goto error_out;
				idiag.ptr_private = phba->sli4_hba.fp_eq[qidx];
				goto pass_check;
			}
		}
		goto error_out;
		break;
	case LPFC_IDIAG_CQ:
		/* MBX complete queue */
		if (phba->sli4_hba.mbx_cq->queue_id == queid) {
			/* Sanity check */
			rc = lpfc_idiag_que_param_check(
					phba->sli4_hba.mbx_cq, index, count);
			if (rc)
				goto error_out;
			idiag.ptr_private = phba->sli4_hba.mbx_cq;
			goto pass_check;
		}
		/* ELS complete queue */
		if (phba->sli4_hba.els_cq->queue_id == queid) {
			/* Sanity check */
			rc = lpfc_idiag_que_param_check(
					phba->sli4_hba.els_cq, index, count);
			if (rc)
				goto error_out;
			idiag.ptr_private = phba->sli4_hba.els_cq;
			goto pass_check;
		}
		/* FCP complete queue */
		/* NOTE(review): this do/while dereferences fcp_cq[0] even
		 * when cfg_fcp_eq_count is 0 — confirm SLI4 setup always
		 * allocates at least one FCP CQ before this can run.
		 */
		qidx = 0;
		do {
			if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
				/* Sanity check */
				rc = lpfc_idiag_que_param_check(
					phba->sli4_hba.fcp_cq[qidx],
					index, count);
				if (rc)
					goto error_out;
				idiag.ptr_private =
					phba->sli4_hba.fcp_cq[qidx];
				goto pass_check;
			}
		} while (++qidx < phba->cfg_fcp_eq_count);
		goto error_out;
		break;
	case LPFC_IDIAG_MQ:
		/* MBX work queue */
		if (phba->sli4_hba.mbx_wq->queue_id == queid) {
			/* Sanity check */
			rc = lpfc_idiag_que_param_check(
					phba->sli4_hba.mbx_wq, index, count);
			if (rc)
				goto error_out;
			idiag.ptr_private = phba->sli4_hba.mbx_wq;
			goto pass_check;
		}
		break;
	case LPFC_IDIAG_WQ:
		/* ELS work queue */
		if (phba->sli4_hba.els_wq->queue_id == queid) {
			/* Sanity check */
			rc = lpfc_idiag_que_param_check(
					phba->sli4_hba.els_wq, index, count);
			if (rc)
				goto error_out;
			idiag.ptr_private = phba->sli4_hba.els_wq;
			goto pass_check;
		}
		/* FCP work queue */
		for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
			if (phba->sli4_hba.fcp_wq[qidx]->queue_id == queid) {
				/* Sanity check */
				rc = lpfc_idiag_que_param_check(
						phba->sli4_hba.fcp_wq[qidx],
						index, count);
				if (rc)
					goto error_out;
				idiag.ptr_private =
					phba->sli4_hba.fcp_wq[qidx];
				goto pass_check;
			}
		}
		goto error_out;
		break;
	case LPFC_IDIAG_RQ:
		/* HDR queue */
		if (phba->sli4_hba.hdr_rq->queue_id == queid) {
			/* Sanity check */
			rc = lpfc_idiag_que_param_check(
					phba->sli4_hba.hdr_rq, index, count);
			if (rc)
				goto error_out;
			idiag.ptr_private = phba->sli4_hba.hdr_rq;
			goto pass_check;
		}
		/* DAT queue */
		if (phba->sli4_hba.dat_rq->queue_id == queid) {
			/* Sanity check */
			rc = lpfc_idiag_que_param_check(
					phba->sli4_hba.dat_rq, index, count);
			if (rc)
				goto error_out;
			idiag.ptr_private = phba->sli4_hba.dat_rq;
			goto pass_check;
		}
		goto error_out;
		break;
	default:
		goto error_out;
		break;
	}

pass_check:
	/* For a browse read, remember the starting entry index */
	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
		if (count == LPFC_QUE_ACC_BROWSE)
			idiag.offset.last_rd = index;
	}

	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR ||
	    idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST ||
	    idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) {
		/* Additional sanity checks on write operation */
		pque = (struct lpfc_queue *)idiag.ptr_private;
		if (offset > pque->entry_size/sizeof(uint32_t) - 1)
			goto error_out;
		/* Write/set/clear one 32-bit word within the entry */
		pentry = pque->qe[index].address;
		pentry += offset;
		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR)
			*pentry = value;
		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST)
			*pentry |= value;
		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL)
			*pentry &= ~value;
	}
	return nbytes;

error_out:
	/* Clean out command structure on command error out */
	memset(&idiag, 0, sizeof(idiag));
	return -EINVAL;
}

/**
 * lpfc_idiag_drbacc_read_reg - idiag debugfs read a doorbell register
 * @phba: The pointer to hba structure.
 * @pbuffer: The pointer to the buffer to copy the data to.
 * @len: The length of bytes to copied.
 * @drbregid: The id to doorbell registers.
 *
 * Description:
 * This routine reads a doorbell register and copies its content to the
 * user buffer pointed to by @pbuffer.
 *
 * Returns:
 * This function returns the amount of data that was copied into @pbuffer.
**/
static int
lpfc_idiag_drbacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
			   int len, uint32_t drbregid)
{
	if (!pbuffer)
		return 0;

	/*
	 * scnprintf() instead of snprintf(): it returns the number of
	 * bytes actually written, so the accumulated len can never
	 * exceed the buffer and underflow the "size - len" argument of
	 * a subsequent call (e.g. when dumping all registers in a row).
	 */
	switch (drbregid) {
	case LPFC_DRB_EQCQ:
		len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
				 "EQCQ-DRB-REG: 0x%08x\n",
				 readl(phba->sli4_hba.EQCQDBregaddr));
		break;
	case LPFC_DRB_MQ:
		len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
				 "MQ-DRB-REG:   0x%08x\n",
				 readl(phba->sli4_hba.MQDBregaddr));
		break;
	case LPFC_DRB_WQ:
		len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
				 "WQ-DRB-REG:   0x%08x\n",
				 readl(phba->sli4_hba.WQDBregaddr));
		break;
	case LPFC_DRB_RQ:
		len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
				 "RQ-DRB-REG:   0x%08x\n",
				 readl(phba->sli4_hba.RQDBregaddr));
		break;
	default:
		/* Unknown register id: emit nothing */
		break;
	}

	return len;
}

/**
 * lpfc_idiag_drbacc_read - idiag debugfs read port doorbell
 * @file: The file pointer to read from.
 * @buf: The buffer to copy the data to.
 * @nbytes: The number of bytes to read.
 * @ppos: The position in the file to start reading from.
 *
 * Description:
 * This routine reads data from the @phba device doorbell register according
 * to the idiag command, and copies to user @buf. Depending on the doorbell
 * register read command setup, it does either a single doorbell register
 * read or dump all doorbell registers.
 *
 * Returns:
 * This function returns the amount of data that was read (this could be less
 * than @nbytes if the end of the file was reached) or a negative error value.
**/
static ssize_t
lpfc_idiag_drbacc_read(struct file *file, char __user *buf, size_t nbytes,
		       loff_t *ppos)
{
	struct lpfc_debug *dbg = file->private_data;
	struct lpfc_hba *phba = (struct lpfc_hba *)dbg->i_private;
	uint32_t drb_reg_id, regid;
	char *outbuf;
	int len = 0;

	/* This is a user read operation */
	dbg->op = LPFC_IDIAG_OP_RD;

	/* Lazily allocate the dump buffer; it lives until release. */
	if (!dbg->buffer)
		dbg->buffer = kmalloc(LPFC_DRB_ACC_BUF_SIZE, GFP_KERNEL);
	if (!dbg->buffer)
		return 0;
	outbuf = dbg->buffer;

	if (*ppos)
		return 0;

	/* Only meaningful after a doorbell-read command was set up. */
	if (idiag.cmd.opcode != LPFC_IDIAG_CMD_DRBACC_RD)
		return 0;
	drb_reg_id = idiag.cmd.data[0];

	/* Dump either every doorbell register or just the requested one. */
	if (drb_reg_id == LPFC_DRB_ACC_ALL)
		for (regid = 1; regid <= LPFC_DRB_MAX; regid++)
			len = lpfc_idiag_drbacc_read_reg(phba,
							 outbuf, len, regid);
	else
		len = lpfc_idiag_drbacc_read_reg(phba,
						 outbuf, len, drb_reg_id);

	return simple_read_from_buffer(buf, nbytes, ppos, outbuf, len);
}

/**
 * lpfc_idiag_drbacc_write - Syntax check and set up idiag drbacc commands
 * @file: The file pointer to read from.
 * @buf: The buffer to copy the user data from.
 * @nbytes: The number of bytes to get.
 * @ppos: The position in the file to start reading from.
 *
 * This routine get the debugfs idiag command struct from user space and then
 * perform the syntax check for port doorbell register read (dump) or write
 * (set) command accordingly. In the case of port queue read command, it sets
 * up the command in the idiag command struct for the following debugfs read
 * operation. In the case of port doorbell register write operation, it
 * executes the write operation into the port doorbell register accordingly.
 *
 * It returns the @nbytges passing in from debugfs user space when successful.
 * In case of error conditions, it returns proper error code back to the user
 * space.
**/
static ssize_t
lpfc_idiag_drbacc_write(struct file *file, const char __user *buf,
			size_t nbytes, loff_t *ppos)
{
	struct lpfc_debug *debug = file->private_data;
	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
	uint32_t drb_reg_id, value, reg_val;
	void __iomem *drb_reg;
	int rc;

	/* This is a user write operation */
	debug->op = LPFC_IDIAG_OP_WR;

	/* rc holds the number of parsed arguments on success */
	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
	if (rc < 0)
		return rc;

	/* Sanity check on command line arguments */
	drb_reg_id = idiag.cmd.data[0];
	value = idiag.cmd.data[1];

	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR ||
	    idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST ||
	    idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) {
		if (rc != LPFC_DRB_ACC_WR_CMD_ARG)
			goto error_out;
		if (drb_reg_id > LPFC_DRB_MAX)
			goto error_out;
	} else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD) {
		if (rc != LPFC_DRB_ACC_RD_CMD_ARG)
			goto error_out;
		/* For reads, LPFC_DRB_ACC_ALL additionally selects a
		 * dump of every doorbell register.
		 */
		if ((drb_reg_id > LPFC_DRB_MAX) &&
		    (drb_reg_id != LPFC_DRB_ACC_ALL))
			goto error_out;
	} else
		goto error_out;

	/* Perform the write access operation */
	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR ||
	    idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST ||
	    idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) {
		/* Map the register id to its MMIO address */
		switch (drb_reg_id) {
		case LPFC_DRB_EQCQ:
			drb_reg = phba->sli4_hba.EQCQDBregaddr;
			break;
		case LPFC_DRB_MQ:
			drb_reg = phba->sli4_hba.MQDBregaddr;
			break;
		case LPFC_DRB_WQ:
			drb_reg = phba->sli4_hba.WQDBregaddr;
			break;
		case LPFC_DRB_RQ:
			drb_reg = phba->sli4_hba.RQDBregaddr;
			break;
		default:
			goto error_out;
		}

		/* WR writes the value directly; ST/CL read-modify-write
		 * to set or clear the bits given in value.
		 */
		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR)
			reg_val = value;
		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST) {
			reg_val = readl(drb_reg);
			reg_val |= value;
		}
		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) {
			reg_val = readl(drb_reg);
			reg_val &= ~value;
		}
		writel(reg_val, drb_reg);
		readl(drb_reg); /* flush */
	}
	return nbytes;

error_out:
	/* Clean out command structure on command error out */
	memset(&idiag, 0, sizeof(idiag));
	return -EINVAL;
}

/*
 * File operation tables for the lpfc debugfs files.  The #undef lines
 * guard against like-named macros from the tracing infrastructure.
 */
#undef lpfc_debugfs_op_disc_trc
static const struct file_operations lpfc_debugfs_op_disc_trc = {
	.owner =        THIS_MODULE,
	.open =         lpfc_debugfs_disc_trc_open,
	.llseek =       lpfc_debugfs_lseek,
	.read =         lpfc_debugfs_read,
	.release =      lpfc_debugfs_release,
};

#undef lpfc_debugfs_op_nodelist
static const struct file_operations lpfc_debugfs_op_nodelist = {
	.owner =        THIS_MODULE,
	.open =         lpfc_debugfs_nodelist_open,
	.llseek =       lpfc_debugfs_lseek,
	.read =         lpfc_debugfs_read,
	.release =      lpfc_debugfs_release,
};

#undef lpfc_debugfs_op_hbqinfo
static const struct file_operations lpfc_debugfs_op_hbqinfo = {
	.owner =        THIS_MODULE,
	.open =         lpfc_debugfs_hbqinfo_open,
	.llseek =       lpfc_debugfs_lseek,
	.read =         lpfc_debugfs_read,
	.release =      lpfc_debugfs_release,
};

#undef lpfc_debugfs_op_dumpHBASlim
static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
	.owner =        THIS_MODULE,
	.open =         lpfc_debugfs_dumpHBASlim_open,
	.llseek =       lpfc_debugfs_lseek,
	.read =         lpfc_debugfs_read,
	.release =      lpfc_debugfs_release,
};

#undef lpfc_debugfs_op_dumpHostSlim
static const struct file_operations lpfc_debugfs_op_dumpHostSlim = {
	.owner =        THIS_MODULE,
	.open =         lpfc_debugfs_dumpHostSlim_open,
	.llseek =       lpfc_debugfs_lseek,
	.read =         lpfc_debugfs_read,
	.release =      lpfc_debugfs_release,
};

/* dumpData/dumpDif share a writable buffer, hence the dedicated
 * write and release handlers.
 */
#undef lpfc_debugfs_op_dumpData
static const struct file_operations lpfc_debugfs_op_dumpData = {
	.owner =        THIS_MODULE,
	.open =         lpfc_debugfs_dumpData_open,
	.llseek =       lpfc_debugfs_lseek,
	.read =         lpfc_debugfs_read,
	.write =	lpfc_debugfs_dumpDataDif_write,
	.release =      lpfc_debugfs_dumpDataDif_release,
};

#undef lpfc_debugfs_op_dumpDif
static const struct file_operations lpfc_debugfs_op_dumpDif = {
	.owner =        THIS_MODULE,
	.open =         lpfc_debugfs_dumpDif_open,
	.llseek =       lpfc_debugfs_lseek,
	.read =         lpfc_debugfs_read,
	.write =	lpfc_debugfs_dumpDataDif_write,
	.release =      lpfc_debugfs_dumpDataDif_release,
};

#undef lpfc_debugfs_op_slow_ring_trc
static const struct file_operations lpfc_debugfs_op_slow_ring_trc = {
	.owner =        THIS_MODULE,
	.open =         lpfc_debugfs_slow_ring_trc_open,
	.llseek =       lpfc_debugfs_lseek,
	.read =         lpfc_debugfs_read,
	.release =      lpfc_debugfs_release,
};

/* Root of the lpfc debugfs tree and number of HBAs registered under it */
static struct dentry *lpfc_debugfs_root = NULL;
static atomic_t lpfc_debugfs_hba_count;

/*
 * File operations for the iDiag debugfs
 */
#undef lpfc_idiag_op_pciCfg
static const struct file_operations lpfc_idiag_op_pciCfg = {
	.owner =        THIS_MODULE,
	.open =         lpfc_idiag_open,
	.llseek =       lpfc_debugfs_lseek,
	.read =         lpfc_idiag_pcicfg_read,
	.write =        lpfc_idiag_pcicfg_write,
	.release =      lpfc_idiag_cmd_release,
};

#undef lpfc_idiag_op_queInfo
static const struct file_operations lpfc_idiag_op_queInfo = {
	.owner =        THIS_MODULE,
	.open =         lpfc_idiag_open,
	.read =         lpfc_idiag_queinfo_read,
	.release =      lpfc_idiag_release,
};

#undef lpfc_idiag_op_queacc
static const struct file_operations lpfc_idiag_op_queAcc = {
	.owner =        THIS_MODULE,
	.open =         lpfc_idiag_open,
	.llseek =       lpfc_debugfs_lseek,
	.read =         lpfc_idiag_queacc_read,
	.write =        lpfc_idiag_queacc_write,
	.release =      lpfc_idiag_cmd_release,
};

#undef lpfc_idiag_op_drbacc
static const struct file_operations lpfc_idiag_op_drbAcc = {
	.owner =        THIS_MODULE,
	.open =         lpfc_idiag_open,
	.llseek =       lpfc_debugfs_lseek,
	.read =         lpfc_idiag_drbacc_read,
	.write =        lpfc_idiag_drbacc_write,
	.release =      lpfc_idiag_cmd_release,
};

#endif /* NOTE(review): matching #ifdef (presumably CONFIG_SCSI_LPFC_DEBUG_FS) opens before this view — confirm */

/**
 * lpfc_debugfs_initialize - Initialize debugfs for a vport
 * @vport: The vport pointer to initialize.
 *
 * Description:
 * When Debugfs is configured this routine sets up the lpfc debugfs file system.
 * If not already created, this routine will create the lpfc directory, and
 * lpfcX directory (for this HBA), and vportX directory for this vport. It will
 * also create each file used to access lpfc specific debugfs information.
**/ inline void lpfc_debugfs_initialize(struct lpfc_vport *vport) { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS struct lpfc_hba *phba = vport->phba; char name[64]; uint32_t num, i; if (!lpfc_debugfs_enable) return; /* Setup lpfc root directory */ if (!lpfc_debugfs_root) { lpfc_debugfs_root = debugfs_create_dir("lpfc", NULL); atomic_set(&lpfc_debugfs_hba_count, 0); if (!lpfc_debugfs_root) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0408 Cannot create debugfs root\n"); goto debug_failed; } } if (!lpfc_debugfs_start_time) lpfc_debugfs_start_time = jiffies; /* Setup funcX directory for specific HBA PCI function */ snprintf(name, sizeof(name), "fn%d", phba->brd_no); if (!phba->hba_debugfs_root) { phba->hba_debugfs_root = debugfs_create_dir(name, lpfc_debugfs_root); if (!phba->hba_debugfs_root) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0412 Cannot create debugfs hba\n"); goto debug_failed; } atomic_inc(&lpfc_debugfs_hba_count); atomic_set(&phba->debugfs_vport_count, 0); /* Setup hbqinfo */ snprintf(name, sizeof(name), "hbqinfo"); phba->debug_hbqinfo = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_hbqinfo); if (!phba->debug_hbqinfo) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0411 Cannot create debugfs hbqinfo\n"); goto debug_failed; } /* Setup dumpHBASlim */ if (phba->sli_rev < LPFC_SLI_REV4) { snprintf(name, sizeof(name), "dumpHBASlim"); phba->debug_dumpHBASlim = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dumpHBASlim); if (!phba->debug_dumpHBASlim) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0413 Cannot create debugfs " "dumpHBASlim\n"); goto debug_failed; } } else phba->debug_dumpHBASlim = NULL; /* Setup dumpHostSlim */ if (phba->sli_rev < LPFC_SLI_REV4) { snprintf(name, sizeof(name), "dumpHostSlim"); phba->debug_dumpHostSlim = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dumpHostSlim); if 
(!phba->debug_dumpHostSlim) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0414 Cannot create debugfs " "dumpHostSlim\n"); goto debug_failed; } } else phba->debug_dumpHBASlim = NULL; /* Setup dumpData */ snprintf(name, sizeof(name), "dumpData"); phba->debug_dumpData = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dumpData); if (!phba->debug_dumpData) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0800 Cannot create debugfs dumpData\n"); goto debug_failed; } /* Setup dumpDif */ snprintf(name, sizeof(name), "dumpDif"); phba->debug_dumpDif = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dumpDif); if (!phba->debug_dumpDif) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0801 Cannot create debugfs dumpDif\n"); goto debug_failed; } /* Setup slow ring trace */ if (lpfc_debugfs_max_slow_ring_trc) { num = lpfc_debugfs_max_slow_ring_trc - 1; if (num & lpfc_debugfs_max_slow_ring_trc) { /* Change to be a power of 2 */ num = lpfc_debugfs_max_slow_ring_trc; i = 0; while (num > 1) { num = num >> 1; i++; } lpfc_debugfs_max_slow_ring_trc = (1 << i); printk(KERN_ERR "lpfc_debugfs_max_disc_trc changed to " "%d\n", lpfc_debugfs_max_disc_trc); } } snprintf(name, sizeof(name), "slow_ring_trace"); phba->debug_slow_ring_trc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_slow_ring_trc); if (!phba->debug_slow_ring_trc) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0415 Cannot create debugfs " "slow_ring_trace\n"); goto debug_failed; } if (!phba->slow_ring_trc) { phba->slow_ring_trc = kmalloc( (sizeof(struct lpfc_debugfs_trc) * lpfc_debugfs_max_slow_ring_trc), GFP_KERNEL); if (!phba->slow_ring_trc) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0416 Cannot create debugfs " "slow_ring buffer\n"); goto debug_failed; } atomic_set(&phba->slow_ring_trc_cnt, 0); memset(phba->slow_ring_trc, 0, (sizeof(struct lpfc_debugfs_trc) * 
lpfc_debugfs_max_slow_ring_trc)); } } snprintf(name, sizeof(name), "vport%d", vport->vpi); if (!vport->vport_debugfs_root) { vport->vport_debugfs_root = debugfs_create_dir(name, phba->hba_debugfs_root); if (!vport->vport_debugfs_root) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0417 Can't create debugfs\n"); goto debug_failed; } atomic_inc(&phba->debugfs_vport_count); } if (lpfc_debugfs_max_disc_trc) { num = lpfc_debugfs_max_disc_trc - 1; if (num & lpfc_debugfs_max_disc_trc) { /* Change to be a power of 2 */ num = lpfc_debugfs_max_disc_trc; i = 0; while (num > 1) { num = num >> 1; i++; } lpfc_debugfs_max_disc_trc = (1 << i); printk(KERN_ERR "lpfc_debugfs_max_disc_trc changed to %d\n", lpfc_debugfs_max_disc_trc); } } vport->disc_trc = kzalloc( (sizeof(struct lpfc_debugfs_trc) * lpfc_debugfs_max_disc_trc), GFP_KERNEL); if (!vport->disc_trc) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0418 Cannot create debugfs disc trace " "buffer\n"); goto debug_failed; } atomic_set(&vport->disc_trc_cnt, 0); snprintf(name, sizeof(name), "discovery_trace"); vport->debug_disc_trc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_disc_trc); if (!vport->debug_disc_trc) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0419 Cannot create debugfs " "discovery_trace\n"); goto debug_failed; } snprintf(name, sizeof(name), "nodelist"); vport->debug_nodelist = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_nodelist); if (!vport->debug_nodelist) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0409 Can't create debugfs nodelist\n"); goto debug_failed; } /* * iDiag debugfs root entry points for SLI4 device only */ if (phba->sli_rev < LPFC_SLI_REV4) goto debug_failed; snprintf(name, sizeof(name), "iDiag"); if (!phba->idiag_root) { phba->idiag_root = debugfs_create_dir(name, phba->hba_debugfs_root); if (!phba->idiag_root) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "2922 Can't create 
idiag debugfs\n"); goto debug_failed; } /* Initialize iDiag data structure */ memset(&idiag, 0, sizeof(idiag)); } /* iDiag read PCI config space */ snprintf(name, sizeof(name), "pciCfg"); if (!phba->idiag_pci_cfg) { phba->idiag_pci_cfg = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_pciCfg); if (!phba->idiag_pci_cfg) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "2923 Can't create idiag debugfs\n"); goto debug_failed; } idiag.offset.last_rd = 0; } /* iDiag get PCI function queue information */ snprintf(name, sizeof(name), "queInfo"); if (!phba->idiag_que_info) { phba->idiag_que_info = debugfs_create_file(name, S_IFREG|S_IRUGO, phba->idiag_root, phba, &lpfc_idiag_op_queInfo); if (!phba->idiag_que_info) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "2924 Can't create idiag debugfs\n"); goto debug_failed; } } /* iDiag access PCI function queue */ snprintf(name, sizeof(name), "queAcc"); if (!phba->idiag_que_acc) { phba->idiag_que_acc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_queAcc); if (!phba->idiag_que_acc) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "2926 Can't create idiag debugfs\n"); goto debug_failed; } } /* iDiag access PCI function doorbell registers */ snprintf(name, sizeof(name), "drbAcc"); if (!phba->idiag_drb_acc) { phba->idiag_drb_acc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_drbAcc); if (!phba->idiag_drb_acc) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "2927 Can't create idiag debugfs\n"); goto debug_failed; } } debug_failed: return; #endif } /** * lpfc_debugfs_terminate - Tear down debugfs infrastructure for this vport * @vport: The vport pointer to remove from debugfs. * * Description: * When Debugfs is configured this routine removes debugfs file system elements * that are specific to this vport. 
It also checks to see if there are any * users left for the debugfs directories associated with the HBA and driver. If * this is the last user of the HBA directory or driver directory then it will * remove those from the debugfs infrastructure as well. **/ inline void lpfc_debugfs_terminate(struct lpfc_vport *vport) { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS struct lpfc_hba *phba = vport->phba; if (vport->disc_trc) { kfree(vport->disc_trc); vport->disc_trc = NULL; } if (vport->debug_disc_trc) { debugfs_remove(vport->debug_disc_trc); /* discovery_trace */ vport->debug_disc_trc = NULL; } if (vport->debug_nodelist) { debugfs_remove(vport->debug_nodelist); /* nodelist */ vport->debug_nodelist = NULL; } if (vport->vport_debugfs_root) { debugfs_remove(vport->vport_debugfs_root); /* vportX */ vport->vport_debugfs_root = NULL; atomic_dec(&phba->debugfs_vport_count); } if (atomic_read(&phba->debugfs_vport_count) == 0) { if (phba->debug_hbqinfo) { debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */ phba->debug_hbqinfo = NULL; } if (phba->debug_dumpHBASlim) { debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */ phba->debug_dumpHBASlim = NULL; } if (phba->debug_dumpHostSlim) { debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */ phba->debug_dumpHostSlim = NULL; } if (phba->debug_dumpData) { debugfs_remove(phba->debug_dumpData); /* dumpData */ phba->debug_dumpData = NULL; } if (phba->debug_dumpDif) { debugfs_remove(phba->debug_dumpDif); /* dumpDif */ phba->debug_dumpDif = NULL; } if (phba->slow_ring_trc) { kfree(phba->slow_ring_trc); phba->slow_ring_trc = NULL; } if (phba->debug_slow_ring_trc) { /* slow_ring_trace */ debugfs_remove(phba->debug_slow_ring_trc); phba->debug_slow_ring_trc = NULL; } /* * iDiag release */ if (phba->sli_rev == LPFC_SLI_REV4) { if (phba->idiag_drb_acc) { /* iDiag drbAcc */ debugfs_remove(phba->idiag_drb_acc); phba->idiag_drb_acc = NULL; } if (phba->idiag_que_acc) { /* iDiag queAcc */ debugfs_remove(phba->idiag_que_acc); phba->idiag_que_acc = NULL; } if 
(phba->idiag_que_info) { /* iDiag queInfo */ debugfs_remove(phba->idiag_que_info); phba->idiag_que_info = NULL; } if (phba->idiag_pci_cfg) { /* iDiag pciCfg */ debugfs_remove(phba->idiag_pci_cfg); phba->idiag_pci_cfg = NULL; } /* Finally remove the iDiag debugfs root */ if (phba->idiag_root) { /* iDiag root */ debugfs_remove(phba->idiag_root); phba->idiag_root = NULL; } } if (phba->hba_debugfs_root) { debugfs_remove(phba->hba_debugfs_root); /* fnX */ phba->hba_debugfs_root = NULL; atomic_dec(&lpfc_debugfs_hba_count); } if (atomic_read(&lpfc_debugfs_hba_count) == 0) { debugfs_remove(lpfc_debugfs_root); /* lpfc */ lpfc_debugfs_root = NULL; } } #endif return; }
gpl-2.0
IxLabs/lguest64
drivers/acpi/tables.c
2375
10454
/* * acpi_tables.c - ACPI Boot-Time Table Parsing * * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/smp.h> #include <linux/string.h> #include <linux/types.h> #include <linux/irq.h> #include <linux/errno.h> #include <linux/acpi.h> #include <linux/bootmem.h> #define PREFIX "ACPI: " #define ACPI_MAX_TABLES 128 static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" }; static char *mps_inti_flags_trigger[] = { "dfl", "edge", "res", "level" }; static struct acpi_table_desc initial_tables[ACPI_MAX_TABLES] __initdata; static int acpi_apic_instance __initdata; void acpi_table_print_madt_entry(struct acpi_subtable_header *header) { if (!header) return; switch (header->type) { case ACPI_MADT_TYPE_LOCAL_APIC: { struct acpi_madt_local_apic *p = (struct acpi_madt_local_apic *)header; printk(KERN_INFO PREFIX "LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n", p->processor_id, p->id, (p->lapic_flags & ACPI_MADT_ENABLED) ? 
"enabled" : "disabled"); } break; case ACPI_MADT_TYPE_LOCAL_X2APIC: { struct acpi_madt_local_x2apic *p = (struct acpi_madt_local_x2apic *)header; printk(KERN_INFO PREFIX "X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n", p->local_apic_id, p->uid, (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled"); } break; case ACPI_MADT_TYPE_IO_APIC: { struct acpi_madt_io_apic *p = (struct acpi_madt_io_apic *)header; printk(KERN_INFO PREFIX "IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n", p->id, p->address, p->global_irq_base); } break; case ACPI_MADT_TYPE_INTERRUPT_OVERRIDE: { struct acpi_madt_interrupt_override *p = (struct acpi_madt_interrupt_override *)header; printk(KERN_INFO PREFIX "INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n", p->bus, p->source_irq, p->global_irq, mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK], mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2]); if (p->inti_flags & ~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK)) printk(KERN_INFO PREFIX "INT_SRC_OVR unexpected reserved flags: 0x%x\n", p->inti_flags & ~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK)); } break; case ACPI_MADT_TYPE_NMI_SOURCE: { struct acpi_madt_nmi_source *p = (struct acpi_madt_nmi_source *)header; printk(KERN_INFO PREFIX "NMI_SRC (%s %s global_irq %d)\n", mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK], mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2], p->global_irq); } break; case ACPI_MADT_TYPE_LOCAL_APIC_NMI: { struct acpi_madt_local_apic_nmi *p = (struct acpi_madt_local_apic_nmi *)header; printk(KERN_INFO PREFIX "LAPIC_NMI (acpi_id[0x%02x] %s %s lint[0x%x])\n", p->processor_id, mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK ], mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2], p->lint); } break; case ACPI_MADT_TYPE_LOCAL_X2APIC_NMI: { u16 polarity, trigger; struct acpi_madt_local_x2apic_nmi *p = (struct acpi_madt_local_x2apic_nmi *)header; polarity 
= p->inti_flags & ACPI_MADT_POLARITY_MASK; trigger = (p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2; printk(KERN_INFO PREFIX "X2APIC_NMI (uid[0x%02x] %s %s lint[0x%x])\n", p->uid, mps_inti_flags_polarity[polarity], mps_inti_flags_trigger[trigger], p->lint); } break; case ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE: { struct acpi_madt_local_apic_override *p = (struct acpi_madt_local_apic_override *)header; printk(KERN_INFO PREFIX "LAPIC_ADDR_OVR (address[%p])\n", (void *)(unsigned long)p->address); } break; case ACPI_MADT_TYPE_IO_SAPIC: { struct acpi_madt_io_sapic *p = (struct acpi_madt_io_sapic *)header; printk(KERN_INFO PREFIX "IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n", p->id, (void *)(unsigned long)p->address, p->global_irq_base); } break; case ACPI_MADT_TYPE_LOCAL_SAPIC: { struct acpi_madt_local_sapic *p = (struct acpi_madt_local_sapic *)header; printk(KERN_INFO PREFIX "LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n", p->processor_id, p->id, p->eid, (p->lapic_flags & ACPI_MADT_ENABLED) ? 
"enabled" : "disabled"); } break; case ACPI_MADT_TYPE_INTERRUPT_SOURCE: { struct acpi_madt_interrupt_source *p = (struct acpi_madt_interrupt_source *)header; printk(KERN_INFO PREFIX "PLAT_INT_SRC (%s %s type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n", mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK], mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2], p->type, p->id, p->eid, p->io_sapic_vector, p->global_irq); } break; default: printk(KERN_WARNING PREFIX "Found unsupported MADT entry (type = 0x%x)\n", header->type); break; } } int __init acpi_table_parse_entries(char *id, unsigned long table_size, int entry_id, acpi_tbl_entry_handler handler, unsigned int max_entries) { struct acpi_table_header *table_header = NULL; struct acpi_subtable_header *entry; unsigned int count = 0; unsigned long table_end; acpi_size tbl_size; if (acpi_disabled) return -ENODEV; if (!handler) return -EINVAL; if (strncmp(id, ACPI_SIG_MADT, 4) == 0) acpi_get_table_with_size(id, acpi_apic_instance, &table_header, &tbl_size); else acpi_get_table_with_size(id, 0, &table_header, &tbl_size); if (!table_header) { printk(KERN_WARNING PREFIX "%4.4s not present\n", id); return -ENODEV; } table_end = (unsigned long)table_header + table_header->length; /* Parse all entries looking for a match. */ entry = (struct acpi_subtable_header *) ((unsigned long)table_header + table_size); while (((unsigned long)entry) + sizeof(struct acpi_subtable_header) < table_end) { if (entry->type == entry_id && (!max_entries || count++ < max_entries)) if (handler(entry, table_end)) goto err; /* * If entry->length is 0, break from this loop to avoid * infinite loop. 
*/ if (entry->length == 0) { pr_err(PREFIX "[%4.4s:0x%02x] Invalid zero length\n", id, entry_id); goto err; } entry = (struct acpi_subtable_header *) ((unsigned long)entry + entry->length); } if (max_entries && count > max_entries) { printk(KERN_WARNING PREFIX "[%4.4s:0x%02x] ignored %i entries of " "%i found\n", id, entry_id, count - max_entries, count); } early_acpi_os_unmap_memory((char *)table_header, tbl_size); return count; err: early_acpi_os_unmap_memory((char *)table_header, tbl_size); return -EINVAL; } int __init acpi_table_parse_madt(enum acpi_madt_type id, acpi_tbl_entry_handler handler, unsigned int max_entries) { return acpi_table_parse_entries(ACPI_SIG_MADT, sizeof(struct acpi_table_madt), id, handler, max_entries); } /** * acpi_table_parse - find table with @id, run @handler on it * * @id: table id to find * @handler: handler to run * * Scan the ACPI System Descriptor Table (STD) for a table matching @id, * run @handler on it. Return 0 if table found, return on if not. */ int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler) { struct acpi_table_header *table = NULL; acpi_size tbl_size; if (acpi_disabled) return -ENODEV; if (!handler) return -EINVAL; if (strncmp(id, ACPI_SIG_MADT, 4) == 0) acpi_get_table_with_size(id, acpi_apic_instance, &table, &tbl_size); else acpi_get_table_with_size(id, 0, &table, &tbl_size); if (table) { handler(table); early_acpi_os_unmap_memory(table, tbl_size); return 0; } else return 1; } /* * The BIOS is supposed to supply a single APIC/MADT, * but some report two. Provide a knob to use either. * (don't you wish instance 0 and 1 were not the same?) 
*/ static void __init check_multiple_madt(void) { struct acpi_table_header *table = NULL; acpi_size tbl_size; acpi_get_table_with_size(ACPI_SIG_MADT, 2, &table, &tbl_size); if (table) { printk(KERN_WARNING PREFIX "BIOS bug: multiple APIC/MADT found," " using %d\n", acpi_apic_instance); printk(KERN_WARNING PREFIX "If \"acpi_apic_instance=%d\" works better, " "notify linux-acpi@vger.kernel.org\n", acpi_apic_instance ? 0 : 2); early_acpi_os_unmap_memory(table, tbl_size); } else acpi_apic_instance = 0; return; } /* * acpi_table_init() * * find RSDP, find and checksum SDT/XSDT. * checksum all tables, print SDT/XSDT * * result: sdt_entry[] is initialized */ int __init acpi_table_init(void) { acpi_status status; status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0); if (ACPI_FAILURE(status)) return 1; check_multiple_madt(); return 0; } static int __init acpi_parse_apic_instance(char *str) { if (!str) return -EINVAL; acpi_apic_instance = simple_strtoul(str, NULL, 0); printk(KERN_NOTICE PREFIX "Shall use APIC/MADT table %d\n", acpi_apic_instance); return 0; } early_param("acpi_apic_instance", acpi_parse_apic_instance);
gpl-2.0
hroark13/MS770v10c_krn
arch/cris/arch-v10/drivers/i2c.c
3143
14309
/*!*************************************************************************** *! *! FILE NAME : i2c.c *! *! DESCRIPTION: implements an interface for IIC/I2C, both directly from other *! kernel modules (i2c_writereg/readreg) and from userspace using *! ioctl()'s *! *! (C) Copyright 1999-2007 Axis Communications AB, LUND, SWEDEN *! *!***************************************************************************/ /****************** INCLUDE FILES SECTION ***********************************/ #include <linux/module.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/init.h> #include <asm/etraxi2c.h> #include <asm/system.h> #include <arch/svinto.h> #include <asm/io.h> #include <asm/delay.h> #include <arch/io_interface_mux.h> #include "i2c.h" /****************** I2C DEFINITION SECTION *************************/ #define D(x) #define I2C_MAJOR 123 /* LOCAL/EXPERIMENTAL */ static const char i2c_name[] = "i2c"; #define CLOCK_LOW_TIME 8 #define CLOCK_HIGH_TIME 8 #define START_CONDITION_HOLD_TIME 8 #define STOP_CONDITION_HOLD_TIME 8 #define ENABLE_OUTPUT 0x01 #define ENABLE_INPUT 0x00 #define I2C_CLOCK_HIGH 1 #define I2C_CLOCK_LOW 0 #define I2C_DATA_HIGH 1 #define I2C_DATA_LOW 0 #ifdef CONFIG_ETRAX_I2C_USES_PB_NOT_PB_I2C /* Use PB and not PB_I2C */ #ifndef CONFIG_ETRAX_I2C_DATA_PORT #define CONFIG_ETRAX_I2C_DATA_PORT 0 #endif #ifndef CONFIG_ETRAX_I2C_CLK_PORT #define CONFIG_ETRAX_I2C_CLK_PORT 1 #endif #define SDABIT CONFIG_ETRAX_I2C_DATA_PORT #define SCLBIT CONFIG_ETRAX_I2C_CLK_PORT #define i2c_enable() #define i2c_disable() /* enable or disable output-enable, to select output or input on the i2c bus */ #define i2c_dir_out() \ REG_SHADOW_SET(R_PORT_PB_DIR, port_pb_dir_shadow, SDABIT, 1) #define i2c_dir_in() \ REG_SHADOW_SET(R_PORT_PB_DIR, port_pb_dir_shadow, SDABIT, 0) /* control the i2c clock and data signals */ #define i2c_clk(x) \ REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, 
SCLBIT, x) #define i2c_data(x) \ REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, SDABIT, x) /* read a bit from the i2c interface */ #define i2c_getbit() (((*R_PORT_PB_READ & (1 << SDABIT))) >> SDABIT) #else /* enable or disable the i2c interface */ #define i2c_enable() *R_PORT_PB_I2C = (port_pb_i2c_shadow |= IO_MASK(R_PORT_PB_I2C, i2c_en)) #define i2c_disable() *R_PORT_PB_I2C = (port_pb_i2c_shadow &= ~IO_MASK(R_PORT_PB_I2C, i2c_en)) /* enable or disable output-enable, to select output or input on the i2c bus */ #define i2c_dir_out() \ *R_PORT_PB_I2C = (port_pb_i2c_shadow &= ~IO_MASK(R_PORT_PB_I2C, i2c_oe_)); \ REG_SHADOW_SET(R_PORT_PB_DIR, port_pb_dir_shadow, 0, 1); #define i2c_dir_in() \ *R_PORT_PB_I2C = (port_pb_i2c_shadow |= IO_MASK(R_PORT_PB_I2C, i2c_oe_)); \ REG_SHADOW_SET(R_PORT_PB_DIR, port_pb_dir_shadow, 0, 0); /* control the i2c clock and data signals */ #define i2c_clk(x) \ *R_PORT_PB_I2C = (port_pb_i2c_shadow = (port_pb_i2c_shadow & \ ~IO_MASK(R_PORT_PB_I2C, i2c_clk)) | IO_FIELD(R_PORT_PB_I2C, i2c_clk, (x))); \ REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, 1, x); #define i2c_data(x) \ *R_PORT_PB_I2C = (port_pb_i2c_shadow = (port_pb_i2c_shadow & \ ~IO_MASK(R_PORT_PB_I2C, i2c_d)) | IO_FIELD(R_PORT_PB_I2C, i2c_d, (x))); \ REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, 0, x); /* read a bit from the i2c interface */ #define i2c_getbit() (*R_PORT_PB_READ & 0x1) #endif /* use the kernels delay routine */ #define i2c_delay(usecs) udelay(usecs) static DEFINE_SPINLOCK(i2c_lock); /* Protect directions etc */ /****************** FUNCTION DEFINITION SECTION *************************/ /* generate i2c start condition */ void i2c_start(void) { /* * SCL=1 SDA=1 */ i2c_dir_out(); i2c_delay(CLOCK_HIGH_TIME/6); i2c_data(I2C_DATA_HIGH); i2c_clk(I2C_CLOCK_HIGH); i2c_delay(CLOCK_HIGH_TIME); /* * SCL=1 SDA=0 */ i2c_data(I2C_DATA_LOW); i2c_delay(START_CONDITION_HOLD_TIME); /* * SCL=0 SDA=0 */ i2c_clk(I2C_CLOCK_LOW); i2c_delay(CLOCK_LOW_TIME); } /* generate i2c 
stop condition */ void i2c_stop(void) { i2c_dir_out(); /* * SCL=0 SDA=0 */ i2c_clk(I2C_CLOCK_LOW); i2c_data(I2C_DATA_LOW); i2c_delay(CLOCK_LOW_TIME*2); /* * SCL=1 SDA=0 */ i2c_clk(I2C_CLOCK_HIGH); i2c_delay(CLOCK_HIGH_TIME*2); /* * SCL=1 SDA=1 */ i2c_data(I2C_DATA_HIGH); i2c_delay(STOP_CONDITION_HOLD_TIME); i2c_dir_in(); } /* write a byte to the i2c interface */ void i2c_outbyte(unsigned char x) { int i; i2c_dir_out(); for (i = 0; i < 8; i++) { if (x & 0x80) { i2c_data(I2C_DATA_HIGH); } else { i2c_data(I2C_DATA_LOW); } i2c_delay(CLOCK_LOW_TIME/2); i2c_clk(I2C_CLOCK_HIGH); i2c_delay(CLOCK_HIGH_TIME); i2c_clk(I2C_CLOCK_LOW); i2c_delay(CLOCK_LOW_TIME/2); x <<= 1; } i2c_data(I2C_DATA_LOW); i2c_delay(CLOCK_LOW_TIME/2); /* * enable input */ i2c_dir_in(); } /* read a byte from the i2c interface */ unsigned char i2c_inbyte(void) { unsigned char aBitByte = 0; int i; /* Switch off I2C to get bit */ i2c_disable(); i2c_dir_in(); i2c_delay(CLOCK_HIGH_TIME/2); /* Get bit */ aBitByte |= i2c_getbit(); /* Enable I2C */ i2c_enable(); i2c_delay(CLOCK_LOW_TIME/2); for (i = 1; i < 8; i++) { aBitByte <<= 1; /* Clock pulse */ i2c_clk(I2C_CLOCK_HIGH); i2c_delay(CLOCK_HIGH_TIME); i2c_clk(I2C_CLOCK_LOW); i2c_delay(CLOCK_LOW_TIME); /* Switch off I2C to get bit */ i2c_disable(); i2c_dir_in(); i2c_delay(CLOCK_HIGH_TIME/2); /* Get bit */ aBitByte |= i2c_getbit(); /* Enable I2C */ i2c_enable(); i2c_delay(CLOCK_LOW_TIME/2); } i2c_clk(I2C_CLOCK_HIGH); i2c_delay(CLOCK_HIGH_TIME); /* * we leave the clock low, getbyte is usually followed * by sendack/nack, they assume the clock to be low */ i2c_clk(I2C_CLOCK_LOW); return aBitByte; } /*#--------------------------------------------------------------------------- *# *# FUNCTION NAME: i2c_getack *# *# DESCRIPTION : checks if ack was received from ic2 *# *#--------------------------------------------------------------------------*/ int i2c_getack(void) { int ack = 1; /* * enable output */ i2c_dir_out(); /* * Release data bus by setting * data high */ 
i2c_data(I2C_DATA_HIGH); /* * enable input */ i2c_dir_in(); i2c_delay(CLOCK_HIGH_TIME/4); /* * generate ACK clock pulse */ i2c_clk(I2C_CLOCK_HIGH); /* * Use PORT PB instead of I2C * for input. (I2C not working) */ i2c_clk(1); i2c_data(1); /* * switch off I2C */ i2c_data(1); i2c_disable(); i2c_dir_in(); /* * now wait for ack */ i2c_delay(CLOCK_HIGH_TIME/2); /* * check for ack */ if(i2c_getbit()) ack = 0; i2c_delay(CLOCK_HIGH_TIME/2); if(!ack){ if(!i2c_getbit()) /* receiver pulld SDA low */ ack = 1; i2c_delay(CLOCK_HIGH_TIME/2); } /* * our clock is high now, make sure data is low * before we enable our output. If we keep data high * and enable output, we would generate a stop condition. */ i2c_data(I2C_DATA_LOW); /* * end clock pulse */ i2c_enable(); i2c_dir_out(); i2c_clk(I2C_CLOCK_LOW); i2c_delay(CLOCK_HIGH_TIME/4); /* * enable output */ i2c_dir_out(); /* * remove ACK clock pulse */ i2c_data(I2C_DATA_HIGH); i2c_delay(CLOCK_LOW_TIME/2); return ack; } /*#--------------------------------------------------------------------------- *# *# FUNCTION NAME: I2C::sendAck *# *# DESCRIPTION : Send ACK on received data *# *#--------------------------------------------------------------------------*/ void i2c_sendack(void) { /* * enable output */ i2c_delay(CLOCK_LOW_TIME); i2c_dir_out(); /* * set ack pulse high */ i2c_data(I2C_DATA_LOW); /* * generate clock pulse */ i2c_delay(CLOCK_HIGH_TIME/6); i2c_clk(I2C_CLOCK_HIGH); i2c_delay(CLOCK_HIGH_TIME); i2c_clk(I2C_CLOCK_LOW); i2c_delay(CLOCK_LOW_TIME/6); /* * reset data out */ i2c_data(I2C_DATA_HIGH); i2c_delay(CLOCK_LOW_TIME); i2c_dir_in(); } /*#--------------------------------------------------------------------------- *# *# FUNCTION NAME: i2c_sendnack *# *# DESCRIPTION : Sends NACK on received data *# *#--------------------------------------------------------------------------*/ void i2c_sendnack(void) { /* * enable output */ i2c_delay(CLOCK_LOW_TIME); i2c_dir_out(); /* * set data high */ i2c_data(I2C_DATA_HIGH); /* * generate 
clock pulse */ i2c_delay(CLOCK_HIGH_TIME/6); i2c_clk(I2C_CLOCK_HIGH); i2c_delay(CLOCK_HIGH_TIME); i2c_clk(I2C_CLOCK_LOW); i2c_delay(CLOCK_LOW_TIME); i2c_dir_in(); } /*#--------------------------------------------------------------------------- *# *# FUNCTION NAME: i2c_writereg *# *# DESCRIPTION : Writes a value to an I2C device *# *#--------------------------------------------------------------------------*/ int i2c_writereg(unsigned char theSlave, unsigned char theReg, unsigned char theValue) { int error, cntr = 3; unsigned long flags; spin_lock(&i2c_lock); do { error = 0; /* * we don't like to be interrupted */ local_irq_save(flags); i2c_start(); /* * send slave address */ i2c_outbyte((theSlave & 0xfe)); /* * wait for ack */ if(!i2c_getack()) error = 1; /* * now select register */ i2c_dir_out(); i2c_outbyte(theReg); /* * now it's time to wait for ack */ if(!i2c_getack()) error |= 2; /* * send register register data */ i2c_outbyte(theValue); /* * now it's time to wait for ack */ if(!i2c_getack()) error |= 4; /* * end byte stream */ i2c_stop(); /* * enable interrupt again */ local_irq_restore(flags); } while(error && cntr--); i2c_delay(CLOCK_LOW_TIME); spin_unlock(&i2c_lock); return -error; } /*#--------------------------------------------------------------------------- *# *# FUNCTION NAME: i2c_readreg *# *# DESCRIPTION : Reads a value from the decoder registers. 
*#
*#               Performs a write of the register index followed by a
*#               repeated-start read of one byte. Retries up to 3 times
*#               on a missing ACK. Returns the byte read; note there is
*#               no way for the caller to distinguish an error from a
*#               legitimately-read value of the last attempt.
*#--------------------------------------------------------------------------*/
unsigned char i2c_readreg(unsigned char theSlave, unsigned char theReg)
{
	unsigned char b = 0;
	int error, cntr = 3;
	unsigned long flags;

	spin_lock(&i2c_lock);

	do {
		error = 0;
		/*
		 * we don't like to be interrupted
		 * (bit-bang timing must not be disturbed)
		 */
		local_irq_save(flags);
		/*
		 * generate start condition
		 */
		i2c_start();
		/*
		 * send slave address (write: R/W bit cleared)
		 */
		i2c_outbyte((theSlave & 0xfe));
		/*
		 * wait for ack
		 */
		if(!i2c_getack())
			error = 1;
		/*
		 * now select register
		 */
		i2c_dir_out();
		i2c_outbyte(theReg);
		/*
		 * now it's time to wait for ack
		 */
		if(!i2c_getack())
			error = 1;
		/*
		 * repeat start condition
		 */
		i2c_delay(CLOCK_LOW_TIME);
		i2c_start();
		/*
		 * send slave address (read: R/W bit set)
		 */
		i2c_outbyte(theSlave | 0x01);
		/*
		 * wait for ack
		 */
		if(!i2c_getack())
			error = 1;
		/*
		 * fetch register
		 */
		b = i2c_inbyte();
		/*
		 * last received byte needs to be nacked
		 * instead of acked
		 */
		i2c_sendnack();
		/*
		 * end sequence
		 */
		i2c_stop();
		/*
		 * enable interrupt again
		 */
		local_irq_restore(flags);

	} while(error && cntr--);

	spin_unlock(&i2c_lock);

	return b;
}

/* Character-device open: nothing to set up. */
static int i2c_open(struct inode *inode, struct file *filp)
{
	return 0;
}

/* Character-device release: nothing to tear down. */
static int i2c_release(struct inode *inode, struct file *filp)
{
	return 0;
}

/* Main device API. ioctl's to write or read to/from i2c registers.
*/
static long i2c_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if(_IOC_TYPE(cmd) != ETRAXI2C_IOCTYPE) {
		return -EINVAL;
	}

	switch (_IOC_NR(cmd)) {
		case I2C_WRITEREG:
			/* write to an i2c slave */
			D(printk(KERN_DEBUG "i2cw %d %d %d\n",
				 I2C_ARGSLAVE(arg),
				 I2C_ARGREG(arg),
				 I2C_ARGVALUE(arg)));

			return i2c_writereg(I2C_ARGSLAVE(arg),
					    I2C_ARGREG(arg),
					    I2C_ARGVALUE(arg));
		case I2C_READREG:
		{
			unsigned char val;
			/* read from an i2c slave */
			D(printk(KERN_DEBUG "i2cr %d %d ",
				 I2C_ARGSLAVE(arg),
				 I2C_ARGREG(arg)));
			val = i2c_readreg(I2C_ARGSLAVE(arg), I2C_ARGREG(arg));
			D(printk(KERN_DEBUG "= %d\n", val));
			return val;
		}
		default:
			return -EINVAL;
	}

	return 0;
}

static const struct file_operations i2c_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = i2c_ioctl,
	.open = i2c_open,
	.release = i2c_release,
	.llseek = noop_llseek,
};

/*
 * One-time hardware setup of the Port B I2C interface.
 * Idempotent: subsequent calls return the cached first result.
 */
int __init i2c_init(void)
{
	static int res = 0;
	static int first = 1;

	if (!first) {
		return res;
	}
	first = 0;

	/* Setup and enable the Port B I2C interface */

#ifndef CONFIG_ETRAX_I2C_USES_PB_NOT_PB_I2C
	if ((res = cris_request_io_interface(if_i2c, "I2C"))) {
		printk(KERN_CRIT "i2c_init: Failed to get IO interface\n");
		return res;
	}

	*R_PORT_PB_I2C = port_pb_i2c_shadow |=
		IO_STATE(R_PORT_PB_I2C, i2c_en,  on) |
		IO_FIELD(R_PORT_PB_I2C, i2c_d,   1)  |
		IO_FIELD(R_PORT_PB_I2C, i2c_clk, 1)  |
		IO_STATE(R_PORT_PB_I2C, i2c_oe_, enable);

	port_pb_dir_shadow &= ~IO_MASK(R_PORT_PB_DIR, dir0);
	port_pb_dir_shadow &= ~IO_MASK(R_PORT_PB_DIR, dir1);

	*R_PORT_PB_DIR = (port_pb_dir_shadow |=
		IO_STATE(R_PORT_PB_DIR, dir0, input) |
		IO_STATE(R_PORT_PB_DIR, dir1, output));
#else
	/* Plain GPIO bit-bang variant: claim the data and clock pins. */
	if ((res = cris_io_interface_allocate_pins(if_i2c, 'b',
			CONFIG_ETRAX_I2C_DATA_PORT,
			CONFIG_ETRAX_I2C_DATA_PORT))) {
		printk(KERN_WARNING "i2c_init: Failed to get IO pin for I2C data port\n");
		return res;
	} else if ((res = cris_io_interface_allocate_pins(if_i2c, 'b',
			CONFIG_ETRAX_I2C_CLK_PORT,
			CONFIG_ETRAX_I2C_CLK_PORT))) {
		/* clk pin failed: release the data pin again */
		cris_io_interface_free_pins(if_i2c, 'b',
				CONFIG_ETRAX_I2C_DATA_PORT,
				CONFIG_ETRAX_I2C_DATA_PORT);
		printk(KERN_WARNING "i2c_init: Failed to get IO pin for I2C clk port\n");
	}
#endif

	return res;
}

/* Module entry point: init hardware, then register the char device. */
static int __init i2c_register(void)
{
	int res;

	res = i2c_init();
	if (res < 0)
		return res;

	res = register_chrdev(I2C_MAJOR, i2c_name, &i2c_fops);
	if(res < 0) {
		printk(KERN_ERR "i2c: couldn't get a major number.\n");
		return res;
	}

	printk(KERN_INFO "I2C driver v2.2, (c) 1999-2004 Axis Communications AB\n");

	return 0;
}

/* this makes sure that i2c_register is called during boot */
module_init(i2c_register);

/****************** END OF FILE i2c.c ********************************/
gpl-2.0
imoseyon/leanKernel-tbolt-gingerbread
drivers/hwmon/gl520sm.c
3655
28635
/* gl520sm.c - Part of lm_sensors, Linux kernel modules for hardware monitoring Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl>, Kyösti Mälkki <kmalkki@cc.hut.fi> Copyright (c) 2005 Maarten Deprez <maartendeprez@users.sourceforge.net> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/hwmon-vid.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/sysfs.h> /* Type of the extra sensor */ static unsigned short extra_sensor_type; module_param(extra_sensor_type, ushort, 0); MODULE_PARM_DESC(extra_sensor_type, "Type of extra sensor (0=autodetect, 1=temperature, 2=voltage)"); /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; /* Many GL520 constants specified below One of the inputs can be configured as either temp or voltage. 
That's why _TEMP2 and _IN4 access the same register */ /* The GL520 registers */ #define GL520_REG_CHIP_ID 0x00 #define GL520_REG_REVISION 0x01 #define GL520_REG_CONF 0x03 #define GL520_REG_MASK 0x11 #define GL520_REG_VID_INPUT 0x02 static const u8 GL520_REG_IN_INPUT[] = { 0x15, 0x14, 0x13, 0x0d, 0x0e }; static const u8 GL520_REG_IN_LIMIT[] = { 0x0c, 0x09, 0x0a, 0x0b }; static const u8 GL520_REG_IN_MIN[] = { 0x0c, 0x09, 0x0a, 0x0b, 0x18 }; static const u8 GL520_REG_IN_MAX[] = { 0x0c, 0x09, 0x0a, 0x0b, 0x17 }; static const u8 GL520_REG_TEMP_INPUT[] = { 0x04, 0x0e }; static const u8 GL520_REG_TEMP_MAX[] = { 0x05, 0x17 }; static const u8 GL520_REG_TEMP_MAX_HYST[] = { 0x06, 0x18 }; #define GL520_REG_FAN_INPUT 0x07 #define GL520_REG_FAN_MIN 0x08 #define GL520_REG_FAN_DIV 0x0f #define GL520_REG_FAN_OFF GL520_REG_FAN_DIV #define GL520_REG_ALARMS 0x12 #define GL520_REG_BEEP_MASK 0x10 #define GL520_REG_BEEP_ENABLE GL520_REG_CONF /* * Function declarations */ static int gl520_probe(struct i2c_client *client, const struct i2c_device_id *id); static int gl520_detect(struct i2c_client *client, struct i2c_board_info *info); static void gl520_init_client(struct i2c_client *client); static int gl520_remove(struct i2c_client *client); static int gl520_read_value(struct i2c_client *client, u8 reg); static int gl520_write_value(struct i2c_client *client, u8 reg, u16 value); static struct gl520_data *gl520_update_device(struct device *dev); /* Driver data */ static const struct i2c_device_id gl520_id[] = { { "gl520sm", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, gl520_id); static struct i2c_driver gl520_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "gl520sm", }, .probe = gl520_probe, .remove = gl520_remove, .id_table = gl520_id, .detect = gl520_detect, .address_list = normal_i2c, }; /* Client data */ struct gl520_data { struct device *hwmon_dev; struct mutex update_lock; char valid; /* zero until the following fields are valid */ unsigned long last_updated; /* in jiffies */ u8 
vid; u8 vrm; u8 in_input[5]; /* [0] = VVD */ u8 in_min[5]; /* [0] = VDD */ u8 in_max[5]; /* [0] = VDD */ u8 fan_input[2]; u8 fan_min[2]; u8 fan_div[2]; u8 fan_off; u8 temp_input[2]; u8 temp_max[2]; u8 temp_max_hyst[2]; u8 alarms; u8 beep_enable; u8 beep_mask; u8 alarm_mask; u8 two_temps; }; /* * Sysfs stuff */ static ssize_t get_cpu_vid(struct device *dev, struct device_attribute *attr, char *buf) { struct gl520_data *data = gl520_update_device(dev); return sprintf(buf, "%u\n", vid_from_reg(data->vid, data->vrm)); } static DEVICE_ATTR(cpu0_vid, S_IRUGO, get_cpu_vid, NULL); #define VDD_FROM_REG(val) (((val)*95+2)/4) #define VDD_TO_REG(val) (SENSORS_LIMIT((((val)*4+47)/95),0,255)) #define IN_FROM_REG(val) ((val)*19) #define IN_TO_REG(val) (SENSORS_LIMIT((((val)+9)/19),0,255)) static ssize_t get_in_input(struct device *dev, struct device_attribute *attr, char *buf) { int n = to_sensor_dev_attr(attr)->index; struct gl520_data *data = gl520_update_device(dev); u8 r = data->in_input[n]; if (n == 0) return sprintf(buf, "%d\n", VDD_FROM_REG(r)); else return sprintf(buf, "%d\n", IN_FROM_REG(r)); } static ssize_t get_in_min(struct device *dev, struct device_attribute *attr, char *buf) { int n = to_sensor_dev_attr(attr)->index; struct gl520_data *data = gl520_update_device(dev); u8 r = data->in_min[n]; if (n == 0) return sprintf(buf, "%d\n", VDD_FROM_REG(r)); else return sprintf(buf, "%d\n", IN_FROM_REG(r)); } static ssize_t get_in_max(struct device *dev, struct device_attribute *attr, char *buf) { int n = to_sensor_dev_attr(attr)->index; struct gl520_data *data = gl520_update_device(dev); u8 r = data->in_max[n]; if (n == 0) return sprintf(buf, "%d\n", VDD_FROM_REG(r)); else return sprintf(buf, "%d\n", IN_FROM_REG(r)); } static ssize_t set_in_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct gl520_data *data = i2c_get_clientdata(client); int n = to_sensor_dev_attr(attr)->index; long v 
= simple_strtol(buf, NULL, 10); u8 r; mutex_lock(&data->update_lock); if (n == 0) r = VDD_TO_REG(v); else r = IN_TO_REG(v); data->in_min[n] = r; if (n < 4) gl520_write_value(client, GL520_REG_IN_MIN[n], (gl520_read_value(client, GL520_REG_IN_MIN[n]) & ~0xff) | r); else gl520_write_value(client, GL520_REG_IN_MIN[n], r); mutex_unlock(&data->update_lock); return count; } static ssize_t set_in_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct gl520_data *data = i2c_get_clientdata(client); int n = to_sensor_dev_attr(attr)->index; long v = simple_strtol(buf, NULL, 10); u8 r; if (n == 0) r = VDD_TO_REG(v); else r = IN_TO_REG(v); mutex_lock(&data->update_lock); data->in_max[n] = r; if (n < 4) gl520_write_value(client, GL520_REG_IN_MAX[n], (gl520_read_value(client, GL520_REG_IN_MAX[n]) & ~0xff00) | (r << 8)); else gl520_write_value(client, GL520_REG_IN_MAX[n], r); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, get_in_input, NULL, 0); static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, get_in_input, NULL, 1); static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, get_in_input, NULL, 2); static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, get_in_input, NULL, 3); static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, get_in_input, NULL, 4); static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO | S_IWUSR, get_in_min, set_in_min, 0); static SENSOR_DEVICE_ATTR(in1_min, S_IRUGO | S_IWUSR, get_in_min, set_in_min, 1); static SENSOR_DEVICE_ATTR(in2_min, S_IRUGO | S_IWUSR, get_in_min, set_in_min, 2); static SENSOR_DEVICE_ATTR(in3_min, S_IRUGO | S_IWUSR, get_in_min, set_in_min, 3); static SENSOR_DEVICE_ATTR(in4_min, S_IRUGO | S_IWUSR, get_in_min, set_in_min, 4); static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO | S_IWUSR, get_in_max, set_in_max, 0); static SENSOR_DEVICE_ATTR(in1_max, S_IRUGO | S_IWUSR, get_in_max, set_in_max, 1); static SENSOR_DEVICE_ATTR(in2_max, S_IRUGO | S_IWUSR, 
get_in_max, set_in_max, 2); static SENSOR_DEVICE_ATTR(in3_max, S_IRUGO | S_IWUSR, get_in_max, set_in_max, 3); static SENSOR_DEVICE_ATTR(in4_max, S_IRUGO | S_IWUSR, get_in_max, set_in_max, 4); #define DIV_FROM_REG(val) (1 << (val)) #define FAN_FROM_REG(val,div) ((val)==0 ? 0 : (480000/((val) << (div)))) #define FAN_TO_REG(val,div) ((val)<=0?0:SENSORS_LIMIT((480000 + ((val) << ((div)-1))) / ((val) << (div)), 1, 255)); static ssize_t get_fan_input(struct device *dev, struct device_attribute *attr, char *buf) { int n = to_sensor_dev_attr(attr)->index; struct gl520_data *data = gl520_update_device(dev); return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_input[n], data->fan_div[n])); } static ssize_t get_fan_min(struct device *dev, struct device_attribute *attr, char *buf) { int n = to_sensor_dev_attr(attr)->index; struct gl520_data *data = gl520_update_device(dev); return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[n], data->fan_div[n])); } static ssize_t get_fan_div(struct device *dev, struct device_attribute *attr, char *buf) { int n = to_sensor_dev_attr(attr)->index; struct gl520_data *data = gl520_update_device(dev); return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[n])); } static ssize_t get_fan_off(struct device *dev, struct device_attribute *attr, char *buf) { struct gl520_data *data = gl520_update_device(dev); return sprintf(buf, "%d\n", data->fan_off); } static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct gl520_data *data = i2c_get_clientdata(client); int n = to_sensor_dev_attr(attr)->index; unsigned long v = simple_strtoul(buf, NULL, 10); u8 r; mutex_lock(&data->update_lock); r = FAN_TO_REG(v, data->fan_div[n]); data->fan_min[n] = r; if (n == 0) gl520_write_value(client, GL520_REG_FAN_MIN, (gl520_read_value(client, GL520_REG_FAN_MIN) & ~0xff00) | (r << 8)); else gl520_write_value(client, GL520_REG_FAN_MIN, (gl520_read_value(client, 
GL520_REG_FAN_MIN) & ~0xff) | r); data->beep_mask = gl520_read_value(client, GL520_REG_BEEP_MASK); if (data->fan_min[n] == 0) data->alarm_mask &= (n == 0) ? ~0x20 : ~0x40; else data->alarm_mask |= (n == 0) ? 0x20 : 0x40; data->beep_mask &= data->alarm_mask; gl520_write_value(client, GL520_REG_BEEP_MASK, data->beep_mask); mutex_unlock(&data->update_lock); return count; } static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct gl520_data *data = i2c_get_clientdata(client); int n = to_sensor_dev_attr(attr)->index; unsigned long v = simple_strtoul(buf, NULL, 10); u8 r; switch (v) { case 1: r = 0; break; case 2: r = 1; break; case 4: r = 2; break; case 8: r = 3; break; default: dev_err(&client->dev, "fan_div value %ld not supported. Choose one of 1, 2, 4 or 8!\n", v); return -EINVAL; } mutex_lock(&data->update_lock); data->fan_div[n] = r; if (n == 0) gl520_write_value(client, GL520_REG_FAN_DIV, (gl520_read_value(client, GL520_REG_FAN_DIV) & ~0xc0) | (r << 6)); else gl520_write_value(client, GL520_REG_FAN_DIV, (gl520_read_value(client, GL520_REG_FAN_DIV) & ~0x30) | (r << 4)); mutex_unlock(&data->update_lock); return count; } static ssize_t set_fan_off(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct gl520_data *data = i2c_get_clientdata(client); u8 r = simple_strtoul(buf, NULL, 10)?1:0; mutex_lock(&data->update_lock); data->fan_off = r; gl520_write_value(client, GL520_REG_FAN_OFF, (gl520_read_value(client, GL520_REG_FAN_OFF) & ~0x0c) | (r << 2)); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan_input, NULL, 0); static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, get_fan_input, NULL, 1); static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO | S_IWUSR, get_fan_min, set_fan_min, 0); static SENSOR_DEVICE_ATTR(fan2_min, S_IRUGO | S_IWUSR, 
get_fan_min, set_fan_min, 1); static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR, get_fan_div, set_fan_div, 0); static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR, get_fan_div, set_fan_div, 1); static DEVICE_ATTR(fan1_off, S_IRUGO | S_IWUSR, get_fan_off, set_fan_off); #define TEMP_FROM_REG(val) (((val) - 130) * 1000) #define TEMP_TO_REG(val) (SENSORS_LIMIT(((((val)<0?(val)-500:(val)+500) / 1000)+130),0,255)) static ssize_t get_temp_input(struct device *dev, struct device_attribute *attr, char *buf) { int n = to_sensor_dev_attr(attr)->index; struct gl520_data *data = gl520_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_input[n])); } static ssize_t get_temp_max(struct device *dev, struct device_attribute *attr, char *buf) { int n = to_sensor_dev_attr(attr)->index; struct gl520_data *data = gl520_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[n])); } static ssize_t get_temp_max_hyst(struct device *dev, struct device_attribute *attr, char *buf) { int n = to_sensor_dev_attr(attr)->index; struct gl520_data *data = gl520_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max_hyst[n])); } static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct gl520_data *data = i2c_get_clientdata(client); int n = to_sensor_dev_attr(attr)->index; long v = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->temp_max[n] = TEMP_TO_REG(v); gl520_write_value(client, GL520_REG_TEMP_MAX[n], data->temp_max[n]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_temp_max_hyst(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct gl520_data *data = i2c_get_clientdata(client); int n = to_sensor_dev_attr(attr)->index; long v = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); 
data->temp_max_hyst[n] = TEMP_TO_REG(v); gl520_write_value(client, GL520_REG_TEMP_MAX_HYST[n], data->temp_max_hyst[n]); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, get_temp_input, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, get_temp_input, NULL, 1); static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, get_temp_max, set_temp_max, 0); static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR, get_temp_max, set_temp_max, 1); static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, get_temp_max_hyst, set_temp_max_hyst, 0); static SENSOR_DEVICE_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, get_temp_max_hyst, set_temp_max_hyst, 1); static ssize_t get_alarms(struct device *dev, struct device_attribute *attr, char *buf) { struct gl520_data *data = gl520_update_device(dev); return sprintf(buf, "%d\n", data->alarms); } static ssize_t get_beep_enable(struct device *dev, struct device_attribute *attr, char *buf) { struct gl520_data *data = gl520_update_device(dev); return sprintf(buf, "%d\n", data->beep_enable); } static ssize_t get_beep_mask(struct device *dev, struct device_attribute *attr, char *buf) { struct gl520_data *data = gl520_update_device(dev); return sprintf(buf, "%d\n", data->beep_mask); } static ssize_t set_beep_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct gl520_data *data = i2c_get_clientdata(client); u8 r = simple_strtoul(buf, NULL, 10)?0:1; mutex_lock(&data->update_lock); data->beep_enable = !r; gl520_write_value(client, GL520_REG_BEEP_ENABLE, (gl520_read_value(client, GL520_REG_BEEP_ENABLE) & ~0x04) | (r << 2)); mutex_unlock(&data->update_lock); return count; } static ssize_t set_beep_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct gl520_data *data = i2c_get_clientdata(client); u8 r = 
simple_strtoul(buf, NULL, 10); mutex_lock(&data->update_lock); r &= data->alarm_mask; data->beep_mask = r; gl520_write_value(client, GL520_REG_BEEP_MASK, r); mutex_unlock(&data->update_lock); return count; } static DEVICE_ATTR(alarms, S_IRUGO, get_alarms, NULL); static DEVICE_ATTR(beep_enable, S_IRUGO | S_IWUSR, get_beep_enable, set_beep_enable); static DEVICE_ATTR(beep_mask, S_IRUGO | S_IWUSR, get_beep_mask, set_beep_mask); static ssize_t get_alarm(struct device *dev, struct device_attribute *attr, char *buf) { int bit_nr = to_sensor_dev_attr(attr)->index; struct gl520_data *data = gl520_update_device(dev); return sprintf(buf, "%d\n", (data->alarms >> bit_nr) & 1); } static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, get_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, get_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, get_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, get_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, get_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, get_alarm, NULL, 5); static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, get_alarm, NULL, 6); static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, get_alarm, NULL, 7); static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, get_alarm, NULL, 7); static ssize_t get_beep(struct device *dev, struct device_attribute *attr, char *buf) { int bitnr = to_sensor_dev_attr(attr)->index; struct gl520_data *data = gl520_update_device(dev); return sprintf(buf, "%d\n", (data->beep_mask >> bitnr) & 1); } static ssize_t set_beep(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct gl520_data *data = i2c_get_clientdata(client); int bitnr = to_sensor_dev_attr(attr)->index; unsigned long bit; bit = simple_strtoul(buf, NULL, 10); if (bit & ~1) return -EINVAL; mutex_lock(&data->update_lock); data->beep_mask = gl520_read_value(client, GL520_REG_BEEP_MASK); if (bit) data->beep_mask |= 
(1 << bitnr); else data->beep_mask &= ~(1 << bitnr); gl520_write_value(client, GL520_REG_BEEP_MASK, data->beep_mask); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(in0_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 0); static SENSOR_DEVICE_ATTR(in1_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 1); static SENSOR_DEVICE_ATTR(in2_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 2); static SENSOR_DEVICE_ATTR(in3_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 3); static SENSOR_DEVICE_ATTR(temp1_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 4); static SENSOR_DEVICE_ATTR(fan1_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 5); static SENSOR_DEVICE_ATTR(fan2_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 6); static SENSOR_DEVICE_ATTR(temp2_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 7); static SENSOR_DEVICE_ATTR(in4_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 7); static struct attribute *gl520_attributes[] = { &dev_attr_cpu0_vid.attr, &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in0_min.dev_attr.attr, &sensor_dev_attr_in0_max.dev_attr.attr, &sensor_dev_attr_in0_alarm.dev_attr.attr, &sensor_dev_attr_in0_beep.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in1_max.dev_attr.attr, &sensor_dev_attr_in1_alarm.dev_attr.attr, &sensor_dev_attr_in1_beep.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in2_min.dev_attr.attr, &sensor_dev_attr_in2_max.dev_attr.attr, &sensor_dev_attr_in2_alarm.dev_attr.attr, &sensor_dev_attr_in2_beep.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in3_min.dev_attr.attr, &sensor_dev_attr_in3_max.dev_attr.attr, &sensor_dev_attr_in3_alarm.dev_attr.attr, &sensor_dev_attr_in3_beep.dev_attr.attr, &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan1_min.dev_attr.attr, &sensor_dev_attr_fan1_div.dev_attr.attr, &sensor_dev_attr_fan1_alarm.dev_attr.attr, &sensor_dev_attr_fan1_beep.dev_attr.attr, 
&dev_attr_fan1_off.attr, &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan2_min.dev_attr.attr, &sensor_dev_attr_fan2_div.dev_attr.attr, &sensor_dev_attr_fan2_alarm.dev_attr.attr, &sensor_dev_attr_fan2_beep.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, &sensor_dev_attr_temp1_alarm.dev_attr.attr, &sensor_dev_attr_temp1_beep.dev_attr.attr, &dev_attr_alarms.attr, &dev_attr_beep_enable.attr, &dev_attr_beep_mask.attr, NULL }; static const struct attribute_group gl520_group = { .attrs = gl520_attributes, }; static struct attribute *gl520_attributes_opt[] = { &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in4_alarm.dev_attr.attr, &sensor_dev_attr_in4_beep.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp2_max_hyst.dev_attr.attr, &sensor_dev_attr_temp2_alarm.dev_attr.attr, &sensor_dev_attr_temp2_beep.dev_attr.attr, NULL }; static const struct attribute_group gl520_group_opt = { .attrs = gl520_attributes_opt, }; /* * Real code */ /* Return 0 if detection is successful, -ENODEV otherwise */ static int gl520_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) return -ENODEV; /* Determine the chip type. 
*/ if ((gl520_read_value(client, GL520_REG_CHIP_ID) != 0x20) || ((gl520_read_value(client, GL520_REG_REVISION) & 0x7f) != 0x00) || ((gl520_read_value(client, GL520_REG_CONF) & 0x80) != 0x00)) { dev_dbg(&client->dev, "Unknown chip type, skipping\n"); return -ENODEV; } strlcpy(info->type, "gl520sm", I2C_NAME_SIZE); return 0; } static int gl520_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct gl520_data *data; int err; data = kzalloc(sizeof(struct gl520_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } i2c_set_clientdata(client, data); mutex_init(&data->update_lock); /* Initialize the GL520SM chip */ gl520_init_client(client); /* Register sysfs hooks */ if ((err = sysfs_create_group(&client->dev.kobj, &gl520_group))) goto exit_free; if (data->two_temps) { if ((err = device_create_file(&client->dev, &sensor_dev_attr_temp2_input.dev_attr)) || (err = device_create_file(&client->dev, &sensor_dev_attr_temp2_max.dev_attr)) || (err = device_create_file(&client->dev, &sensor_dev_attr_temp2_max_hyst.dev_attr)) || (err = device_create_file(&client->dev, &sensor_dev_attr_temp2_alarm.dev_attr)) || (err = device_create_file(&client->dev, &sensor_dev_attr_temp2_beep.dev_attr))) goto exit_remove_files; } else { if ((err = device_create_file(&client->dev, &sensor_dev_attr_in4_input.dev_attr)) || (err = device_create_file(&client->dev, &sensor_dev_attr_in4_min.dev_attr)) || (err = device_create_file(&client->dev, &sensor_dev_attr_in4_max.dev_attr)) || (err = device_create_file(&client->dev, &sensor_dev_attr_in4_alarm.dev_attr)) || (err = device_create_file(&client->dev, &sensor_dev_attr_in4_beep.dev_attr))) goto exit_remove_files; } data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove_files; } return 0; exit_remove_files: sysfs_remove_group(&client->dev.kobj, &gl520_group); sysfs_remove_group(&client->dev.kobj, &gl520_group_opt); exit_free: kfree(data); exit: return 
err; } /* Called when we have found a new GL520SM. */ static void gl520_init_client(struct i2c_client *client) { struct gl520_data *data = i2c_get_clientdata(client); u8 oldconf, conf; conf = oldconf = gl520_read_value(client, GL520_REG_CONF); data->alarm_mask = 0xff; data->vrm = vid_which_vrm(); if (extra_sensor_type == 1) conf &= ~0x10; else if (extra_sensor_type == 2) conf |= 0x10; data->two_temps = !(conf & 0x10); /* If IRQ# is disabled, we can safely force comparator mode */ if (!(conf & 0x20)) conf &= 0xf7; /* Enable monitoring if needed */ conf |= 0x40; if (conf != oldconf) gl520_write_value(client, GL520_REG_CONF, conf); gl520_update_device(&(client->dev)); if (data->fan_min[0] == 0) data->alarm_mask &= ~0x20; if (data->fan_min[1] == 0) data->alarm_mask &= ~0x40; data->beep_mask &= data->alarm_mask; gl520_write_value(client, GL520_REG_BEEP_MASK, data->beep_mask); } static int gl520_remove(struct i2c_client *client) { struct gl520_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &gl520_group); sysfs_remove_group(&client->dev.kobj, &gl520_group_opt); kfree(data); return 0; } /* Registers 0x07 to 0x0c are word-sized, others are byte-sized GL520 uses a high-byte first convention */ static int gl520_read_value(struct i2c_client *client, u8 reg) { if ((reg >= 0x07) && (reg <= 0x0c)) return swab16(i2c_smbus_read_word_data(client, reg)); else return i2c_smbus_read_byte_data(client, reg); } static int gl520_write_value(struct i2c_client *client, u8 reg, u16 value) { if ((reg >= 0x07) && (reg <= 0x0c)) return i2c_smbus_write_word_data(client, reg, swab16(value)); else return i2c_smbus_write_byte_data(client, reg, value); } static struct gl520_data *gl520_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct gl520_data *data = i2c_get_clientdata(client); int val, i; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + 2 * HZ) || 
!data->valid) { dev_dbg(&client->dev, "Starting gl520sm update\n"); data->alarms = gl520_read_value(client, GL520_REG_ALARMS); data->beep_mask = gl520_read_value(client, GL520_REG_BEEP_MASK); data->vid = gl520_read_value(client, GL520_REG_VID_INPUT) & 0x1f; for (i = 0; i < 4; i++) { data->in_input[i] = gl520_read_value(client, GL520_REG_IN_INPUT[i]); val = gl520_read_value(client, GL520_REG_IN_LIMIT[i]); data->in_min[i] = val & 0xff; data->in_max[i] = (val >> 8) & 0xff; } val = gl520_read_value(client, GL520_REG_FAN_INPUT); data->fan_input[0] = (val >> 8) & 0xff; data->fan_input[1] = val & 0xff; val = gl520_read_value(client, GL520_REG_FAN_MIN); data->fan_min[0] = (val >> 8) & 0xff; data->fan_min[1] = val & 0xff; data->temp_input[0] = gl520_read_value(client, GL520_REG_TEMP_INPUT[0]); data->temp_max[0] = gl520_read_value(client, GL520_REG_TEMP_MAX[0]); data->temp_max_hyst[0] = gl520_read_value(client, GL520_REG_TEMP_MAX_HYST[0]); val = gl520_read_value(client, GL520_REG_FAN_DIV); data->fan_div[0] = (val >> 6) & 0x03; data->fan_div[1] = (val >> 4) & 0x03; data->fan_off = (val >> 2) & 0x01; data->alarms &= data->alarm_mask; val = gl520_read_value(client, GL520_REG_CONF); data->beep_enable = !((val >> 2) & 1); /* Temp1 and Vin4 are the same input */ if (data->two_temps) { data->temp_input[1] = gl520_read_value(client, GL520_REG_TEMP_INPUT[1]); data->temp_max[1] = gl520_read_value(client, GL520_REG_TEMP_MAX[1]); data->temp_max_hyst[1] = gl520_read_value(client, GL520_REG_TEMP_MAX_HYST[1]); } else { data->in_input[4] = gl520_read_value(client, GL520_REG_IN_INPUT[4]); data->in_min[4] = gl520_read_value(client, GL520_REG_IN_MIN[4]); data->in_max[4] = gl520_read_value(client, GL520_REG_IN_MAX[4]); } data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } static int __init sensors_gl520sm_init(void) { return i2c_add_driver(&gl520_driver); } static void __exit sensors_gl520sm_exit(void) { i2c_del_driver(&gl520_driver); } 
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>, " "Kyösti Mälkki <kmalkki@cc.hut.fi>, " "Maarten Deprez <maartendeprez@users.sourceforge.net>"); MODULE_DESCRIPTION("GL520SM driver"); MODULE_LICENSE("GPL"); module_init(sensors_gl520sm_init); module_exit(sensors_gl520sm_exit);
gpl-2.0
ms705/linux
fs/sysv/super.c
4167
15417
/* * linux/fs/sysv/inode.c * * minix/inode.c * Copyright (C) 1991, 1992 Linus Torvalds * * xenix/inode.c * Copyright (C) 1992 Doug Evans * * coh/inode.c * Copyright (C) 1993 Pascal Haible, Bruno Haible * * sysv/inode.c * Copyright (C) 1993 Paul B. Monday * * sysv/inode.c * Copyright (C) 1993 Bruno Haible * Copyright (C) 1997, 1998 Krzysztof G. Baranowski * * This file contains code for read/parsing the superblock. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/buffer_head.h> #include "sysv.h" /* * The following functions try to recognize specific filesystems. * * We recognize: * - Xenix FS by its magic number. * - SystemV FS by its magic number. * - Coherent FS by its funny fname/fpack field. * - SCO AFS by s_nfree == 0xffff * - V7 FS has no distinguishing features. * * We discriminate among SystemV4 and SystemV2 FS by the assumption that * the time stamp is not < 01-01-1980. */ enum { JAN_1_1980 = (10*365 + 2) * 24 * 60 * 60 }; static void detected_xenix(struct sysv_sb_info *sbi, unsigned *max_links) { struct buffer_head *bh1 = sbi->s_bh1; struct buffer_head *bh2 = sbi->s_bh2; struct xenix_super_block * sbd1; struct xenix_super_block * sbd2; if (bh1 != bh2) sbd1 = sbd2 = (struct xenix_super_block *) bh1->b_data; else { /* block size = 512, so bh1 != bh2 */ sbd1 = (struct xenix_super_block *) bh1->b_data; sbd2 = (struct xenix_super_block *) (bh2->b_data - 512); } *max_links = XENIX_LINK_MAX; sbi->s_fic_size = XENIX_NICINOD; sbi->s_flc_size = XENIX_NICFREE; sbi->s_sbd1 = (char *)sbd1; sbi->s_sbd2 = (char *)sbd2; sbi->s_sb_fic_count = &sbd1->s_ninode; sbi->s_sb_fic_inodes = &sbd1->s_inode[0]; sbi->s_sb_total_free_inodes = &sbd2->s_tinode; sbi->s_bcache_count = &sbd1->s_nfree; sbi->s_bcache = &sbd1->s_free[0]; sbi->s_free_blocks = &sbd2->s_tfree; sbi->s_sb_time = &sbd2->s_time; sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd1->s_isize); sbi->s_nzones = fs32_to_cpu(sbi, sbd1->s_fsize); } static void detected_sysv4(struct 
sysv_sb_info *sbi, unsigned *max_links) { struct sysv4_super_block * sbd; struct buffer_head *bh1 = sbi->s_bh1; struct buffer_head *bh2 = sbi->s_bh2; if (bh1 == bh2) sbd = (struct sysv4_super_block *) (bh1->b_data + BLOCK_SIZE/2); else sbd = (struct sysv4_super_block *) bh2->b_data; *max_links = SYSV_LINK_MAX; sbi->s_fic_size = SYSV_NICINOD; sbi->s_flc_size = SYSV_NICFREE; sbi->s_sbd1 = (char *)sbd; sbi->s_sbd2 = (char *)sbd; sbi->s_sb_fic_count = &sbd->s_ninode; sbi->s_sb_fic_inodes = &sbd->s_inode[0]; sbi->s_sb_total_free_inodes = &sbd->s_tinode; sbi->s_bcache_count = &sbd->s_nfree; sbi->s_bcache = &sbd->s_free[0]; sbi->s_free_blocks = &sbd->s_tfree; sbi->s_sb_time = &sbd->s_time; sbi->s_sb_state = &sbd->s_state; sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd->s_isize); sbi->s_nzones = fs32_to_cpu(sbi, sbd->s_fsize); } static void detected_sysv2(struct sysv_sb_info *sbi, unsigned *max_links) { struct sysv2_super_block *sbd; struct buffer_head *bh1 = sbi->s_bh1; struct buffer_head *bh2 = sbi->s_bh2; if (bh1 == bh2) sbd = (struct sysv2_super_block *) (bh1->b_data + BLOCK_SIZE/2); else sbd = (struct sysv2_super_block *) bh2->b_data; *max_links = SYSV_LINK_MAX; sbi->s_fic_size = SYSV_NICINOD; sbi->s_flc_size = SYSV_NICFREE; sbi->s_sbd1 = (char *)sbd; sbi->s_sbd2 = (char *)sbd; sbi->s_sb_fic_count = &sbd->s_ninode; sbi->s_sb_fic_inodes = &sbd->s_inode[0]; sbi->s_sb_total_free_inodes = &sbd->s_tinode; sbi->s_bcache_count = &sbd->s_nfree; sbi->s_bcache = &sbd->s_free[0]; sbi->s_free_blocks = &sbd->s_tfree; sbi->s_sb_time = &sbd->s_time; sbi->s_sb_state = &sbd->s_state; sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd->s_isize); sbi->s_nzones = fs32_to_cpu(sbi, sbd->s_fsize); } static void detected_coherent(struct sysv_sb_info *sbi, unsigned *max_links) { struct coh_super_block * sbd; struct buffer_head *bh1 = sbi->s_bh1; sbd = (struct coh_super_block *) bh1->b_data; *max_links = COH_LINK_MAX; sbi->s_fic_size = COH_NICINOD; sbi->s_flc_size = COH_NICFREE; sbi->s_sbd1 = (char 
*)sbd; sbi->s_sbd2 = (char *)sbd; sbi->s_sb_fic_count = &sbd->s_ninode; sbi->s_sb_fic_inodes = &sbd->s_inode[0]; sbi->s_sb_total_free_inodes = &sbd->s_tinode; sbi->s_bcache_count = &sbd->s_nfree; sbi->s_bcache = &sbd->s_free[0]; sbi->s_free_blocks = &sbd->s_tfree; sbi->s_sb_time = &sbd->s_time; sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd->s_isize); sbi->s_nzones = fs32_to_cpu(sbi, sbd->s_fsize); } static void detected_v7(struct sysv_sb_info *sbi, unsigned *max_links) { struct buffer_head *bh2 = sbi->s_bh2; struct v7_super_block *sbd = (struct v7_super_block *)bh2->b_data; *max_links = V7_LINK_MAX; sbi->s_fic_size = V7_NICINOD; sbi->s_flc_size = V7_NICFREE; sbi->s_sbd1 = (char *)sbd; sbi->s_sbd2 = (char *)sbd; sbi->s_sb_fic_count = &sbd->s_ninode; sbi->s_sb_fic_inodes = &sbd->s_inode[0]; sbi->s_sb_total_free_inodes = &sbd->s_tinode; sbi->s_bcache_count = &sbd->s_nfree; sbi->s_bcache = &sbd->s_free[0]; sbi->s_free_blocks = &sbd->s_tfree; sbi->s_sb_time = &sbd->s_time; sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd->s_isize); sbi->s_nzones = fs32_to_cpu(sbi, sbd->s_fsize); } static int detect_xenix(struct sysv_sb_info *sbi, struct buffer_head *bh) { struct xenix_super_block *sbd = (struct xenix_super_block *)bh->b_data; if (*(__le32 *)&sbd->s_magic == cpu_to_le32(0x2b5544)) sbi->s_bytesex = BYTESEX_LE; else if (*(__be32 *)&sbd->s_magic == cpu_to_be32(0x2b5544)) sbi->s_bytesex = BYTESEX_BE; else return 0; switch (fs32_to_cpu(sbi, sbd->s_type)) { case 1: sbi->s_type = FSTYPE_XENIX; return 1; case 2: sbi->s_type = FSTYPE_XENIX; return 2; default: return 0; } } static int detect_sysv(struct sysv_sb_info *sbi, struct buffer_head *bh) { struct super_block *sb = sbi->s_sb; /* All relevant fields are at the same offsets in R2 and R4 */ struct sysv4_super_block * sbd; u32 type; sbd = (struct sysv4_super_block *) (bh->b_data + BLOCK_SIZE/2); if (*(__le32 *)&sbd->s_magic == cpu_to_le32(0xfd187e20)) sbi->s_bytesex = BYTESEX_LE; else if (*(__be32 *)&sbd->s_magic == 
cpu_to_be32(0xfd187e20)) sbi->s_bytesex = BYTESEX_BE; else return 0; type = fs32_to_cpu(sbi, sbd->s_type); if (fs16_to_cpu(sbi, sbd->s_nfree) == 0xffff) { sbi->s_type = FSTYPE_AFS; sbi->s_forced_ro = 1; if (!(sb->s_flags & MS_RDONLY)) { printk("SysV FS: SCO EAFS on %s detected, " "forcing read-only mode.\n", sb->s_id); } return type; } if (fs32_to_cpu(sbi, sbd->s_time) < JAN_1_1980) { /* this is likely to happen on SystemV2 FS */ if (type > 3 || type < 1) return 0; sbi->s_type = FSTYPE_SYSV2; return type; } if ((type > 3 || type < 1) && (type > 0x30 || type < 0x10)) return 0; /* On Interactive Unix (ISC) Version 4.0/3.x s_type field = 0x10, 0x20 or 0x30 indicates that symbolic links and the 14-character filename limit is gone. Due to lack of information about this feature read-only mode seems to be a reasonable approach... -KGB */ if (type >= 0x10) { printk("SysV FS: can't handle long file names on %s, " "forcing read-only mode.\n", sb->s_id); sbi->s_forced_ro = 1; } sbi->s_type = FSTYPE_SYSV4; return type >= 0x10 ? type >> 4 : type; } static int detect_coherent(struct sysv_sb_info *sbi, struct buffer_head *bh) { struct coh_super_block * sbd; sbd = (struct coh_super_block *) (bh->b_data + BLOCK_SIZE/2); if ((memcmp(sbd->s_fname,"noname",6) && memcmp(sbd->s_fname,"xxxxx ",6)) || (memcmp(sbd->s_fpack,"nopack",6) && memcmp(sbd->s_fpack,"xxxxx\n",6))) return 0; sbi->s_bytesex = BYTESEX_PDP; sbi->s_type = FSTYPE_COH; return 1; } static int detect_sysv_odd(struct sysv_sb_info *sbi, struct buffer_head *bh) { int size = detect_sysv(sbi, bh); return size>2 ? 
0 : size; } static struct { int block; int (*test)(struct sysv_sb_info *, struct buffer_head *); } flavours[] = { {1, detect_xenix}, {0, detect_sysv}, {0, detect_coherent}, {9, detect_sysv_odd}, {15,detect_sysv_odd}, {18,detect_sysv}, }; static char *flavour_names[] = { [FSTYPE_XENIX] = "Xenix", [FSTYPE_SYSV4] = "SystemV", [FSTYPE_SYSV2] = "SystemV Release 2", [FSTYPE_COH] = "Coherent", [FSTYPE_V7] = "V7", [FSTYPE_AFS] = "AFS", }; static void (*flavour_setup[])(struct sysv_sb_info *, unsigned *) = { [FSTYPE_XENIX] = detected_xenix, [FSTYPE_SYSV4] = detected_sysv4, [FSTYPE_SYSV2] = detected_sysv2, [FSTYPE_COH] = detected_coherent, [FSTYPE_V7] = detected_v7, [FSTYPE_AFS] = detected_sysv4, }; static int complete_read_super(struct super_block *sb, int silent, int size) { struct sysv_sb_info *sbi = SYSV_SB(sb); struct inode *root_inode; char *found = flavour_names[sbi->s_type]; u_char n_bits = size+8; int bsize = 1 << n_bits; int bsize_4 = bsize >> 2; sbi->s_firstinodezone = 2; flavour_setup[sbi->s_type](sbi, &sb->s_max_links); sbi->s_truncate = 1; sbi->s_ndatazones = sbi->s_nzones - sbi->s_firstdatazone; sbi->s_inodes_per_block = bsize >> 6; sbi->s_inodes_per_block_1 = (bsize >> 6)-1; sbi->s_inodes_per_block_bits = n_bits-6; sbi->s_ind_per_block = bsize_4; sbi->s_ind_per_block_2 = bsize_4*bsize_4; sbi->s_toobig_block = 10 + bsize_4 * (1 + bsize_4 * (1 + bsize_4)); sbi->s_ind_per_block_bits = n_bits-2; sbi->s_ninodes = (sbi->s_firstdatazone - sbi->s_firstinodezone) << sbi->s_inodes_per_block_bits; if (!silent) printk("VFS: Found a %s FS (block size = %ld) on device %s\n", found, sb->s_blocksize, sb->s_id); sb->s_magic = SYSV_MAGIC_BASE + sbi->s_type; /* set up enough so that it can read an inode */ sb->s_op = &sysv_sops; if (sbi->s_forced_ro) sb->s_flags |= MS_RDONLY; if (sbi->s_truncate) sb->s_d_op = &sysv_dentry_operations; root_inode = sysv_iget(sb, SYSV_ROOT_INO); if (IS_ERR(root_inode)) { printk("SysV FS: get root inode failed\n"); return 0; } sb->s_root = 
d_make_root(root_inode); if (!sb->s_root) { printk("SysV FS: get root dentry failed\n"); return 0; } return 1; } static int sysv_fill_super(struct super_block *sb, void *data, int silent) { struct buffer_head *bh1, *bh = NULL; struct sysv_sb_info *sbi; unsigned long blocknr; int size = 0, i; BUILD_BUG_ON(1024 != sizeof (struct xenix_super_block)); BUILD_BUG_ON(512 != sizeof (struct sysv4_super_block)); BUILD_BUG_ON(512 != sizeof (struct sysv2_super_block)); BUILD_BUG_ON(500 != sizeof (struct coh_super_block)); BUILD_BUG_ON(64 != sizeof (struct sysv_inode)); sbi = kzalloc(sizeof(struct sysv_sb_info), GFP_KERNEL); if (!sbi) return -ENOMEM; sbi->s_sb = sb; sbi->s_block_base = 0; mutex_init(&sbi->s_lock); sb->s_fs_info = sbi; sb_set_blocksize(sb, BLOCK_SIZE); for (i = 0; i < ARRAY_SIZE(flavours) && !size; i++) { brelse(bh); bh = sb_bread(sb, flavours[i].block); if (!bh) continue; size = flavours[i].test(SYSV_SB(sb), bh); } if (!size) goto Eunknown; switch (size) { case 1: blocknr = bh->b_blocknr << 1; brelse(bh); sb_set_blocksize(sb, 512); bh1 = sb_bread(sb, blocknr); bh = sb_bread(sb, blocknr + 1); break; case 2: bh1 = bh; break; case 3: blocknr = bh->b_blocknr >> 1; brelse(bh); sb_set_blocksize(sb, 2048); bh1 = bh = sb_bread(sb, blocknr); break; default: goto Ebadsize; } if (bh && bh1) { sbi->s_bh1 = bh1; sbi->s_bh2 = bh; if (complete_read_super(sb, silent, size)) return 0; } brelse(bh1); brelse(bh); sb_set_blocksize(sb, BLOCK_SIZE); printk("oldfs: cannot read superblock\n"); failed: kfree(sbi); return -EINVAL; Eunknown: brelse(bh); if (!silent) printk("VFS: unable to find oldfs superblock on device %s\n", sb->s_id); goto failed; Ebadsize: brelse(bh); if (!silent) printk("VFS: oldfs: unsupported block size (%dKb)\n", 1<<(size-2)); goto failed; } static int v7_sanity_check(struct super_block *sb, struct buffer_head *bh) { struct v7_super_block *v7sb; struct sysv_inode *v7i; struct buffer_head *bh2; struct sysv_sb_info *sbi; sbi = sb->s_fs_info; /* plausibility check 
on superblock */ v7sb = (struct v7_super_block *) bh->b_data; if (fs16_to_cpu(sbi, v7sb->s_nfree) > V7_NICFREE || fs16_to_cpu(sbi, v7sb->s_ninode) > V7_NICINOD || fs32_to_cpu(sbi, v7sb->s_fsize) > V7_MAXSIZE) return 0; /* plausibility check on root inode: it is a directory, with a nonzero size that is a multiple of 16 */ bh2 = sb_bread(sb, 2); if (bh2 == NULL) return 0; v7i = (struct sysv_inode *)(bh2->b_data + 64); if ((fs16_to_cpu(sbi, v7i->i_mode) & ~0777) != S_IFDIR || (fs32_to_cpu(sbi, v7i->i_size) == 0) || (fs32_to_cpu(sbi, v7i->i_size) & 017) || (fs32_to_cpu(sbi, v7i->i_size) > V7_NFILES * sizeof(struct sysv_dir_entry))) { brelse(bh2); return 0; } brelse(bh2); return 1; } static int v7_fill_super(struct super_block *sb, void *data, int silent) { struct sysv_sb_info *sbi; struct buffer_head *bh; if (440 != sizeof (struct v7_super_block)) panic("V7 FS: bad super-block size"); if (64 != sizeof (struct sysv_inode)) panic("sysv fs: bad i-node size"); sbi = kzalloc(sizeof(struct sysv_sb_info), GFP_KERNEL); if (!sbi) return -ENOMEM; sbi->s_sb = sb; sbi->s_block_base = 0; sbi->s_type = FSTYPE_V7; mutex_init(&sbi->s_lock); sb->s_fs_info = sbi; sb_set_blocksize(sb, 512); if ((bh = sb_bread(sb, 1)) == NULL) { if (!silent) printk("VFS: unable to read V7 FS superblock on " "device %s.\n", sb->s_id); goto failed; } /* Try PDP-11 UNIX */ sbi->s_bytesex = BYTESEX_PDP; if (v7_sanity_check(sb, bh)) goto detected; /* Try PC/IX, v7/x86 */ sbi->s_bytesex = BYTESEX_LE; if (v7_sanity_check(sb, bh)) goto detected; goto failed; detected: sbi->s_bh1 = bh; sbi->s_bh2 = bh; if (complete_read_super(sb, silent, 1)) return 0; failed: printk(KERN_ERR "VFS: could not find a valid V7 on %s.\n", sb->s_id); brelse(bh); kfree(sbi); return -EINVAL; } /* Every kernel module contains stuff like this. 
*/ static struct dentry *sysv_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, sysv_fill_super); } static struct dentry *v7_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, v7_fill_super); } static struct file_system_type sysv_fs_type = { .owner = THIS_MODULE, .name = "sysv", .mount = sysv_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("sysv"); static struct file_system_type v7_fs_type = { .owner = THIS_MODULE, .name = "v7", .mount = v7_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("v7"); MODULE_ALIAS("v7"); static int __init init_sysv_fs(void) { int error; error = sysv_init_icache(); if (error) goto out; error = register_filesystem(&sysv_fs_type); if (error) goto destroy_icache; error = register_filesystem(&v7_fs_type); if (error) goto unregister; return 0; unregister: unregister_filesystem(&sysv_fs_type); destroy_icache: sysv_destroy_icache(); out: return error; } static void __exit exit_sysv_fs(void) { unregister_filesystem(&sysv_fs_type); unregister_filesystem(&v7_fs_type); sysv_destroy_icache(); } module_init(init_sysv_fs) module_exit(exit_sysv_fs) MODULE_LICENSE("GPL");
gpl-2.0
Schischu/android_kernel_samsung_lt03lte
arch/x86/ia32/sys_ia32.c
4679
14129
/* * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Based on * sys_sparc32 * * Copyright (C) 2000 VA Linux Co * Copyright (C) 2000 Don Dugger <n0ano@valinux.com> * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com> * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 2000 Hewlett-Packard Co. * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com> * Copyright (C) 2000,2001,2002 Andi Kleen, SuSE Labs (x86-64 port) * * These routines maintain argument size conversion between 32bit and 64bit * environment. In 2.5 most of this should be moved to a generic directory. * * This file assumes that there is a hole at the end of user address space. * * Some of the functions are LE specific currently. These are * hopefully all marked. This should be fixed. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/signal.h> #include <linux/syscalls.h> #include <linux/times.h> #include <linux/utsname.h> #include <linux/mm.h> #include <linux/uio.h> #include <linux/poll.h> #include <linux/personality.h> #include <linux/stat.h> #include <linux/rwsem.h> #include <linux/compat.h> #include <linux/vfs.h> #include <linux/ptrace.h> #include <linux/highuid.h> #include <linux/sysctl.h> #include <linux/slab.h> #include <asm/mman.h> #include <asm/types.h> #include <asm/uaccess.h> #include <linux/atomic.h> #include <asm/vgtod.h> #include <asm/sys_ia32.h> #define AA(__x) ((unsigned long)(__x)) asmlinkage long sys32_truncate64(const char __user *filename, unsigned long offset_low, unsigned long offset_high) { return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low); } asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low, unsigned long offset_high) { return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low); } /* * Another set for IA32/LFS -- x86_64 struct stat is different 
due to * support for 64bit inode numbers. */ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat) { typeof(ubuf->st_uid) uid = 0; typeof(ubuf->st_gid) gid = 0; SET_UID(uid, stat->uid); SET_GID(gid, stat->gid); if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) || __put_user(huge_encode_dev(stat->dev), &ubuf->st_dev) || __put_user(stat->ino, &ubuf->__st_ino) || __put_user(stat->ino, &ubuf->st_ino) || __put_user(stat->mode, &ubuf->st_mode) || __put_user(stat->nlink, &ubuf->st_nlink) || __put_user(uid, &ubuf->st_uid) || __put_user(gid, &ubuf->st_gid) || __put_user(huge_encode_dev(stat->rdev), &ubuf->st_rdev) || __put_user(stat->size, &ubuf->st_size) || __put_user(stat->atime.tv_sec, &ubuf->st_atime) || __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec) || __put_user(stat->mtime.tv_sec, &ubuf->st_mtime) || __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec) || __put_user(stat->ctime.tv_sec, &ubuf->st_ctime) || __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec) || __put_user(stat->blksize, &ubuf->st_blksize) || __put_user(stat->blocks, &ubuf->st_blocks)) return -EFAULT; return 0; } asmlinkage long sys32_stat64(const char __user *filename, struct stat64 __user *statbuf) { struct kstat stat; int ret = vfs_stat(filename, &stat); if (!ret) ret = cp_stat64(statbuf, &stat); return ret; } asmlinkage long sys32_lstat64(const char __user *filename, struct stat64 __user *statbuf) { struct kstat stat; int ret = vfs_lstat(filename, &stat); if (!ret) ret = cp_stat64(statbuf, &stat); return ret; } asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf) { struct kstat stat; int ret = vfs_fstat(fd, &stat); if (!ret) ret = cp_stat64(statbuf, &stat); return ret; } asmlinkage long sys32_fstatat(unsigned int dfd, const char __user *filename, struct stat64 __user *statbuf, int flag) { struct kstat stat; int error; error = vfs_fstatat(dfd, filename, &stat, flag); if (error) return error; return cp_stat64(statbuf, &stat); } /* * Linux/i386 
didn't use to be able to handle more than * 4 system call parameters, so these system calls used a memory * block for parameter passing.. */ struct mmap_arg_struct32 { unsigned int addr; unsigned int len; unsigned int prot; unsigned int flags; unsigned int fd; unsigned int offset; }; asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg) { struct mmap_arg_struct32 a; if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; if (a.offset & ~PAGE_MASK) return -EINVAL; return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset>>PAGE_SHIFT); } asmlinkage long sys32_mprotect(unsigned long start, size_t len, unsigned long prot) { return sys_mprotect(start, len, prot); } asmlinkage long sys32_rt_sigaction(int sig, struct sigaction32 __user *act, struct sigaction32 __user *oact, unsigned int sigsetsize) { struct k_sigaction new_ka, old_ka; int ret; compat_sigset_t set32; /* XXX: Don't preclude handling different sized sigset_t's. */ if (sigsetsize != sizeof(compat_sigset_t)) return -EINVAL; if (act) { compat_uptr_t handler, restorer; if (!access_ok(VERIFY_READ, act, sizeof(*act)) || __get_user(handler, &act->sa_handler) || __get_user(new_ka.sa.sa_flags, &act->sa_flags) || __get_user(restorer, &act->sa_restorer) || __copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t))) return -EFAULT; new_ka.sa.sa_handler = compat_ptr(handler); new_ka.sa.sa_restorer = compat_ptr(restorer); /* * FIXME: here we rely on _COMPAT_NSIG_WORS to be >= * than _NSIG_WORDS << 1 */ switch (_NSIG_WORDS) { case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6] | (((long)set32.sig[7]) << 32); case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4] | (((long)set32.sig[5]) << 32); case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2] | (((long)set32.sig[3]) << 32); case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0] | (((long)set32.sig[1]) << 32); } } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); if (!ret && oact) { /* * FIXME: here we rely on _COMPAT_NSIG_WORS to be >= * than _NSIG_WORDS << 1 */ switch (_NSIG_WORDS) { case 4: set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32); set32.sig[6] = old_ka.sa.sa_mask.sig[3]; case 3: set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32); set32.sig[4] = old_ka.sa.sa_mask.sig[2]; case 2: set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32); set32.sig[2] = old_ka.sa.sa_mask.sig[1]; case 1: set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32); set32.sig[0] = old_ka.sa.sa_mask.sig[0]; } if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || __put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler) || __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer) || __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || __copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t))) return -EFAULT; } return ret; } asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 __user *act, struct old_sigaction32 __user *oact) { struct k_sigaction new_ka, old_ka; int ret; if (act) { compat_old_sigset_t mask; compat_uptr_t handler, restorer; if (!access_ok(VERIFY_READ, act, sizeof(*act)) || __get_user(handler, &act->sa_handler) || __get_user(new_ka.sa.sa_flags, &act->sa_flags) || __get_user(restorer, &act->sa_restorer) || __get_user(mask, &act->sa_mask)) return -EFAULT; new_ka.sa.sa_handler = compat_ptr(handler); new_ka.sa.sa_restorer = compat_ptr(restorer); siginitset(&new_ka.sa.sa_mask, mask); } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); if (!ret && oact) { if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || __put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler) || __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer) || __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) return -EFAULT; } return ret; } asmlinkage long sys32_alarm(unsigned int seconds) { return alarm_setitimer(seconds); } asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, int options) { return compat_sys_wait4(pid, stat_addr, options, NULL); } /* 32-bit timeval and related flotsam. */ asmlinkage long sys32_sysfs(int option, u32 arg1, u32 arg2) { return sys_sysfs(option, arg1, arg2); } asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid, struct compat_timespec __user *interval) { struct timespec t; int ret; mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t); set_fs(old_fs); if (put_compat_timespec(&t, interval)) return -EFAULT; return ret; } asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize) { sigset_t s; compat_sigset_t s32; int ret; mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize); set_fs(old_fs); if (!ret) { switch (_NSIG_WORDS) { case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3]; case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2]; case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1]; case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0]; } if (copy_to_user(set, &s32, sizeof(compat_sigset_t))) return -EFAULT; } return ret; } asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo) { siginfo_t info; int ret; mm_segment_t old_fs = get_fs(); if (copy_siginfo_from_user32(&info, uinfo)) return -EFAULT; set_fs(KERNEL_DS); ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user 
*)&info); set_fs(old_fs); return ret; } /* warning: next two assume little endian */ asmlinkage long sys32_pread(unsigned int fd, char __user *ubuf, u32 count, u32 poslo, u32 poshi) { return sys_pread64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo)); } asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf, u32 count, u32 poslo, u32 poshi) { return sys_pwrite64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo)); } asmlinkage long sys32_personality(unsigned long personality) { int ret; if (personality(current->personality) == PER_LINUX32 && personality == PER_LINUX) personality = PER_LINUX32; ret = sys_personality(personality); if (ret == PER_LINUX32) ret = PER_LINUX; return ret; } asmlinkage long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, s32 count) { mm_segment_t old_fs = get_fs(); int ret; off_t of; if (offset && get_user(of, offset)) return -EFAULT; set_fs(KERNEL_DS); ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL, count); set_fs(old_fs); if (offset && put_user(of, offset)) return -EFAULT; return ret; } asmlinkage long sys32_execve(const char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp, struct pt_regs *regs) { long error; char *filename; filename = getname(name); error = PTR_ERR(filename); if (IS_ERR(filename)) return error; error = compat_do_execve(filename, argv, envp, regs); putname(filename); return error; } asmlinkage long sys32_clone(unsigned int clone_flags, unsigned int newsp, struct pt_regs *regs) { void __user *parent_tid = (void __user *)regs->dx; void __user *child_tid = (void __user *)regs->di; if (!newsp) newsp = regs->sp; return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); } /* * Some system calls that need sign extended arguments. This could be * done by a generic wrapper. 
*/ long sys32_lseek(unsigned int fd, int offset, unsigned int whence) { return sys_lseek(fd, offset, whence); } long sys32_kill(int pid, int sig) { return sys_kill(pid, sig); } long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high, __u32 len_low, __u32 len_high, int advice) { return sys_fadvise64_64(fd, (((u64)offset_high)<<32) | offset_low, (((u64)len_high)<<32) | len_low, advice); } long sys32_vm86_warning(void) { struct task_struct *me = current; static char lastcomm[sizeof(me->comm)]; if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) { compat_printk(KERN_INFO "%s: vm86 mode not supported on 64 bit kernel\n", me->comm); strncpy(lastcomm, me->comm, sizeof(lastcomm)); } return -ENOSYS; } long sys32_lookup_dcookie(u32 addr_low, u32 addr_high, char __user *buf, size_t len) { return sys_lookup_dcookie(((u64)addr_high << 32) | addr_low, buf, len); } asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi, size_t count) { return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count); } asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi, unsigned n_low, unsigned n_hi, int flags) { return sys_sync_file_range(fd, ((u64)off_hi << 32) | off_low, ((u64)n_hi << 32) | n_low, flags); } asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi, size_t len, int advice) { return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo, len, advice); } asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo, unsigned offset_hi, unsigned len_lo, unsigned len_hi) { return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo, ((u64)len_hi << 32) | len_lo); } asmlinkage long sys32_fanotify_mark(int fanotify_fd, unsigned int flags, u32 mask_lo, u32 mask_hi, int fd, const char __user *pathname) { return sys_fanotify_mark(fanotify_fd, flags, ((u64)mask_hi << 32) | mask_lo, fd, pathname); }
gpl-2.0
revjunkie/kernel-copyleft
drivers/pcmcia/pxa2xx_viper.c
4935
4183
/* * Viper/Zeus PCMCIA support * Copyright 2004 Arcom Control Systems * * Maintained by Marc Zyngier <maz@misterjones.org> * * Based on: * iPAQ h2200 PCMCIA support * Copyright 2004 Koen Kooi <koen@vestingbar.nl> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <pcmcia/ss.h> #include <asm/irq.h> #include <mach/arcom-pcmcia.h> #include "soc_common.h" #include "pxa2xx_base.h" static struct platform_device *arcom_pcmcia_dev; static inline struct arcom_pcmcia_pdata *viper_get_pdata(void) { return arcom_pcmcia_dev->dev.platform_data; } static int viper_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { struct arcom_pcmcia_pdata *pdata = viper_get_pdata(); unsigned long flags; skt->stat[SOC_STAT_CD].gpio = pdata->cd_gpio; skt->stat[SOC_STAT_CD].name = "PCMCIA_CD"; skt->stat[SOC_STAT_RDY].gpio = pdata->rdy_gpio; skt->stat[SOC_STAT_RDY].name = "CF ready"; if (gpio_request(pdata->pwr_gpio, "CF power")) goto err_request_pwr; local_irq_save(flags); if (gpio_direction_output(pdata->pwr_gpio, 0)) { local_irq_restore(flags); goto err_dir; } local_irq_restore(flags); return 0; err_dir: gpio_free(pdata->pwr_gpio); err_request_pwr: dev_err(&arcom_pcmcia_dev->dev, "Failed to setup PCMCIA GPIOs\n"); return -1; } /* * Release all resources. 
*/ static void viper_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) { struct arcom_pcmcia_pdata *pdata = viper_get_pdata(); gpio_free(pdata->pwr_gpio); } static void viper_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) { state->vs_3v = 1; /* Can only apply 3.3V */ state->vs_Xv = 0; } static int viper_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { struct arcom_pcmcia_pdata *pdata = viper_get_pdata(); /* Silently ignore Vpp, output enable, speaker enable. */ pdata->reset(state->flags & SS_RESET); /* Apply socket voltage */ switch (state->Vcc) { case 0: gpio_set_value(pdata->pwr_gpio, 0); break; case 33: gpio_set_value(pdata->pwr_gpio, 1); break; default: dev_err(&arcom_pcmcia_dev->dev, "Unsupported Vcc:%d\n", state->Vcc); return -1; } return 0; } static struct pcmcia_low_level viper_pcmcia_ops = { .owner = THIS_MODULE, .hw_init = viper_pcmcia_hw_init, .hw_shutdown = viper_pcmcia_hw_shutdown, .socket_state = viper_pcmcia_socket_state, .configure_socket = viper_pcmcia_configure_socket, .nr = 1, }; static struct platform_device *viper_pcmcia_device; static int viper_pcmcia_probe(struct platform_device *pdev) { int ret; /* I can't imagine more than one device, but you never know... 
*/ if (arcom_pcmcia_dev) return -EEXIST; if (!pdev->dev.platform_data) return -EINVAL; viper_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); if (!viper_pcmcia_device) return -ENOMEM; arcom_pcmcia_dev = pdev; viper_pcmcia_device->dev.parent = &pdev->dev; ret = platform_device_add_data(viper_pcmcia_device, &viper_pcmcia_ops, sizeof(viper_pcmcia_ops)); if (!ret) ret = platform_device_add(viper_pcmcia_device); if (ret) { platform_device_put(viper_pcmcia_device); arcom_pcmcia_dev = NULL; } return ret; } static int viper_pcmcia_remove(struct platform_device *pdev) { platform_device_unregister(viper_pcmcia_device); arcom_pcmcia_dev = NULL; return 0; } static struct platform_device_id viper_pcmcia_id_table[] = { { .name = "viper-pcmcia", }, { .name = "zeus-pcmcia", }, { }, }; static struct platform_driver viper_pcmcia_driver = { .probe = viper_pcmcia_probe, .remove = viper_pcmcia_remove, .driver = { .name = "arcom-pcmcia", .owner = THIS_MODULE, }, .id_table = viper_pcmcia_id_table, }; module_platform_driver(viper_pcmcia_driver); MODULE_DEVICE_TABLE(platform, viper_pcmcia_id_table); MODULE_LICENSE("GPL");
gpl-2.0
jtouug/kernel_htc_m7_gpe4.4.4
drivers/mtd/maps/ck804xrom.c
8007
10906
/*
 * ck804xrom.c
 *
 * Normal mappings of chips in physical memory
 *
 * Dave Olsen <dolsen@lnxi.com>
 * Ryan Jackson <rjackson@lnxi.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/flashchip.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/list.h>

#define MOD_NAME KBUILD_BASENAME

/* Room in map_name for the "@%08Lx" address suffix. */
#define ADDRESS_NAME_LEN 18

/* Granularity of the chip probe loop in ck804xrom_init_one(). */
#define ROM_PROBE_STEP_SIZE (64*1024)

/* driver_data values in ck804xrom_pci_tbl selecting the register layout */
#define DEV_CK804 1
#define DEV_MCP55 2

/*
 * State for the single ROM window this driver manages.  There is exactly
 * one instance (ck804xrom_window below); the driver does not support
 * multiple southbridges at once.
 */
struct ck804xrom_window {
	void __iomem *virt;	/* ioremap'd base of the window */
	unsigned long phys;	/* physical base of the window */
	unsigned long size;	/* window length in bytes */
	struct list_head maps;	/* list of ck804xrom_map_info found below */
	struct resource rsrc;	/* iomem reservation for the whole window */
	struct pci_dev *pdev;	/* held reference; dropped in cleanup */
};

/* Per-detected-chip bookkeeping, linked into ck804xrom_window.maps. */
struct ck804xrom_map_info {
	struct list_head list;
	struct map_info map;
	struct mtd_info *mtd;
	struct resource rsrc;
	char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
};

/*
 * The following applies to ck804 only:
 * The 2 bits controlling the window size are often set to allow reading
 * the BIOS, but too small to allow writing, since the lock registers are
 * 4MiB lower in the address space than the data.
 *
 * This is intended to prevent flashing the bios, perhaps accidentally.
 *
 * This parameter allows the normal driver to override the BIOS settings.
 *
 * The bits are 6 and 7.  If both bits are set, it is a 5MiB window.
 * If only the 7 Bit is set, it is a 4MiB window.  Otherwise, a
 * 64KiB window.
 *
 * The following applies to mcp55 only:
 * The 15 bits controlling the window size are distributed as follows:
 * byte @0x88: bit 0..7
 * byte @0x8c: bit 8..15
 * word @0x90: bit 16..30
 * If all bits are enabled, we have a 16? MiB window
 * Please set win_size_bits to 0x7fffffff if you actually want to do something
 */
static uint win_size_bits = 0;
module_param(win_size_bits, uint, 0);
MODULE_PARM_DESC(win_size_bits, "ROM window size bits override, normally set by BIOS.");

static struct ck804xrom_window ck804xrom_window = {
	.maps = LIST_HEAD_INIT(ck804xrom_window.maps),
};

/*
 * Tear down everything ck804xrom_init_one() built up: re-lock the ROM
 * window, unregister and destroy every discovered MTD, release the iomem
 * reservations, unmap the window, and drop the pci_dev reference.
 * Safe to call on a partially initialized window.
 */
static void ck804xrom_cleanup(struct ck804xrom_window *window)
{
	struct ck804xrom_map_info *map, *scratch;
	u8 byte;

	if (window->pdev) {
		/* Disable writes through the rom window */
		pci_read_config_byte(window->pdev, 0x6d, &byte);
		pci_write_config_byte(window->pdev, 0x6d, byte & ~1);
	}

	/* Free all of the mtd devices */
	list_for_each_entry_safe(map, scratch, &window->maps, list) {
		if (map->rsrc.parent)
			release_resource(&map->rsrc);

		mtd_device_unregister(map->mtd);
		map_destroy(map->mtd);
		list_del(&map->list);
		kfree(map);
	}
	if (window->rsrc.parent)
		release_resource(&window->rsrc);

	if (window->virt) {
		iounmap(window->virt);
		window->virt = NULL;
		window->phys = 0;
		window->size = 0;
	}
	/* pci_dev_put(NULL) is a no-op, so this is safe even if probe failed */
	pci_dev_put(window->pdev);
}

/*
 * Probe one southbridge: open the ROM window below 4GiB, reserve and map
 * it, then walk it in ROM_PROBE_STEP_SIZE steps running the CFI/JEDEC
 * probes at every bank width.  Each chip found is registered as an MTD.
 * Returns 0 if at least one chip was found, -ENODEV otherwise.
 */
static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
					 const struct pci_device_id *ent)
{
	static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
	u8 byte;
	u16 word;
	struct ck804xrom_window *window = &ck804xrom_window;
	struct ck804xrom_map_info *map = NULL;
	unsigned long map_top;

	/* Remember the pci dev I find the window in */
	window->pdev = pci_dev_get(pdev);

	switch (ent->driver_data) {
	case DEV_CK804:
		/* Enable the selected rom window.  This is often incorrectly
		 * set up by the BIOS, and the 4MiB offset for the lock registers
		 * requires the full 5MiB of window space.
		 *
		 * This 'write, then read' approach leaves the bits for
		 * other uses of the hardware info.
		 */
		pci_read_config_byte(pdev, 0x88, &byte);
		pci_write_config_byte(pdev, 0x88, byte | win_size_bits );

		/* Assume the rom window is properly setup, and find it's size */
		pci_read_config_byte(pdev, 0x88, &byte);

		if ((byte & ((1<<7)|(1<<6))) == ((1<<7)|(1<<6)))
			window->phys = 0xffb00000; /* 5MiB */
		else if ((byte & (1<<7)) == (1<<7))
			window->phys = 0xffc00000; /* 4MiB */
		else
			window->phys = 0xffff0000; /* 64KiB */
		break;

	case DEV_MCP55:
		/* MCP55 spreads the window-size bits over three registers;
		 * see the win_size_bits comment above for the layout. */
		pci_read_config_byte(pdev, 0x88, &byte);
		pci_write_config_byte(pdev, 0x88, byte | (win_size_bits & 0xff));

		pci_read_config_byte(pdev, 0x8c, &byte);
		pci_write_config_byte(pdev, 0x8c, byte | ((win_size_bits & 0xff00) >> 8));

		pci_read_config_word(pdev, 0x90, &word);
		pci_write_config_word(pdev, 0x90, word | ((win_size_bits & 0x7fff0000) >> 16));

		window->phys = 0xff000000; /* 16MiB, hardcoded for now */
		break;
	}

	/* The window always runs up to the 4GiB boundary. */
	window->size = 0xffffffffUL - window->phys + 1UL;

	/*
	 * Try to reserve the window mem region.  If this fails then
	 * it is likely due to a fragment of the window being
	 * "reserved" by the BIOS.  In the case that the
	 * request_mem_region() fails then once the rom size is
	 * discovered we will try to reserve the unreserved fragment.
	 */
	window->rsrc.name = MOD_NAME;
	window->rsrc.start = window->phys;
	window->rsrc.end = window->phys + window->size - 1;
	window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, &window->rsrc)) {
		/* Non-fatal: probing continues without the reservation. */
		window->rsrc.parent = NULL;
		printk(KERN_ERR MOD_NAME
		       " %s(): Unable to register resource %pR - kernel bug?\n",
		       __func__, &window->rsrc);
	}

	/* Enable writes through the rom window */
	pci_read_config_byte(pdev, 0x6d, &byte);
	pci_write_config_byte(pdev, 0x6d, byte | 1);

	/* FIXME handle registers 0x80 - 0x8C the bios region locks */

	/* For write accesses caches are useless */
	window->virt = ioremap_nocache(window->phys, window->size);
	if (!window->virt) {
		printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
		       window->phys, window->size);
		goto out;
	}

	/* Get the first address to look for a rom chip at */
	map_top = window->phys;
#if 1
	/* The probe sequence run over the firmware hub lock
	 * registers sets them to 0x7 (no access).
	 * Probe at most the last 4MiB of the address space.
	 */
	if (map_top < 0xffc00000)
		map_top = 0xffc00000;
#endif
	/* Loop through and look for rom chips.  Since we don't know the
	 * starting address for each chip, probe every ROM_PROBE_STEP_SIZE
	 * bytes from the starting address of the window.
	 */
	while((map_top - 1) < 0xffffffffUL) {
		struct cfi_private *cfi;
		unsigned long offset;
		int i;

		/* Reuse a leftover map struct from a failed probe step. */
		if (!map)
			map = kmalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			printk(KERN_ERR MOD_NAME ": kmalloc failed");
			goto out;
		}
		memset(map, 0, sizeof(*map));
		INIT_LIST_HEAD(&map->list);
		map->map.name = map->map_name;
		map->map.phys = map_top;
		offset = map_top - window->phys;
		map->map.virt = (void __iomem *)
			(((unsigned long)(window->virt)) + offset);
		map->map.size = 0xffffffffUL - map_top + 1UL;
		/* Set the name of the map to the address I am trying */
		sprintf(map->map_name, "%s @%08Lx",
			MOD_NAME, (unsigned long long)map->map.phys);

		/* There is no generic VPP support */
		for(map->map.bankwidth = 32; map->map.bankwidth;
		    map->map.bankwidth >>= 1)
		{
			char **probe_type;
			/* Skip bankwidths that are not supported */
			if (!map_bankwidth_supported(map->map.bankwidth))
				continue;

			/* Setup the map methods */
			simple_map_init(&map->map);

			/* Try all of the probe methods */
			probe_type = rom_probe_types;
			for(; *probe_type; probe_type++) {
				map->mtd = do_map_probe(*probe_type, &map->map);
				if (map->mtd)
					goto found;
			}
		}
		/* Nothing at this address; step forward and try again. */
		map_top += ROM_PROBE_STEP_SIZE;
		continue;
	found:
		/* Trim the size if we are larger than the map */
		if (map->mtd->size > map->map.size) {
			printk(KERN_WARNING MOD_NAME
			       " rom(%llu) larger than window(%lu). fixing...\n",
			       (unsigned long long)map->mtd->size, map->map.size);
			map->mtd->size = map->map.size;
		}
		if (window->rsrc.parent) {
			/*
			 * Registering the MTD device in iomem may not be possible
			 * if there is a BIOS "reserved" and BUSY range.  If this
			 * fails then continue anyway.
			 */
			map->rsrc.name = map->map_name;
			map->rsrc.start = map->map.phys;
			map->rsrc.end = map->map.phys + map->mtd->size - 1;
			map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			if (request_resource(&window->rsrc, &map->rsrc)) {
				printk(KERN_ERR MOD_NAME
				       ": cannot reserve MTD resource\n");
				map->rsrc.parent = NULL;
			}
		}

		/* Make the whole region visible in the map */
		map->map.virt = window->virt;
		map->map.phys = window->phys;
		/* Rebase the chip start offsets to the window origin. */
		cfi = map->map.fldrv_priv;
		for(i = 0; i < cfi->numchips; i++)
			cfi->chips[i].start += offset;

		/* Now that the mtd devices is complete claim and export it */
		map->mtd->owner = THIS_MODULE;
		if (mtd_device_register(map->mtd, NULL, 0)) {
			map_destroy(map->mtd);
			map->mtd = NULL;
			goto out;
		}

		/* Calculate the new value of map_top */
		map_top += map->mtd->size;

		/* File away the map structure */
		list_add(&map->list, &window->maps);
		map = NULL;
	}

 out:
	/* Free any left over map structures */
	if (map)
		kfree(map);

	/* See if I have any map structures */
	if (list_empty(&window->maps)) {
		ck804xrom_cleanup(window);
		return -ENODEV;
	}
	return 0;
}

/* PCI remove hook; the global window makes pdev itself unused here. */
static void __devexit ck804xrom_remove_one (struct pci_dev *pdev)
{
	struct ck804xrom_window *window = &ck804xrom_window;

	ck804xrom_cleanup(window);
}

static struct pci_device_id ck804xrom_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0051), .driver_data = DEV_CK804 },
	{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0360), .driver_data = DEV_MCP55 },
	{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0361), .driver_data = DEV_MCP55 },
	{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0362), .driver_data = DEV_MCP55 },
	{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0363), .driver_data = DEV_MCP55 },
	{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0364), .driver_data = DEV_MCP55 },
	{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0365), .driver_data = DEV_MCP55 },
	{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0366), .driver_data = DEV_MCP55 },
	{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0367), .driver_data = DEV_MCP55 },
	{ 0, }
};

#if 0
MODULE_DEVICE_TABLE(pci, ck804xrom_pci_tbl);

static struct pci_driver ck804xrom_driver = {
	.name =		MOD_NAME,
	.id_table =	ck804xrom_pci_tbl,
	.probe =	ck804xrom_init_one,
	.remove =	ck804xrom_remove_one,
};
#endif

/*
 * The driver deliberately does not register with the PCI core (the #if 0
 * blocks): it scans the id table by hand once at load time instead,
 * presumably so the southbridge stays bound to its normal driver --
 * NOTE(review): confirm against other *_xrom map drivers.
 */
static int __init init_ck804xrom(void)
{
	struct pci_dev *pdev;
	struct pci_device_id *id;
	int retVal;
	pdev = NULL;

	for(id = ck804xrom_pci_tbl; id->vendor; id++) {
		pdev = pci_get_device(id->vendor, id->device, NULL);
		if (pdev)
			break;
	}
	if (pdev) {
		retVal = ck804xrom_init_one(pdev, id);
		/* init_one took its own reference; drop the lookup's. */
		pci_dev_put(pdev);
		return retVal;
	}
	return -ENXIO;
#if 0
	return pci_register_driver(&ck804xrom_driver);
#endif
}

static void __exit cleanup_ck804xrom(void)
{
	ck804xrom_remove_one(ck804xrom_window.pdev);
}

module_init(init_ck804xrom);
module_exit(cleanup_ck804xrom);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>, Dave Olsen <dolsen@lnxi.com>");
MODULE_DESCRIPTION("MTD map driver for BIOS chips on the Nvidia ck804 southbridge");
gpl-2.0
jollaman999/jolla-kernel_G_Gen3
drivers/media/rc/keymaps/rc-leadtek-y04g0051.c
9543
2860
/*
 * LeadTek Y04G0051 remote controller keytable
 *
 * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/*
 * NEC scancode -> input keycode table.  The high byte (0x03) is the
 * remote's NEC address; the low byte is the command.  Some physical keys
 * emit two distinct scancodes (see the "1st"/"2nd" remarks).
 */
static struct rc_map_table leadtek_y04g0051[] = {
	{ 0x0300, KEY_POWER2 },
	{ 0x0303, KEY_SCREEN },
	{ 0x0304, KEY_RIGHT },
	{ 0x0305, KEY_1 },
	{ 0x0306, KEY_2 },
	{ 0x0307, KEY_3 },
	{ 0x0308, KEY_LEFT },
	{ 0x0309, KEY_4 },
	{ 0x030a, KEY_5 },
	{ 0x030b, KEY_6 },
	{ 0x030c, KEY_UP },
	{ 0x030d, KEY_7 },
	{ 0x030e, KEY_8 },
	{ 0x030f, KEY_9 },
	{ 0x0310, KEY_DOWN },
	{ 0x0311, KEY_AGAIN },
	{ 0x0312, KEY_0 },
	{ 0x0313, KEY_OK },              /* 1st ok */
	{ 0x0314, KEY_MUTE },
	{ 0x0316, KEY_OK },              /* 2nd ok */
	{ 0x031e, KEY_VIDEO },           /* 2nd video */
	{ 0x031b, KEY_AUDIO },
	{ 0x031f, KEY_TEXT },
	{ 0x0340, KEY_SLEEP },
	{ 0x0341, KEY_DOT },
	{ 0x0342, KEY_REWIND },
	{ 0x0343, KEY_PLAY },
	{ 0x0344, KEY_FASTFORWARD },
	{ 0x0345, KEY_TIME },
	{ 0x0346, KEY_STOP },            /* 2nd stop */
	{ 0x0347, KEY_RECORD },
	{ 0x0348, KEY_CAMERA },
	{ 0x0349, KEY_ESC },
	{ 0x034a, KEY_NEW },
	{ 0x034b, KEY_RED },
	{ 0x034c, KEY_GREEN },
	{ 0x034d, KEY_YELLOW },
	{ 0x034e, KEY_BLUE },
	{ 0x034f, KEY_MENU },
	{ 0x0350, KEY_STOP },            /* 1st stop */
	{ 0x0351, KEY_CHANNEL },
	{ 0x0352, KEY_VIDEO },           /* 1st video */
	{ 0x0353, KEY_EPG },
	{ 0x0354, KEY_PREVIOUS },
	{ 0x0355, KEY_NEXT },
	{ 0x0356, KEY_TV },
	{ 0x035a, KEY_VOLUMEDOWN },
	{ 0x035b, KEY_CHANNELUP },
	{ 0x035e, KEY_VOLUMEUP },
	{ 0x035f, KEY_CHANNELDOWN },
};

/* Wraps the table for registration with the rc-core keymap registry. */
static struct rc_map_list leadtek_y04g0051_map = {
	.map = {
		.scan    = leadtek_y04g0051,
		.size    = ARRAY_SIZE(leadtek_y04g0051),
		.rc_type = RC_TYPE_NEC,
		.name    = RC_MAP_LEADTEK_Y04G0051,
	}
};

static int __init init_rc_map_leadtek_y04g0051(void)
{
	return rc_map_register(&leadtek_y04g0051_map);
}

static void __exit exit_rc_map_leadtek_y04g0051(void)
{
	rc_map_unregister(&leadtek_y04g0051_map);
}

module_init(init_rc_map_leadtek_y04g0051)
module_exit(exit_rc_map_leadtek_y04g0051)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
gpl-2.0
TheTypoMaster/yotrino-linux-kernel
drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c
9543
3556
/*
 * TwinHan AzureWave AD-TU700(704J) remote controller keytable
 *
 * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/*
 * NEC scancode -> input keycode table (address byte 0x00).  Trailing
 * comments give the label printed on the physical button where it does
 * not follow obviously from the keycode.
 */
static struct rc_map_table azurewave_ad_tu700[] = {
	{ 0x0000, KEY_TAB },             /* Tab */
	{ 0x0001, KEY_2 },
	{ 0x0002, KEY_CHANNELDOWN },
	{ 0x0003, KEY_1 },
	{ 0x0004, KEY_MENU },            /* Record List */
	{ 0x0005, KEY_CHANNELUP },
	{ 0x0006, KEY_3 },
	{ 0x0007, KEY_SLEEP },           /* Hibernate */
	{ 0x0008, KEY_VIDEO },           /* A/V */
	{ 0x0009, KEY_4 },
	{ 0x000a, KEY_VOLUMEDOWN },
	{ 0x000c, KEY_CANCEL },          /* Cancel */
	{ 0x000d, KEY_7 },
	{ 0x000e, KEY_AGAIN },           /* Recall */
	{ 0x000f, KEY_TEXT },            /* Teletext */
	{ 0x0010, KEY_MUTE },
	{ 0x0011, KEY_RECORD },
	{ 0x0012, KEY_FASTFORWARD },     /* FF >> */
	{ 0x0013, KEY_BACK },            /* Back */
	{ 0x0014, KEY_PLAY },
	{ 0x0015, KEY_0 },
	{ 0x0016, KEY_POWER2 },          /* [red power button] */
	{ 0x0017, KEY_FAVORITES },       /* Favorite List */
	{ 0x0018, KEY_RED },
	{ 0x0019, KEY_8 },
	{ 0x001a, KEY_STOP },
	{ 0x001b, KEY_9 },
	{ 0x001c, KEY_EPG },             /* Info/EPG */
	{ 0x001d, KEY_5 },
	{ 0x001e, KEY_VOLUMEUP },
	{ 0x001f, KEY_6 },
	{ 0x0040, KEY_REWIND },          /* FR << */
	{ 0x0041, KEY_PREVIOUS },        /* Replay */
	{ 0x0042, KEY_NEXT },            /* Skip */
	{ 0x0043, KEY_SUBTITLE },        /* Subtitle / CC */
	{ 0x0045, KEY_KPPLUS },          /* Zoom+ */
	{ 0x0046, KEY_KPMINUS },         /* Zoom- */
	{ 0x0047, KEY_NEW },             /* PIP */
	{ 0x0048, KEY_INFO },            /* Preview */
	{ 0x0049, KEY_MODE },            /* L/R */
	{ 0x004a, KEY_CLEAR },           /* Clear */
	{ 0x004b, KEY_UP },              /* up arrow */
	{ 0x004c, KEY_PAUSE },
	{ 0x004d, KEY_ZOOM },            /* Full Screen */
	{ 0x004e, KEY_LEFT },            /* left arrow */
	{ 0x004f, KEY_OK },              /* Enter / ok */
	{ 0x0050, KEY_LANGUAGE },        /* SAP */
	{ 0x0051, KEY_DOWN },            /* down arrow */
	{ 0x0052, KEY_RIGHT },           /* right arrow */
	{ 0x0053, KEY_GREEN },
	{ 0x0054, KEY_CAMERA },          /* Capture */
	{ 0x005e, KEY_YELLOW },
	{ 0x005f, KEY_BLUE },
};

/* Wraps the table for registration with the rc-core keymap registry. */
static struct rc_map_list azurewave_ad_tu700_map = {
	.map = {
		.scan    = azurewave_ad_tu700,
		.size    = ARRAY_SIZE(azurewave_ad_tu700),
		.rc_type = RC_TYPE_NEC,
		.name    = RC_MAP_AZUREWAVE_AD_TU700,
	}
};

static int __init init_rc_map_azurewave_ad_tu700(void)
{
	return rc_map_register(&azurewave_ad_tu700_map);
}

static void __exit exit_rc_map_azurewave_ad_tu700(void)
{
	rc_map_unregister(&azurewave_ad_tu700_map);
}

module_init(init_rc_map_azurewave_ad_tu700)
module_exit(exit_rc_map_azurewave_ad_tu700)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
gpl-2.0
Evervolv/android_kernel_samsung_smdk4412
fs/befs/inode.c
12871
1153
/* * inode.c * * Copyright (C) 2001 Will Dyson <will_dyson@pobox.com> */ #include <linux/fs.h> #include "befs.h" #include "inode.h" /* Validates the correctness of the befs inode Returns BEFS_OK if the inode should be used, otherwise returns BEFS_BAD_INODE */ int befs_check_inode(struct super_block *sb, befs_inode * raw_inode, befs_blocknr_t inode) { u32 magic1 = fs32_to_cpu(sb, raw_inode->magic1); befs_inode_addr ino_num = fsrun_to_cpu(sb, raw_inode->inode_num); u32 flags = fs32_to_cpu(sb, raw_inode->flags); /* check magic header. */ if (magic1 != BEFS_INODE_MAGIC1) { befs_error(sb, "Inode has a bad magic header - inode = %lu", inode); return BEFS_BAD_INODE; } /* * Sanity check2: inodes store their own block address. Check it. */ if (inode != iaddr2blockno(sb, &ino_num)) { befs_error(sb, "inode blocknr field disagrees with vfs " "VFS: %lu, Inode %lu", inode, iaddr2blockno(sb, &ino_num)); return BEFS_BAD_INODE; } /* * check flag */ if (!(flags & BEFS_INODE_IN_USE)) { befs_error(sb, "inode is not used - inode = %lu", inode); return BEFS_BAD_INODE; } return BEFS_OK; }
gpl-2.0
wolf-feathers66/wolf.kernel
samples/kfifo/record-example.c
13127
4072
/*
 * Sample dynamic sized record fifo implementation
 *
 * Copyright (C) 2010 Stefani Seibold <stefani@seibold.net>
 *
 * Released under the GPL version 2 only.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/kfifo.h>

/*
 * This module shows how to create a variable sized record fifo.
 */

/* fifo size in elements (bytes) */
#define FIFO_SIZE	128

/* name of the proc entry */
#define	PROC_FIFO	"record-fifo"

/* lock for procfs read access */
static DEFINE_MUTEX(read_lock);

/* lock for procfs write access */
static DEFINE_MUTEX(write_lock);

/*
 * define DYNAMIC in this example for a dynamically allocated fifo.
 *
 * Otherwise the fifo storage will be a part of the fifo structure.
 */
#if 0
#define DYNAMIC
#endif

/*
 * struct kfifo_rec_ptr_1 and  STRUCT_KFIFO_REC_1 can handle records of a
 * length between 0 and 255 bytes.
 *
 * struct kfifo_rec_ptr_2 and  STRUCT_KFIFO_REC_2 can handle records of a
 * length between 0 and 65535 bytes.
 */
#ifdef DYNAMIC
struct kfifo_rec_ptr_1 test;
#else
typedef STRUCT_KFIFO_REC_1(FIFO_SIZE) mytest;

static mytest test;
#endif

/* Expected fifo contents after testfunc() fills it; used for self-check. */
static const char *expected_result[] = {
	"a",
	"bb",
	"ccc",
	"dddd",
	"eeeee",
	"ffffff",
	"ggggggg",
	"hhhhhhhh",
	"iiiiiiiii",
	"jjjjjjjjjj",
};

/*
 * Self-test run once at module load: fills the fifo with a fixed record,
 * then ten variable-length records, skips the first, and verifies the
 * remaining records against expected_result.  Returns 0 on success or
 * -EIO on any mismatch.
 */
static int __init testfunc(void)
{
	char		buf[100];
	unsigned int	i;
	unsigned int	ret;
	struct { unsigned char buf[6]; } hello = { "hello" };

	printk(KERN_INFO "record fifo test start\n");

	kfifo_in(&test, &hello, sizeof(hello));

	/* show the size of the next record in the fifo */
	printk(KERN_INFO "fifo peek len: %u\n", kfifo_peek_len(&test));

	/* put in variable length data */
	for (i = 0; i < 10; i++) {
		memset(buf, 'a' + i, i + 1);
		kfifo_in(&test, buf, i + 1);
	}

	/* skip first element of the fifo */
	printk(KERN_INFO "skip 1st element\n");
	kfifo_skip(&test);

	printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test));

	/* show the first record without removing from the fifo */
	ret = kfifo_out_peek(&test, buf, sizeof(buf));
	if (ret)
		printk(KERN_INFO "%.*s\n", ret, buf);

	/* check the correctness of all values in the fifo */
	i = 0;
	while (!kfifo_is_empty(&test)) {
		/* ret is the record length; records are not NUL-terminated */
		ret = kfifo_out(&test, buf, sizeof(buf));
		buf[ret] = '\0';
		printk(KERN_INFO "item = %.*s\n", ret, buf);
		if (strcmp(buf, expected_result[i++])) {
			printk(KERN_WARNING "value mismatch: test failed\n");
			return -EIO;
		}
	}
	if (i != ARRAY_SIZE(expected_result)) {
		printk(KERN_WARNING "size mismatch: test failed\n");
		return -EIO;
	}
	printk(KERN_INFO "test passed\n");

	return 0;
}

/*
 * procfs write handler: copies one record from userspace into the fifo.
 * Returns the number of bytes copied, or a negative errno.
 */
static ssize_t fifo_write(struct file *file, const char __user *buf,
						size_t count, loff_t *ppos)
{
	int ret;
	unsigned int copied;

	if (mutex_lock_interruptible(&write_lock))
		return -ERESTARTSYS;

	ret = kfifo_from_user(&test, buf, count, &copied);

	mutex_unlock(&write_lock);

	return ret ? ret : copied;
}

/*
 * procfs read handler: pops one record from the fifo into userspace.
 * Returns the number of bytes copied, or a negative errno.
 */
static ssize_t fifo_read(struct file *file, char __user *buf,
						size_t count, loff_t *ppos)
{
	int ret;
	unsigned int copied;

	if (mutex_lock_interruptible(&read_lock))
		return -ERESTARTSYS;

	ret = kfifo_to_user(&test, buf, count, &copied);

	mutex_unlock(&read_lock);

	return ret ? ret : copied;
}

static const struct file_operations fifo_fops = {
	.owner		= THIS_MODULE,
	.read		= fifo_read,
	.write		= fifo_write,
	.llseek		= noop_llseek,
};

/*
 * Module init: allocate/initialize the fifo, run the self-test, then
 * expose the fifo via /proc/record-fifo.
 */
static int __init example_init(void)
{
#ifdef DYNAMIC
	int ret;

	ret = kfifo_alloc(&test, FIFO_SIZE, GFP_KERNEL);
	if (ret) {
		printk(KERN_ERR "error kfifo_alloc\n");
		return ret;
	}
#else
	INIT_KFIFO(test);
#endif
	if (testfunc() < 0) {
#ifdef DYNAMIC
		kfifo_free(&test);
#endif
		return -EIO;
	}

	if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) {
#ifdef DYNAMIC
		kfifo_free(&test);
#endif
		return -ENOMEM;
	}
	return 0;
}

/* Module exit: remove the proc entry and free the fifo if dynamic. */
static void __exit example_exit(void)
{
	remove_proc_entry(PROC_FIFO, NULL);
#ifdef DYNAMIC
	kfifo_free(&test);
#endif
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stefani Seibold <stefani@seibold.net>");
gpl-2.0
alexandrinno/android_device_sony_D2403
drivers/scsi/scsi_module.c
14919
1688
/*
 * Copyright (C) 2003 Christoph Hellwig.
 * Released under GPL v2.
 *
 * Support for old-style host templates.
 *
 * NOTE:  Do not use this for new drivers ever.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include <scsi/scsi_host.h>

/*
 * Module init for a legacy-template driver.  The driver supplies a global
 * `driver_template`; we run its detect() hook to populate the legacy host
 * list, then register and scan each discovered host.  On a registration
 * failure all hosts added so far are unwound in reverse order.
 */
static int __init init_this_scsi_driver(void)
{
	struct scsi_host_template *tmpl = &driver_template;
	struct Scsi_Host *host;
	struct list_head *pos;
	int ret;

	/* A release method is mandatory for teardown; refuse to load without one. */
	if (!tmpl->release) {
		printk(KERN_ERR
		       "scsi HBA driver %s didn't set a release method.\n",
		       tmpl->name);
		return -EINVAL;
	}

	tmpl->module = THIS_MODULE;
	INIT_LIST_HEAD(&tmpl->legacy_hosts);

	/* detect() adds each found host to tmpl->legacy_hosts. */
	tmpl->detect(tmpl);
	if (list_empty(&tmpl->legacy_hosts))
		return -ENODEV;

	list_for_each_entry(host, &tmpl->legacy_hosts, sht_legacy_list) {
		ret = scsi_add_host(host, NULL);
		if (ret)
			goto unwind;
		scsi_scan_host(host);
	}
	return 0;

 unwind:
	/* Walk backwards from the host that failed, removing its predecessors. */
	pos = &host->sht_legacy_list;
	while ((pos = pos->prev) != &tmpl->legacy_hosts) {
		scsi_remove_host(list_entry(pos, struct Scsi_Host,
					    sht_legacy_list));
	}
	return ret;
}

/*
 * Module exit: remove and release every legacy host.  A well-behaved
 * release method calls scsi_unregister() and thereby empties the list;
 * if any hosts remain we warn and unregister them ourselves.
 */
static void __exit exit_this_scsi_driver(void)
{
	struct scsi_host_template *tmpl = &driver_template;
	struct Scsi_Host *host, *tmp;

	list_for_each_entry(host, &tmpl->legacy_hosts, sht_legacy_list)
		scsi_remove_host(host);
	/* release() may delete list entries, hence the _safe iterator. */
	list_for_each_entry_safe(host, tmp, &tmpl->legacy_hosts,
				 sht_legacy_list)
		tmpl->release(host);

	if (list_empty(&tmpl->legacy_hosts))
		return;

	printk(KERN_WARNING "%s did not call scsi_unregister\n", tmpl->name);
	dump_stack();

	list_for_each_entry_safe(host, tmp, &tmpl->legacy_hosts,
				 sht_legacy_list)
		scsi_unregister(host);
}

module_init(init_this_scsi_driver);
module_exit(exit_this_scsi_driver);
gpl-2.0
webbhorn/netgroups
drivers/net/wireless/iwlwifi/mvm/power.c
72
20716
/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, * USA * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <net/mac80211.h> #include "iwl-debug.h" #include "mvm.h" #include "iwl-modparams.h" #include "fw-api-power.h" #define POWER_KEEP_ALIVE_PERIOD_SEC 25 int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, struct iwl_beacon_filter_cmd *cmd) { int ret; ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, CMD_SYNC, sizeof(struct iwl_beacon_filter_cmd), cmd); if (!ret) { IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n", le32_to_cpu(cmd->ba_enable_beacon_abort)); IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n", le32_to_cpu(cmd->ba_escape_timer)); IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n", le32_to_cpu(cmd->bf_debug_flag)); IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n", le32_to_cpu(cmd->bf_enable_beacon_filter)); IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n", le32_to_cpu(cmd->bf_energy_delta)); IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n", le32_to_cpu(cmd->bf_escape_timer)); IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n", le32_to_cpu(cmd->bf_roaming_energy_delta)); 
IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n", le32_to_cpu(cmd->bf_roaming_state)); IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n", le32_to_cpu(cmd->bf_temp_threshold)); IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n", le32_to_cpu(cmd->bf_temp_fast_filter)); IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n", le32_to_cpu(cmd->bf_temp_slow_filter)); } return ret; } static void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_beacon_filter_cmd *cmd) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif->bss_conf.cqm_rssi_thold) { cmd->bf_energy_delta = cpu_to_le32(vif->bss_conf.cqm_rssi_hyst); /* fw uses an absolute value for this */ cmd->bf_roaming_state = cpu_to_le32(-vif->bss_conf.cqm_rssi_thold); } cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled); } int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool enable) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_beacon_filter_cmd cmd = { IWL_BF_CMD_CONFIG_DEFAULTS, .bf_enable_beacon_filter = cpu_to_le32(1), .ba_enable_beacon_abort = cpu_to_le32(enable), }; if (!mvmvif->bf_data.bf_enabled) return 0; if (mvm->cur_ucode == IWL_UCODE_WOWLAN) cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); mvmvif->bf_data.ba_enabled = enable; iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd); iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); } static void iwl_mvm_power_log(struct iwl_mvm *mvm, struct iwl_mac_power_cmd *cmd) { IWL_DEBUG_POWER(mvm, "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n", cmd->id_and_color, iwlmvm_mod_params.power_scheme, le16_to_cpu(cmd->flags)); IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", le16_to_cpu(cmd->keep_alive_seconds)); if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) { IWL_DEBUG_POWER(mvm, "Disable power management\n"); return; } 
IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n", le32_to_cpu(cmd->rx_data_timeout)); IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n", le32_to_cpu(cmd->tx_data_timeout)); if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n", cmd->skip_dtim_periods); if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n", cmd->lprx_rssi_threshold); if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) { IWL_DEBUG_POWER(mvm, "uAPSD enabled\n"); IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n", le32_to_cpu(cmd->rx_data_timeout_uapsd)); IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n", le32_to_cpu(cmd->tx_data_timeout_uapsd)); IWL_DEBUG_POWER(mvm, "QNDP TID = %d\n", cmd->qndp_tid); IWL_DEBUG_POWER(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags); IWL_DEBUG_POWER(mvm, "Max SP = %d\n", cmd->uapsd_max_sp); } } static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_power_cmd *cmd) { struct ieee80211_hw *hw = mvm->hw; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *chan; int dtimper, dtimper_msec; int keep_alive; bool radar_detect = false; struct iwl_mvm_vif *mvmvif __maybe_unused = iwl_mvm_vif_from_mac80211(vif); enum ieee80211_ac_numbers ac; bool tid_found = false; cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); dtimper = hw->conf.ps_dtim_period ?: 1; /* * Regardless of power management state the driver must set * keep alive period. FW will use it for sending keep alive NDPs * immediately after association. 
Check that keep alive period * is at least 3 * DTIM */ dtimper_msec = dtimper * vif->bss_conf.beacon_int; keep_alive = max_t(int, 3 * dtimper_msec, MSEC_PER_SEC * POWER_KEEP_ALIVE_PERIOD_SEC); keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC); cmd->keep_alive_seconds = cpu_to_le16(keep_alive); if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) return; cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); #ifdef CONFIG_IWLWIFI_DEBUGFS if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF && mvmvif->dbgfs_pm.disable_power_off) cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK); #endif if (!vif->bss_conf.ps) return; cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); if (vif->bss_conf.beacon_rate && (vif->bss_conf.beacon_rate->bitrate == 10 || vif->bss_conf.beacon_rate->bitrate == 60)) { cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK); cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD; } /* Check if radar detection is required on current channel */ rcu_read_lock(); chanctx_conf = rcu_dereference(vif->chanctx_conf); WARN_ON(!chanctx_conf); if (chanctx_conf) { chan = chanctx_conf->def.chan; radar_detect = chan->flags & IEEE80211_CHAN_RADAR; } rcu_read_unlock(); /* Check skip over DTIM conditions */ if (!radar_detect && (dtimper <= 10) && (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP || mvm->cur_ucode == IWL_UCODE_WOWLAN)) { cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); cmd->skip_dtim_periods = 3; } if (mvm->cur_ucode != IWL_UCODE_WOWLAN) { cmd->rx_data_timeout = cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT); cmd->tx_data_timeout = cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT); } else { cmd->rx_data_timeout = cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT); cmd->tx_data_timeout = cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT); } for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) { if (!mvmvif->queue_params[ac].uapsd) continue; if (mvm->cur_ucode != IWL_UCODE_WOWLAN) cmd->flags |= 
cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); cmd->uapsd_ac_flags |= BIT(ac); /* QNDP TID - the highest TID with no admission control */ if (!tid_found && !mvmvif->queue_params[ac].acm) { tid_found = true; switch (ac) { case IEEE80211_AC_VO: cmd->qndp_tid = 6; break; case IEEE80211_AC_VI: cmd->qndp_tid = 5; break; case IEEE80211_AC_BE: cmd->qndp_tid = 0; break; case IEEE80211_AC_BK: cmd->qndp_tid = 1; break; } } } if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) { cmd->rx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT); cmd->tx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT); if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) | BIT(IEEE80211_AC_VI) | BIT(IEEE80211_AC_BE) | BIT(IEEE80211_AC_BK))) { cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK); cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL); cmd->snooze_window = (mvm->cur_ucode == IWL_UCODE_WOWLAN) ? cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) : cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW); } cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP; cmd->heavy_tx_thld_packets = IWL_MVM_PS_HEAVY_TX_THLD_PACKETS; cmd->heavy_rx_thld_packets = IWL_MVM_PS_HEAVY_RX_THLD_PACKETS; cmd->heavy_tx_thld_percentage = IWL_MVM_PS_HEAVY_TX_THLD_PERCENT; cmd->heavy_rx_thld_percentage = IWL_MVM_PS_HEAVY_RX_THLD_PERCENT; } #ifdef CONFIG_IWLWIFI_DEBUGFS if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE) cmd->keep_alive_seconds = cpu_to_le16(mvmvif->dbgfs_pm.keep_alive_seconds); if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) { if (mvmvif->dbgfs_pm.skip_over_dtim) cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); else cmd->flags &= cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK); } if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT) cmd->rx_data_timeout = cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout); if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT) cmd->tx_data_timeout = cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout); if (mvmvif->dbgfs_pm.mask & 
MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS) cmd->skip_dtim_periods = mvmvif->dbgfs_pm.skip_dtim_periods; if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) { if (mvmvif->dbgfs_pm.lprx_ena) cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK); else cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK); } if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD) cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold; if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SNOOZE_ENABLE) { if (mvmvif->dbgfs_pm.snooze_ena) cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK); else cmd->flags &= cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK); } #endif /* CONFIG_IWLWIFI_DEBUGFS */ } static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { int ret; bool ba_enable; struct iwl_mac_power_cmd cmd = {}; if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) return 0; /* * TODO: The following vif_count verification is temporary condition. * Avoid power mode update if more than one interface is currently * active. Remove this condition when FW will support power management * on multiple MACs. 
*/
	IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n",
			mvm->vif_count);
	if (mvm->vif_count > 1)
		return 0;

	iwl_mvm_power_build_cmd(mvm, vif, &cmd);
	iwl_mvm_power_log(mvm, &cmd);

	ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
				   sizeof(cmd), &cmd);
	if (ret)
		return ret;

	/* Beacon abort follows whether FW power management ended up enabled. */
	ba_enable = !!(cmd.flags &
		       cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));

	return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
}

/*
 * iwl_mvm_power_mac_disable - turn off MAC power management for a vif
 *
 * Sends a MAC_PM_POWER_TABLE command built from a zeroed command, i.e.
 * without POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK set.  Only applies to
 * non-P2P station interfaces; all others return 0 untouched.  The
 * command is sent asynchronously (CMD_ASYNC).
 */
static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif)
{
	struct iwl_mac_power_cmd cmd = {};
	struct iwl_mvm_vif *mvmvif __maybe_unused =
		iwl_mvm_vif_from_mac80211(vif);

	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
		return 0;

	cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							   mvmvif->color));

	/* Power-save stays advertised unless the scheme is CAM (always-on). */
	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
		cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);

#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* debugfs override: force the power-save flag off for this vif */
	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
	    mvmvif->dbgfs_pm.disable_power_off)
		cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
#endif
	iwl_mvm_power_log(mvm, &cmd);

	return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_ASYNC,
				    sizeof(cmd), &cmd);
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/*
 * iwl_mvm_power_mac_dbgfs_read - format the current power command for debugfs
 *
 * Rebuilds the MAC power command for @vif and pretty-prints its fields
 * into @buf (at most @bufsz bytes).  Returns the number of characters
 * written.  (Definition continues on the following source line.)
 */
static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					char *buf, int bufsz)
{
	struct iwl_mac_power_cmd cmd = {};
	int pos = 0;

	iwl_mvm_power_build_cmd(mvm, vif, &cmd);

	pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
			 (cmd.flags &
			  cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
0 : 1); pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n", iwlmvm_mod_params.power_scheme); pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n", le16_to_cpu(cmd.flags)); pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n", le16_to_cpu(cmd.keep_alive_seconds)); if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) { pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n", (cmd.flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? 1 : 0); pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n", cmd.skip_dtim_periods); if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) { pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n", le32_to_cpu(cmd.rx_data_timeout)); pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n", le32_to_cpu(cmd.tx_data_timeout)); } if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) pos += scnprintf(buf+pos, bufsz-pos, "lprx_rssi_threshold = %d\n", cmd.lprx_rssi_threshold); if (cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) { pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout_uapsd = %d\n", le32_to_cpu(cmd.rx_data_timeout_uapsd)); pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout_uapsd = %d\n", le32_to_cpu(cmd.tx_data_timeout_uapsd)); pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n", cmd.qndp_tid); pos += scnprintf(buf+pos, bufsz-pos, "uapsd_ac_flags = 0x%x\n", cmd.uapsd_ac_flags); pos += scnprintf(buf+pos, bufsz-pos, "uapsd_max_sp = %d\n", cmd.uapsd_max_sp); pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_packets = %d\n", cmd.heavy_tx_thld_packets); pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_packets = %d\n", cmd.heavy_rx_thld_packets); pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_percentage = %d\n", cmd.heavy_tx_thld_percentage); pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_percentage = %d\n", cmd.heavy_rx_thld_percentage); pos += scnprintf(buf+pos, bufsz-pos, "snooze_enable = %d\n", (cmd.flags & 
cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) ?
					 1 : 0);
		}
		if (cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
			pos += scnprintf(buf+pos, bufsz-pos,
					 "snooze_interval = %d\n",
					 cmd.snooze_interval);
			pos += scnprintf(buf+pos, bufsz-pos,
					 "snooze_window = %d\n",
					 cmd.snooze_window);
		}
	}

	return pos;
}

/*
 * iwl_mvm_beacon_filter_debugfs_parameters - apply debugfs overrides
 *
 * For every beacon-filter field whose bit is set in the per-vif debugfs
 * mask, overwrite the corresponding field of @cmd with the debugfs value.
 */
void
iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
					 struct iwl_beacon_filter_cmd *cmd)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;

	if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA)
		cmd->bf_energy_delta = cpu_to_le32(dbgfs_bf->bf_energy_delta);
	if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA)
		cmd->bf_roaming_energy_delta =
				cpu_to_le32(dbgfs_bf->bf_roaming_energy_delta);
	if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE)
		cmd->bf_roaming_state =
				cpu_to_le32(dbgfs_bf->bf_roaming_state);
	if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_THRESHOLD)
		cmd->bf_temp_threshold =
				cpu_to_le32(dbgfs_bf->bf_temp_threshold);
	if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_FAST_FILTER)
		cmd->bf_temp_fast_filter =
				cpu_to_le32(dbgfs_bf->bf_temp_fast_filter);
	if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_SLOW_FILTER)
		cmd->bf_temp_slow_filter =
				cpu_to_le32(dbgfs_bf->bf_temp_slow_filter);
	if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG)
		cmd->bf_debug_flag = cpu_to_le32(dbgfs_bf->bf_debug_flag);
	if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER)
		cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer);
	if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER)
		cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer);
	if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT)
		cmd->ba_enable_beacon_abort =
				cpu_to_le32(dbgfs_bf->ba_enable_beacon_abort);
}
#endif

/*
 * iwl_mvm_enable_beacon_filter - enable FW beacon filtering on a vif
 *
 * Builds a beacon-filter command from the compile-time defaults, the
 * CQM parameters and any debugfs overrides, then sends it to the
 * firmware.  (Definition continues on the following source line.)
 */
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_beacon_filter_cmd cmd = {
		IWL_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = cpu_to_le32(1),
	};
	int ret;

	if
(mvmvif != mvm->bf_allowed_vif ||
	    vif->type != NL80211_IFTYPE_STATION || vif->p2p)
		return 0;

	iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
	iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
	ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);

	/* Remember filtering is active so later updates can re-send it. */
	if (!ret)
		mvmvif->bf_data.bf_enabled = true;

	return ret;
}

/*
 * iwl_mvm_disable_beacon_filter - turn beacon filtering off for a vif
 *
 * Sends an all-zero beacon-filter command (bf_enable_beacon_filter == 0).
 * No-op unless the ucode advertises IWL_UCODE_TLV_FLAGS_BF_UPDATED and
 * the vif is a non-P2P station.  Clears bf_enabled on success.
 */
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
				  struct ieee80211_vif *vif)
{
	struct iwl_beacon_filter_cmd cmd = {};
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED) ||
	    vif->type != NL80211_IFTYPE_STATION || vif->p2p)
		return 0;

	ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);

	if (!ret)
		mvmvif->bf_data.bf_enabled = false;

	return ret;
}

/*
 * iwl_mvm_update_beacon_filter - re-send the beacon filter if enabled
 *
 * Helper for callers that changed beacon-filter inputs: does nothing
 * when filtering is currently disabled, otherwise re-runs the enable
 * path so the firmware sees the new parameters.
 */
int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (!mvmvif->bf_data.bf_enabled)
		return 0;

	return iwl_mvm_enable_beacon_filter(mvm, vif);
}

/* Per-MAC power management ops table. */
const struct iwl_mvm_power_ops pm_mac_ops = {
	.power_update_mode = iwl_mvm_power_mac_update_mode,
	.power_disable = iwl_mvm_power_mac_disable,
#ifdef CONFIG_IWLWIFI_DEBUGFS
	.power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
#endif
};
gpl-2.0
jrfastab/Linux-Kernel-QOS
tools/perf/ui/browsers/hists.c
72
50523
#include <stdio.h> #include "../libslang.h" #include <stdlib.h> #include <string.h> #include <linux/rbtree.h> #include "../../util/evsel.h" #include "../../util/evlist.h" #include "../../util/hist.h" #include "../../util/pstack.h" #include "../../util/sort.h" #include "../../util/util.h" #include "../../arch/common.h" #include "../browser.h" #include "../helpline.h" #include "../util.h" #include "../ui.h" #include "map.h" struct hist_browser { struct ui_browser b; struct hists *hists; struct hist_entry *he_selection; struct map_symbol *selection; int print_seq; bool show_dso; float min_pcnt; u64 nr_pcnt_entries; }; extern void hist_browser__init_hpp(void); static int hists__browser_title(struct hists *hists, char *bf, size_t size, const char *ev_name); static void hist_browser__refresh_dimensions(struct hist_browser *browser) { /* 3 == +/- toggle symbol before actual hist_entry rendering */ browser->b.width = 3 + (hists__sort_list_width(browser->hists) + sizeof("[k]")); } static void hist_browser__reset(struct hist_browser *browser) { browser->b.nr_entries = browser->hists->nr_entries; hist_browser__refresh_dimensions(browser); ui_browser__reset_index(&browser->b); } static char tree__folded_sign(bool unfolded) { return unfolded ? '-' : '+'; } static char map_symbol__folded(const struct map_symbol *ms) { return ms->has_children ? tree__folded_sign(ms->unfolded) : ' '; } static char hist_entry__folded(const struct hist_entry *he) { return map_symbol__folded(&he->ms); } static char callchain_list__folded(const struct callchain_list *cl) { return map_symbol__folded(&cl->ms); } static void map_symbol__set_folding(struct map_symbol *ms, bool unfold) { ms->unfolded = unfold ? 
ms->has_children : false; } static int callchain_node__count_rows_rb_tree(struct callchain_node *node) { int n = 0; struct rb_node *nd; for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) { struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node); struct callchain_list *chain; char folded_sign = ' '; /* No children */ list_for_each_entry(chain, &child->val, list) { ++n; /* We need this because we may not have children */ folded_sign = callchain_list__folded(chain); if (folded_sign == '+') break; } if (folded_sign == '-') /* Have children and they're unfolded */ n += callchain_node__count_rows_rb_tree(child); } return n; } static int callchain_node__count_rows(struct callchain_node *node) { struct callchain_list *chain; bool unfolded = false; int n = 0; list_for_each_entry(chain, &node->val, list) { ++n; unfolded = chain->ms.unfolded; } if (unfolded) n += callchain_node__count_rows_rb_tree(node); return n; } static int callchain__count_rows(struct rb_root *chain) { struct rb_node *nd; int n = 0; for (nd = rb_first(chain); nd; nd = rb_next(nd)) { struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); n += callchain_node__count_rows(node); } return n; } static bool map_symbol__toggle_fold(struct map_symbol *ms) { if (!ms) return false; if (!ms->has_children) return false; ms->unfolded = !ms->unfolded; return true; } static void callchain_node__init_have_children_rb_tree(struct callchain_node *node) { struct rb_node *nd = rb_first(&node->rb_root); for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) { struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node); struct callchain_list *chain; bool first = true; list_for_each_entry(chain, &child->val, list) { if (first) { first = false; chain->ms.has_children = chain->list.next != &child->val || !RB_EMPTY_ROOT(&child->rb_root); } else chain->ms.has_children = chain->list.next == &child->val && !RB_EMPTY_ROOT(&child->rb_root); } 
callchain_node__init_have_children_rb_tree(child); } } static void callchain_node__init_have_children(struct callchain_node *node) { struct callchain_list *chain; list_for_each_entry(chain, &node->val, list) chain->ms.has_children = !RB_EMPTY_ROOT(&node->rb_root); callchain_node__init_have_children_rb_tree(node); } static void callchain__init_have_children(struct rb_root *root) { struct rb_node *nd; for (nd = rb_first(root); nd; nd = rb_next(nd)) { struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); callchain_node__init_have_children(node); } } static void hist_entry__init_have_children(struct hist_entry *he) { if (!he->init_have_children) { he->ms.has_children = !RB_EMPTY_ROOT(&he->sorted_chain); callchain__init_have_children(&he->sorted_chain); he->init_have_children = true; } } static bool hist_browser__toggle_fold(struct hist_browser *browser) { if (map_symbol__toggle_fold(browser->selection)) { struct hist_entry *he = browser->he_selection; hist_entry__init_have_children(he); browser->hists->nr_entries -= he->nr_rows; if (he->ms.unfolded) he->nr_rows = callchain__count_rows(&he->sorted_chain); else he->nr_rows = 0; browser->hists->nr_entries += he->nr_rows; browser->b.nr_entries = browser->hists->nr_entries; return true; } /* If it doesn't have children, no toggling performed */ return false; } static int callchain_node__set_folding_rb_tree(struct callchain_node *node, bool unfold) { int n = 0; struct rb_node *nd; for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) { struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node); struct callchain_list *chain; bool has_children = false; list_for_each_entry(chain, &child->val, list) { ++n; map_symbol__set_folding(&chain->ms, unfold); has_children = chain->ms.has_children; } if (has_children) n += callchain_node__set_folding_rb_tree(child, unfold); } return n; } static int callchain_node__set_folding(struct callchain_node *node, bool unfold) { struct callchain_list 
*chain; bool has_children = false; int n = 0; list_for_each_entry(chain, &node->val, list) { ++n; map_symbol__set_folding(&chain->ms, unfold); has_children = chain->ms.has_children; } if (has_children) n += callchain_node__set_folding_rb_tree(node, unfold); return n; } static int callchain__set_folding(struct rb_root *chain, bool unfold) { struct rb_node *nd; int n = 0; for (nd = rb_first(chain); nd; nd = rb_next(nd)) { struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); n += callchain_node__set_folding(node, unfold); } return n; } static void hist_entry__set_folding(struct hist_entry *he, bool unfold) { hist_entry__init_have_children(he); map_symbol__set_folding(&he->ms, unfold); if (he->ms.has_children) { int n = callchain__set_folding(&he->sorted_chain, unfold); he->nr_rows = unfold ? n : 0; } else he->nr_rows = 0; } static void hists__set_folding(struct hists *hists, bool unfold) { struct rb_node *nd; hists->nr_entries = 0; for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); hist_entry__set_folding(he, unfold); hists->nr_entries += 1 + he->nr_rows; } } static void hist_browser__set_folding(struct hist_browser *browser, bool unfold) { hists__set_folding(browser->hists, unfold); browser->b.nr_entries = browser->hists->nr_entries; /* Go to the start, we may be way after valid entries after a collapse */ ui_browser__reset_index(&browser->b); } static void ui_browser__warn_lost_events(struct ui_browser *browser) { ui_browser__warning(browser, 4, "Events are being lost, check IO/CPU overload!\n\n" "You may want to run 'perf' using a RT scheduler policy:\n\n" " perf top -r 80\n\n" "Or reduce the sampling frequency."); } static void hist_browser__update_pcnt_entries(struct hist_browser *hb); static int hist_browser__run(struct hist_browser *browser, const char *ev_name, struct hist_browser_timer *hbt) { int key; char title[160]; int delay_secs = hbt ? 
hbt->refresh : 0; browser->b.entries = &browser->hists->entries; browser->b.nr_entries = browser->hists->nr_entries; if (browser->min_pcnt) browser->b.nr_entries = browser->nr_pcnt_entries; hist_browser__refresh_dimensions(browser); hists__browser_title(browser->hists, title, sizeof(title), ev_name); if (ui_browser__show(&browser->b, title, "Press '?' for help on key bindings") < 0) return -1; while (1) { key = ui_browser__run(&browser->b, delay_secs); switch (key) { case K_TIMER: { u64 nr_entries; hbt->timer(hbt->arg); if (browser->min_pcnt) { hist_browser__update_pcnt_entries(browser); nr_entries = browser->nr_pcnt_entries; } else { nr_entries = browser->hists->nr_entries; } ui_browser__update_nr_entries(&browser->b, nr_entries); if (browser->hists->stats.nr_lost_warned != browser->hists->stats.nr_events[PERF_RECORD_LOST]) { browser->hists->stats.nr_lost_warned = browser->hists->stats.nr_events[PERF_RECORD_LOST]; ui_browser__warn_lost_events(&browser->b); } hists__browser_title(browser->hists, title, sizeof(title), ev_name); ui_browser__show_title(&browser->b, title); continue; } case 'D': { /* Debug */ static int seq; struct hist_entry *h = rb_entry(browser->b.top, struct hist_entry, rb_node); ui_helpline__pop(); ui_helpline__fpush("%d: nr_ent=(%d,%d), height=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d", seq++, browser->b.nr_entries, browser->hists->nr_entries, browser->b.height, browser->b.index, browser->b.top_idx, h->row_offset, h->nr_rows); } break; case 'C': /* Collapse the whole world. */ hist_browser__set_folding(browser, false); break; case 'E': /* Expand the whole world. 
*/ hist_browser__set_folding(browser, true); break; case K_ENTER: if (hist_browser__toggle_fold(browser)) break; /* fall thru */ default: goto out; } } out: ui_browser__hide(&browser->b); return key; } static char *callchain_list__sym_name(struct callchain_list *cl, char *bf, size_t bfsize, bool show_dso) { int printed; if (cl->ms.sym) printed = scnprintf(bf, bfsize, "%s", cl->ms.sym->name); else printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip); if (show_dso) scnprintf(bf + printed, bfsize - printed, " %s", cl->ms.map ? cl->ms.map->dso->short_name : "unknown"); return bf; } #define LEVEL_OFFSET_STEP 3 static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browser, struct callchain_node *chain_node, u64 total, int level, unsigned short row, off_t *row_offset, bool *is_current_entry) { struct rb_node *node; int first_row = row, width, offset = level * LEVEL_OFFSET_STEP; u64 new_total, remaining; if (callchain_param.mode == CHAIN_GRAPH_REL) new_total = chain_node->children_hit; else new_total = total; remaining = new_total; node = rb_first(&chain_node->rb_root); while (node) { struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node); struct rb_node *next = rb_next(node); u64 cumul = callchain_cumul_hits(child); struct callchain_list *chain; char folded_sign = ' '; int first = true; int extra_offset = 0; remaining -= cumul; list_for_each_entry(chain, &child->val, list) { char bf[1024], *alloc_str; const char *str; int color; bool was_first = first; if (first) first = false; else extra_offset = LEVEL_OFFSET_STEP; folded_sign = callchain_list__folded(chain); if (*row_offset != 0) { --*row_offset; goto do_next; } alloc_str = NULL; str = callchain_list__sym_name(chain, bf, sizeof(bf), browser->show_dso); if (was_first) { double percent = cumul * 100.0 / new_total; if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0) str = "Not enough memory!"; else str = alloc_str; } color = HE_COLORSET_NORMAL; width = browser->b.width - 
(offset + extra_offset + 2); if (ui_browser__is_current_entry(&browser->b, row)) { browser->selection = &chain->ms; color = HE_COLORSET_SELECTED; *is_current_entry = true; } ui_browser__set_color(&browser->b, color); ui_browser__gotorc(&browser->b, row, 0); slsmg_write_nstring(" ", offset + extra_offset); slsmg_printf("%c ", folded_sign); slsmg_write_nstring(str, width); free(alloc_str); if (++row == browser->b.height) goto out; do_next: if (folded_sign == '+') break; } if (folded_sign == '-') { const int new_level = level + (extra_offset ? 2 : 1); row += hist_browser__show_callchain_node_rb_tree(browser, child, new_total, new_level, row, row_offset, is_current_entry); } if (row == browser->b.height) goto out; node = next; } out: return row - first_row; } static int hist_browser__show_callchain_node(struct hist_browser *browser, struct callchain_node *node, int level, unsigned short row, off_t *row_offset, bool *is_current_entry) { struct callchain_list *chain; int first_row = row, offset = level * LEVEL_OFFSET_STEP, width = browser->b.width - offset; char folded_sign = ' '; list_for_each_entry(chain, &node->val, list) { char bf[1024], *s; int color; folded_sign = callchain_list__folded(chain); if (*row_offset != 0) { --*row_offset; continue; } color = HE_COLORSET_NORMAL; if (ui_browser__is_current_entry(&browser->b, row)) { browser->selection = &chain->ms; color = HE_COLORSET_SELECTED; *is_current_entry = true; } s = callchain_list__sym_name(chain, bf, sizeof(bf), browser->show_dso); ui_browser__gotorc(&browser->b, row, 0); ui_browser__set_color(&browser->b, color); slsmg_write_nstring(" ", offset); slsmg_printf("%c ", folded_sign); slsmg_write_nstring(s, width - 2); if (++row == browser->b.height) goto out; } if (folded_sign == '-') row += hist_browser__show_callchain_node_rb_tree(browser, node, browser->hists->stats.total_period, level + 1, row, row_offset, is_current_entry); out: return row - first_row; } static int hist_browser__show_callchain(struct 
hist_browser *browser, struct rb_root *chain, int level, unsigned short row, off_t *row_offset, bool *is_current_entry) { struct rb_node *nd; int first_row = row; for (nd = rb_first(chain); nd; nd = rb_next(nd)) { struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); row += hist_browser__show_callchain_node(browser, node, level, row, row_offset, is_current_entry); if (row == browser->b.height) break; } return row - first_row; } struct hpp_arg { struct ui_browser *b; char folded_sign; bool current_entry; }; static int __hpp__color_callchain(struct hpp_arg *arg) { if (!symbol_conf.use_callchain) return 0; slsmg_printf("%c ", arg->folded_sign); return 2; } static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he, u64 (*get_field)(struct hist_entry *), int (*callchain_cb)(struct hpp_arg *)) { int ret = 0; double percent = 0.0; struct hists *hists = he->hists; struct hpp_arg *arg = hpp->ptr; if (hists->stats.total_period) percent = 100.0 * get_field(he) / hists->stats.total_period; ui_browser__set_percent_color(arg->b, percent, arg->current_entry); if (callchain_cb) ret += callchain_cb(arg); ret += scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent); slsmg_printf("%s", hpp->buf); if (symbol_conf.event_group) { int prev_idx, idx_delta; struct perf_evsel *evsel = hists_to_evsel(hists); struct hist_entry *pair; int nr_members = evsel->nr_members; if (nr_members <= 1) goto out; prev_idx = perf_evsel__group_idx(evsel); list_for_each_entry(pair, &he->pairs.head, pairs.node) { u64 period = get_field(pair); u64 total = pair->hists->stats.total_period; if (!total) continue; evsel = hists_to_evsel(pair->hists); idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1; while (idx_delta--) { /* * zero-fill group members in the middle which * have no sample */ ui_browser__set_percent_color(arg->b, 0.0, arg->current_entry); ret += scnprintf(hpp->buf, hpp->size, " %6.2f%%", 0.0); slsmg_printf("%s", hpp->buf); } percent = 100.0 * period / total; 
ui_browser__set_percent_color(arg->b, percent, arg->current_entry); ret += scnprintf(hpp->buf, hpp->size, " %6.2f%%", percent); slsmg_printf("%s", hpp->buf); prev_idx = perf_evsel__group_idx(evsel); } idx_delta = nr_members - prev_idx - 1; while (idx_delta--) { /* * zero-fill group members at last which have no sample */ ui_browser__set_percent_color(arg->b, 0.0, arg->current_entry); ret += scnprintf(hpp->buf, hpp->size, " %6.2f%%", 0.0); slsmg_printf("%s", hpp->buf); } } out: if (!arg->current_entry || !arg->b->navkeypressed) ui_browser__set_color(arg->b, HE_COLORSET_NORMAL); return ret; } #define __HPP_COLOR_PERCENT_FN(_type, _field, _cb) \ static u64 __hpp_get_##_field(struct hist_entry *he) \ { \ return he->stat._field; \ } \ \ static int hist_browser__hpp_color_##_type(struct perf_hpp *hpp, \ struct hist_entry *he) \ { \ return __hpp__color_fmt(hpp, he, __hpp_get_##_field, _cb); \ } __HPP_COLOR_PERCENT_FN(overhead, period, __hpp__color_callchain) __HPP_COLOR_PERCENT_FN(overhead_sys, period_sys, NULL) __HPP_COLOR_PERCENT_FN(overhead_us, period_us, NULL) __HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys, NULL) __HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us, NULL) #undef __HPP_COLOR_PERCENT_FN void hist_browser__init_hpp(void) { perf_hpp__column_enable(PERF_HPP__OVERHEAD); perf_hpp__init(); perf_hpp__format[PERF_HPP__OVERHEAD].color = hist_browser__hpp_color_overhead; perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color = hist_browser__hpp_color_overhead_sys; perf_hpp__format[PERF_HPP__OVERHEAD_US].color = hist_browser__hpp_color_overhead_us; perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color = hist_browser__hpp_color_overhead_guest_sys; perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color = hist_browser__hpp_color_overhead_guest_us; } static int hist_browser__show_entry(struct hist_browser *browser, struct hist_entry *entry, unsigned short row) { char s[256]; int printed = 0; int width = browser->b.width; char folded_sign = ' '; bool 
current_entry = ui_browser__is_current_entry(&browser->b, row); off_t row_offset = entry->row_offset; bool first = true; struct perf_hpp_fmt *fmt; if (current_entry) { browser->he_selection = entry; browser->selection = &entry->ms; } if (symbol_conf.use_callchain) { hist_entry__init_have_children(entry); folded_sign = hist_entry__folded(entry); } if (row_offset == 0) { struct hpp_arg arg = { .b = &browser->b, .folded_sign = folded_sign, .current_entry = current_entry, }; struct perf_hpp hpp = { .buf = s, .size = sizeof(s), .ptr = &arg, }; ui_browser__gotorc(&browser->b, row, 0); perf_hpp__for_each_format(fmt) { if (!first) { slsmg_printf(" "); width -= 2; } first = false; if (fmt->color) { width -= fmt->color(&hpp, entry); } else { width -= fmt->entry(&hpp, entry); slsmg_printf("%s", s); } } /* The scroll bar isn't being used */ if (!browser->b.navkeypressed) width += 1; hist_entry__sort_snprintf(entry, s, sizeof(s), browser->hists); slsmg_write_nstring(s, width); ++row; ++printed; } else --row_offset; if (folded_sign == '-' && row != browser->b.height) { printed += hist_browser__show_callchain(browser, &entry->sorted_chain, 1, row, &row_offset, &current_entry); if (current_entry) browser->he_selection = entry; } return printed; } static void ui_browser__hists_init_top(struct ui_browser *browser) { if (browser->top == NULL) { struct hist_browser *hb; hb = container_of(browser, struct hist_browser, b); browser->top = rb_first(&hb->hists->entries); } } static unsigned int hist_browser__refresh(struct ui_browser *browser) { unsigned row = 0; struct rb_node *nd; struct hist_browser *hb = container_of(browser, struct hist_browser, b); ui_browser__hists_init_top(browser); for (nd = browser->top; nd; nd = rb_next(nd)) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); float percent = h->stat.period * 100.0 / hb->hists->stats.total_period; if (h->filtered) continue; if (percent < hb->min_pcnt) continue; row += hist_browser__show_entry(hb, h, row); if (row 
== browser->height)
			break;
	}

	return row;
}

/*
 * hists__filter_entries - next displayable entry at or after @nd
 *
 * Skips filtered-out entries.  Returns NULL as soon as an entry drops
 * below @min_pcnt — entries are visited in the hists rbtree order, so
 * everything after it is presumably below the threshold too.
 */
static struct rb_node *hists__filter_entries(struct rb_node *nd,
					     struct hists *hists,
					     float min_pcnt)
{
	while (nd != NULL) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry,
						rb_node);
		float percent = h->stat.period * 100.0 /
					hists->stats.total_period;

		if (percent < min_pcnt)
			return NULL;

		if (!h->filtered)
			return nd;

		nd = rb_next(nd);
	}

	return NULL;
}

/*
 * hists__filter_prev_entries - previous displayable entry at or before @nd
 *
 * Walks backwards, skipping entries that are filtered out or below
 * @min_pcnt; returns NULL when none remain.
 */
static struct rb_node *hists__filter_prev_entries(struct rb_node *nd,
						  struct hists *hists,
						  float min_pcnt)
{
	while (nd != NULL) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry,
						rb_node);
		float percent = h->stat.period * 100.0 /
					hists->stats.total_period;

		if (!h->filtered && percent >= min_pcnt)
			return nd;

		nd = rb_prev(nd);
	}

	return NULL;
}

/*
 * ui_browser__hists_seek - move the top of the browser by @offset rows
 *
 * Row-accurate seek that accounts for unfolded callchains: a hist_entry
 * may occupy 1 + nr_rows display rows, so partially consumed entries
 * record the consumed amount in h->row_offset instead of the browser
 * always moving by whole entries.
 */
static void ui_browser__hists_seek(struct ui_browser *browser,
				   off_t offset, int whence)
{
	struct hist_entry *h;
	struct rb_node *nd;
	bool first = true;
	struct hist_browser *hb;

	hb = container_of(browser, struct hist_browser, b);

	if (browser->nr_entries == 0)
		return;

	ui_browser__hists_init_top(browser);

	switch (whence) {
	case SEEK_SET:
		nd = hists__filter_entries(rb_first(browser->entries),
					   hb->hists, hb->min_pcnt);
		break;
	case SEEK_CUR:
		nd = browser->top;
		goto do_offset;
	case SEEK_END:
		nd = hists__filter_prev_entries(rb_last(browser->entries),
						hb->hists, hb->min_pcnt);
		first = false;
		break;
	default:
		return;
	}

	/*
	 * Moves not relative to the first visible entry invalidates its
	 * row_offset:
	 */
	h = rb_entry(browser->top, struct hist_entry, rb_node);
	h->row_offset = 0;

	/*
	 * Here we have to check if nd is expanded (+), if it is we can't go
	 * the next top level hist_entry, instead we must compute an offset of
	 * what _not_ to show and not change the first visible entry.
	 *
	 * This offset increments when we are going from top to bottom and
	 * decreases when we're going from bottom to top.
* * As we don't have backpointers to the top level in the callchains * structure, we need to always print the whole hist_entry callchain, * skipping the first ones that are before the first visible entry * and stop when we printed enough lines to fill the screen. */ do_offset: if (offset > 0) { do { h = rb_entry(nd, struct hist_entry, rb_node); if (h->ms.unfolded) { u16 remaining = h->nr_rows - h->row_offset; if (offset > remaining) { offset -= remaining; h->row_offset = 0; } else { h->row_offset += offset; offset = 0; browser->top = nd; break; } } nd = hists__filter_entries(rb_next(nd), hb->hists, hb->min_pcnt); if (nd == NULL) break; --offset; browser->top = nd; } while (offset != 0); } else if (offset < 0) { while (1) { h = rb_entry(nd, struct hist_entry, rb_node); if (h->ms.unfolded) { if (first) { if (-offset > h->row_offset) { offset += h->row_offset; h->row_offset = 0; } else { h->row_offset += offset; offset = 0; browser->top = nd; break; } } else { if (-offset > h->nr_rows) { offset += h->nr_rows; h->row_offset = 0; } else { h->row_offset = h->nr_rows + offset; offset = 0; browser->top = nd; break; } } } nd = hists__filter_prev_entries(rb_prev(nd), hb->hists, hb->min_pcnt); if (nd == NULL) break; ++offset; browser->top = nd; if (offset == 0) { /* * Last unfiltered hist_entry, check if it is * unfolded, if it is then we should have * row_offset at its last entry. 
*/ h = rb_entry(nd, struct hist_entry, rb_node); if (h->ms.unfolded) h->row_offset = h->nr_rows; break; } first = false; } } else { browser->top = nd; h = rb_entry(nd, struct hist_entry, rb_node); h->row_offset = 0; } } static int hist_browser__fprintf_callchain_node_rb_tree(struct hist_browser *browser, struct callchain_node *chain_node, u64 total, int level, FILE *fp) { struct rb_node *node; int offset = level * LEVEL_OFFSET_STEP; u64 new_total, remaining; int printed = 0; if (callchain_param.mode == CHAIN_GRAPH_REL) new_total = chain_node->children_hit; else new_total = total; remaining = new_total; node = rb_first(&chain_node->rb_root); while (node) { struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node); struct rb_node *next = rb_next(node); u64 cumul = callchain_cumul_hits(child); struct callchain_list *chain; char folded_sign = ' '; int first = true; int extra_offset = 0; remaining -= cumul; list_for_each_entry(chain, &child->val, list) { char bf[1024], *alloc_str; const char *str; bool was_first = first; if (first) first = false; else extra_offset = LEVEL_OFFSET_STEP; folded_sign = callchain_list__folded(chain); alloc_str = NULL; str = callchain_list__sym_name(chain, bf, sizeof(bf), browser->show_dso); if (was_first) { double percent = cumul * 100.0 / new_total; if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0) str = "Not enough memory!"; else str = alloc_str; } printed += fprintf(fp, "%*s%c %s\n", offset + extra_offset, " ", folded_sign, str); free(alloc_str); if (folded_sign == '+') break; } if (folded_sign == '-') { const int new_level = level + (extra_offset ? 
2 : 1); printed += hist_browser__fprintf_callchain_node_rb_tree(browser, child, new_total, new_level, fp); } node = next; } return printed; } static int hist_browser__fprintf_callchain_node(struct hist_browser *browser, struct callchain_node *node, int level, FILE *fp) { struct callchain_list *chain; int offset = level * LEVEL_OFFSET_STEP; char folded_sign = ' '; int printed = 0; list_for_each_entry(chain, &node->val, list) { char bf[1024], *s; folded_sign = callchain_list__folded(chain); s = callchain_list__sym_name(chain, bf, sizeof(bf), browser->show_dso); printed += fprintf(fp, "%*s%c %s\n", offset, " ", folded_sign, s); } if (folded_sign == '-') printed += hist_browser__fprintf_callchain_node_rb_tree(browser, node, browser->hists->stats.total_period, level + 1, fp); return printed; } static int hist_browser__fprintf_callchain(struct hist_browser *browser, struct rb_root *chain, int level, FILE *fp) { struct rb_node *nd; int printed = 0; for (nd = rb_first(chain); nd; nd = rb_next(nd)) { struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); printed += hist_browser__fprintf_callchain_node(browser, node, level, fp); } return printed; } static int hist_browser__fprintf_entry(struct hist_browser *browser, struct hist_entry *he, FILE *fp) { char s[8192]; double percent; int printed = 0; char folded_sign = ' '; if (symbol_conf.use_callchain) folded_sign = hist_entry__folded(he); hist_entry__sort_snprintf(he, s, sizeof(s), browser->hists); percent = (he->stat.period * 100.0) / browser->hists->stats.total_period; if (symbol_conf.use_callchain) printed += fprintf(fp, "%c ", folded_sign); printed += fprintf(fp, " %5.2f%%", percent); if (symbol_conf.show_nr_samples) printed += fprintf(fp, " %11u", he->stat.nr_events); if (symbol_conf.show_total_period) printed += fprintf(fp, " %12" PRIu64, he->stat.period); printed += fprintf(fp, "%s\n", rtrim(s)); if (folded_sign == '-') printed += hist_browser__fprintf_callchain(browser, &he->sorted_chain, 1, fp); 
return printed;
}

/*
 * hist_browser__fprintf - dump every displayable hist entry to @fp
 *
 * Iterates the filtered entries (honouring min_pcnt) and prints each
 * one, including its callchain when unfolded.  Returns the number of
 * characters written.
 */
static int hist_browser__fprintf(struct hist_browser *browser, FILE *fp)
{
	struct rb_node *nd = hists__filter_entries(rb_first(browser->b.entries),
						   browser->hists,
						   browser->min_pcnt);
	int printed = 0;

	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry,
						rb_node);

		printed += hist_browser__fprintf_entry(browser, h, fp);
		nd = hists__filter_entries(rb_next(nd), browser->hists,
					   browser->min_pcnt);
	}

	return printed;
}

/*
 * hist_browser__dump - write the browser contents to a perf.hist.N file
 *
 * Picks the first unused perf.hist.<seq> name in the current directory
 * (capped at 8192 attempts), dumps the histogram there and reports the
 * outcome on the help line.  Returns 0 on success, -1 on failure.
 */
static int hist_browser__dump(struct hist_browser *browser)
{
	char filename[64];
	FILE *fp;

	while (1) {
		scnprintf(filename, sizeof(filename), "perf.hist.%d",
			  browser->print_seq);
		if (access(filename, F_OK))
			break;
		/*
		 * XXX: Just an arbitrary lazy upper limit
		 */
		if (++browser->print_seq == 8192) {
			ui_helpline__fpush("Too many perf.hist.N files, nothing written!");
			return -1;
		}
	}

	fp = fopen(filename, "w");
	if (fp == NULL) {
		char bf[64];
		const char *err = strerror_r(errno, bf, sizeof(bf));
		ui_helpline__fpush("Couldn't write to %s: %s", filename, err);
		return -1;
	}

	++browser->print_seq;
	hist_browser__fprintf(browser, fp);
	fclose(fp);
	ui_helpline__fpush("%s written!", filename);

	return 0;
}

/* Allocate and minimally initialise a hist_browser for @hists. */
static struct hist_browser *hist_browser__new(struct hists *hists)
{
	struct hist_browser *browser = zalloc(sizeof(*browser));

	if (browser) {
		browser->hists = hists;
		browser->b.refresh = hist_browser__refresh;
		browser->b.seek = ui_browser__hists_seek;
		browser->b.use_navkeypressed = true;
	}

	return browser;
}

/* Counterpart of hist_browser__new(). */
static void hist_browser__delete(struct hist_browser *browser)
{
	free(browser);
}

/* Entry currently under the cursor. */
static struct hist_entry *hist_browser__selected_entry(struct hist_browser *browser)
{
	return browser->he_selection;
}

/* Thread of the entry currently under the cursor. */
static struct thread *hist_browser__selected_thread(struct hist_browser *browser)
{
	return browser->he_selection->thread;
}

/*
 * hists__browser_title - format the browser's title line into @bf
 * (Definition continues on the following source line.)
 */
static int hists__browser_title(struct hists *hists, char *bf, size_t size,
				const char *ev_name)
{
	char unit;
	int printed;
	const struct dso *dso = hists->dso_filter;
	const struct thread *thread = hists->thread_filter;
unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE]; u64 nr_events = hists->stats.total_period; struct perf_evsel *evsel = hists_to_evsel(hists); char buf[512]; size_t buflen = sizeof(buf); if (perf_evsel__is_group_event(evsel)) { struct perf_evsel *pos; perf_evsel__group_desc(evsel, buf, buflen); ev_name = buf; for_each_group_member(pos, evsel) { nr_samples += pos->hists.stats.nr_events[PERF_RECORD_SAMPLE]; nr_events += pos->hists.stats.total_period; } } nr_samples = convert_unit(nr_samples, &unit); printed = scnprintf(bf, size, "Samples: %lu%c of event '%s', Event count (approx.): %lu", nr_samples, unit, ev_name, nr_events); if (hists->uid_filter_str) printed += snprintf(bf + printed, size - printed, ", UID: %s", hists->uid_filter_str); if (thread) printed += scnprintf(bf + printed, size - printed, ", Thread: %s(%d)", (thread->comm_set ? thread->comm : ""), thread->pid); if (dso) printed += scnprintf(bf + printed, size - printed, ", DSO: %s", dso->short_name); return printed; } static inline void free_popup_options(char **options, int n) { int i; for (i = 0; i < n; ++i) { free(options[i]); options[i] = NULL; } } /* Check whether the browser is for 'top' or 'report' */ static inline bool is_report_browser(void *timer) { return timer == NULL; } /* * Only runtime switching of perf data file will make "input_name" point * to a malloced buffer. So add "is_input_name_malloced" flag to decide * whether we need to call free() for current "input_name" during the switch. 
*/ static bool is_input_name_malloced = false; static int switch_data_file(void) { char *pwd, *options[32], *abs_path[32], *tmp; DIR *pwd_dir; int nr_options = 0, choice = -1, ret = -1; struct dirent *dent; pwd = getenv("PWD"); if (!pwd) return ret; pwd_dir = opendir(pwd); if (!pwd_dir) return ret; memset(options, 0, sizeof(options)); memset(options, 0, sizeof(abs_path)); while ((dent = readdir(pwd_dir))) { char path[PATH_MAX]; u64 magic; char *name = dent->d_name; FILE *file; if (!(dent->d_type == DT_REG)) continue; snprintf(path, sizeof(path), "%s/%s", pwd, name); file = fopen(path, "r"); if (!file) continue; if (fread(&magic, 1, 8, file) < 8) goto close_file_and_continue; if (is_perf_magic(magic)) { options[nr_options] = strdup(name); if (!options[nr_options]) goto close_file_and_continue; abs_path[nr_options] = strdup(path); if (!abs_path[nr_options]) { free(options[nr_options]); ui__warning("Can't search all data files due to memory shortage.\n"); fclose(file); break; } nr_options++; } close_file_and_continue: fclose(file); if (nr_options >= 32) { ui__warning("Too many perf data files in PWD!\n" "Only the first 32 files will be listed.\n"); break; } } closedir(pwd_dir); if (nr_options) { choice = ui__popup_menu(nr_options, options); if (choice < nr_options && choice >= 0) { tmp = strdup(abs_path[choice]); if (tmp) { if (is_input_name_malloced) free((void *)input_name); input_name = tmp; is_input_name_malloced = true; ret = 0; } else ui__warning("Data switch failed due to memory shortage!\n"); } } free_popup_options(options, nr_options); free_popup_options(abs_path, nr_options); return ret; } static void hist_browser__update_pcnt_entries(struct hist_browser *hb) { u64 nr_entries = 0; struct rb_node *nd = rb_first(&hb->hists->entries); while (nd) { nr_entries++; nd = hists__filter_entries(rb_next(nd), hb->hists, hb->min_pcnt); } hb->nr_pcnt_entries = nr_entries; } static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, const char *helpline, 
const char *ev_name, bool left_exits, struct hist_browser_timer *hbt, float min_pcnt, struct perf_session_env *env) { struct hists *hists = &evsel->hists; struct hist_browser *browser = hist_browser__new(hists); struct branch_info *bi; struct pstack *fstack; char *options[16]; int nr_options = 0; int key = -1; char buf[64]; char script_opt[64]; int delay_secs = hbt ? hbt->refresh : 0; if (browser == NULL) return -1; if (min_pcnt) { browser->min_pcnt = min_pcnt; hist_browser__update_pcnt_entries(browser); } fstack = pstack__new(2); if (fstack == NULL) goto out; ui_helpline__push(helpline); memset(options, 0, sizeof(options)); while (1) { const struct thread *thread = NULL; const struct dso *dso = NULL; int choice = 0, annotate = -2, zoom_dso = -2, zoom_thread = -2, annotate_f = -2, annotate_t = -2, browse_map = -2; int scripts_comm = -2, scripts_symbol = -2, scripts_all = -2, switch_data = -2; nr_options = 0; key = hist_browser__run(browser, ev_name, hbt); if (browser->he_selection != NULL) { thread = hist_browser__selected_thread(browser); dso = browser->selection->map ? browser->selection->map->dso : NULL; } switch (key) { case K_TAB: case K_UNTAB: if (nr_events == 1) continue; /* * Exit the browser, let hists__browser_tree * go to the next or previous */ goto out_free_stack; case 'a': if (!sort__has_sym) { ui_browser__warning(&browser->b, delay_secs * 2, "Annotation is only available for symbolic views, " "include \"sym*\" in --sort to use it."); continue; } if (browser->selection == NULL || browser->selection->sym == NULL || browser->selection->map->dso->annotate_warned) continue; goto do_annotate; case 'P': hist_browser__dump(browser); continue; case 'd': goto zoom_dso; case 'V': browser->show_dso = !browser->show_dso; continue; case 't': goto zoom_thread; case '/': if (ui_browser__input_window("Symbol to show", "Please enter the name of symbol you want to see", buf, "ENTER: OK, ESC: Cancel", delay_secs * 2) == K_ENTER) { hists->symbol_filter_str = *buf ? 
buf : NULL; hists__filter_by_symbol(hists); hist_browser__reset(browser); } continue; case 'r': if (is_report_browser(hbt)) goto do_scripts; continue; case 's': if (is_report_browser(hbt)) goto do_data_switch; continue; case K_F1: case 'h': case '?': ui_browser__help_window(&browser->b, "h/?/F1 Show this window\n" "UP/DOWN/PGUP\n" "PGDN/SPACE Navigate\n" "q/ESC/CTRL+C Exit browser\n\n" "For multiple event sessions:\n\n" "TAB/UNTAB Switch events\n\n" "For symbolic views (--sort has sym):\n\n" "-> Zoom into DSO/Threads & Annotate current symbol\n" "<- Zoom out\n" "a Annotate current symbol\n" "C Collapse all callchains\n" "E Expand all callchains\n" "d Zoom into current DSO\n" "t Zoom into current Thread\n" "r Run available scripts('perf report' only)\n" "s Switch to another data file in PWD ('perf report' only)\n" "P Print histograms to perf.hist.N\n" "V Verbose (DSO names in callchains, etc)\n" "/ Filter symbol by name"); continue; case K_ENTER: case K_RIGHT: /* menu */ break; case K_LEFT: { const void *top; if (pstack__empty(fstack)) { /* * Go back to the perf_evsel_menu__run or other user */ if (left_exits) goto out_free_stack; continue; } top = pstack__pop(fstack); if (top == &browser->hists->dso_filter) goto zoom_out_dso; if (top == &browser->hists->thread_filter) goto zoom_out_thread; continue; } case K_ESC: if (!left_exits && !ui_browser__dialog_yesno(&browser->b, "Do you really want to exit?")) continue; /* Fall thru */ case 'q': case CTRL('c'): goto out_free_stack; default: continue; } if (!sort__has_sym) goto add_exit_option; if (sort__mode == SORT_MODE__BRANCH) { bi = browser->he_selection->branch_info; if (browser->selection != NULL && bi && bi->from.sym != NULL && !bi->from.map->dso->annotate_warned && asprintf(&options[nr_options], "Annotate %s", bi->from.sym->name) > 0) annotate_f = nr_options++; if (browser->selection != NULL && bi && bi->to.sym != NULL && !bi->to.map->dso->annotate_warned && (bi->to.sym != bi->from.sym || bi->to.map->dso != 
bi->from.map->dso) && asprintf(&options[nr_options], "Annotate %s", bi->to.sym->name) > 0) annotate_t = nr_options++; } else { if (browser->selection != NULL && browser->selection->sym != NULL && !browser->selection->map->dso->annotate_warned && asprintf(&options[nr_options], "Annotate %s", browser->selection->sym->name) > 0) annotate = nr_options++; } if (thread != NULL && asprintf(&options[nr_options], "Zoom %s %s(%d) thread", (browser->hists->thread_filter ? "out of" : "into"), (thread->comm_set ? thread->comm : ""), thread->pid) > 0) zoom_thread = nr_options++; if (dso != NULL && asprintf(&options[nr_options], "Zoom %s %s DSO", (browser->hists->dso_filter ? "out of" : "into"), (dso->kernel ? "the Kernel" : dso->short_name)) > 0) zoom_dso = nr_options++; if (browser->selection != NULL && browser->selection->map != NULL && asprintf(&options[nr_options], "Browse map details") > 0) browse_map = nr_options++; /* perf script support */ if (browser->he_selection) { struct symbol *sym; if (asprintf(&options[nr_options], "Run scripts for samples of thread [%s]", browser->he_selection->thread->comm) > 0) scripts_comm = nr_options++; sym = browser->he_selection->ms.sym; if (sym && sym->namelen && asprintf(&options[nr_options], "Run scripts for samples of symbol [%s]", sym->name) > 0) scripts_symbol = nr_options++; } if (asprintf(&options[nr_options], "Run scripts for all samples") > 0) scripts_all = nr_options++; if (is_report_browser(hbt) && asprintf(&options[nr_options], "Switch to another data file in PWD") > 0) switch_data = nr_options++; add_exit_option: options[nr_options++] = (char *)"Exit"; retry_popup_menu: choice = ui__popup_menu(nr_options, options); if (choice == nr_options - 1) break; if (choice == -1) { free_popup_options(options, nr_options - 1); continue; } if (choice == annotate || choice == annotate_t || choice == annotate_f) { struct hist_entry *he; int err; do_annotate: if (!objdump_path && perf_session_env__lookup_objdump(env)) continue; he = 
hist_browser__selected_entry(browser); if (he == NULL) continue; /* * we stash the branch_info symbol + map into the * the ms so we don't have to rewrite all the annotation * code to use branch_info. * in branch mode, the ms struct is not used */ if (choice == annotate_f) { he->ms.sym = he->branch_info->from.sym; he->ms.map = he->branch_info->from.map; } else if (choice == annotate_t) { he->ms.sym = he->branch_info->to.sym; he->ms.map = he->branch_info->to.map; } /* * Don't let this be freed, say, by hists__decay_entry. */ he->used = true; err = hist_entry__tui_annotate(he, evsel, hbt); he->used = false; /* * offer option to annotate the other branch source or target * (if they exists) when returning from annotate */ if ((err == 'q' || err == CTRL('c')) && annotate_t != -2 && annotate_f != -2) goto retry_popup_menu; ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries); if (err) ui_browser__handle_resize(&browser->b); } else if (choice == browse_map) map__browse(browser->selection->map); else if (choice == zoom_dso) { zoom_dso: if (browser->hists->dso_filter) { pstack__remove(fstack, &browser->hists->dso_filter); zoom_out_dso: ui_helpline__pop(); browser->hists->dso_filter = NULL; sort_dso.elide = false; } else { if (dso == NULL) continue; ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s DSO\"", dso->kernel ? "the Kernel" : dso->short_name); browser->hists->dso_filter = dso; sort_dso.elide = true; pstack__push(fstack, &browser->hists->dso_filter); } hists__filter_by_dso(hists); hist_browser__reset(browser); } else if (choice == zoom_thread) { zoom_thread: if (browser->hists->thread_filter) { pstack__remove(fstack, &browser->hists->thread_filter); zoom_out_thread: ui_helpline__pop(); browser->hists->thread_filter = NULL; sort_thread.elide = false; } else { ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"", thread->comm_set ? 
thread->comm : "", thread->pid); browser->hists->thread_filter = thread; sort_thread.elide = true; pstack__push(fstack, &browser->hists->thread_filter); } hists__filter_by_thread(hists); hist_browser__reset(browser); } /* perf scripts support */ else if (choice == scripts_all || choice == scripts_comm || choice == scripts_symbol) { do_scripts: memset(script_opt, 0, 64); if (choice == scripts_comm) sprintf(script_opt, " -c %s ", browser->he_selection->thread->comm); if (choice == scripts_symbol) sprintf(script_opt, " -S %s ", browser->he_selection->ms.sym->name); script_browse(script_opt); } /* Switch to another data file */ else if (choice == switch_data) { do_data_switch: if (!switch_data_file()) { key = K_SWITCH_INPUT_DATA; break; } else ui__warning("Won't switch the data files due to\n" "no valid data file get selected!\n"); } } out_free_stack: pstack__delete(fstack); out: hist_browser__delete(browser); free_popup_options(options, nr_options - 1); return key; } struct perf_evsel_menu { struct ui_browser b; struct perf_evsel *selection; bool lost_events, lost_events_warned; float min_pcnt; struct perf_session_env *env; }; static void perf_evsel_menu__write(struct ui_browser *browser, void *entry, int row) { struct perf_evsel_menu *menu = container_of(browser, struct perf_evsel_menu, b); struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node); bool current_entry = ui_browser__is_current_entry(browser, row); unsigned long nr_events = evsel->hists.stats.nr_events[PERF_RECORD_SAMPLE]; const char *ev_name = perf_evsel__name(evsel); char bf[256], unit; const char *warn = " "; size_t printed; ui_browser__set_color(browser, current_entry ? 
HE_COLORSET_SELECTED : HE_COLORSET_NORMAL); if (perf_evsel__is_group_event(evsel)) { struct perf_evsel *pos; ev_name = perf_evsel__group_name(evsel); for_each_group_member(pos, evsel) { nr_events += pos->hists.stats.nr_events[PERF_RECORD_SAMPLE]; } } nr_events = convert_unit(nr_events, &unit); printed = scnprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events, unit, unit == ' ' ? "" : " ", ev_name); slsmg_printf("%s", bf); nr_events = evsel->hists.stats.nr_events[PERF_RECORD_LOST]; if (nr_events != 0) { menu->lost_events = true; if (!current_entry) ui_browser__set_color(browser, HE_COLORSET_TOP); nr_events = convert_unit(nr_events, &unit); printed += scnprintf(bf, sizeof(bf), ": %ld%c%schunks LOST!", nr_events, unit, unit == ' ' ? "" : " "); warn = bf; } slsmg_write_nstring(warn, browser->width - printed); if (current_entry) menu->selection = evsel; } static int perf_evsel_menu__run(struct perf_evsel_menu *menu, int nr_events, const char *help, struct hist_browser_timer *hbt) { struct perf_evlist *evlist = menu->b.priv; struct perf_evsel *pos; const char *ev_name, *title = "Available samples"; int delay_secs = hbt ? hbt->refresh : 0; int key; if (ui_browser__show(&menu->b, title, "ESC: exit, ENTER|->: Browse histograms") < 0) return -1; while (1) { key = ui_browser__run(&menu->b, delay_secs); switch (key) { case K_TIMER: hbt->timer(hbt->arg); if (!menu->lost_events_warned && menu->lost_events) { ui_browser__warn_lost_events(&menu->b); menu->lost_events_warned = true; } continue; case K_RIGHT: case K_ENTER: if (!menu->selection) continue; pos = menu->selection; browse_hists: perf_evlist__set_selected(evlist, pos); /* * Give the calling tool a chance to populate the non * default evsel resorted hists tree. 
*/ if (hbt) hbt->timer(hbt->arg); ev_name = perf_evsel__name(pos); key = perf_evsel__hists_browse(pos, nr_events, help, ev_name, true, hbt, menu->min_pcnt, menu->env); ui_browser__show_title(&menu->b, title); switch (key) { case K_TAB: if (pos->node.next == &evlist->entries) pos = list_entry(evlist->entries.next, struct perf_evsel, node); else pos = list_entry(pos->node.next, struct perf_evsel, node); goto browse_hists; case K_UNTAB: if (pos->node.prev == &evlist->entries) pos = list_entry(evlist->entries.prev, struct perf_evsel, node); else pos = list_entry(pos->node.prev, struct perf_evsel, node); goto browse_hists; case K_ESC: if (!ui_browser__dialog_yesno(&menu->b, "Do you really want to exit?")) continue; /* Fall thru */ case K_SWITCH_INPUT_DATA: case 'q': case CTRL('c'): goto out; default: continue; } case K_LEFT: continue; case K_ESC: if (!ui_browser__dialog_yesno(&menu->b, "Do you really want to exit?")) continue; /* Fall thru */ case 'q': case CTRL('c'): goto out; default: continue; } } out: ui_browser__hide(&menu->b); return key; } static bool filter_group_entries(struct ui_browser *self __maybe_unused, void *entry) { struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node); if (symbol_conf.event_group && !perf_evsel__is_group_leader(evsel)) return true; return false; } static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist, int nr_entries, const char *help, struct hist_browser_timer *hbt, float min_pcnt, struct perf_session_env *env) { struct perf_evsel *pos; struct perf_evsel_menu menu = { .b = { .entries = &evlist->entries, .refresh = ui_browser__list_head_refresh, .seek = ui_browser__list_head_seek, .write = perf_evsel_menu__write, .filter = filter_group_entries, .nr_entries = nr_entries, .priv = evlist, }, .min_pcnt = min_pcnt, .env = env, }; ui_helpline__push("Press ESC to exit"); list_for_each_entry(pos, &evlist->entries, node) { const char *ev_name = perf_evsel__name(pos); size_t line_len = strlen(ev_name) + 7; if 
(menu.b.width < line_len) menu.b.width = line_len; } return perf_evsel_menu__run(&menu, nr_entries, help, hbt); } int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, struct hist_browser_timer *hbt, float min_pcnt, struct perf_session_env *env) { int nr_entries = evlist->nr_entries; single_entry: if (nr_entries == 1) { struct perf_evsel *first = list_entry(evlist->entries.next, struct perf_evsel, node); const char *ev_name = perf_evsel__name(first); return perf_evsel__hists_browse(first, nr_entries, help, ev_name, false, hbt, min_pcnt, env); } if (symbol_conf.event_group) { struct perf_evsel *pos; nr_entries = 0; list_for_each_entry(pos, &evlist->entries, node) if (perf_evsel__is_group_leader(pos)) nr_entries++; if (nr_entries == 1) goto single_entry; } return __perf_evlist__tui_browse_hists(evlist, nr_entries, help, hbt, min_pcnt, env); }
gpl-2.0
kerneldevs/RM-35-KERNEL-PECAN
drivers/video/msm/vidc/common/vcd/vcd_power_sm.c
72
8376
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 */
#include "vidc_type.h"
#include "vcd_power_sm.h"
#include "vcd_core.h"
#include "vcd.h"

/*
 * vcd_power_event() - dispatch a power/clock event to its handler.
 * @dev_ctxt: device context holding the power/clock state machine
 * @cctxt:    originating client context (may be NULL for device events)
 * @event:    VCD_EVT_PWR_* event code
 *
 * Device-scope events go to vcd_device_power_event(); client-scope events
 * go to vcd_client_power_event().  An event matching neither group falls
 * through and reports VCD_S_SUCCESS (original behaviour, preserved).
 *
 * Return: VCD_S_SUCCESS or the handler's error code.
 */
u32 vcd_power_event(struct vcd_dev_ctxt *dev_ctxt,
	struct vcd_clnt_ctxt *cctxt, u32 event)
{
	u32 status = VCD_S_SUCCESS;

	VCD_MSG_MED("Device power state = %d", dev_ctxt->pwr_clk_state);
	VCD_MSG_MED("event = 0x%x", event);

	switch (event) {
	case VCD_EVT_PWR_DEV_INIT_BEGIN:
	case VCD_EVT_PWR_DEV_INIT_END:
	case VCD_EVT_PWR_DEV_INIT_FAIL:
	case VCD_EVT_PWR_DEV_TERM_BEGIN:
	case VCD_EVT_PWR_DEV_TERM_END:
	case VCD_EVT_PWR_DEV_TERM_FAIL:
	case VCD_EVT_PWR_DEV_SLEEP_BEGIN:
	case VCD_EVT_PWR_DEV_SLEEP_END:
	case VCD_EVT_PWR_DEV_SET_PERFLVL:
	case VCD_EVT_PWR_DEV_HWTIMEOUT:
		status = vcd_device_power_event(dev_ctxt, event, cctxt);
		break;
	case VCD_EVT_PWR_CLNT_CMD_BEGIN:
	case VCD_EVT_PWR_CLNT_CMD_END:
	case VCD_EVT_PWR_CLNT_CMD_FAIL:
	case VCD_EVT_PWR_CLNT_PAUSE:
	case VCD_EVT_PWR_CLNT_RESUME:
	case VCD_EVT_PWR_CLNT_FIRST_FRAME:
	case VCD_EVT_PWR_CLNT_LAST_FRAME:
	case VCD_EVT_PWR_CLNT_ERRFATAL:
		status = vcd_client_power_event(dev_ctxt, cctxt, event);
		break;
	}

	if (VCD_FAILED(status))
		VCD_MSG_ERROR("vcd_power_event: event 0x%x failed", event);

	return status;
}

/*
 * vcd_device_power_event() - handle device-scope power transitions.
 * @dev_ctxt: device context
 * @event:    VCD_EVT_PWR_DEV_* event code
 * @cctxt:    client that triggered the event (passed to vcd_enable_clock)
 *
 * Drives the regulator + clock bring-up/tear-down and clock gating for
 * the whole device.  Unknown device events return VCD_ERR_FAIL
 * (status's initial value), matching the original code.
 *
 * Return: VCD_S_SUCCESS on success, VCD error code otherwise.
 */
u32 vcd_device_power_event(struct vcd_dev_ctxt *dev_ctxt, u32 event,
	struct vcd_clnt_ctxt *cctxt)
{
	u32 status = VCD_ERR_FAIL;
	u32 perf_lvl;

	switch (event) {
	case VCD_EVT_PWR_DEV_INIT_BEGIN:
		/* Power-on path: only valid from the fully-off state.
		 * Short-circuit && preserves the original nested-if order:
		 * query max perf level first, then power the rails up. */
		if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF &&
		    res_trk_get_max_perf_level(&dev_ctxt->max_perf_lvl) &&
		    res_trk_power_up()) {
			dev_ctxt->pwr_clk_state =
				VCD_PWRCLK_STATE_ON_NOTCLOCKED;
			dev_ctxt->curr_perf_lvl = 0;
			dev_ctxt->reqd_perf_lvl = 0;
			dev_ctxt->active_clnts = 0;
			dev_ctxt->set_perf_lvl_pending = false;
			status = vcd_enable_clock(dev_ctxt, cctxt);
			if (VCD_FAILED(status)) {
				/* Roll back the rail power-up on failure. */
				(void)res_trk_power_down();
				dev_ctxt->pwr_clk_state =
					VCD_PWRCLK_STATE_OFF;
			}
		}
		break;

	case VCD_EVT_PWR_DEV_INIT_END:
	case VCD_EVT_PWR_DEV_TERM_FAIL:
	case VCD_EVT_PWR_DEV_SLEEP_BEGIN:
	case VCD_EVT_PWR_DEV_HWTIMEOUT:
		/* Device going idle: gate the clock, keep power applied. */
		status = vcd_gate_clock(dev_ctxt);
		break;

	case VCD_EVT_PWR_DEV_INIT_FAIL:
	case VCD_EVT_PWR_DEV_TERM_END:
		/* Full power-down and state reset, if not already off. */
		if (dev_ctxt->pwr_clk_state != VCD_PWRCLK_STATE_OFF) {
			(void)vcd_disable_clock(dev_ctxt);
			(void)res_trk_power_down();
			dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_OFF;
			dev_ctxt->curr_perf_lvl = 0;
			dev_ctxt->reqd_perf_lvl = 0;
			dev_ctxt->active_clnts = 0;
			dev_ctxt->set_perf_lvl_pending = false;
			status = VCD_S_SUCCESS;
		}
		break;

	case VCD_EVT_PWR_DEV_TERM_BEGIN:
	case VCD_EVT_PWR_DEV_SLEEP_END:
		/* Device becoming active again: un-gate the clock. */
		status = vcd_un_gate_clock(dev_ctxt);
		break;

	case VCD_EVT_PWR_DEV_SET_PERFLVL:
		/* Apply the aggregate requested level, never below the
		 * driver-defined floor. */
		perf_lvl = dev_ctxt->reqd_perf_lvl > 0 ?
			dev_ctxt->reqd_perf_lvl : VCD_MIN_PERF_LEVEL;
		status = vcd_set_perf_level(dev_ctxt, perf_lvl);
		break;
	}

	return status;
}

/*
 * vcd_client_power_event() - handle client-scope power transitions.
 * @dev_ctxt: device context
 * @cctxt:    client context (checked for NULL on perf-level events)
 * @event:    VCD_EVT_PWR_CLNT_* event code
 *
 * Gates/un-gates the clock around client commands and adds/removes the
 * client's performance-level contribution as it pauses and resumes.
 *
 * Return: VCD_S_SUCCESS on success, VCD error code otherwise.
 */
u32 vcd_client_power_event(struct vcd_dev_ctxt *dev_ctxt,
	struct vcd_clnt_ctxt *cctxt, u32 event)
{
	u32 status = VCD_ERR_FAIL;

	switch (event) {
	case VCD_EVT_PWR_CLNT_CMD_BEGIN:
		status = vcd_un_gate_clock(dev_ctxt);
		break;

	case VCD_EVT_PWR_CLNT_CMD_END:
		status = vcd_gate_clock(dev_ctxt);
		break;

	case VCD_EVT_PWR_CLNT_CMD_FAIL:
		/* Only gate if no other work is in flight. */
		if (!vcd_core_is_busy(dev_ctxt))
			status = vcd_gate_clock(dev_ctxt);
		break;

	case VCD_EVT_PWR_CLNT_PAUSE:
	case VCD_EVT_PWR_CLNT_LAST_FRAME:
	case VCD_EVT_PWR_CLNT_ERRFATAL:
		/* Client going inactive: retract its perf-level share. */
		if (cctxt) {
			status = VCD_S_SUCCESS;
			if (cctxt->status.req_perf_lvl) {
				dev_ctxt->reqd_perf_lvl -=
					cctxt->reqd_perf_lvl;
				cctxt->status.req_perf_lvl = false;
				status = vcd_set_perf_level(dev_ctxt,
					dev_ctxt->reqd_perf_lvl);
			}
		}
		break;

	case VCD_EVT_PWR_CLNT_RESUME:
	case VCD_EVT_PWR_CLNT_FIRST_FRAME:
		/* Client going active: contribute its perf-level share. */
		if (cctxt) {
			status = VCD_S_SUCCESS;
			if (!cctxt->status.req_perf_lvl) {
				dev_ctxt->reqd_perf_lvl +=
					cctxt->reqd_perf_lvl;
				cctxt->status.req_perf_lvl = true;
				status = vcd_set_perf_level(dev_ctxt,
					dev_ctxt->reqd_perf_lvl);
			}
		}
		break;
	}

	return status;
}

/*
 * vcd_enable_clock() - transition from NOTCLOCKED to CLOCKED.
 * @dev_ctxt: device context
 * @cctxt:    client context (unused here beyond the call signature)
 *
 * Sets the performance level first, then enables the core clocks and
 * bumps the active-client count.
 *
 * NOTE(review): if res_trk_enable_clocks() fails, the state stays
 * NOTCLOCKED while status remains success and active_clnts is still
 * incremented — this mirrors the original code exactly.
 *
 * Return: VCD_S_SUCCESS on success, VCD_ERR_FAIL otherwise.
 */
u32 vcd_enable_clock(struct vcd_dev_ctxt *dev_ctxt,
	struct vcd_clnt_ctxt *cctxt)
{
	u32 status = VCD_S_SUCCESS;
	u32 perf_lvl;

	if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF) {
		VCD_MSG_ERROR("vcd_enable_clock(): Already in state "
			"VCD_PWRCLK_STATE_OFF\n");
		status = VCD_ERR_FAIL;
	} else if (dev_ctxt->pwr_clk_state ==
			VCD_PWRCLK_STATE_ON_NOTCLOCKED) {
		perf_lvl = dev_ctxt->reqd_perf_lvl > 0 ?
			dev_ctxt->reqd_perf_lvl : VCD_MIN_PERF_LEVEL;
		status = vcd_set_perf_level(dev_ctxt, perf_lvl);
		if (VCD_FAILED(status)) {
			/* Original collapses any failure to VCD_ERR_FAIL. */
			status = VCD_ERR_FAIL;
		} else if (res_trk_enable_clocks()) {
			dev_ctxt->pwr_clk_state =
				VCD_PWRCLK_STATE_ON_CLOCKED;
		}
	}

	if (!VCD_FAILED(status))
		dev_ctxt->active_clnts++;

	return status;
}

/*
 * vcd_disable_clock() - drop a clock reference; stop clocks at zero.
 * @dev_ctxt: device context
 *
 * Decrements the active-client count when clocks are on (gated or not)
 * and disables the core clocks once the last client is gone.
 *
 * Return: VCD_S_SUCCESS on success, VCD_ERR_FAIL otherwise.
 */
u32 vcd_disable_clock(struct vcd_dev_ctxt *dev_ctxt)
{
	u32 status = VCD_S_SUCCESS;

	if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF) {
		VCD_MSG_ERROR("vcd_disable_clock(): Already in state "
			"VCD_PWRCLK_STATE_OFF\n");
		status = VCD_ERR_FAIL;
	} else if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKED ||
		dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKGATED) {
		dev_ctxt->active_clnts--;
		if (!dev_ctxt->active_clnts) {
			if (!res_trk_disable_clocks())
				status = VCD_ERR_FAIL;
			/* State is reset even if the disable call failed,
			 * as in the original. */
			dev_ctxt->pwr_clk_state =
				VCD_PWRCLK_STATE_ON_NOTCLOCKED;
			dev_ctxt->curr_perf_lvl = 0;
		}
	}

	return status;
}

/*
 * vcd_set_perf_level() - apply a performance level now or defer it.
 * @dev_ctxt: device context
 * @perf_lvl: requested performance level
 *
 * If the core is busy the request is only marked pending; otherwise the
 * resource tracker is asked to apply it, with the pending flag recording
 * whether a retry is needed.
 *
 * Return: VCD_S_SUCCESS on success or deferral, VCD_ERR_FAIL otherwise.
 */
u32 vcd_set_perf_level(struct vcd_dev_ctxt *dev_ctxt, u32 perf_lvl)
{
	u32 status = VCD_S_SUCCESS;

	if (vcd_core_is_busy(dev_ctxt)) {
		/* Defer: will be re-applied when the core goes idle. */
		dev_ctxt->set_perf_lvl_pending = true;
		return status;
	}

	if (res_trk_set_perf_level(perf_lvl, &dev_ctxt->curr_perf_lvl,
			dev_ctxt)) {
		dev_ctxt->set_perf_lvl_pending = false;
	} else {
		status = VCD_ERR_FAIL;
		dev_ctxt->set_perf_lvl_pending = true;
	}

	return status;
}

/*
 * vcd_update_clnt_perf_lvl() - recompute a client's perf contribution.
 * @cctxt:       client context
 * @fps:         frame-rate property (numerator/denominator)
 * @frm_p_units: processing units per frame
 *
 * Derives the client's new level from frame rate and per-frame cost; if
 * the client currently contributes to the device aggregate, swaps the
 * old share for the new one and re-applies the aggregate level.
 *
 * Return: VCD_S_SUCCESS, or the result of vcd_set_perf_level().
 */
u32 vcd_update_clnt_perf_lvl(struct vcd_clnt_ctxt *cctxt,
	struct vcd_property_frame_rate *fps, u32 frm_p_units)
{
	u32 status = VCD_S_SUCCESS;
	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
	u32 new_perf_lvl = frm_p_units * fps->fps_numerator /
		fps->fps_denominator;

	if (cctxt->status.req_perf_lvl) {
		dev_ctxt->reqd_perf_lvl = dev_ctxt->reqd_perf_lvl -
			cctxt->reqd_perf_lvl + new_perf_lvl;
		status = vcd_set_perf_level(cctxt->dev_ctxt,
			dev_ctxt->reqd_perf_lvl);
	}
	cctxt->reqd_perf_lvl = new_perf_lvl;

	return status;
}

/*
 * vcd_gate_clock() - gate the running clock (CLOCKED -> CLOCKGATED).
 * @dev_ctxt: device context
 *
 * Compiled out (always success) when VIDC_1080p_DISABLE_GATING is set.
 * Idempotent when already gated; fails when the clock was never enabled.
 *
 * Return: VCD_S_SUCCESS on success, VCD_ERR_FAIL otherwise.
 */
u32 vcd_gate_clock(struct vcd_dev_ctxt *dev_ctxt)
{
	u32 status = VCD_S_SUCCESS;
#ifndef VIDC_1080p_DISABLE_GATING
	if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF ||
	    dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_NOTCLOCKED) {
		VCD_MSG_ERROR("%s(): Clk is Off or Not Clked yet\n",
			__func__);
		status = VCD_ERR_FAIL;
	} else if (dev_ctxt->pwr_clk_state ==
			VCD_PWRCLK_STATE_ON_CLOCKGATED) {
		status = VCD_S_SUCCESS;
	} else if (res_trk_disable_clocks()) {
		dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_ON_CLOCKGATED;
	} else {
		status = VCD_ERR_FAIL;
	}
#endif
	return status;
}

/*
 * vcd_un_gate_clock() - un-gate the clock (CLOCKGATED -> CLOCKED).
 * @dev_ctxt: device context
 *
 * Mirror image of vcd_gate_clock(); compiled out (always success) when
 * VIDC_1080p_DISABLE_GATING is set.  Idempotent when already un-gated.
 *
 * Return: VCD_S_SUCCESS on success, VCD_ERR_FAIL otherwise.
 */
u32 vcd_un_gate_clock(struct vcd_dev_ctxt *dev_ctxt)
{
	u32 status = VCD_S_SUCCESS;
#ifndef VIDC_1080p_DISABLE_GATING
	if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF ||
	    dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_NOTCLOCKED) {
		VCD_MSG_ERROR("%s(): Clk is Off or Not Clked yet\n",
			__func__);
		status = VCD_ERR_FAIL;
	} else if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKED) {
		status = VCD_S_SUCCESS;
	} else if (res_trk_enable_clocks()) {
		dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_ON_CLOCKED;
	} else {
		status = VCD_ERR_FAIL;
	}
#endif
	return status;
}
gpl-2.0
Cpasjuste/android_kernel_lg_p999
fs/pipe.c
328
25769
/* * linux/fs/pipe.c * * Copyright (C) 1991, 1992, 1999 Linus Torvalds */ #include <linux/mm.h> #include <linux/file.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/pipe_fs_i.h> #include <linux/uio.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/audit.h> #include <linux/syscalls.h> #include <asm/uaccess.h> #include <asm/ioctls.h> /* * We use a start+len construction, which provides full use of the * allocated memory. * -- Florian Coosmann (FGC) * * Reads with count = 0 should always return 0. * -- Julian Bradfield 1999-06-07. * * FIFOs and Pipes now generate SIGIO for both readers and writers. * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16 * * pipe_read & write cleanup * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09 */ static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass) { if (pipe->inode) mutex_lock_nested(&pipe->inode->i_mutex, subclass); } void pipe_lock(struct pipe_inode_info *pipe) { /* * pipe_lock() nests non-pipe inode locks (for writing to a file) */ pipe_lock_nested(pipe, I_MUTEX_PARENT); } EXPORT_SYMBOL(pipe_lock); void pipe_unlock(struct pipe_inode_info *pipe) { if (pipe->inode) mutex_unlock(&pipe->inode->i_mutex); } EXPORT_SYMBOL(pipe_unlock); void pipe_double_lock(struct pipe_inode_info *pipe1, struct pipe_inode_info *pipe2) { BUG_ON(pipe1 == pipe2); if (pipe1 < pipe2) { pipe_lock_nested(pipe1, I_MUTEX_PARENT); pipe_lock_nested(pipe2, I_MUTEX_CHILD); } else { pipe_lock_nested(pipe2, I_MUTEX_PARENT); pipe_lock_nested(pipe1, I_MUTEX_CHILD); } } /* Drop the inode semaphore and wait for a pipe event, atomically */ void pipe_wait(struct pipe_inode_info *pipe) { DEFINE_WAIT(wait); /* * Pipes are system-local resources, so sleeping on them * is considered a noninteractive wait: */ prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE); pipe_unlock(pipe); schedule(); 
finish_wait(&pipe->wait, &wait); pipe_lock(pipe); } static int pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len, int atomic) { unsigned long copy; while (len > 0) { while (!iov->iov_len) iov++; copy = min_t(unsigned long, len, iov->iov_len); if (atomic) { if (__copy_from_user_inatomic(to, iov->iov_base, copy)) return -EFAULT; } else { if (copy_from_user(to, iov->iov_base, copy)) return -EFAULT; } to += copy; len -= copy; iov->iov_base += copy; iov->iov_len -= copy; } return 0; } static int pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len, int atomic) { unsigned long copy; while (len > 0) { while (!iov->iov_len) iov++; copy = min_t(unsigned long, len, iov->iov_len); if (atomic) { if (__copy_to_user_inatomic(iov->iov_base, from, copy)) return -EFAULT; } else { if (copy_to_user(iov->iov_base, from, copy)) return -EFAULT; } from += copy; len -= copy; iov->iov_base += copy; iov->iov_len -= copy; } return 0; } /* * Attempt to pre-fault in the user memory, so we can use atomic copies. * Returns the number of bytes not faulted in. */ static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len) { while (!iov->iov_len) iov++; while (len > 0) { unsigned long this_len; this_len = min_t(unsigned long, len, iov->iov_len); if (fault_in_pages_writeable(iov->iov_base, this_len)) break; len -= this_len; iov++; } return len; } /* * Pre-fault in the user memory, so we can use atomic copies. */ static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len) { while (!iov->iov_len) iov++; while (len > 0) { unsigned long this_len; this_len = min_t(unsigned long, len, iov->iov_len); fault_in_pages_readable(iov->iov_base, this_len); len -= this_len; iov++; } } static void anon_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct page *page = buf->page; /* * If nobody else uses this page, and we don't already have a * temporary page, let's keep track of it as a one-deep * allocation cache. 
(Otherwise just release our reference to it) */ if (page_count(page) == 1 && !pipe->tmp_page) pipe->tmp_page = page; else page_cache_release(page); } /** * generic_pipe_buf_map - virtually map a pipe buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer that should be mapped * @atomic: whether to use an atomic map * * Description: * This function returns a kernel virtual address mapping for the * pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided * and the caller has to be careful not to fault before calling * the unmap function. * * Note that this function occupies KM_USER0 if @atomic != 0. */ void *generic_pipe_buf_map(struct pipe_inode_info *pipe, struct pipe_buffer *buf, int atomic) { if (atomic) { buf->flags |= PIPE_BUF_FLAG_ATOMIC; return kmap_atomic(buf->page, KM_USER0); } return kmap(buf->page); } /** * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer that should be unmapped * @map_data: the data that the mapping function returned * * Description: * This function undoes the mapping that ->map() provided. */ void generic_pipe_buf_unmap(struct pipe_inode_info *pipe, struct pipe_buffer *buf, void *map_data) { if (buf->flags & PIPE_BUF_FLAG_ATOMIC) { buf->flags &= ~PIPE_BUF_FLAG_ATOMIC; kunmap_atomic(map_data, KM_USER0); } else kunmap(buf->page); } /** * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to attempt to steal * * Description: * This function attempts to steal the &struct page attached to * @buf. If successful, this function returns 0 and returns with * the page locked. The caller may then reuse the page for whatever * he wishes; the typical use is insertion into a different file * page cache. 
*/ int generic_pipe_buf_steal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct page *page = buf->page; /* * A reference of one is golden, that means that the owner of this * page is the only one holding a reference to it. lock the page * and return OK. */ if (page_count(page) == 1) { lock_page(page); return 0; } return 1; } /** * generic_pipe_buf_get - get a reference to a &struct pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to get a reference to * * Description: * This function grabs an extra reference to @buf. It's used in * in the tee() system call, when we duplicate the buffers in one * pipe into another. */ void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { page_cache_get(buf->page); } /** * generic_pipe_buf_confirm - verify contents of the pipe buffer * @info: the pipe that the buffer belongs to * @buf: the buffer to confirm * * Description: * This function does nothing, because the generic pipe code uses * pages that are always good when inserted into the pipe. */ int generic_pipe_buf_confirm(struct pipe_inode_info *info, struct pipe_buffer *buf) { return 0; } /** * generic_pipe_buf_release - put a reference to a &struct pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to put a reference to * * Description: * This function releases a reference to @buf. 
*/ void generic_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { page_cache_release(buf->page); } static const struct pipe_buf_operations anon_pipe_buf_ops = { .can_merge = 1, .map = generic_pipe_buf_map, .unmap = generic_pipe_buf_unmap, .confirm = generic_pipe_buf_confirm, .release = anon_pipe_buf_release, .steal = generic_pipe_buf_steal, .get = generic_pipe_buf_get, }; static ssize_t pipe_read(struct kiocb *iocb, const struct iovec *_iov, unsigned long nr_segs, loff_t pos) { struct file *filp = iocb->ki_filp; struct inode *inode = filp->f_path.dentry->d_inode; struct pipe_inode_info *pipe; int do_wakeup; ssize_t ret; struct iovec *iov = (struct iovec *)_iov; size_t total_len; total_len = iov_length(iov, nr_segs); /* Null read succeeds. */ if (unlikely(total_len == 0)) return 0; do_wakeup = 0; ret = 0; mutex_lock(&inode->i_mutex); pipe = inode->i_pipe; for (;;) { int bufs = pipe->nrbufs; if (bufs) { int curbuf = pipe->curbuf; struct pipe_buffer *buf = pipe->bufs + curbuf; const struct pipe_buf_operations *ops = buf->ops; void *addr; size_t chars = buf->len; int error, atomic; if (chars > total_len) chars = total_len; error = ops->confirm(pipe, buf); if (error) { if (!ret) error = ret; break; } atomic = !iov_fault_in_pages_write(iov, chars); redo: addr = ops->map(pipe, buf, atomic); error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic); ops->unmap(pipe, buf, addr); if (unlikely(error)) { /* * Just retry with the slow path if we failed. */ if (atomic) { atomic = 0; goto redo; } if (!ret) ret = error; break; } ret += chars; buf->offset += chars; buf->len -= chars; if (!buf->len) { buf->ops = NULL; ops->release(pipe, buf); curbuf = (curbuf + 1) & (PIPE_BUFFERS-1); pipe->curbuf = curbuf; pipe->nrbufs = --bufs; do_wakeup = 1; } total_len -= chars; if (!total_len) break; /* common path: read succeeded */ } if (bufs) /* More to do? 
*/ continue; if (!pipe->writers) break; if (!pipe->waiting_writers) { /* syscall merging: Usually we must not sleep * if O_NONBLOCK is set, or if we got some data. * But if a writer sleeps in kernel space, then * we can wait for that data without violating POSIX. */ if (ret) break; if (filp->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } } if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; break; } if (do_wakeup) { wake_up_interruptible_sync(&pipe->wait); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } pipe_wait(pipe); } mutex_unlock(&inode->i_mutex); /* Signal writers asynchronously that there is more room. */ if (do_wakeup) { wake_up_interruptible_sync(&pipe->wait); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } if (ret > 0) file_accessed(filp); return ret; } static ssize_t pipe_write(struct kiocb *iocb, const struct iovec *_iov, unsigned long nr_segs, loff_t ppos) { struct file *filp = iocb->ki_filp; struct inode *inode = filp->f_path.dentry->d_inode; struct pipe_inode_info *pipe; ssize_t ret; int do_wakeup; struct iovec *iov = (struct iovec *)_iov; size_t total_len; ssize_t chars; total_len = iov_length(iov, nr_segs); /* Null write succeeds. 
*/ if (unlikely(total_len == 0)) return 0; do_wakeup = 0; ret = 0; mutex_lock(&inode->i_mutex); pipe = inode->i_pipe; if (!pipe->readers) { send_sig(SIGPIPE, current, 0); ret = -EPIPE; goto out; } /* We try to merge small writes */ chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */ if (pipe->nrbufs && chars != 0) { int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) & (PIPE_BUFFERS-1); struct pipe_buffer *buf = pipe->bufs + lastbuf; const struct pipe_buf_operations *ops = buf->ops; int offset = buf->offset + buf->len; if (ops->can_merge && offset + chars <= PAGE_SIZE) { int error, atomic = 1; void *addr; error = ops->confirm(pipe, buf); if (error) goto out; iov_fault_in_pages_read(iov, chars); redo1: addr = ops->map(pipe, buf, atomic); error = pipe_iov_copy_from_user(offset + addr, iov, chars, atomic); ops->unmap(pipe, buf, addr); ret = error; do_wakeup = 1; if (error) { if (atomic) { atomic = 0; goto redo1; } goto out; } buf->len += chars; total_len -= chars; ret = chars; if (!total_len) goto out; } } for (;;) { int bufs; if (!pipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } bufs = pipe->nrbufs; if (bufs < PIPE_BUFFERS) { int newbuf = (pipe->curbuf + bufs) & (PIPE_BUFFERS-1); struct pipe_buffer *buf = pipe->bufs + newbuf; struct page *page = pipe->tmp_page; char *src; int error, atomic = 1; if (!page) { page = alloc_page(GFP_HIGHUSER); if (unlikely(!page)) { ret = ret ? : -ENOMEM; break; } pipe->tmp_page = page; } /* Always wake up, even if the copy fails. Otherwise * we lock up (O_NONBLOCK-)readers that sleep due to * syscall merging. * FIXME! Is this really true? 
*/ do_wakeup = 1; chars = PAGE_SIZE; if (chars > total_len) chars = total_len; iov_fault_in_pages_read(iov, chars); redo2: if (atomic) src = kmap_atomic(page, KM_USER0); else src = kmap(page); error = pipe_iov_copy_from_user(src, iov, chars, atomic); if (atomic) kunmap_atomic(src, KM_USER0); else kunmap(page); if (unlikely(error)) { if (atomic) { atomic = 0; goto redo2; } if (!ret) ret = error; break; } ret += chars; /* Insert it into the buffer array */ buf->page = page; buf->ops = &anon_pipe_buf_ops; buf->offset = 0; buf->len = chars; pipe->nrbufs = ++bufs; pipe->tmp_page = NULL; total_len -= chars; if (!total_len) break; } if (bufs < PIPE_BUFFERS) continue; if (filp->f_flags & O_NONBLOCK) { if (!ret) ret = -EAGAIN; break; } if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; break; } if (do_wakeup) { wake_up_interruptible_sync(&pipe->wait); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); do_wakeup = 0; } pipe->waiting_writers++; pipe_wait(pipe); pipe->waiting_writers--; } out: mutex_unlock(&inode->i_mutex); if (do_wakeup) { wake_up_interruptible_sync(&pipe->wait); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); } if (ret > 0) file_update_time(filp); return ret; } static ssize_t bad_pipe_r(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { return -EBADF; } static ssize_t bad_pipe_w(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { return -EBADF; } static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = filp->f_path.dentry->d_inode; struct pipe_inode_info *pipe; int count, buf, nrbufs; switch (cmd) { case FIONREAD: mutex_lock(&inode->i_mutex); pipe = inode->i_pipe; count = 0; buf = pipe->curbuf; nrbufs = pipe->nrbufs; while (--nrbufs >= 0) { count += pipe->bufs[buf].len; buf = (buf+1) & (PIPE_BUFFERS-1); } mutex_unlock(&inode->i_mutex); return put_user(count, (int __user *)arg); default: return -EINVAL; } } /* No kernel lock held - fine */ static unsigned int 
pipe_poll(struct file *filp, poll_table *wait) { unsigned int mask; struct inode *inode = filp->f_path.dentry->d_inode; struct pipe_inode_info *pipe = inode->i_pipe; int nrbufs; poll_wait(filp, &pipe->wait, wait); /* Reading only -- no need for acquiring the semaphore. */ nrbufs = pipe->nrbufs; mask = 0; if (filp->f_mode & FMODE_READ) { mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0; if (!pipe->writers && filp->f_version != pipe->w_counter) mask |= POLLHUP; } if (filp->f_mode & FMODE_WRITE) { mask |= (nrbufs < PIPE_BUFFERS) ? POLLOUT | POLLWRNORM : 0; /* * Most Unices do not set POLLERR for FIFOs but on Linux they * behave exactly like pipes for poll(). */ if (!pipe->readers) mask |= POLLERR; } return mask; } static int pipe_release(struct inode *inode, int decr, int decw) { struct pipe_inode_info *pipe; mutex_lock(&inode->i_mutex); pipe = inode->i_pipe; pipe->readers -= decr; pipe->writers -= decw; if (!pipe->readers && !pipe->writers) { free_pipe_info(inode); } else { wake_up_interruptible_sync(&pipe->wait); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } mutex_unlock(&inode->i_mutex); return 0; } static int pipe_read_fasync(int fd, struct file *filp, int on) { struct inode *inode = filp->f_path.dentry->d_inode; int retval; mutex_lock(&inode->i_mutex); retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_readers); mutex_unlock(&inode->i_mutex); return retval; } static int pipe_write_fasync(int fd, struct file *filp, int on) { struct inode *inode = filp->f_path.dentry->d_inode; int retval; mutex_lock(&inode->i_mutex); retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_writers); mutex_unlock(&inode->i_mutex); return retval; } static int pipe_rdwr_fasync(int fd, struct file *filp, int on) { struct inode *inode = filp->f_path.dentry->d_inode; struct pipe_inode_info *pipe = inode->i_pipe; int retval; mutex_lock(&inode->i_mutex); retval = fasync_helper(fd, filp, on, &pipe->fasync_readers); if 
(retval >= 0) { retval = fasync_helper(fd, filp, on, &pipe->fasync_writers); if (retval < 0) /* this can happen only if on == T */ fasync_helper(-1, filp, 0, &pipe->fasync_readers); } mutex_unlock(&inode->i_mutex); return retval; } static int pipe_read_release(struct inode *inode, struct file *filp) { return pipe_release(inode, 1, 0); } static int pipe_write_release(struct inode *inode, struct file *filp) { return pipe_release(inode, 0, 1); } static int pipe_rdwr_release(struct inode *inode, struct file *filp) { int decr, decw; decr = (filp->f_mode & FMODE_READ) != 0; decw = (filp->f_mode & FMODE_WRITE) != 0; return pipe_release(inode, decr, decw); } static int pipe_read_open(struct inode *inode, struct file *filp) { int ret = -ENOENT; mutex_lock(&inode->i_mutex); if (inode->i_pipe) { ret = 0; inode->i_pipe->readers++; } mutex_unlock(&inode->i_mutex); return ret; } static int pipe_write_open(struct inode *inode, struct file *filp) { int ret = -ENOENT; mutex_lock(&inode->i_mutex); if (inode->i_pipe) { ret = 0; inode->i_pipe->writers++; } mutex_unlock(&inode->i_mutex); return ret; } static int pipe_rdwr_open(struct inode *inode, struct file *filp) { int ret = -ENOENT; mutex_lock(&inode->i_mutex); if (inode->i_pipe) { ret = 0; if (filp->f_mode & FMODE_READ) inode->i_pipe->readers++; if (filp->f_mode & FMODE_WRITE) inode->i_pipe->writers++; } mutex_unlock(&inode->i_mutex); return ret; } /* * The file_operations structs are not static because they * are also used in linux/fs/fifo.c to do operations on FIFOs. * * Pipes reuse fifos' file_operations structs. 
*/ const struct file_operations read_pipefifo_fops = { .llseek = no_llseek, .read = do_sync_read, .aio_read = pipe_read, .write = bad_pipe_w, .poll = pipe_poll, .unlocked_ioctl = pipe_ioctl, .open = pipe_read_open, .release = pipe_read_release, .fasync = pipe_read_fasync, }; const struct file_operations write_pipefifo_fops = { .llseek = no_llseek, .read = bad_pipe_r, .write = do_sync_write, .aio_write = pipe_write, .poll = pipe_poll, .unlocked_ioctl = pipe_ioctl, .open = pipe_write_open, .release = pipe_write_release, .fasync = pipe_write_fasync, }; const struct file_operations rdwr_pipefifo_fops = { .llseek = no_llseek, .read = do_sync_read, .aio_read = pipe_read, .write = do_sync_write, .aio_write = pipe_write, .poll = pipe_poll, .unlocked_ioctl = pipe_ioctl, .open = pipe_rdwr_open, .release = pipe_rdwr_release, .fasync = pipe_rdwr_fasync, }; struct pipe_inode_info * alloc_pipe_info(struct inode *inode) { struct pipe_inode_info *pipe; pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL); if (pipe) { init_waitqueue_head(&pipe->wait); pipe->r_counter = pipe->w_counter = 1; pipe->inode = inode; } return pipe; } void __free_pipe_info(struct pipe_inode_info *pipe) { int i; for (i = 0; i < PIPE_BUFFERS; i++) { struct pipe_buffer *buf = pipe->bufs + i; if (buf->ops) buf->ops->release(pipe, buf); } if (pipe->tmp_page) __free_page(pipe->tmp_page); kfree(pipe); } void free_pipe_info(struct inode *inode) { __free_pipe_info(inode->i_pipe); inode->i_pipe = NULL; } static struct vfsmount *pipe_mnt __read_mostly; static int pipefs_delete_dentry(struct dentry *dentry) { /* * At creation time, we pretended this dentry was hashed * (by clearing DCACHE_UNHASHED bit in d_flags) * At delete time, we restore the truth : not hashed. * (so that dput() can proceed correctly) */ dentry->d_flags |= DCACHE_UNHASHED; return 0; } /* * pipefs_dname() is called from d_path(). 
*/ static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen) { return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]", dentry->d_inode->i_ino); } static const struct dentry_operations pipefs_dentry_operations = { .d_delete = pipefs_delete_dentry, .d_dname = pipefs_dname, }; static struct inode * get_pipe_inode(void) { struct inode *inode = new_inode(pipe_mnt->mnt_sb); struct pipe_inode_info *pipe; if (!inode) goto fail_inode; pipe = alloc_pipe_info(inode); if (!pipe) goto fail_iput; inode->i_pipe = pipe; pipe->readers = pipe->writers = 1; inode->i_fop = &rdwr_pipefifo_fops; /* * Mark the inode dirty from the very beginning, * that way it will never be moved to the dirty * list because "mark_inode_dirty()" will think * that it already _is_ on the dirty list. */ inode->i_state = I_DIRTY; inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; return inode; fail_iput: iput(inode); fail_inode: return NULL; } struct file *create_write_pipe(int flags) { int err; struct inode *inode; struct file *f; struct dentry *dentry; struct qstr name = { .name = "" }; err = -ENFILE; inode = get_pipe_inode(); if (!inode) goto err; err = -ENOMEM; dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &name); if (!dentry) goto err_inode; dentry->d_op = &pipefs_dentry_operations; /* * We don't want to publish this dentry into global dentry hash table. 
* We pretend dentry is already hashed, by unsetting DCACHE_UNHASHED * This permits a working /proc/$pid/fd/XXX on pipes */ dentry->d_flags &= ~DCACHE_UNHASHED; d_instantiate(dentry, inode); err = -ENFILE; f = alloc_file(pipe_mnt, dentry, FMODE_WRITE, &write_pipefifo_fops); if (!f) goto err_dentry; f->f_mapping = inode->i_mapping; f->f_flags = O_WRONLY | (flags & O_NONBLOCK); f->f_version = 0; return f; err_dentry: free_pipe_info(inode); dput(dentry); return ERR_PTR(err); err_inode: free_pipe_info(inode); iput(inode); err: return ERR_PTR(err); } void free_write_pipe(struct file *f) { free_pipe_info(f->f_dentry->d_inode); path_put(&f->f_path); put_filp(f); } struct file *create_read_pipe(struct file *wrf, int flags) { struct file *f = get_empty_filp(); if (!f) return ERR_PTR(-ENFILE); /* Grab pipe from the writer */ f->f_path = wrf->f_path; path_get(&wrf->f_path); f->f_mapping = wrf->f_path.dentry->d_inode->i_mapping; f->f_pos = 0; f->f_flags = O_RDONLY | (flags & O_NONBLOCK); f->f_op = &read_pipefifo_fops; f->f_mode = FMODE_READ; f->f_version = 0; return f; } int do_pipe_flags(int *fd, int flags) { struct file *fw, *fr; int error; int fdw, fdr; if (flags & ~(O_CLOEXEC | O_NONBLOCK)) return -EINVAL; fw = create_write_pipe(flags); if (IS_ERR(fw)) return PTR_ERR(fw); fr = create_read_pipe(fw, flags); error = PTR_ERR(fr); if (IS_ERR(fr)) goto err_write_pipe; error = get_unused_fd_flags(flags); if (error < 0) goto err_read_pipe; fdr = error; error = get_unused_fd_flags(flags); if (error < 0) goto err_fdr; fdw = error; audit_fd_pair(fdr, fdw); fd_install(fdr, fr); fd_install(fdw, fw); fd[0] = fdr; fd[1] = fdw; return 0; err_fdr: put_unused_fd(fdr); err_read_pipe: path_put(&fr->f_path); put_filp(fr); err_write_pipe: free_write_pipe(fw); return error; } /* * sys_pipe() is the normal C calling standard for creating * a pipe. It's not the way Unix traditionally does this, though. 
*/ SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags) { int fd[2]; int error; error = do_pipe_flags(fd, flags); if (!error) { if (copy_to_user(fildes, fd, sizeof(fd))) { sys_close(fd[0]); sys_close(fd[1]); error = -EFAULT; } } return error; } SYSCALL_DEFINE1(pipe, int __user *, fildes) { return sys_pipe2(fildes, 0); } /* * pipefs should _never_ be mounted by userland - too much of security hassle, * no real gain from having the whole whorehouse mounted. So we don't need * any operations on the root directory. However, we need a non-trivial * d_name - pipe: will go nicely and kill the special-casing in procfs. */ static int pipefs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt) { return get_sb_pseudo(fs_type, "pipe:", NULL, PIPEFS_MAGIC, mnt); } static struct file_system_type pipe_fs_type = { .name = "pipefs", .get_sb = pipefs_get_sb, .kill_sb = kill_anon_super, }; static int __init init_pipe_fs(void) { int err = register_filesystem(&pipe_fs_type); if (!err) { pipe_mnt = kern_mount(&pipe_fs_type); if (IS_ERR(pipe_mnt)) { err = PTR_ERR(pipe_mnt); unregister_filesystem(&pipe_fs_type); } } return err; } static void __exit exit_pipe_fs(void) { unregister_filesystem(&pipe_fs_type); mntput(pipe_mnt); } fs_initcall(init_pipe_fs); module_exit(exit_pipe_fs);
gpl-2.0
arjen75/ics-lge-kernel-msm7x27-chick
drivers/char/csdio.c
328
27260
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 and only version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/mutex.h> #include <linux/serial_reg.h> #include <linux/circ_buf.h> #include <linux/gfp.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <linux/platform_device.h> /* Char device */ #include <linux/cdev.h> #include <linux/fs.h> /* Sdio device */ #include <linux/mmc/core.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> #include <linux/mmc/sdio.h> #include <linux/mmc/sdio_func.h> #include <linux/mmc/sdio_ids.h> #include <linux/csdio.h> #define FALSE 0 #define TRUE 1 #define VERSION "0.5" #define CSDIO_NUM_OF_SDIO_FUNCTIONS 7 #define CSDIO_DEV_NAME "csdio" #define TP_DEV_NAME CSDIO_DEV_NAME"f" #define CSDIO_DEV_PERMISSIONS 0666 #define CSDIO_SDIO_BUFFER_SIZE (64*512) int csdio_major; int csdio_minor; int csdio_transport_nr_devs = CSDIO_NUM_OF_SDIO_FUNCTIONS; static uint csdio_vendor_id; static uint csdio_device_id; static char *host_name; static struct csdio_func_t { struct sdio_func *m_func; int m_enabled; struct cdev m_cdev; /* char device structure */ struct device *m_device; u32 m_block_size; } *g_csdio_func_table[CSDIO_NUM_OF_SDIO_FUNCTIONS] = {0}; struct csdio_t { struct cdev m_cdev; struct device *m_device; struct class 
*m_driver_class; struct fasync_struct *m_async_queue; unsigned char m_current_irq_mask; /* currently enabled irqs */ struct mmc_host *m_host; unsigned int m_num_of_func; } g_csdio; struct csdio_file_descriptor { struct csdio_func_t *m_port; u32 m_block_mode;/* data tran. byte(0)/block(1) */ u32 m_op_code; /* address auto increment flag */ u32 m_address; }; static void *g_sdio_buffer; /* * Open and release */ static int csdio_transport_open(struct inode *inode, struct file *filp) { int ret = 0; struct csdio_func_t *port = NULL; /* device information */ struct sdio_func *func = NULL; struct csdio_file_descriptor *descriptor = NULL; port = container_of(inode->i_cdev, struct csdio_func_t, m_cdev); func = port->m_func; descriptor = kzalloc(sizeof(struct csdio_file_descriptor), GFP_KERNEL); if (!descriptor) { ret = -ENOMEM; goto exit; } pr_info(TP_DEV_NAME"%d: open: func=%p, port=%p\n", func->num, func, port); sdio_claim_host(func); ret = sdio_enable_func(func); if (ret) { pr_err(TP_DEV_NAME"%d:Enable func failed (%d)\n", func->num, ret); ret = -EIO; goto free_descriptor; } descriptor->m_port = port; filp->private_data = descriptor; goto release_host; free_descriptor: kfree(descriptor); release_host: sdio_release_host(func); exit: return ret; } static int csdio_transport_release(struct inode *inode, struct file *filp) { int ret = 0; struct csdio_file_descriptor *descriptor = filp->private_data; struct csdio_func_t *port = descriptor->m_port; struct sdio_func *func = port->m_func; pr_info(TP_DEV_NAME"%d: release\n", func->num); sdio_claim_host(func); ret = sdio_disable_func(func); if (ret) { pr_err(TP_DEV_NAME"%d:Disable func failed(%d)\n", func->num, ret); ret = -EIO; } sdio_release_host(func); kfree(descriptor); return ret; } /* * Data management: read and write */ static ssize_t csdio_transport_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) { ssize_t ret = 0; struct csdio_file_descriptor *descriptor = filp->private_data; struct csdio_func_t 
*port = descriptor->m_port; struct sdio_func *func = port->m_func; size_t t_count = count; if (descriptor->m_block_mode) { pr_info(TP_DEV_NAME "%d: CMD53 read, Md:%d, Addr:0x%04X," " Un:%d (Bl:%d, BlSz:%d)\n", func->num, descriptor->m_block_mode, descriptor->m_address, count*port->m_block_size, count, port->m_block_size); /* recalculate size */ count *= port->m_block_size; } sdio_claim_host(func); if (descriptor->m_op_code) { /* auto increment */ ret = sdio_memcpy_fromio(func, g_sdio_buffer, descriptor->m_address, count); } else { /* FIFO */ ret = sdio_readsb(func, g_sdio_buffer, descriptor->m_address, count); } sdio_release_host(func); if (!ret) { if (copy_to_user(buf, g_sdio_buffer, count)) ret = -EFAULT; else ret = t_count; } if (ret < 0) { pr_err(TP_DEV_NAME "%d: CMD53 read failed (%d)" "(Md:%d, Addr:0x%04X, Sz:%d)\n", func->num, ret, descriptor->m_block_mode, descriptor->m_address, count); } return ret; } static ssize_t csdio_transport_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos) { ssize_t ret = 0; struct csdio_file_descriptor *descriptor = filp->private_data; struct csdio_func_t *port = descriptor->m_port; struct sdio_func *func = port->m_func; size_t t_count = count; if (descriptor->m_block_mode) count *= port->m_block_size; if (copy_from_user(g_sdio_buffer, buf, count)) { pr_err(TP_DEV_NAME"%d:copy_from_user failed\n", func->num); ret = -EFAULT; } else { sdio_claim_host(func); if (descriptor->m_op_code) { /* auto increment */ ret = sdio_memcpy_toio(func, descriptor->m_address, g_sdio_buffer, count); } else { /* FIFO */ ret = sdio_writesb(func, descriptor->m_address, g_sdio_buffer, count); } sdio_release_host(func); if (!ret) { ret = t_count; } else { pr_err(TP_DEV_NAME "%d: CMD53 write failed (%d)" "(Md:%d, Addr:0x%04X, Sz:%d)\n", func->num, ret, descriptor->m_block_mode, descriptor->m_address, count); } } return ret; } /* disable interrupt for sdio client */ static int disable_sdio_client_isr(struct sdio_func *func) { int 
ret; /* disable for all functions, to restore interrupts * use g_csdio.m_current_irq_mask */ sdio_f0_writeb(func, 0, SDIO_CCCR_IENx, &ret); if (ret) pr_err(CSDIO_DEV_NAME" Can't sdio_f0_writeb (%d)\n", ret); return ret; } /* * This handles the interrupt from SDIO. */ static void csdio_sdio_irq(struct sdio_func *func) { int ret; pr_info(CSDIO_DEV_NAME" csdio_sdio_irq: func=%d\n", func->num); ret = disable_sdio_client_isr(func); if (ret) { pr_err(CSDIO_DEV_NAME" Can't disable client isr(%d)\n", ret); return; } /* signal asynchronous readers */ if (g_csdio.m_async_queue) kill_fasync(&g_csdio.m_async_queue, SIGIO, POLL_IN); } /* * The ioctl() implementation */ static int csdio_transport_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { int err = 0; int ret = 0; struct csdio_file_descriptor *descriptor = filp->private_data; struct csdio_func_t *port = descriptor->m_port; struct sdio_func *func = port->m_func; /* extract the type and number bitfields sanity check: return ENOTTY (inappropriate ioctl) before access_ok() */ if ((_IOC_TYPE(cmd) != CSDIO_IOC_MAGIC) || (_IOC_NR(cmd) > CSDIO_IOC_MAXNR)) { pr_err(TP_DEV_NAME "Wrong ioctl command parameters\n"); ret = -ENOTTY; goto exit; } /* the direction is a bitmask, and VERIFY_WRITE catches R/W * transfers. 
`Type' is user-oriented, while access_ok is kernel-oriented, so the concept of "read" and "write" is reversed */ if (_IOC_DIR(cmd) & _IOC_READ) { err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd)); } else { if (_IOC_DIR(cmd) & _IOC_WRITE) { err = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd)); } } if (err) { pr_err(TP_DEV_NAME "Wrong ioctl access direction\n"); ret = -EFAULT; goto exit; } switch (cmd) { case CSDIO_IOC_SET_OP_CODE: { pr_info(TP_DEV_NAME"%d:SET_OP_CODE=%d\n", func->num, descriptor->m_op_code); ret = get_user(descriptor->m_op_code, (unsigned char __user *)arg); if (ret) { pr_err(TP_DEV_NAME"%d:SET_OP_CODE get data" " from user space failed(%d)\n", func->num, ret); ret = -ENOTTY; break; } } break; case CSDIO_IOC_FUNCTION_SET_BLOCK_SIZE: { unsigned block_size; ret = get_user(block_size, (unsigned __user *)arg); if (ret) { pr_err(TP_DEV_NAME"%d:SET_BLOCK_SIZE get data" " from user space failed(%d)\n", func->num, ret); ret = -ENOTTY; break; } pr_info(TP_DEV_NAME"%d:SET_BLOCK_SIZE=%d\n", func->num, block_size); sdio_claim_host(func); ret = sdio_set_block_size(func, block_size); if (!ret) { port->m_block_size = block_size; } else { pr_err(TP_DEV_NAME"%d:SET_BLOCK_SIZE set block" " size to %d failed (%d)\n", func->num, block_size, ret); ret = -ENOTTY; break; } sdio_release_host(func); } break; case CSDIO_IOC_SET_BLOCK_MODE: { pr_info(TP_DEV_NAME"%d:SET_BLOCK_MODE=%d\n", func->num, descriptor->m_block_mode); ret = get_user(descriptor->m_block_mode, (unsigned char __user *)arg); if (ret) { pr_err(TP_DEV_NAME"%d:SET_BLOCK_MODE get data" " from user space failed\n", func->num); ret = -ENOTTY; break; } } break; case CSDIO_IOC_CMD52: { struct csdio_cmd52_ctrl_t cmd52ctrl; int cmd52ret; if (copy_from_user(&cmd52ctrl, (const unsigned char __user *)arg, sizeof(cmd52ctrl))) { pr_err(TP_DEV_NAME"%d:IOC_CMD52 get data" " from user space failed\n", func->num); ret = -ENOTTY; break; } sdio_claim_host(func); if (cmd52ctrl.m_write) 
sdio_writeb(func, cmd52ctrl.m_data, cmd52ctrl.m_address, &cmd52ret); else cmd52ctrl.m_data = sdio_readb(func, cmd52ctrl.m_address, &cmd52ret); cmd52ctrl.m_ret = cmd52ret; sdio_release_host(func); if (cmd52ctrl.m_ret) pr_err(TP_DEV_NAME"%d:IOC_CMD52 failed (%d)\n", func->num, cmd52ctrl.m_ret); if (copy_to_user((unsigned char __user *)arg, &cmd52ctrl, sizeof(cmd52ctrl))) { pr_err(TP_DEV_NAME"%d:IOC_CMD52 put data" " to user space failed\n", func->num); ret = -ENOTTY; break; } } break; case CSDIO_IOC_CMD53: { struct csdio_cmd53_ctrl_t csdio_cmd53_ctrl; if (copy_from_user(&csdio_cmd53_ctrl, (const char __user *)arg, sizeof(csdio_cmd53_ctrl))) { ret = -EPERM; pr_err(TP_DEV_NAME"%d:" "Get data from user space failed\n", func->num); break; } descriptor->m_block_mode = csdio_cmd53_ctrl.m_block_mode; descriptor->m_op_code = csdio_cmd53_ctrl.m_op_code; descriptor->m_address = csdio_cmd53_ctrl.m_address; } break; case CSDIO_IOC_CONNECT_ISR: { pr_info(CSDIO_DEV_NAME" SDIO_CONNECT_ISR" " func=%d, csdio_sdio_irq=%x\n", func->num, (unsigned int)csdio_sdio_irq); sdio_claim_host(func); ret = sdio_claim_irq(func, csdio_sdio_irq); sdio_release_host(func); if (ret) { pr_err(CSDIO_DEV_NAME" SDIO_CONNECT_ISR" " claim irq failed(%d)\n", ret); } else { /* update current irq mask for disable/enable */ g_csdio.m_current_irq_mask |= (1 << func->num); } } break; case CSDIO_IOC_DISCONNECT_ISR: { pr_info(CSDIO_DEV_NAME " SDIO_DISCONNECT_ISR func=%d\n", func->num); sdio_claim_host(func); sdio_release_irq(func); sdio_release_host(func); /* update current irq mask for disable/enable */ g_csdio.m_current_irq_mask &= ~(1 << func->num); } break; default: /* redundant, as cmd was checked against MAXNR */ pr_warning(TP_DEV_NAME"%d: Redundant IOCTL\n", func->num); ret = -ENOTTY; } exit: return ret; } static const struct file_operations csdio_transport_fops = { .owner = THIS_MODULE, .read = csdio_transport_read, .write = csdio_transport_write, .ioctl = csdio_transport_ioctl, .open = csdio_transport_open, 
.release = csdio_transport_release, }; static void csdio_transport_cleanup(struct csdio_func_t *port) { int devno = MKDEV(csdio_major, csdio_minor + port->m_func->num); device_destroy(g_csdio.m_driver_class, devno); port->m_device = NULL; cdev_del(&port->m_cdev); } #if defined(CONFIG_DEVTMPFS) static inline int csdio_cdev_update_permissions( const char *devname, int dev_minor) { return 0; } #else static int csdio_cdev_update_permissions( const char *devname, int dev_minor) { int ret = 0; mm_segment_t fs; struct file *file; struct inode *inode; struct iattr newattrs; int mode = CSDIO_DEV_PERMISSIONS; char dev_file[64]; fs = get_fs(); set_fs(get_ds()); snprintf(dev_file, sizeof(dev_file), "/dev/%s%d", devname, dev_minor); file = filp_open(dev_file, O_RDWR, 0); if (IS_ERR(file)) { ret = -EFAULT; goto exit; } inode = file->f_path.dentry->d_inode; mutex_lock(&inode->i_mutex); newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; ret = notify_change(file->f_path.dentry, &newattrs); mutex_unlock(&inode->i_mutex); filp_close(file, NULL); exit: set_fs(fs); return ret; } #endif static struct device *csdio_cdev_init(struct cdev *char_dev, const struct file_operations *file_op, int dev_minor, const char *devname, struct device *parent) { int ret = 0; struct device *new_device = NULL; dev_t devno = MKDEV(csdio_major, dev_minor); /* Initialize transport device */ cdev_init(char_dev, file_op); char_dev->owner = THIS_MODULE; char_dev->ops = file_op; ret = cdev_add(char_dev, devno, 1); /* Fail gracefully if need be */ if (ret) { pr_warning("Error %d adding CSDIO char device '%s%d'", ret, devname, dev_minor); goto exit; } pr_info("'%s%d' char driver registered\n", devname, dev_minor); /* create a /dev entry for transport drivers */ new_device = device_create(g_csdio.m_driver_class, parent, devno, NULL, "%s%d", devname, dev_minor); if (!new_device) { pr_err("Can't create device node '/dev/%s%d'\n", devname, dev_minor); goto 
cleanup; } /* no irq attached */ g_csdio.m_current_irq_mask = 0; if (csdio_cdev_update_permissions(devname, dev_minor)) { pr_warning("%s%d: Unable to update access permissions of the" " '/dev/%s%d'\n", devname, dev_minor, devname, dev_minor); } pr_info("%s%d: Device node '/dev/%s%d' created successfully\n", devname, dev_minor, devname, dev_minor); goto exit; cleanup: cdev_del(char_dev); exit: return new_device; } /* Looks for first non empty function, returns NULL otherwise */ static struct sdio_func *get_active_func(void) { int i; for (i = 0; i < CSDIO_NUM_OF_SDIO_FUNCTIONS; i++) { if (g_csdio_func_table[i]) return g_csdio_func_table[i]->m_func; } return NULL; } static ssize_t show_vdd(struct device *dev, struct device_attribute *attr, char *buf) { if (NULL == g_csdio.m_host) return snprintf(buf, PAGE_SIZE, "N/A\n"); return snprintf(buf, PAGE_SIZE, "%d\n", g_csdio.m_host->ios.vdd); } static int set_vdd_helper(int value) { struct mmc_ios *ios = NULL; if (NULL == g_csdio.m_host) { pr_err("%s0: Set VDD, no MMC host assigned\n", CSDIO_DEV_NAME); return -ENXIO; } mmc_claim_host(g_csdio.m_host); ios = &g_csdio.m_host->ios; ios->vdd = value; g_csdio.m_host->ops->set_ios(g_csdio.m_host, ios); mmc_release_host(g_csdio.m_host); return 0; } static ssize_t set_vdd(struct device *dev, struct device_attribute *att, const char *buf, size_t count) { int value = 0; sscanf(buf, "%d", &value); if (set_vdd_helper(value)) return -ENXIO; return count; } static DEVICE_ATTR(vdd, S_IRUGO | S_IWUSR, show_vdd, set_vdd); static struct attribute *dev_attrs[] = { &dev_attr_vdd.attr, NULL, }; static struct attribute_group dev_attr_grp = { .attrs = dev_attrs, }; /* * The ioctl() implementation for control device */ static int csdio_ctrl_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { int err = 0; int ret = 0; pr_info("CSDIO ctrl ioctl.\n"); /* extract the type and number bitfields sanity check: return ENOTTY (inappropriate ioctl) before access_ok() */ if 
((_IOC_TYPE(cmd) != CSDIO_IOC_MAGIC) || (_IOC_NR(cmd) > CSDIO_IOC_MAXNR)) { pr_err(CSDIO_DEV_NAME "Wrong ioctl command parameters\n"); ret = -ENOTTY; goto exit; } /* the direction is a bitmask, and VERIFY_WRITE catches R/W transfers. `Type' is user-oriented, while access_ok is kernel-oriented, so the concept of "read" and "write" is reversed */ if (_IOC_DIR(cmd) & _IOC_READ) { err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd)); } else { if (_IOC_DIR(cmd) & _IOC_WRITE) err = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd)); } if (err) { pr_err(CSDIO_DEV_NAME "Wrong ioctl access direction\n"); ret = -EFAULT; goto exit; } switch (cmd) { case CSDIO_IOC_ENABLE_HIGHSPEED_MODE: pr_info(CSDIO_DEV_NAME" ENABLE_HIGHSPEED_MODE\n"); break; case CSDIO_IOC_SET_DATA_TRANSFER_CLOCKS: { struct mmc_host *host = g_csdio.m_host; struct mmc_ios *ios = NULL; if (NULL == host) { pr_err("%s0: " "CSDIO_IOC_SET_DATA_TRANSFER_CLOCKS," " no MMC host assigned\n", CSDIO_DEV_NAME); ret = -EFAULT; goto exit; } ios = &host->ios; mmc_claim_host(host); ret = get_user(host->ios.clock, (unsigned int __user *)arg); if (ret) { pr_err(CSDIO_DEV_NAME " get data from user space failed\n"); } else { pr_err(CSDIO_DEV_NAME "SET_DATA_TRANSFER_CLOCKS(%d-%d)(%d)\n", host->f_min, host->f_max, host->ios.clock); host->ops->set_ios(host, ios); } mmc_release_host(host); } break; case CSDIO_IOC_ENABLE_ISR: { int ret; unsigned char reg; struct sdio_func *func = get_active_func(); if (!func) { pr_err(CSDIO_DEV_NAME " CSDIO_IOC_ENABLE_ISR" " no active sdio function\n"); ret = -EFAULT; goto exit; } pr_info(CSDIO_DEV_NAME " CSDIO_IOC_ENABLE_ISR func=%d\n", func->num); reg = g_csdio.m_current_irq_mask | 1; sdio_claim_host(func); sdio_f0_writeb(func, reg, SDIO_CCCR_IENx, &ret); sdio_release_host(func); if (ret) { pr_err(CSDIO_DEV_NAME " Can't sdio_f0_writeb (%d)\n", ret); goto exit; } } break; case CSDIO_IOC_DISABLE_ISR: { int ret; struct sdio_func *func = get_active_func(); if (!func) { 
pr_err(CSDIO_DEV_NAME " CSDIO_IOC_ENABLE_ISR" " no active sdio function\n"); ret = -EFAULT; goto exit; } pr_info(CSDIO_DEV_NAME " CSDIO_IOC_DISABLE_ISR func=%p\n", func); sdio_claim_host(func); ret = disable_sdio_client_isr(func); sdio_release_host(func); if (ret) { pr_err("%s0: Can't disable client isr (%d)\n", CSDIO_DEV_NAME, ret); goto exit; } } break; case CSDIO_IOC_SET_VDD: { unsigned int vdd = 0; ret = get_user(vdd, (unsigned int __user *)arg); if (ret) { pr_err("%s0: CSDIO_IOC_SET_VDD," " get data from user space failed\n", CSDIO_DEV_NAME); goto exit; } pr_info(CSDIO_DEV_NAME" CSDIO_IOC_SET_VDD - %d\n", vdd); ret = set_vdd_helper(vdd); if (ret) goto exit; } break; case CSDIO_IOC_GET_VDD: { if (NULL == g_csdio.m_host) { pr_err("%s0: CSDIO_IOC_GET_VDD," " no MMC host assigned\n", CSDIO_DEV_NAME); ret = -EFAULT; goto exit; } ret = put_user(g_csdio.m_host->ios.vdd, (unsigned short __user *)arg); if (ret) { pr_err("%s0: CSDIO_IOC_GET_VDD, put data" " to user space failed\n", CSDIO_DEV_NAME); goto exit; } } break; default: /* redundant, as cmd was checked against MAXNR */ pr_warning(CSDIO_DEV_NAME" Redundant IOCTL\n"); ret = -ENOTTY; } exit: return ret; } static int csdio_ctrl_fasync(int fd, struct file *filp, int mode) { pr_info(CSDIO_DEV_NAME " csdio_ctrl_fasync: fd=%d, filp=%p, mode=%d\n", fd, filp, mode); return fasync_helper(fd, filp, mode, &g_csdio.m_async_queue); } /* * Open and close */ static int csdio_ctrl_open(struct inode *inode, struct file *filp) { int ret = 0; struct csdio_t *csdio_ctrl_drv = NULL; /* device information */ pr_info("CSDIO ctrl open.\n"); csdio_ctrl_drv = container_of(inode->i_cdev, struct csdio_t, m_cdev); filp->private_data = csdio_ctrl_drv; /* for other methods */ return ret; } static int csdio_ctrl_release(struct inode *inode, struct file *filp) { pr_info("CSDIO ctrl release.\n"); /* remove this filp from the asynchronously notified filp's */ csdio_ctrl_fasync(-1, filp, 0); return 0; } static const struct file_operations 
csdio_ctrl_fops = { .owner = THIS_MODULE, .ioctl = csdio_ctrl_ioctl, .open = csdio_ctrl_open, .release = csdio_ctrl_release, .fasync = csdio_ctrl_fasync, }; static int csdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { struct csdio_func_t *port; int ret = 0; struct mmc_host *host = func->card->host; if (NULL != g_csdio.m_host && g_csdio.m_host != host) { pr_info("%s: Device is on unexpected host\n", CSDIO_DEV_NAME); ret = -ENODEV; goto exit; } /* enforce single instance policy */ if (g_csdio_func_table[func->num-1]) { pr_err("%s - only single SDIO device supported", sdio_func_id(func)); ret = -EEXIST; goto exit; } port = kzalloc(sizeof(struct csdio_func_t), GFP_KERNEL); if (!port) { pr_err("Can't allocate memory\n"); ret = -ENOMEM; goto exit; } /* initialize SDIO side */ port->m_func = func; sdio_set_drvdata(func, port); pr_info("%s - SDIO device found. Function %d\n", sdio_func_id(func), func->num); port->m_device = csdio_cdev_init(&port->m_cdev, &csdio_transport_fops, csdio_minor + port->m_func->num, TP_DEV_NAME, &port->m_func->dev); /* create appropriate char device */ if (!port->m_device) goto free; if (0 == g_csdio.m_num_of_func && NULL == host_name) g_csdio.m_host = host; g_csdio.m_num_of_func++; g_csdio_func_table[func->num-1] = port; port->m_enabled = TRUE; goto exit; free: kfree(port); exit: return ret; } static void csdio_remove(struct sdio_func *func) { struct csdio_func_t *port = sdio_get_drvdata(func); csdio_transport_cleanup(port); sdio_claim_host(func); sdio_release_irq(func); sdio_disable_func(func); sdio_release_host(func); kfree(port); g_csdio_func_table[func->num-1] = NULL; g_csdio.m_num_of_func--; if (0 == g_csdio.m_num_of_func && NULL == host_name) g_csdio.m_host = NULL; pr_info("%s%d: Device removed (%s). Function %d\n", CSDIO_DEV_NAME, func->num, sdio_func_id(func), func->num); } /* CONFIG_CSDIO_VENDOR_ID and CONFIG_CSDIO_DEVICE_ID are defined in Kconfig. 
* Use kernel configuration to change the values or overwrite them through * module parameters */ static struct sdio_device_id csdio_ids[] = { { SDIO_DEVICE(CONFIG_CSDIO_VENDOR_ID, CONFIG_CSDIO_DEVICE_ID) }, { /* end: all zeroes */}, }; MODULE_DEVICE_TABLE(sdio, csdio_ids); static struct sdio_driver csdio_driver = { .probe = csdio_probe, .remove = csdio_remove, .name = "csdio", .id_table = csdio_ids, }; static void __exit csdio_exit(void) { dev_t devno = MKDEV(csdio_major, csdio_minor); sdio_unregister_driver(&csdio_driver); sysfs_remove_group(&g_csdio.m_device->kobj, &dev_attr_grp); kfree(g_sdio_buffer); device_destroy(g_csdio.m_driver_class, devno); cdev_del(&g_csdio.m_cdev); class_destroy(g_csdio.m_driver_class); unregister_chrdev_region(devno, csdio_transport_nr_devs); pr_info("%s: Exit driver module\n", CSDIO_DEV_NAME); } static char *csdio_devnode(struct device *dev, mode_t *mode) { *mode = CSDIO_DEV_PERMISSIONS; return NULL; } static int __init csdio_init(void) { int ret = 0; dev_t devno = 0; pr_info("Init CSDIO driver module.\n"); /* Get a range of minor numbers to work with, asking for a dynamic */ /* major unless directed otherwise at load time. */ if (csdio_major) { devno = MKDEV(csdio_major, csdio_minor); ret = register_chrdev_region(devno, csdio_transport_nr_devs, CSDIO_DEV_NAME); } else { ret = alloc_chrdev_region(&devno, csdio_minor, csdio_transport_nr_devs, CSDIO_DEV_NAME); csdio_major = MAJOR(devno); } if (ret < 0) { pr_err("CSDIO: can't get major %d\n", csdio_major); goto exit; } pr_info("CSDIO char driver major number is %d\n", csdio_major); /* kernel module got parameters: overwrite vendor and device id's */ if ((csdio_vendor_id != 0) && (csdio_device_id != 0)) { csdio_ids[0].vendor = (u16)csdio_vendor_id; csdio_ids[0].device = (u16)csdio_device_id; } /* prepare create /dev/... 
instance */ g_csdio.m_driver_class = class_create(THIS_MODULE, CSDIO_DEV_NAME); if (IS_ERR(g_csdio.m_driver_class)) { ret = -ENOMEM; pr_err(CSDIO_DEV_NAME " class_create failed\n"); goto unregister_region; } g_csdio.m_driver_class->devnode = csdio_devnode; /* create CSDIO ctrl driver */ g_csdio.m_device = csdio_cdev_init(&g_csdio.m_cdev, &csdio_ctrl_fops, csdio_minor, CSDIO_DEV_NAME, NULL); if (!g_csdio.m_device) { pr_err("%s: Unable to create ctrl driver\n", CSDIO_DEV_NAME); goto destroy_class; } g_sdio_buffer = kmalloc(CSDIO_SDIO_BUFFER_SIZE, GFP_KERNEL); if (!g_sdio_buffer) { pr_err("Unable to allocate %d bytes\n", CSDIO_SDIO_BUFFER_SIZE); ret = -ENOMEM; goto destroy_cdev; } ret = sysfs_create_group(&g_csdio.m_device->kobj, &dev_attr_grp); if (ret) { pr_err("%s: Unable to create device attribute\n", CSDIO_DEV_NAME); goto free_sdio_buff; } g_csdio.m_num_of_func = 0; g_csdio.m_host = NULL; if (NULL != host_name) { struct device *dev = bus_find_device_by_name(&platform_bus_type, NULL, host_name); if (NULL != dev) { g_csdio.m_host = dev_get_drvdata(dev); } else { pr_err("%s: Host '%s' doesn't exist!\n", CSDIO_DEV_NAME, host_name); } } pr_info("%s: Match with VendorId=0x%X, DeviceId=0x%X, Host = %s\n", CSDIO_DEV_NAME, csdio_device_id, csdio_vendor_id, (NULL == host_name) ? 
"Any" : host_name); /* register sdio driver */ ret = sdio_register_driver(&csdio_driver); if (ret) { pr_err("%s: Unable to register as SDIO driver\n", CSDIO_DEV_NAME); goto remove_group; } goto exit; remove_group: sysfs_remove_group(&g_csdio.m_device->kobj, &dev_attr_grp); free_sdio_buff: kfree(g_sdio_buffer); destroy_cdev: cdev_del(&g_csdio.m_cdev); destroy_class: class_destroy(g_csdio.m_driver_class); unregister_region: unregister_chrdev_region(devno, csdio_transport_nr_devs); exit: return ret; } module_param(csdio_vendor_id, uint, S_IRUGO); module_param(csdio_device_id, uint, S_IRUGO); module_param(host_name, charp, S_IRUGO); module_init(csdio_init); module_exit(csdio_exit); MODULE_AUTHOR("Code Aurora Forum"); MODULE_DESCRIPTION("CSDIO device driver version " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL v2");
gpl-2.0
denggww123/EmbedSky-linux-2.6.30.4
drivers/misc/hdpuftrs/hdpu_cpustate.c
840
5561
/* * Sky CPU State Driver * * Copyright (C) 2002 Brian Waite * * This driver allows use of the CPU state bits * It exports the /dev/sky_cpustate and also * /proc/sky_cpustate pseudo-file for status information. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/smp_lock.h> #include <linux/miscdevice.h> #include <linux/proc_fs.h> #include <linux/hdpu_features.h> #include <linux/platform_device.h> #include <asm/uaccess.h> #include <linux/seq_file.h> #include <asm/io.h> #define SKY_CPUSTATE_VERSION "1.1" static int hdpu_cpustate_probe(struct platform_device *pdev); static int hdpu_cpustate_remove(struct platform_device *pdev); static unsigned char cpustate_get_state(void); static int cpustate_proc_open(struct inode *inode, struct file *file); static int cpustate_proc_read(struct seq_file *seq, void *offset); static struct cpustate_t cpustate; static const struct file_operations proc_cpustate = { .open = cpustate_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; static int cpustate_proc_open(struct inode *inode, struct file *file) { return single_open(file, cpustate_proc_read, NULL); } static int cpustate_proc_read(struct seq_file *seq, void *offset) { seq_printf(seq, "CPU State: %04x\n", cpustate_get_state()); return 0; } static int cpustate_get_ref(int excl) { int retval = -EBUSY; spin_lock(&cpustate.lock); if (cpustate.excl) goto out_busy; if (excl) { if (cpustate.open_count) goto out_busy; cpustate.excl = 1; } cpustate.open_count++; retval = 0; out_busy: spin_unlock(&cpustate.lock); return retval; } static int cpustate_free_ref(void) { spin_lock(&cpustate.lock); cpustate.excl = 0; cpustate.open_count--; 
spin_unlock(&cpustate.lock); return 0; } static unsigned char cpustate_get_state(void) { return cpustate.cached_val; } static void cpustate_set_state(unsigned char new_state) { unsigned int state = (new_state << 21); #ifdef DEBUG_CPUSTATE printk("CPUSTATE -> 0x%x\n", new_state); #endif spin_lock(&cpustate.lock); cpustate.cached_val = new_state; writel((0xff << 21), cpustate.clr_addr); writel(state, cpustate.set_addr); spin_unlock(&cpustate.lock); } /* * Now all the various file operations that we export. */ static ssize_t cpustate_read(struct file *file, char *buf, size_t count, loff_t * ppos) { unsigned char data; if (count < 0) return -EFAULT; if (count == 0) return 0; data = cpustate_get_state(); if (copy_to_user(buf, &data, sizeof(unsigned char))) return -EFAULT; return sizeof(unsigned char); } static ssize_t cpustate_write(struct file *file, const char *buf, size_t count, loff_t * ppos) { unsigned char data; if (count < 0) return -EFAULT; if (count == 0) return 0; if (copy_from_user((unsigned char *)&data, buf, sizeof(unsigned char))) return -EFAULT; cpustate_set_state(data); return sizeof(unsigned char); } static int cpustate_open(struct inode *inode, struct file *file) { int ret; lock_kernel(); ret = cpustate_get_ref((file->f_flags & O_EXCL)); unlock_kernel(); return ret; } static int cpustate_release(struct inode *inode, struct file *file) { return cpustate_free_ref(); } static struct platform_driver hdpu_cpustate_driver = { .probe = hdpu_cpustate_probe, .remove = hdpu_cpustate_remove, .driver = { .name = HDPU_CPUSTATE_NAME, .owner = THIS_MODULE, }, }; /* * The various file operations we support. 
*/ static const struct file_operations cpustate_fops = { .owner = THIS_MODULE, .open = cpustate_open, .release = cpustate_release, .read = cpustate_read, .write = cpustate_write, .llseek = no_llseek, }; static struct miscdevice cpustate_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "sky_cpustate", .fops = &cpustate_fops, }; static int hdpu_cpustate_probe(struct platform_device *pdev) { struct resource *res; struct proc_dir_entry *proc_de; int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { printk(KERN_ERR "sky_cpustate: " "Invalid memory resource.\n"); return -EINVAL; } cpustate.set_addr = (unsigned long *)res->start; cpustate.clr_addr = (unsigned long *)res->end - 1; ret = misc_register(&cpustate_dev); if (ret) { printk(KERN_WARNING "sky_cpustate: " "Unable to register misc device.\n"); cpustate.set_addr = NULL; cpustate.clr_addr = NULL; return ret; } proc_de = proc_create("sky_cpustate", 0666, NULL, &proc_cpustate); if (!proc_de) { printk(KERN_WARNING "sky_cpustate: " "Unable to create proc entry\n"); } printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n"); return 0; } static int hdpu_cpustate_remove(struct platform_device *pdev) { cpustate.set_addr = NULL; cpustate.clr_addr = NULL; remove_proc_entry("sky_cpustate", NULL); misc_deregister(&cpustate_dev); return 0; } static int __init cpustate_init(void) { return platform_driver_register(&hdpu_cpustate_driver); } static void __exit cpustate_exit(void) { platform_driver_unregister(&hdpu_cpustate_driver); } module_init(cpustate_init); module_exit(cpustate_exit); MODULE_AUTHOR("Brian Waite"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" HDPU_CPUSTATE_NAME);
gpl-2.0
aqua-project/Linux-Minimal-x86-Reimplementation
drivers/rtc/rtc-ds1390.c
1352
4558
/* * rtc-ds1390.c -- driver for the Dallas/Maxim DS1390/93/94 SPI RTC * * Copyright (C) 2008 Mercury IMC Ltd * Written by Mark Jackson <mpfj@mimc.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * NOTE: Currently this driver only supports the bare minimum for read * and write the RTC. The extra features provided by the chip family * (alarms, trickle charger, different control registers) are unavailable. */ #include <linux/init.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/rtc.h> #include <linux/spi/spi.h> #include <linux/bcd.h> #include <linux/slab.h> #define DS1390_REG_100THS 0x00 #define DS1390_REG_SECONDS 0x01 #define DS1390_REG_MINUTES 0x02 #define DS1390_REG_HOURS 0x03 #define DS1390_REG_DAY 0x04 #define DS1390_REG_DATE 0x05 #define DS1390_REG_MONTH_CENT 0x06 #define DS1390_REG_YEAR 0x07 #define DS1390_REG_ALARM_100THS 0x08 #define DS1390_REG_ALARM_SECONDS 0x09 #define DS1390_REG_ALARM_MINUTES 0x0A #define DS1390_REG_ALARM_HOURS 0x0B #define DS1390_REG_ALARM_DAY_DATE 0x0C #define DS1390_REG_CONTROL 0x0D #define DS1390_REG_STATUS 0x0E #define DS1390_REG_TRICKLE 0x0F struct ds1390 { struct rtc_device *rtc; u8 txrx_buf[9]; /* cmd + 8 registers */ }; static int ds1390_get_reg(struct device *dev, unsigned char address, unsigned char *data) { struct spi_device *spi = to_spi_device(dev); struct ds1390 *chip = dev_get_drvdata(dev); int status; if (!data) return -EINVAL; /* Clear MSB to indicate read */ chip->txrx_buf[0] = address & 0x7f; /* do the i/o */ status = spi_write_then_read(spi, chip->txrx_buf, 1, chip->txrx_buf, 1); if (status != 0) return status; *data = chip->txrx_buf[1]; return 0; } static int ds1390_read_time(struct device *dev, struct rtc_time *dt) { struct spi_device *spi = to_spi_device(dev); struct ds1390 *chip = dev_get_drvdata(dev); int status; /* build the message */ 
chip->txrx_buf[0] = DS1390_REG_SECONDS; /* do the i/o */ status = spi_write_then_read(spi, chip->txrx_buf, 1, chip->txrx_buf, 8); if (status != 0) return status; /* The chip sends data in this order: * Seconds, Minutes, Hours, Day, Date, Month / Century, Year */ dt->tm_sec = bcd2bin(chip->txrx_buf[0]); dt->tm_min = bcd2bin(chip->txrx_buf[1]); dt->tm_hour = bcd2bin(chip->txrx_buf[2]); dt->tm_wday = bcd2bin(chip->txrx_buf[3]); dt->tm_mday = bcd2bin(chip->txrx_buf[4]); /* mask off century bit */ dt->tm_mon = bcd2bin(chip->txrx_buf[5] & 0x7f) - 1; /* adjust for century bit */ dt->tm_year = bcd2bin(chip->txrx_buf[6]) + ((chip->txrx_buf[5] & 0x80) ? 100 : 0); return rtc_valid_tm(dt); } static int ds1390_set_time(struct device *dev, struct rtc_time *dt) { struct spi_device *spi = to_spi_device(dev); struct ds1390 *chip = dev_get_drvdata(dev); /* build the message */ chip->txrx_buf[0] = DS1390_REG_SECONDS | 0x80; chip->txrx_buf[1] = bin2bcd(dt->tm_sec); chip->txrx_buf[2] = bin2bcd(dt->tm_min); chip->txrx_buf[3] = bin2bcd(dt->tm_hour); chip->txrx_buf[4] = bin2bcd(dt->tm_wday); chip->txrx_buf[5] = bin2bcd(dt->tm_mday); chip->txrx_buf[6] = bin2bcd(dt->tm_mon + 1) | ((dt->tm_year > 99) ? 
0x80 : 0x00); chip->txrx_buf[7] = bin2bcd(dt->tm_year % 100); /* do the i/o */ return spi_write_then_read(spi, chip->txrx_buf, 8, NULL, 0); } static const struct rtc_class_ops ds1390_rtc_ops = { .read_time = ds1390_read_time, .set_time = ds1390_set_time, }; static int ds1390_probe(struct spi_device *spi) { unsigned char tmp; struct ds1390 *chip; int res; spi->mode = SPI_MODE_3; spi->bits_per_word = 8; spi_setup(spi); chip = devm_kzalloc(&spi->dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; spi_set_drvdata(spi, chip); res = ds1390_get_reg(&spi->dev, DS1390_REG_SECONDS, &tmp); if (res != 0) { dev_err(&spi->dev, "unable to read device\n"); return res; } chip->rtc = devm_rtc_device_register(&spi->dev, "ds1390", &ds1390_rtc_ops, THIS_MODULE); if (IS_ERR(chip->rtc)) { dev_err(&spi->dev, "unable to register device\n"); res = PTR_ERR(chip->rtc); } return res; } static struct spi_driver ds1390_driver = { .driver = { .name = "rtc-ds1390", .owner = THIS_MODULE, }, .probe = ds1390_probe, }; module_spi_driver(ds1390_driver); MODULE_DESCRIPTION("Dallas/Maxim DS1390/93/94 SPI RTC driver"); MODULE_AUTHOR("Mark Jackson <mpfj@mimc.co.uk>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("spi:rtc-ds1390");
gpl-2.0
TEAM-Gummy/kernel_oppo_msm8974
arch/arm/mach-msm/clock-voter.c
2120
3948
/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/err.h> #include <linux/mutex.h> #include <linux/clk.h> #include <mach/clk-provider.h> #include "clock-voter.h" static DEFINE_MUTEX(voter_clk_lock); /* Aggregate the rate of clocks that are currently on. */ static unsigned long voter_clk_aggregate_rate(const struct clk *parent) { struct clk *clk; unsigned long rate = 0; list_for_each_entry(clk, &parent->children, siblings) { struct clk_voter *v = to_clk_voter(clk); if (v->enabled) rate = max(clk->rate, rate); } return rate; } static int voter_clk_set_rate(struct clk *clk, unsigned long rate) { int ret = 0; struct clk *clkp; struct clk_voter *clkh, *v = to_clk_voter(clk); unsigned long cur_rate, new_rate, other_rate = 0; if (v->is_branch) return 0; mutex_lock(&voter_clk_lock); if (v->enabled) { struct clk *parent = clk->parent; /* * Get the aggregate rate without this clock's vote and update * if the new rate is different than the current rate */ list_for_each_entry(clkp, &parent->children, siblings) { clkh = to_clk_voter(clkp); if (clkh->enabled && clkh != v) other_rate = max(clkp->rate, other_rate); } cur_rate = max(other_rate, clk->rate); new_rate = max(other_rate, rate); if (new_rate != cur_rate) { ret = clk_set_rate(parent, new_rate); if (ret) goto unlock; } } clk->rate = rate; unlock: mutex_unlock(&voter_clk_lock); return ret; } static int voter_clk_prepare(struct clk *clk) { int ret = 0; unsigned long cur_rate; struct clk *parent; struct clk_voter *v = to_clk_voter(clk); 
mutex_lock(&voter_clk_lock); parent = clk->parent; if (v->is_branch) { v->enabled = true; goto out; } /* * Increase the rate if this clock is voting for a higher rate * than the current rate. */ cur_rate = voter_clk_aggregate_rate(parent); if (clk->rate > cur_rate) { ret = clk_set_rate(parent, clk->rate); if (ret) goto out; } v->enabled = true; out: mutex_unlock(&voter_clk_lock); return ret; } static void voter_clk_unprepare(struct clk *clk) { unsigned long cur_rate, new_rate; struct clk *parent; struct clk_voter *v = to_clk_voter(clk); mutex_lock(&voter_clk_lock); parent = clk->parent; /* * Decrease the rate if this clock was the only one voting for * the highest rate. */ v->enabled = false; if (v->is_branch) goto out; new_rate = voter_clk_aggregate_rate(parent); cur_rate = max(new_rate, clk->rate); if (new_rate < cur_rate) clk_set_rate(parent, new_rate); out: mutex_unlock(&voter_clk_lock); } static int voter_clk_is_enabled(struct clk *clk) { struct clk_voter *v = to_clk_voter(clk); return v->enabled; } static long voter_clk_round_rate(struct clk *clk, unsigned long rate) { return clk_round_rate(clk->parent, rate); } static bool voter_clk_is_local(struct clk *clk) { return true; } static enum handoff voter_clk_handoff(struct clk *clk) { if (!clk->rate) return HANDOFF_DISABLED_CLK; /* * Send the default rate to the parent if necessary and update the * software state of the voter clock. */ if (voter_clk_prepare(clk) < 0) return HANDOFF_DISABLED_CLK; return HANDOFF_ENABLED_CLK; } struct clk_ops clk_ops_voter = { .prepare = voter_clk_prepare, .unprepare = voter_clk_unprepare, .set_rate = voter_clk_set_rate, .is_enabled = voter_clk_is_enabled, .round_rate = voter_clk_round_rate, .is_local = voter_clk_is_local, .handoff = voter_clk_handoff, };
gpl-2.0
danielhk/android_kernel_samsung_smdk4210
drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
2376
54879
//===================================================== // CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved. // // // This file is part of Express Card USB Driver // // $Id: //==================================================== #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/usb.h> #include "ft1000_usb.h" #include <linux/types.h> #define HARLEY_READ_REGISTER 0x0 #define HARLEY_WRITE_REGISTER 0x01 #define HARLEY_READ_DPRAM_32 0x02 #define HARLEY_READ_DPRAM_LOW 0x03 #define HARLEY_READ_DPRAM_HIGH 0x04 #define HARLEY_WRITE_DPRAM_32 0x05 #define HARLEY_WRITE_DPRAM_LOW 0x06 #define HARLEY_WRITE_DPRAM_HIGH 0x07 #define HARLEY_READ_OPERATION 0xc1 #define HARLEY_WRITE_OPERATION 0x41 //#define JDEBUG static int ft1000_reset(struct net_device *ft1000dev); static int ft1000_submit_rx_urb(struct ft1000_info *info); static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev); static int ft1000_open (struct net_device *dev); static struct net_device_stats *ft1000_netdev_stats(struct net_device *dev); static int ft1000_chkcard (struct ft1000_device *dev); static u8 tempbuffer[1600]; #define MAX_RCV_LOOP 100 //--------------------------------------------------------------------------- // Function: ft1000_control // // Parameters: ft1000_device - device structure // pipe - usb control message pipe // request - control request // requesttype - control message request type // value - value to be written or 0 // index - register index // data - data buffer to hold the read/write values // size - data size // timeout - control message time out value // // Returns: STATUS_SUCCESS - success // STATUS_FAILURE - failure // // Description: This function sends a control message via USB interface synchronously // // Notes: // //--------------------------------------------------------------------------- static int ft1000_control(struct ft1000_device *ft1000dev, unsigned 
int pipe, u8 request, u8 requesttype, u16 value, u16 index, void *data, u16 size, int timeout) { u16 ret; if ((ft1000dev == NULL) || (ft1000dev->dev == NULL)) { DEBUG("ft1000dev or ft1000dev->dev == NULL, failure\n"); return -ENODEV; } ret = usb_control_msg(ft1000dev->dev, pipe, request, requesttype, value, index, data, size, LARGE_TIMEOUT); if (ret > 0) ret = 0; return ret; } //--------------------------------------------------------------------------- // Function: ft1000_read_register // // Parameters: ft1000_device - device structure // Data - data buffer to hold the value read // nRegIndex - register index // // Returns: STATUS_SUCCESS - success // STATUS_FAILURE - failure // // Description: This function returns the value in a register // // Notes: // //--------------------------------------------------------------------------- int ft1000_read_register(struct ft1000_device *ft1000dev, u16* Data, u16 nRegIndx) { int ret = STATUS_SUCCESS; ret = ft1000_control(ft1000dev, usb_rcvctrlpipe(ft1000dev->dev, 0), HARLEY_READ_REGISTER, HARLEY_READ_OPERATION, 0, nRegIndx, Data, 2, LARGE_TIMEOUT); return ret; } //--------------------------------------------------------------------------- // Function: ft1000_write_register // // Parameters: ft1000_device - device structure // value - value to write into a register // nRegIndex - register index // // Returns: STATUS_SUCCESS - success // STATUS_FAILURE - failure // // Description: This function writes the value in a register // // Notes: // //--------------------------------------------------------------------------- int ft1000_write_register(struct ft1000_device *ft1000dev, u16 value, u16 nRegIndx) { int ret = STATUS_SUCCESS; ret = ft1000_control(ft1000dev, usb_sndctrlpipe(ft1000dev->dev, 0), HARLEY_WRITE_REGISTER, HARLEY_WRITE_OPERATION, value, nRegIndx, NULL, 0, LARGE_TIMEOUT); return ret; } //--------------------------------------------------------------------------- // Function: ft1000_read_dpram32 // // Parameters: 
ft1000_device - device structure // indx - starting address to read // buffer - data buffer to hold the data read // cnt - number of byte read from DPRAM // // Returns: STATUS_SUCCESS - success // STATUS_FAILURE - failure // // Description: This function read a number of bytes from DPRAM // // Notes: // //--------------------------------------------------------------------------- int ft1000_read_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer, u16 cnt) { int ret = STATUS_SUCCESS; ret = ft1000_control(ft1000dev, usb_rcvctrlpipe(ft1000dev->dev, 0), HARLEY_READ_DPRAM_32, HARLEY_READ_OPERATION, 0, indx, buffer, cnt, LARGE_TIMEOUT); return ret; } //--------------------------------------------------------------------------- // Function: ft1000_write_dpram32 // // Parameters: ft1000_device - device structure // indx - starting address to write the data // buffer - data buffer to write into DPRAM // cnt - number of bytes to write // // Returns: STATUS_SUCCESS - success // STATUS_FAILURE - failure // // Description: This function writes into DPRAM a number of bytes // // Notes: // //--------------------------------------------------------------------------- int ft1000_write_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer, u16 cnt) { int ret = STATUS_SUCCESS; if (cnt % 4) cnt += cnt - (cnt % 4); ret = ft1000_control(ft1000dev, usb_sndctrlpipe(ft1000dev->dev, 0), HARLEY_WRITE_DPRAM_32, HARLEY_WRITE_OPERATION, 0, indx, buffer, cnt, LARGE_TIMEOUT); return ret; } //--------------------------------------------------------------------------- // Function: ft1000_read_dpram16 // // Parameters: ft1000_device - device structure // indx - starting address to read // buffer - data buffer to hold the data read // hightlow - high or low 16 bit word // // Returns: STATUS_SUCCESS - success // STATUS_FAILURE - failure // // Description: This function read 16 bits from DPRAM // // Notes: // 
//--------------------------------------------------------------------------- int ft1000_read_dpram16(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer, u8 highlow) { int ret = STATUS_SUCCESS; u8 request; if (highlow == 0) request = HARLEY_READ_DPRAM_LOW; else request = HARLEY_READ_DPRAM_HIGH; ret = ft1000_control(ft1000dev, usb_rcvctrlpipe(ft1000dev->dev, 0), request, HARLEY_READ_OPERATION, 0, indx, buffer, 2, LARGE_TIMEOUT); return ret; } //--------------------------------------------------------------------------- // Function: ft1000_write_dpram16 // // Parameters: ft1000_device - device structure // indx - starting address to write the data // value - 16bits value to write // hightlow - high or low 16 bit word // // Returns: STATUS_SUCCESS - success // STATUS_FAILURE - failure // // Description: This function writes into DPRAM a number of bytes // // Notes: // //--------------------------------------------------------------------------- int ft1000_write_dpram16(struct ft1000_device *ft1000dev, u16 indx, u16 value, u8 highlow) { int ret = STATUS_SUCCESS; u8 request; if (highlow == 0) request = HARLEY_WRITE_DPRAM_LOW; else request = HARLEY_WRITE_DPRAM_HIGH; ret = ft1000_control(ft1000dev, usb_sndctrlpipe(ft1000dev->dev, 0), request, HARLEY_WRITE_OPERATION, value, indx, NULL, 0, LARGE_TIMEOUT); return ret; } //--------------------------------------------------------------------------- // Function: fix_ft1000_read_dpram32 // // Parameters: ft1000_device - device structure // indx - starting address to read // buffer - data buffer to hold the data read // // // Returns: STATUS_SUCCESS - success // STATUS_FAILURE - failure // // Description: This function read DPRAM 4 words at a time // // Notes: // //--------------------------------------------------------------------------- int fix_ft1000_read_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer) { u8 buf[16]; u16 pos; int ret = STATUS_SUCCESS; pos = (indx / 4) * 4; ret = ft1000_read_dpram32(ft1000dev, 
pos, buf, 16); if (ret == STATUS_SUCCESS) { pos = (indx % 4) * 4; *buffer++ = buf[pos++]; *buffer++ = buf[pos++]; *buffer++ = buf[pos++]; *buffer++ = buf[pos++]; } else { DEBUG("fix_ft1000_read_dpram32: DPRAM32 Read failed\n"); *buffer++ = 0; *buffer++ = 0; *buffer++ = 0; *buffer++ = 0; } return ret; } //--------------------------------------------------------------------------- // Function: fix_ft1000_write_dpram32 // // Parameters: ft1000_device - device structure // indx - starting address to write // buffer - data buffer to write // // // Returns: STATUS_SUCCESS - success // STATUS_FAILURE - failure // // Description: This function write to DPRAM 4 words at a time // // Notes: // //--------------------------------------------------------------------------- int fix_ft1000_write_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer) { u16 pos1; u16 pos2; u16 i; u8 buf[32]; u8 resultbuffer[32]; u8 *pdata; int ret = STATUS_SUCCESS; pos1 = (indx / 4) * 4; pdata = buffer; ret = ft1000_read_dpram32(ft1000dev, pos1, buf, 16); if (ret == STATUS_SUCCESS) { pos2 = (indx % 4)*4; buf[pos2++] = *buffer++; buf[pos2++] = *buffer++; buf[pos2++] = *buffer++; buf[pos2++] = *buffer++; ret = ft1000_write_dpram32(ft1000dev, pos1, buf, 16); } else { DEBUG("fix_ft1000_write_dpram32: DPRAM32 Read failed\n"); return ret; } ret = ft1000_read_dpram32(ft1000dev, pos1, (u8 *)&resultbuffer[0], 16); if (ret == STATUS_SUCCESS) { buffer = pdata; for (i = 0; i < 16; i++) { if (buf[i] != resultbuffer[i]) ret = STATUS_FAILURE; } } if (ret == STATUS_FAILURE) { ret = ft1000_write_dpram32(ft1000dev, pos1, (u8 *)&tempbuffer[0], 16); ret = ft1000_read_dpram32(ft1000dev, pos1, (u8 *)&resultbuffer[0], 16); if (ret == STATUS_SUCCESS) { buffer = pdata; for (i = 0; i < 16; i++) { if (tempbuffer[i] != resultbuffer[i]) { ret = STATUS_FAILURE; DEBUG("%s Failed to write\n", __func__); } } } } return ret; } //------------------------------------------------------------------------ // // Function: 
card_reset_dsp // // Synopsis: This function is called to reset or activate the DSP // // Arguments: value - reset or activate // // Returns: None //----------------------------------------------------------------------- static void card_reset_dsp(struct ft1000_device *ft1000dev, bool value) { u16 status = STATUS_SUCCESS; u16 tempword; status = ft1000_write_register(ft1000dev, HOST_INTF_BE, FT1000_REG_SUP_CTRL); status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_SUP_CTRL); if (value) { DEBUG("Reset DSP\n"); status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_RESET); tempword |= DSP_RESET_BIT; status = ft1000_write_register(ft1000dev, tempword, FT1000_REG_RESET); } else { DEBUG("Activate DSP\n"); status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_RESET); tempword |= DSP_ENCRYPTED; tempword &= ~DSP_UNENCRYPTED; status = ft1000_write_register(ft1000dev, tempword, FT1000_REG_RESET); status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_RESET); tempword &= ~EFUSE_MEM_DISABLE; tempword &= ~DSP_RESET_BIT; status = ft1000_write_register(ft1000dev, tempword, FT1000_REG_RESET); status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_RESET); } } //--------------------------------------------------------------------------- // Function: card_send_command // // Parameters: ft1000_device - device structure // ptempbuffer - command buffer // size - command buffer size // // Returns: STATUS_SUCCESS - success // STATUS_FAILURE - failure // // Description: This function sends a command to ASIC // // Notes: // //--------------------------------------------------------------------------- void card_send_command(struct ft1000_device *ft1000dev, void *ptempbuffer, int size) { unsigned short temp; unsigned char *commandbuf; DEBUG("card_send_command: enter card_send_command... 
size=%d\n", size); commandbuf = (unsigned char *)kmalloc(size + 2, GFP_KERNEL); memcpy((void *)commandbuf + 2, (void *)ptempbuffer, size); ft1000_read_register(ft1000dev, &temp, FT1000_REG_DOORBELL); if (temp & 0x0100) msleep(10); /* check for odd word */ size = size + 2; /* Must force to be 32 bit aligned */ if (size % 4) size += 4 - (size % 4); ft1000_write_dpram32(ft1000dev, 0, commandbuf, size); msleep(1); ft1000_write_register(ft1000dev, FT1000_DB_DPRAM_TX, FT1000_REG_DOORBELL); msleep(1); ft1000_read_register(ft1000dev, &temp, FT1000_REG_DOORBELL); if ((temp & 0x0100) == 0) { //DEBUG("card_send_command: Message sent\n"); } } //-------------------------------------------------------------------------- // // Function: dsp_reload // // Synopsis: This function is called to load or reload the DSP // // Arguments: ft1000dev - device structure // // Returns: None //----------------------------------------------------------------------- int dsp_reload(struct ft1000_device *ft1000dev) { u16 status; u16 tempword; u32 templong; struct ft1000_info *pft1000info; pft1000info = netdev_priv(ft1000dev->net); pft1000info->CardReady = 0; /* Program Interrupt Mask register */ status = ft1000_write_register(ft1000dev, 0xffff, FT1000_REG_SUP_IMASK); status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_RESET); tempword |= ASIC_RESET_BIT; status = ft1000_write_register(ft1000dev, tempword, FT1000_REG_RESET); msleep(1000); status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_RESET); DEBUG("Reset Register = 0x%x\n", tempword); /* Toggle DSP reset */ card_reset_dsp(ft1000dev, 1); msleep(1000); card_reset_dsp(ft1000dev, 0); msleep(1000); status = ft1000_write_register(ft1000dev, HOST_INTF_BE, FT1000_REG_SUP_CTRL); /* Let's check for FEFE */ status = ft1000_read_dpram32(ft1000dev, FT1000_MAG_DPRAM_FEFE_INDX, (u8 *) &templong, 4); DEBUG("templong (fefe) = 0x%8x\n", templong); /* call codeloader */ status = scram_dnldr(ft1000dev, pFileStart, FileLength); if (status != 
STATUS_SUCCESS) return -EIO; msleep(1000); DEBUG("dsp_reload returned\n"); return 0; } //--------------------------------------------------------------------------- // // Function: ft1000_reset_asic // Description: This function will call the Card Service function to reset the // ASIC. // Input: // dev - device structure // Output: // none // //--------------------------------------------------------------------------- static void ft1000_reset_asic(struct net_device *dev) { struct ft1000_info *info = netdev_priv(dev); struct ft1000_device *ft1000dev = info->pFt1000Dev; u16 tempword; DEBUG("ft1000_hw:ft1000_reset_asic called\n"); /* Let's use the register provided by the Magnemite ASIC to reset the * ASIC and DSP. */ ft1000_write_register(ft1000dev, (DSP_RESET_BIT | ASIC_RESET_BIT), FT1000_REG_RESET); mdelay(1); /* set watermark to -1 in order to not generate an interrrupt */ ft1000_write_register(ft1000dev, 0xffff, FT1000_REG_MAG_WATERMARK); /* clear interrupts */ ft1000_read_register(ft1000dev, &tempword, FT1000_REG_SUP_ISR); DEBUG("ft1000_hw: interrupt status register = 0x%x\n", tempword); ft1000_write_register(ft1000dev, tempword, FT1000_REG_SUP_ISR); ft1000_read_register(ft1000dev, &tempword, FT1000_REG_SUP_ISR); DEBUG("ft1000_hw: interrupt status register = 0x%x\n", tempword); } //--------------------------------------------------------------------------- // // Function: ft1000_reset_card // Description: This function will reset the card // Input: // dev - device structure // Output: // status - FALSE (card reset fail) // TRUE (card reset successful) // //--------------------------------------------------------------------------- static int ft1000_reset_card(struct net_device *dev) { struct ft1000_info *info = netdev_priv(dev); struct ft1000_device *ft1000dev = info->pFt1000Dev; u16 tempword; struct prov_record *ptr; DEBUG("ft1000_hw:ft1000_reset_card called.....\n"); info->fCondResetPend = 1; info->CardReady = 0; info->fProvComplete = 0; /* Make sure we free 
any memory reserve for provisioning */ while (list_empty(&info->prov_list) == 0) { DEBUG("ft1000_reset_card:deleting provisioning record\n"); ptr = list_entry(info->prov_list.next, struct prov_record, list); list_del(&ptr->list); kfree(ptr->pprov_data); kfree(ptr); } DEBUG("ft1000_hw:ft1000_reset_card: reset asic\n"); ft1000_reset_asic(dev); DEBUG("ft1000_hw:ft1000_reset_card: call dsp_reload\n"); dsp_reload(ft1000dev); DEBUG("dsp reload successful\n"); mdelay(10); /* Initialize DSP heartbeat area */ ft1000_write_dpram16(ft1000dev, FT1000_MAG_HI_HO, ho_mag, FT1000_MAG_HI_HO_INDX); ft1000_read_dpram16(ft1000dev, FT1000_MAG_HI_HO, (u8 *) &tempword, FT1000_MAG_HI_HO_INDX); DEBUG("ft1000_hw:ft1000_reset_card:hi_ho value = 0x%x\n", tempword); info->CardReady = 1; info->fCondResetPend = 0; return TRUE; } #ifdef HAVE_NET_DEVICE_OPS static const struct net_device_ops ftnet_ops = { .ndo_open = &ft1000_open, .ndo_stop = &ft1000_close, .ndo_start_xmit = &ft1000_start_xmit, .ndo_get_stats = &ft1000_netdev_stats, }; #endif //--------------------------------------------------------------------------- // Function: init_ft1000_netdev // // Parameters: ft1000dev - device structure // // // Returns: STATUS_SUCCESS - success // STATUS_FAILURE - failure // // Description: This function initialize the network device // // Notes: // //--------------------------------------------------------------------------- int init_ft1000_netdev(struct ft1000_device *ft1000dev) { struct net_device *netdev; struct ft1000_info *pInfo = NULL; struct dpram_blk *pdpram_blk; int i, ret_val; struct list_head *cur, *tmp; char card_nr[2]; unsigned long gCardIndex = 0; DEBUG("Enter init_ft1000_netdev...\n"); netdev = alloc_etherdev(sizeof(struct ft1000_info)); if (!netdev) { DEBUG("init_ft1000_netdev: can not allocate network device\n"); return -ENOMEM; } pInfo = netdev_priv(netdev); memset(pInfo, 0, sizeof(struct ft1000_info)); dev_alloc_name(netdev, netdev->name); DEBUG("init_ft1000_netdev: network device 
name is %s\n", netdev->name); if (strncmp(netdev->name, "eth", 3) == 0) { card_nr[0] = netdev->name[3]; card_nr[1] = '\0'; ret_val = strict_strtoul(card_nr, 10, &gCardIndex); if (ret_val) { printk(KERN_ERR "Can't parse netdev\n"); goto err_net; } pInfo->CardNumber = gCardIndex; DEBUG("card number = %d\n", pInfo->CardNumber); } else { printk(KERN_ERR "ft1000: Invalid device name\n"); ret_val = -ENXIO; goto err_net; } memset(&pInfo->stats, 0, sizeof(struct net_device_stats)); spin_lock_init(&pInfo->dpram_lock); pInfo->pFt1000Dev = ft1000dev; pInfo->DrvErrNum = 0; pInfo->registered = 1; pInfo->ft1000_reset = ft1000_reset; pInfo->mediastate = 0; pInfo->fifo_cnt = 0; pInfo->DeviceCreated = FALSE; pInfo->CardReady = 0; pInfo->DSP_TIME[0] = 0; pInfo->DSP_TIME[1] = 0; pInfo->DSP_TIME[2] = 0; pInfo->DSP_TIME[3] = 0; pInfo->fAppMsgPend = 0; pInfo->fCondResetPend = 0; pInfo->usbboot = 0; pInfo->dspalive = 0; memset(&pInfo->tempbuf[0], 0, sizeof(pInfo->tempbuf)); INIT_LIST_HEAD(&pInfo->prov_list); INIT_LIST_HEAD(&pInfo->nodes.list); #ifdef HAVE_NET_DEVICE_OPS netdev->netdev_ops = &ftnet_ops; #else netdev->hard_start_xmit = &ft1000_start_xmit; netdev->get_stats = &ft1000_netdev_stats; netdev->open = &ft1000_open; netdev->stop = &ft1000_close; #endif ft1000dev->net = netdev; DEBUG("Initialize free_buff_lock and freercvpool\n"); spin_lock_init(&free_buff_lock); /* initialize a list of buffers to be use for queuing * up receive command data */ INIT_LIST_HEAD(&freercvpool); /* create list of free buffers */ for (i = 0; i < NUM_OF_FREE_BUFFERS; i++) { /* Get memory for DPRAM_DATA link list */ pdpram_blk = kmalloc(sizeof(struct dpram_blk), GFP_KERNEL); if (pdpram_blk == NULL) { ret_val = -ENOMEM; goto err_free; } /* Get a block of memory to store command data */ pdpram_blk->pbuffer = kmalloc(MAX_CMD_SQSIZE, GFP_KERNEL); if (pdpram_blk->pbuffer == NULL) { ret_val = -ENOMEM; kfree(pdpram_blk); goto err_free; } /* link provisioning data */ list_add_tail(&pdpram_blk->list, &freercvpool); 
} numofmsgbuf = NUM_OF_FREE_BUFFERS; return 0; err_free: list_for_each_safe(cur, tmp, &freercvpool) { pdpram_blk = list_entry(cur, struct dpram_blk, list); list_del(&pdpram_blk->list); kfree(pdpram_blk->pbuffer); kfree(pdpram_blk); } err_net: free_netdev(netdev); return ret_val; } //--------------------------------------------------------------------------- // Function: reg_ft1000_netdev // // Parameters: ft1000dev - device structure // // // Returns: STATUS_SUCCESS - success // STATUS_FAILURE - failure // // Description: This function register the network driver // // Notes: // //--------------------------------------------------------------------------- int reg_ft1000_netdev(struct ft1000_device *ft1000dev, struct usb_interface *intf) { struct net_device *netdev; struct ft1000_info *pInfo; int rc; netdev = ft1000dev->net; pInfo = netdev_priv(ft1000dev->net); DEBUG("Enter reg_ft1000_netdev...\n"); ft1000_read_register(ft1000dev, &pInfo->AsicID, FT1000_REG_ASIC_ID); usb_set_intfdata(intf, pInfo); SET_NETDEV_DEV(netdev, &intf->dev); rc = register_netdev(netdev); if (rc) { DEBUG("reg_ft1000_netdev: could not register network device\n"); free_netdev(netdev); return rc; } ft1000_create_dev(ft1000dev); DEBUG("reg_ft1000_netdev returned\n"); pInfo->CardReady = 1; return 0; } static int ft1000_reset(struct net_device *dev) { ft1000_reset_card(dev); return 0; } //--------------------------------------------------------------------------- // Function: ft1000_usb_transmit_complete // // Parameters: urb - transmitted usb urb // // // Returns: none // // Description: This is the callback function when a urb is transmitted // // Notes: // //--------------------------------------------------------------------------- static void ft1000_usb_transmit_complete(struct urb *urb) { struct ft1000_device *ft1000dev = urb->context; if (urb->status) pr_err("%s: TX status %d\n", ft1000dev->net->name, urb->status); netif_wake_queue(ft1000dev->net); } 
//--------------------------------------------------------------------------- // // Function: ft1000_copy_down_pkt // Description: This function will take an ethernet packet and convert it to // a Flarion packet prior to sending it to the ASIC Downlink // FIFO. // Input: // dev - device structure // packet - address of ethernet packet // len - length of IP packet // Output: // status - FAILURE // SUCCESS // //--------------------------------------------------------------------------- static int ft1000_copy_down_pkt(struct net_device *netdev, u8 * packet, u16 len) { struct ft1000_info *pInfo = netdev_priv(netdev); struct ft1000_device *pFt1000Dev = pInfo->pFt1000Dev; int count, ret; u8 *t; struct pseudo_hdr hdr; if (!pInfo->CardReady) { DEBUG("ft1000_copy_down_pkt::Card Not Ready\n"); return -ENODEV; } count = sizeof(struct pseudo_hdr) + len; if (count > MAX_BUF_SIZE) { DEBUG("Error:ft1000_copy_down_pkt:Message Size Overflow!\n"); DEBUG("size = %d\n", count); return -EINVAL; } if (count % 4) count = count + (4 - (count % 4)); memset(&hdr, 0, sizeof(struct pseudo_hdr)); hdr.length = ntohs(count); hdr.source = 0x10; hdr.destination = 0x20; hdr.portdest = 0x20; hdr.portsrc = 0x10; hdr.sh_str_id = 0x91; hdr.control = 0x00; hdr.checksum = hdr.length ^ hdr.source ^ hdr.destination ^ hdr.portdest ^ hdr.portsrc ^ hdr.sh_str_id ^ hdr.control; memcpy(&pFt1000Dev->tx_buf[0], &hdr, sizeof(hdr)); memcpy(&(pFt1000Dev->tx_buf[sizeof(struct pseudo_hdr)]), packet, len); netif_stop_queue(netdev); usb_fill_bulk_urb(pFt1000Dev->tx_urb, pFt1000Dev->dev, usb_sndbulkpipe(pFt1000Dev->dev, pFt1000Dev->bulk_out_endpointAddr), pFt1000Dev->tx_buf, count, ft1000_usb_transmit_complete, (void *)pFt1000Dev); t = (u8 *) pFt1000Dev->tx_urb->transfer_buffer; ret = usb_submit_urb(pFt1000Dev->tx_urb, GFP_ATOMIC); if (ret) { DEBUG("ft1000 failed tx_urb %d\n", ret); return ret; } else { pInfo->stats.tx_packets++; pInfo->stats.tx_bytes += (len + 14); } return 0; } 
//--------------------------------------------------------------------------- // Function: ft1000_start_xmit // // Parameters: skb - socket buffer to be sent // dev - network device // // // Returns: none // // Description: transmit a ethernet packet // // Notes: // //--------------------------------------------------------------------------- static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ft1000_info *pInfo = netdev_priv(dev); struct ft1000_device *pFt1000Dev = pInfo->pFt1000Dev; u8 *pdata; int maxlen, pipe; if (skb == NULL) { DEBUG("ft1000_hw: ft1000_start_xmit:skb == NULL!!!\n"); return NETDEV_TX_OK; } if (pFt1000Dev->status & FT1000_STATUS_CLOSING) { DEBUG("network driver is closed, return\n"); goto err; } pipe = usb_sndbulkpipe(pFt1000Dev->dev, pFt1000Dev->bulk_out_endpointAddr); maxlen = usb_maxpacket(pFt1000Dev->dev, pipe, usb_pipeout(pipe)); pdata = (u8 *) skb->data; if (pInfo->mediastate == 0) { /* Drop packet is mediastate is down */ DEBUG("ft1000_hw:ft1000_start_xmit:mediastate is down\n"); goto err; } if ((skb->len < ENET_HEADER_SIZE) || (skb->len > ENET_MAX_SIZE)) { /* Drop packet which has invalid size */ DEBUG("ft1000_hw:ft1000_start_xmit:invalid ethernet length\n"); goto err; } ft1000_copy_down_pkt(dev, (pdata + ENET_HEADER_SIZE - 2), skb->len - ENET_HEADER_SIZE + 2); err: dev_kfree_skb(skb); return NETDEV_TX_OK; } //--------------------------------------------------------------------------- // // Function: ft1000_copy_up_pkt // Description: This function will take a packet from the FIFO up link and // convert it into an ethernet packet and deliver it to the IP stack // Input: // urb - the receiving usb urb // // Output: // status - FAILURE // SUCCESS // //--------------------------------------------------------------------------- static int ft1000_copy_up_pkt(struct urb *urb) { struct ft1000_info *info = urb->context; struct ft1000_device *ft1000dev = info->pFt1000Dev; struct net_device *net = ft1000dev->net; u16 
tempword; u16 len; u16 lena; struct sk_buff *skb; u16 i; u8 *pbuffer = NULL; u8 *ptemp = NULL; u16 *chksum; if (ft1000dev->status & FT1000_STATUS_CLOSING) { DEBUG("network driver is closed, return\n"); return STATUS_SUCCESS; } // Read length len = urb->transfer_buffer_length; lena = urb->actual_length; chksum = (u16 *) ft1000dev->rx_buf; tempword = *chksum++; for (i = 1; i < 7; i++) tempword ^= *chksum++; if (tempword != *chksum) { info->stats.rx_errors++; ft1000_submit_rx_urb(info); return STATUS_FAILURE; } skb = dev_alloc_skb(len + 12 + 2); if (skb == NULL) { DEBUG("ft1000_copy_up_pkt: No Network buffers available\n"); info->stats.rx_errors++; ft1000_submit_rx_urb(info); return STATUS_FAILURE; } pbuffer = (u8 *) skb_put(skb, len + 12); /* subtract the number of bytes read already */ ptemp = pbuffer; /* fake MAC address */ *pbuffer++ = net->dev_addr[0]; *pbuffer++ = net->dev_addr[1]; *pbuffer++ = net->dev_addr[2]; *pbuffer++ = net->dev_addr[3]; *pbuffer++ = net->dev_addr[4]; *pbuffer++ = net->dev_addr[5]; *pbuffer++ = 0x00; *pbuffer++ = 0x07; *pbuffer++ = 0x35; *pbuffer++ = 0xff; *pbuffer++ = 0xff; *pbuffer++ = 0xfe; memcpy(pbuffer, ft1000dev->rx_buf + sizeof(struct pseudo_hdr), len - sizeof(struct pseudo_hdr)); skb->dev = net; skb->protocol = eth_type_trans(skb, net); skb->ip_summed = CHECKSUM_UNNECESSARY; netif_rx(skb); info->stats.rx_packets++; /* Add on 12 bytes for MAC address which was removed */ info->stats.rx_bytes += (lena + 12); ft1000_submit_rx_urb(info); return SUCCESS; } //--------------------------------------------------------------------------- // // Function: ft1000_submit_rx_urb // Description: the receiving function of the network driver // // Input: // info - a private structure contains the device information // // Output: // status - FAILURE // SUCCESS // //--------------------------------------------------------------------------- static int ft1000_submit_rx_urb(struct ft1000_info *info) { int result; struct ft1000_device *pFt1000Dev = 
info->pFt1000Dev; if (pFt1000Dev->status & FT1000_STATUS_CLOSING) { DEBUG("network driver is closed, return\n"); return -ENODEV; } usb_fill_bulk_urb(pFt1000Dev->rx_urb, pFt1000Dev->dev, usb_rcvbulkpipe(pFt1000Dev->dev, pFt1000Dev->bulk_in_endpointAddr), pFt1000Dev->rx_buf, MAX_BUF_SIZE, (usb_complete_t) ft1000_copy_up_pkt, info); result = usb_submit_urb(pFt1000Dev->rx_urb, GFP_ATOMIC); if (result) { pr_err("ft1000_submit_rx_urb: submitting rx_urb %d failed\n", result); return result; } return 0; } //--------------------------------------------------------------------------- // Function: ft1000_open // // Parameters: // dev - network device // // // Returns: none // // Description: open the network driver // // Notes: // //--------------------------------------------------------------------------- static int ft1000_open(struct net_device *dev) { struct ft1000_info *pInfo = netdev_priv(dev); struct timeval tv; int ret; DEBUG("ft1000_open is called for card %d\n", pInfo->CardNumber); pInfo->stats.rx_bytes = 0; pInfo->stats.tx_bytes = 0; pInfo->stats.rx_packets = 0; pInfo->stats.tx_packets = 0; do_gettimeofday(&tv); pInfo->ConTm = tv.tv_sec; pInfo->ProgConStat = 0; netif_start_queue(dev); netif_carrier_on(dev); ret = ft1000_submit_rx_urb(pInfo); return ret; } //--------------------------------------------------------------------------- // Function: ft1000_close // // Parameters: // net - network device // // // Returns: none // // Description: close the network driver // // Notes: // //--------------------------------------------------------------------------- int ft1000_close(struct net_device *net) { struct ft1000_info *pInfo = netdev_priv(net); struct ft1000_device *ft1000dev = pInfo->pFt1000Dev; ft1000dev->status |= FT1000_STATUS_CLOSING; DEBUG("ft1000_close: pInfo=%p, ft1000dev=%p\n", pInfo, ft1000dev); netif_carrier_off(net); netif_stop_queue(net); ft1000dev->status &= ~FT1000_STATUS_CLOSING; pInfo->ProgConStat = 0xff; return 0; } static struct net_device_stats 
*ft1000_netdev_stats(struct net_device *dev) { struct ft1000_info *info = netdev_priv(dev); return &(info->stats); } //--------------------------------------------------------------------------- // // Function: ft1000_chkcard // Description: This function will check if the device is presently available on // the system. // Input: // dev - device structure // Output: // status - FALSE (device is not present) // TRUE (device is present) // //--------------------------------------------------------------------------- static int ft1000_chkcard(struct ft1000_device *dev) { u16 tempword; u16 status; struct ft1000_info *info = netdev_priv(dev->net); if (info->fCondResetPend) { DEBUG ("ft1000_hw:ft1000_chkcard:Card is being reset, return FALSE\n"); return TRUE; } /* Mask register is used to check for device presence since it is never * set to zero. */ status = ft1000_read_register(dev, &tempword, FT1000_REG_SUP_IMASK); if (tempword == 0) { DEBUG ("ft1000_hw:ft1000_chkcard: IMASK = 0 Card not detected\n"); return FALSE; } /* The system will return the value of 0xffff for the version register * if the device is not present. */ status = ft1000_read_register(dev, &tempword, FT1000_REG_ASIC_ID); if (tempword != 0x1b01) { dev->status |= FT1000_STATUS_CLOSING; DEBUG ("ft1000_hw:ft1000_chkcard: Version = 0xffff Card not detected\n"); return FALSE; } return TRUE; } //--------------------------------------------------------------------------- // // Function: ft1000_receive_cmd // Description: This function will read a message from the dpram area. 
// Input:
//     dev     - network device structure
//     pbuffer - caller supplied address to buffer
//     maxsz   - capacity of pbuffer in bytes
//     pnxtph  - pointer to next pseudo header
//               (NOTE(review): unused in this body — confirm callers rely
//               on it being untouched)
// Output:
//     FALSE - message too long for pbuffer, or pseudo-header checksum bad
//     TRUE  - message copied to pbuffer and checksum verified
//
//---------------------------------------------------------------------------
static bool ft1000_receive_cmd(struct ft1000_device *dev, u16 *pbuffer,
			       int maxsz, u16 *pnxtph)
{
	u16 size, ret;
	u16 *ppseudohdr;
	int i;
	u16 tempword;

	/* Read the message length (network byte order) from DPRAM and add
	 * the pseudo-header size to get the total transfer length. */
	ret =
	    ft1000_read_dpram16(dev, FT1000_MAG_PH_LEN, (u8 *) &size,
				FT1000_MAG_PH_LEN_INDX);
	size = ntohs(size) + PSEUDOSZ;
	if (size > maxsz) {
		DEBUG("FT1000:ft1000_receive_cmd:Invalid command length = %d\n",
		      size);
		return FALSE;
	} else {
		ppseudohdr = (u16 *) pbuffer;
		/* Point the DPRAM address register at the RX base, then pull
		 * the message out 16 bits at a time via the data registers. */
		ft1000_write_register(dev, FT1000_DPRAM_MAG_RX_BASE,
				      FT1000_REG_DPRAM_ADDR);
		ret =
		    ft1000_read_register(dev, pbuffer, FT1000_REG_MAG_DPDATAH);
		pbuffer++;
		ft1000_write_register(dev, FT1000_DPRAM_MAG_RX_BASE + 1,
				      FT1000_REG_DPRAM_ADDR);
		for (i = 0; i <= (size >> 2); i++) {
			ret =
			    ft1000_read_register(dev, pbuffer,
						 FT1000_REG_MAG_DPDATAL);
			pbuffer++;
			ret =
			    ft1000_read_register(dev, pbuffer,
						 FT1000_REG_MAG_DPDATAH);
			pbuffer++;
		}
		/* copy odd aligned word */
		ret =
		    ft1000_read_register(dev, pbuffer, FT1000_REG_MAG_DPDATAL);
		pbuffer++;
		ret =
		    ft1000_read_register(dev, pbuffer, FT1000_REG_MAG_DPDATAH);
		pbuffer++;
		if (size & 0x0001) {
			/* copy odd byte from fifo */
			ret =
			    ft1000_read_register(dev, &tempword,
						 FT1000_REG_DPRAM_DATA);
			*pbuffer = ntohs(tempword);
		}
		/* Check if pseudo header checksum is good.
		 * Calculate pseudo header checksum: XOR of the first seven
		 * 16-bit words must equal the stored eighth word. */
		tempword = *ppseudohdr++;
		for (i = 1; i < 7; i++)
			tempword ^= *ppseudohdr++;
		if ((tempword != *ppseudohdr))
			return FALSE;

		return TRUE;
	}
}

/* Drain the driver's provisioning list to the DSP: for each queued record,
 * wait for the TX doorbell to clear, stamp the pseudo header (sequence
 * number + checksum), copy the record into DPRAM and ring the doorbell.
 * Returns STATUS_SUCCESS when the list is empty, STATUS_FAILURE if the
 * doorbell never clears. */
static int ft1000_dsp_prov(void *arg)
{
	struct ft1000_device *dev = (struct ft1000_device *)arg;
	struct ft1000_info *info = netdev_priv(dev->net);
	u16 tempword;
	u16 len;
	u16 i = 0;
	struct prov_record *ptr;
	struct pseudo_hdr *ppseudo_hdr;
	u16 *pmsg;
	u16 status;
	u16 TempShortBuf[256];

	DEBUG("*** DspProv Entered\n");

	while (list_empty(&info->prov_list) == 0) {
		DEBUG("DSP Provisioning List Entry\n");

		/* Check if doorbell is available */
		DEBUG("check if doorbell is cleared\n");
		status =
		    ft1000_read_register(dev, &tempword, FT1000_REG_DOORBELL);
		if (status) {
			DEBUG("ft1000_dsp_prov::ft1000_read_register error\n");
			break;
		}

		/* NOTE(review): 'i' is not reset here and is reused below as
		 * the checksum index (left at 7 after each record), so this
		 * 10-tick timeout shrinks on every later iteration — confirm
		 * whether the counter should be reset per record. */
		while (tempword & FT1000_DB_DPRAM_TX) {
			mdelay(10);
			i++;
			if (i == 10) {
				DEBUG("FT1000:ft1000_dsp_prov:message drop\n");
				return STATUS_FAILURE;
			}
			ft1000_read_register(dev, &tempword,
					     FT1000_REG_DOORBELL);
		}

		if (!(tempword & FT1000_DB_DPRAM_TX)) {
			DEBUG("*** Provision Data Sent to DSP\n");

			/* Send provisioning data */
			ptr =
			    list_entry(info->prov_list.next,
				       struct prov_record, list);
			len = *(u16 *) ptr->pprov_data;
			len = htons(len);
			len += PSEUDOSZ;

			pmsg = (u16 *) ptr->pprov_data;
			ppseudo_hdr = (struct pseudo_hdr *)pmsg;
			/* Insert slow queue sequence number */
			ppseudo_hdr->seq_num = info->squeseqnum++;
			ppseudo_hdr->portsrc = 0;
			/* Calculate new checksum */
			ppseudo_hdr->checksum = *pmsg++;
			for (i = 1; i < 7; i++) {
				ppseudo_hdr->checksum ^= *pmsg++;
			}

			/* Prefix the record with a zero word and its length,
			 * then hand it to DPRAM and ring the TX doorbell. */
			TempShortBuf[0] = 0;
			TempShortBuf[1] = htons(len);
			memcpy(&TempShortBuf[2], ppseudo_hdr, len);

			status =
			    ft1000_write_dpram32(dev, 0,
						 (u8 *) &TempShortBuf[0],
						 (unsigned short)(len + 2));
			status =
			    ft1000_write_register(dev, FT1000_DB_DPRAM_TX,
						  FT1000_REG_DOORBELL);

			/* Record delivered: unlink and free it. */
			list_del(&ptr->list);
			kfree(ptr->pprov_data);
			kfree(ptr);
		}
		msleep(10);
	}

	DEBUG("DSP Provisioning List Entry finished\n");

	msleep(100);

	/* Provisioning done; the card can now accept normal traffic. */
	info->fProvComplete = 1;
	info->CardReady = 1;

	return STATUS_SUCCESS;
}

/* Dispatch one driver message of 'size' bytes read from DPRAM offset 0x200.
 * Returns STATUS_SUCCESS, or STATUS_FAILURE if the scratch buffer cannot be
 * allocated or a provisioning step fails. */
static int ft1000_proc_drvmsg(struct ft1000_device *dev, u16 size)
{
	struct ft1000_info *info = netdev_priv(dev->net);
	u16 msgtype;
	u16 tempword;
	struct media_msg *pmediamsg;
	struct dsp_init_msg *pdspinitmsg;
	struct drv_msg *pdrvmsg;
	u16 i;
	struct pseudo_hdr *ppseudo_hdr;
	u16 *pmsg;
	u16 status;
	union {
		u8 byte[2];
		u16 wrd;
	} convert;

	/* Scratch buffer for the raw DPRAM message (freed at 'out'). */
	char *cmdbuffer = kmalloc(1600, GFP_KERNEL);
	if (!cmdbuffer)
		return STATUS_FAILURE;

	status = ft1000_read_dpram32(dev, 0x200,
cmdbuffer, size); #ifdef JDEBUG DEBUG("ft1000_proc_drvmsg:cmdbuffer\n"); for (i = 0; i < size; i += 5) { if ((i + 5) < size) DEBUG("0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", cmdbuffer[i], cmdbuffer[i + 1], cmdbuffer[i + 2], cmdbuffer[i + 3], cmdbuffer[i + 4]); else { for (j = i; j < size; j++) DEBUG("0x%x ", cmdbuffer[j]); DEBUG("\n"); break; } } #endif pdrvmsg = (struct drv_msg *)&cmdbuffer[2]; msgtype = ntohs(pdrvmsg->type); DEBUG("ft1000_proc_drvmsg:Command message type = 0x%x\n", msgtype); switch (msgtype) { case MEDIA_STATE:{ DEBUG ("ft1000_proc_drvmsg:Command message type = MEDIA_STATE"); pmediamsg = (struct media_msg *)&cmdbuffer[0]; if (info->ProgConStat != 0xFF) { if (pmediamsg->state) { DEBUG("Media is up\n"); if (info->mediastate == 0) { if (info->NetDevRegDone) { netif_wake_queue(dev-> net); } info->mediastate = 1; } } else { DEBUG("Media is down\n"); if (info->mediastate == 1) { info->mediastate = 0; if (info->NetDevRegDone) { } info->ConTm = 0; } } } else { DEBUG("Media is down\n"); if (info->mediastate == 1) { info->mediastate = 0; info->ConTm = 0; } } break; } case DSP_INIT_MSG:{ DEBUG ("ft1000_proc_drvmsg:Command message type = DSP_INIT_MSG"); pdspinitmsg = (struct dsp_init_msg *)&cmdbuffer[2]; memcpy(info->DspVer, pdspinitmsg->DspVer, DSPVERSZ); DEBUG("DSPVER = 0x%2x 0x%2x 0x%2x 0x%2x\n", info->DspVer[0], info->DspVer[1], info->DspVer[2], info->DspVer[3]); memcpy(info->HwSerNum, pdspinitmsg->HwSerNum, HWSERNUMSZ); memcpy(info->Sku, pdspinitmsg->Sku, SKUSZ); memcpy(info->eui64, pdspinitmsg->eui64, EUISZ); DEBUG("EUI64=%2x.%2x.%2x.%2x.%2x.%2x.%2x.%2x\n", info->eui64[0], info->eui64[1], info->eui64[2], info->eui64[3], info->eui64[4], info->eui64[5], info->eui64[6], info->eui64[7]); dev->net->dev_addr[0] = info->eui64[0]; dev->net->dev_addr[1] = info->eui64[1]; dev->net->dev_addr[2] = info->eui64[2]; dev->net->dev_addr[3] = info->eui64[5]; dev->net->dev_addr[4] = info->eui64[6]; dev->net->dev_addr[5] = info->eui64[7]; if (ntohs(pdspinitmsg->length) == 
(sizeof(struct dsp_init_msg) - 20)) { memcpy(info->ProductMode, pdspinitmsg->ProductMode, MODESZ); memcpy(info->RfCalVer, pdspinitmsg->RfCalVer, CALVERSZ); memcpy(info->RfCalDate, pdspinitmsg->RfCalDate, CALDATESZ); DEBUG("RFCalVer = 0x%2x 0x%2x\n", info->RfCalVer[0], info->RfCalVer[1]); } break; } case DSP_PROVISION:{ DEBUG ("ft1000_proc_drvmsg:Command message type = DSP_PROVISION\n"); /* kick off dspprov routine to start provisioning * Send provisioning data to DSP */ if (list_empty(&info->prov_list) == 0) { info->fProvComplete = 0; status = ft1000_dsp_prov(dev); if (status != STATUS_SUCCESS) goto out; } else { info->fProvComplete = 1; status = ft1000_write_register(dev, FT1000_DB_HB, FT1000_REG_DOORBELL); DEBUG ("FT1000:drivermsg:No more DSP provisioning data in dsp image\n"); } DEBUG("ft1000_proc_drvmsg:DSP PROVISION is done\n"); break; } case DSP_STORE_INFO:{ DEBUG ("ft1000_proc_drvmsg:Command message type = DSP_STORE_INFO"); DEBUG("FT1000:drivermsg:Got DSP_STORE_INFO\n"); tempword = ntohs(pdrvmsg->length); info->DSPInfoBlklen = tempword; if (tempword < (MAX_DSP_SESS_REC - 4)) { pmsg = (u16 *) &pdrvmsg->data[0]; for (i = 0; i < ((tempword + 1) / 2); i++) { DEBUG ("FT1000:drivermsg:dsp info data = 0x%x\n", *pmsg); info->DSPInfoBlk[i + 10] = *pmsg++; } } else { info->DSPInfoBlklen = 0; } break; } case DSP_GET_INFO:{ DEBUG("FT1000:drivermsg:Got DSP_GET_INFO\n"); /* copy dsp info block to dsp */ info->DrvMsgPend = 1; /* allow any outstanding ioctl to finish */ mdelay(10); status = ft1000_read_register(dev, &tempword, FT1000_REG_DOORBELL); if (tempword & FT1000_DB_DPRAM_TX) { mdelay(10); status = ft1000_read_register(dev, &tempword, FT1000_REG_DOORBELL); if (tempword & FT1000_DB_DPRAM_TX) { mdelay(10); status = ft1000_read_register(dev, &tempword, FT1000_REG_DOORBELL); if (tempword & FT1000_DB_DPRAM_TX) break; } } /* Put message into Slow Queue * Form Pseudo header */ pmsg = (u16 *) info->DSPInfoBlk; *pmsg++ = 0; *pmsg++ = htons(info->DSPInfoBlklen + 20 + 
info->DSPInfoBlklen); ppseudo_hdr = (struct pseudo_hdr *)(u16 *) &info->DSPInfoBlk[2]; ppseudo_hdr->length = htons(info->DSPInfoBlklen + 4 + info->DSPInfoBlklen); ppseudo_hdr->source = 0x10; ppseudo_hdr->destination = 0x20; ppseudo_hdr->portdest = 0; ppseudo_hdr->portsrc = 0; ppseudo_hdr->sh_str_id = 0; ppseudo_hdr->control = 0; ppseudo_hdr->rsvd1 = 0; ppseudo_hdr->rsvd2 = 0; ppseudo_hdr->qos_class = 0; /* Insert slow queue sequence number */ ppseudo_hdr->seq_num = info->squeseqnum++; /* Insert application id */ ppseudo_hdr->portsrc = 0; /* Calculate new checksum */ ppseudo_hdr->checksum = *pmsg++; for (i = 1; i < 7; i++) ppseudo_hdr->checksum ^= *pmsg++; info->DSPInfoBlk[10] = 0x7200; info->DSPInfoBlk[11] = htons(info->DSPInfoBlklen); status = ft1000_write_dpram32(dev, 0, (u8 *) &info->DSPInfoBlk[0], (unsigned short)(info-> DSPInfoBlklen + 22)); status = ft1000_write_register(dev, FT1000_DB_DPRAM_TX, FT1000_REG_DOORBELL); info->DrvMsgPend = 0; break; } case GET_DRV_ERR_RPT_MSG:{ DEBUG("FT1000:drivermsg:Got GET_DRV_ERR_RPT_MSG\n"); /* copy driver error message to dsp */ info->DrvMsgPend = 1; /* allow any outstanding ioctl to finish */ mdelay(10); status = ft1000_read_register(dev, &tempword, FT1000_REG_DOORBELL); if (tempword & FT1000_DB_DPRAM_TX) { mdelay(10); status = ft1000_read_register(dev, &tempword, FT1000_REG_DOORBELL); if (tempword & FT1000_DB_DPRAM_TX) mdelay(10); } if ((tempword & FT1000_DB_DPRAM_TX) == 0) { /* Put message into Slow Queue * Form Pseudo header */ pmsg = (u16 *) &tempbuffer[0]; ppseudo_hdr = (struct pseudo_hdr *)pmsg; ppseudo_hdr->length = htons(0x0012); ppseudo_hdr->source = 0x10; ppseudo_hdr->destination = 0x20; ppseudo_hdr->portdest = 0; ppseudo_hdr->portsrc = 0; ppseudo_hdr->sh_str_id = 0; ppseudo_hdr->control = 0; ppseudo_hdr->rsvd1 = 0; ppseudo_hdr->rsvd2 = 0; ppseudo_hdr->qos_class = 0; /* Insert slow queue sequence number */ ppseudo_hdr->seq_num = info->squeseqnum++; /* Insert application id */ ppseudo_hdr->portsrc = 0; /* 
Calculate new checksum */ ppseudo_hdr->checksum = *pmsg++; for (i = 1; i < 7; i++) ppseudo_hdr->checksum ^= *pmsg++; pmsg = (u16 *) &tempbuffer[16]; *pmsg++ = htons(RSP_DRV_ERR_RPT_MSG); *pmsg++ = htons(0x000e); *pmsg++ = htons(info->DSP_TIME[0]); *pmsg++ = htons(info->DSP_TIME[1]); *pmsg++ = htons(info->DSP_TIME[2]); *pmsg++ = htons(info->DSP_TIME[3]); convert.byte[0] = info->DspVer[0]; convert.byte[1] = info->DspVer[1]; *pmsg++ = convert.wrd; convert.byte[0] = info->DspVer[2]; convert.byte[1] = info->DspVer[3]; *pmsg++ = convert.wrd; *pmsg++ = htons(info->DrvErrNum); card_send_command(dev, (unsigned char *)&tempbuffer[0], (u16) (0x0012 + PSEUDOSZ)); info->DrvErrNum = 0; } info->DrvMsgPend = 0; break; } default: break; } status = STATUS_SUCCESS; out: kfree(cmdbuffer); DEBUG("return from ft1000_proc_drvmsg\n"); return status; } int ft1000_poll(void* dev_id) { struct ft1000_device *dev = (struct ft1000_device *)dev_id; struct ft1000_info *info = netdev_priv(dev->net); u16 tempword; u16 status; u16 size; int i; u16 data; u16 modulo; u16 portid; u16 nxtph; struct dpram_blk *pdpram_blk; struct pseudo_hdr *ppseudo_hdr; unsigned long flags; if (ft1000_chkcard(dev) == FALSE) { DEBUG("ft1000_poll::ft1000_chkcard: failed\n"); return STATUS_FAILURE; } status = ft1000_read_register (dev, &tempword, FT1000_REG_DOORBELL); if ( !status ) { if (tempword & FT1000_DB_DPRAM_RX) { status = ft1000_read_dpram16(dev, 0x200, (u8 *)&data, 0); size = ntohs(data) + 16 + 2; if (size % 4) { modulo = 4 - (size % 4); size = size + modulo; } status = ft1000_read_dpram16(dev, 0x201, (u8 *)&portid, 1); portid &= 0xff; if (size < MAX_CMD_SQSIZE) { switch (portid) { case DRIVERID: DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_DPRAM_RX : portid DRIVERID\n"); status = ft1000_proc_drvmsg (dev, size); if (status != STATUS_SUCCESS ) return status; break; case DSPBCMSGID: // This is a dsp broadcast message // Check which application has registered for dsp broadcast messages for (i=0; 
i<MAX_NUM_APP; i++) { if ( (info->app_info[i].DspBCMsgFlag) && (info->app_info[i].fileobject) && (info->app_info[i].NumOfMsg < MAX_MSG_LIMIT) ) { nxtph = FT1000_DPRAM_RX_BASE + 2; pdpram_blk = ft1000_get_buffer (&freercvpool); if (pdpram_blk != NULL) { if ( ft1000_receive_cmd(dev, pdpram_blk->pbuffer, MAX_CMD_SQSIZE, &nxtph) ) { ppseudo_hdr = (struct pseudo_hdr *)pdpram_blk->pbuffer; // Put message into the appropriate application block info->app_info[i].nRxMsg++; spin_lock_irqsave(&free_buff_lock, flags); list_add_tail(&pdpram_blk->list, &info->app_info[i].app_sqlist); info->app_info[i].NumOfMsg++; spin_unlock_irqrestore(&free_buff_lock, flags); wake_up_interruptible(&info->app_info[i].wait_dpram_msg); } else { info->app_info[i].nRxMsgMiss++; // Put memory back to free pool ft1000_free_buffer(pdpram_blk, &freercvpool); DEBUG("pdpram_blk::ft1000_get_buffer NULL\n"); } } else { DEBUG("Out of memory in free receive command pool\n"); info->app_info[i].nRxMsgMiss++; } } } break; default: pdpram_blk = ft1000_get_buffer (&freercvpool); if (pdpram_blk != NULL) { if ( ft1000_receive_cmd(dev, pdpram_blk->pbuffer, MAX_CMD_SQSIZE, &nxtph) ) { ppseudo_hdr = (struct pseudo_hdr *)pdpram_blk->pbuffer; // Search for correct application block for (i=0; i<MAX_NUM_APP; i++) { if (info->app_info[i].app_id == ppseudo_hdr->portdest) { break; } } if (i == MAX_NUM_APP) { DEBUG("FT1000:ft1000_parse_dpram_msg: No application matching id = %d\n", ppseudo_hdr->portdest); // Put memory back to free pool ft1000_free_buffer(pdpram_blk, &freercvpool); } else { if (info->app_info[i].NumOfMsg > MAX_MSG_LIMIT) { // Put memory back to free pool ft1000_free_buffer(pdpram_blk, &freercvpool); } else { info->app_info[i].nRxMsg++; // Put message into the appropriate application block list_add_tail(&pdpram_blk->list, &info->app_info[i].app_sqlist); info->app_info[i].NumOfMsg++; } } } else { // Put memory back to free pool ft1000_free_buffer(pdpram_blk, &freercvpool); } } else { DEBUG("Out of memory in free 
receive command pool\n"); } break; } } else { DEBUG("FT1000:dpc:Invalid total length for SlowQ = %d\n", size); } status = ft1000_write_register (dev, FT1000_DB_DPRAM_RX, FT1000_REG_DOORBELL); } else if (tempword & FT1000_DSP_ASIC_RESET) { // Let's reset the ASIC from the Host side as well status = ft1000_write_register (dev, ASIC_RESET_BIT, FT1000_REG_RESET); status = ft1000_read_register (dev, &tempword, FT1000_REG_RESET); i = 0; while (tempword & ASIC_RESET_BIT) { status = ft1000_read_register (dev, &tempword, FT1000_REG_RESET); msleep(10); i++; if (i==100) break; } if (i==100) { DEBUG("Unable to reset ASIC\n"); return STATUS_SUCCESS; } msleep(10); // Program WMARK register status = ft1000_write_register (dev, 0x600, FT1000_REG_MAG_WATERMARK); // clear ASIC reset doorbell status = ft1000_write_register (dev, FT1000_DSP_ASIC_RESET, FT1000_REG_DOORBELL); msleep(10); } else if (tempword & FT1000_ASIC_RESET_REQ) { DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_ASIC_RESET_REQ\n"); // clear ASIC reset request from DSP status = ft1000_write_register (dev, FT1000_ASIC_RESET_REQ, FT1000_REG_DOORBELL); status = ft1000_write_register (dev, HOST_INTF_BE, FT1000_REG_SUP_CTRL); // copy dsp session record from Adapter block status = ft1000_write_dpram32 (dev, 0, (u8 *)&info->DSPSess.Rec[0], 1024); // Program WMARK register status = ft1000_write_register (dev, 0x600, FT1000_REG_MAG_WATERMARK); // ring doorbell to tell DSP that ASIC is out of reset status = ft1000_write_register (dev, FT1000_ASIC_RESET_DSP, FT1000_REG_DOORBELL); } else if (tempword & FT1000_DB_COND_RESET) { DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_COND_RESET\n"); if (info->fAppMsgPend == 0) { // Reset ASIC and DSP status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER0, (u8 *)&(info->DSP_TIME[0]), FT1000_MAG_DSP_TIMER0_INDX); status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER1, (u8 *)&(info->DSP_TIME[1]), FT1000_MAG_DSP_TIMER1_INDX); status = ft1000_read_dpram16(dev, 
FT1000_MAG_DSP_TIMER2, (u8 *)&(info->DSP_TIME[2]), FT1000_MAG_DSP_TIMER2_INDX); status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER3, (u8 *)&(info->DSP_TIME[3]), FT1000_MAG_DSP_TIMER3_INDX); info->CardReady = 0; info->DrvErrNum = DSP_CONDRESET_INFO; DEBUG("ft1000_hw:DSP conditional reset requested\n"); info->ft1000_reset(dev->net); } else { info->fProvComplete = 0; info->fCondResetPend = 1; } ft1000_write_register(dev, FT1000_DB_COND_RESET, FT1000_REG_DOORBELL); } } return STATUS_SUCCESS; }
gpl-2.0
Fusion-Devices/android_kernel_oneplus_msm8974
arch/sh/kernel/cpu/sh4a/clock-sh7366.c
4424
9369
/* * arch/sh/kernel/cpu/sh4a/clock-sh7366.c * * SH7366 clock framework support * * Copyright (C) 2009 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/clkdev.h> #include <asm/clock.h> /* SH7366 registers */ #define FRQCR 0xa4150000 #define VCLKCR 0xa4150004 #define SCLKACR 0xa4150008 #define SCLKBCR 0xa415000c #define PLLCR 0xa4150024 #define MSTPCR0 0xa4150030 #define MSTPCR1 0xa4150034 #define MSTPCR2 0xa4150038 #define DLLFRQ 0xa4150050 /* Fixed 32 KHz root clock for RTC and Power Management purposes */ static struct clk r_clk = { .rate = 32768, }; /* * Default rate for the root input clock, reset this with clk_set_rate() * from the platform code. 
*/ struct clk extal_clk = { .rate = 33333333, }; /* The dll block multiplies the 32khz r_clk, may be used instead of extal */ static unsigned long dll_recalc(struct clk *clk) { unsigned long mult; if (__raw_readl(PLLCR) & 0x1000) mult = __raw_readl(DLLFRQ); else mult = 0; return clk->parent->rate * mult; } static struct sh_clk_ops dll_clk_ops = { .recalc = dll_recalc, }; static struct clk dll_clk = { .ops = &dll_clk_ops, .parent = &r_clk, .flags = CLK_ENABLE_ON_INIT, }; static unsigned long pll_recalc(struct clk *clk) { unsigned long mult = 1; unsigned long div = 1; if (__raw_readl(PLLCR) & 0x4000) mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1); else div = 2; return (clk->parent->rate * mult) / div; } static struct sh_clk_ops pll_clk_ops = { .recalc = pll_recalc, }; static struct clk pll_clk = { .ops = &pll_clk_ops, .flags = CLK_ENABLE_ON_INIT, }; struct clk *main_clks[] = { &r_clk, &extal_clk, &dll_clk, &pll_clk, }; static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; static struct clk_div_mult_table div4_div_mult_table = { .divisors = divisors, .nr_divisors = ARRAY_SIZE(divisors), .multipliers = multipliers, .nr_multipliers = ARRAY_SIZE(multipliers), }; static struct clk_div4_table div4_table = { .div_mult_table = &div4_div_mult_table, }; enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_SIUA, DIV4_SIUB, DIV4_NR }; #define DIV4(_reg, _bit, _mask, _flags) \ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags) struct clk div4_clks[DIV4_NR] = { [DIV4_I] = DIV4(FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT), [DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT), [DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT), [DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT), [DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT), [DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0), [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0), [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0), }; enum { DIV6_V, DIV6_NR }; 
struct clk div6_clks[DIV6_NR] = { [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0), }; #define MSTP(_parent, _reg, _bit, _flags) \ SH_CLK_MSTP32(_parent, _reg, _bit, _flags) enum { MSTP031, MSTP030, MSTP029, MSTP028, MSTP026, MSTP023, MSTP022, MSTP021, MSTP020, MSTP019, MSTP018, MSTP017, MSTP016, MSTP015, MSTP014, MSTP013, MSTP012, MSTP011, MSTP010, MSTP007, MSTP006, MSTP005, MSTP002, MSTP001, MSTP109, MSTP100, MSTP227, MSTP226, MSTP224, MSTP223, MSTP222, MSTP218, MSTP217, MSTP211, MSTP207, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, MSTP_NR }; static struct clk mstp_clks[MSTP_NR] = { /* See page 52 of Datasheet V0.40: Overview -> Block Diagram */ [MSTP031] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT), [MSTP030] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT), [MSTP029] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT), [MSTP028] = MSTP(&div4_clks[DIV4_SH], MSTPCR0, 28, CLK_ENABLE_ON_INIT), [MSTP026] = MSTP(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT), [MSTP023] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 23, 0), [MSTP022] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 22, 0), [MSTP021] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 21, 0), [MSTP020] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 20, 0), [MSTP019] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 19, 0), [MSTP017] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 17, 0), [MSTP015] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 15, 0), [MSTP014] = MSTP(&r_clk, MSTPCR0, 14, 0), [MSTP013] = MSTP(&r_clk, MSTPCR0, 13, 0), [MSTP011] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 11, 0), [MSTP010] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 10, 0), [MSTP007] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 7, 0), [MSTP006] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 6, 0), [MSTP005] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 5, 0), [MSTP002] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 2, 0), [MSTP001] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 1, 0), [MSTP109] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 9, 0), [MSTP227] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 27, 0), [MSTP226] = MSTP(&div4_clks[DIV4_P], 
MSTPCR2, 26, 0), [MSTP224] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 24, 0), [MSTP223] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 23, 0), [MSTP222] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 22, 0), [MSTP218] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 18, 0), [MSTP217] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 17, 0), [MSTP211] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 11, 0), [MSTP207] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 7, CLK_ENABLE_ON_INIT), [MSTP205] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 5, 0), [MSTP204] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 4, 0), [MSTP203] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 3, 0), [MSTP202] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT), [MSTP201] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT), [MSTP200] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 0, 0), }; static struct clk_lookup lookups[] = { /* main clocks */ CLKDEV_CON_ID("rclk", &r_clk), CLKDEV_CON_ID("extal", &extal_clk), CLKDEV_CON_ID("dll_clk", &dll_clk), CLKDEV_CON_ID("pll_clk", &pll_clk), /* DIV4 clocks */ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]), CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]), CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]), CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]), CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]), CLKDEV_CON_ID("siua_clk", &div4_clks[DIV4_SIUA]), CLKDEV_CON_ID("siub_clk", &div4_clks[DIV4_SIUB]), /* DIV6 clocks */ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]), /* MSTP32 clocks */ CLKDEV_CON_ID("tlb0", &mstp_clks[MSTP031]), CLKDEV_CON_ID("ic0", &mstp_clks[MSTP030]), CLKDEV_CON_ID("oc0", &mstp_clks[MSTP029]), CLKDEV_CON_ID("rsmem0", &mstp_clks[MSTP028]), CLKDEV_CON_ID("xymem0", &mstp_clks[MSTP026]), CLKDEV_CON_ID("intc3", &mstp_clks[MSTP023]), CLKDEV_CON_ID("intc0", &mstp_clks[MSTP022]), CLKDEV_CON_ID("dmac0", &mstp_clks[MSTP021]), CLKDEV_CON_ID("sh0", &mstp_clks[MSTP020]), CLKDEV_CON_ID("hudi0", &mstp_clks[MSTP019]), CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP017]), CLKDEV_CON_ID("tmu_fck", &mstp_clks[MSTP015]), 
	CLKDEV_CON_ID("cmt_fck", &mstp_clks[MSTP014]),
	CLKDEV_CON_ID("rwdt0", &mstp_clks[MSTP013]),
	CLKDEV_CON_ID("mfi0", &mstp_clks[MSTP011]),
	CLKDEV_CON_ID("flctl0", &mstp_clks[MSTP010]),
	CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP007]),
	CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP006]),
	CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP005]),
	CLKDEV_CON_ID("msiof0", &mstp_clks[MSTP002]),
	CLKDEV_CON_ID("sbr0", &mstp_clks[MSTP001]),
	CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP109]),
	CLKDEV_CON_ID("icb0", &mstp_clks[MSTP227]),
	CLKDEV_CON_ID("meram0", &mstp_clks[MSTP226]),
	CLKDEV_CON_ID("dacy1", &mstp_clks[MSTP224]),
	CLKDEV_CON_ID("dacy0", &mstp_clks[MSTP223]),
	CLKDEV_CON_ID("tsif0", &mstp_clks[MSTP222]),
	CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP218]),
	CLKDEV_CON_ID("mmcif0", &mstp_clks[MSTP217]),
	CLKDEV_CON_ID("usbf0", &mstp_clks[MSTP211]),
	CLKDEV_CON_ID("veu1", &mstp_clks[MSTP207]),
	CLKDEV_CON_ID("vou0", &mstp_clks[MSTP205]),
	CLKDEV_CON_ID("beu0", &mstp_clks[MSTP204]),
	CLKDEV_CON_ID("ceu0", &mstp_clks[MSTP203]),
	CLKDEV_CON_ID("veu0", &mstp_clks[MSTP202]),
	CLKDEV_CON_ID("vpu0", &mstp_clks[MSTP201]),
	CLKDEV_CON_ID("lcdc0", &mstp_clks[MSTP200]),
};

/*
 * Register the SH7366 clock tree: pick the PLL parent (DLL vs. EXTAL),
 * register the root clocks, publish the clkdev lookup table and then
 * register the DIV4, DIV6 and MSTP32 derived clocks.  Returns 0 on
 * success or the first registration error.
 */
int __init arch_clk_init(void)
{
	int k, ret = 0;

	/* autodetect extal or dll configuration: PLLCR bit 12 selects the
	 * 32 KHz DLL path as PLL input instead of the external oscillator */
	if (__raw_readl(PLLCR) & 0x1000)
		pll_clk.parent = &dll_clk;
	else
		pll_clk.parent = &extal_clk;

	/* Stop at the first root-clock registration failure. */
	for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
		ret = clk_register(main_clks[k]);

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	if (!ret)
		ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);

	if (!ret)
		ret = sh_clk_div6_register(div6_clks, DIV6_NR);

	if (!ret)
		ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);

	return ret;
}
gpl-2.0
PhSchmitt/mptcp-nexus-a444
arch/sh/kernel/cpu/sh4a/clock-sh7786.c
4424
6976
/* * arch/sh/kernel/cpu/sh4a/clock-sh7786.c * * SH7786 support for the clock framework * * Copyright (C) 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/clkdev.h> #include <asm/clock.h> #include <asm/freq.h> /* * Default rate for the root input clock, reset this with clk_set_rate() * from the platform code. */ static struct clk extal_clk = { .rate = 33333333, }; static unsigned long pll_recalc(struct clk *clk) { int multiplier; /* * Clock modes 0, 1, and 2 use an x64 multiplier against PLL1, * while modes 3, 4, and 5 use an x32. */ multiplier = (sh_mv.mv_mode_pins() & 0xf) < 3 ? 64 : 32; return clk->parent->rate * multiplier; } static struct sh_clk_ops pll_clk_ops = { .recalc = pll_recalc, }; static struct clk pll_clk = { .ops = &pll_clk_ops, .parent = &extal_clk, .flags = CLK_ENABLE_ON_INIT, }; static struct clk *clks[] = { &extal_clk, &pll_clk, }; static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18, 24, 32, 36, 48 }; static struct clk_div_mult_table div4_div_mult_table = { .divisors = div2, .nr_divisors = ARRAY_SIZE(div2), }; static struct clk_div4_table div4_table = { .div_mult_table = &div4_div_mult_table, }; enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_DU, DIV4_P, DIV4_NR }; #define DIV4(_bit, _mask, _flags) \ SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags) struct clk div4_clks[DIV4_NR] = { [DIV4_P] = DIV4(0, 0x0b40, 0), [DIV4_DU] = DIV4(4, 0x0010, 0), [DIV4_DDR] = DIV4(12, 0x0002, CLK_ENABLE_ON_INIT), [DIV4_B] = DIV4(16, 0x0360, CLK_ENABLE_ON_INIT), [DIV4_SH] = DIV4(20, 0x0002, CLK_ENABLE_ON_INIT), [DIV4_I] = DIV4(28, 0x0006, CLK_ENABLE_ON_INIT), }; #define MSTPCR0 0xffc40030 #define MSTPCR1 0xffc40034 enum { MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024, MSTP023, MSTP022, MSTP021, MSTP020, 
MSTP017, MSTP016, MSTP015, MSTP014, MSTP011, MSTP010, MSTP009, MSTP008, MSTP005, MSTP004, MSTP002, MSTP112, MSTP110, MSTP109, MSTP108, MSTP105, MSTP104, MSTP103, MSTP102, MSTP_NR }; static struct clk mstp_clks[MSTP_NR] = { /* MSTPCR0 */ [MSTP029] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 29, 0), [MSTP028] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 28, 0), [MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0), [MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0), [MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0), [MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0), [MSTP023] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 23, 0), [MSTP022] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 22, 0), [MSTP021] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 21, 0), [MSTP020] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 20, 0), [MSTP017] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 17, 0), [MSTP016] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 16, 0), [MSTP015] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0), [MSTP014] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 14, 0), [MSTP011] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 11, 0), [MSTP010] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0), [MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0), [MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0), [MSTP005] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 5, 0), [MSTP004] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 4, 0), [MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0), /* MSTPCR1 */ [MSTP112] = SH_CLK_MSTP32(NULL, MSTPCR1, 12, 0), [MSTP110] = SH_CLK_MSTP32(NULL, MSTPCR1, 10, 0), [MSTP109] = SH_CLK_MSTP32(NULL, MSTPCR1, 9, 0), [MSTP108] = SH_CLK_MSTP32(NULL, MSTPCR1, 8, 0), [MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0), [MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0), [MSTP103] = SH_CLK_MSTP32(NULL, MSTPCR1, 3, 0), [MSTP102] = SH_CLK_MSTP32(NULL, MSTPCR1, 2, 0), }; static struct clk_lookup lookups[] = { /* 
main clocks */ CLKDEV_CON_ID("extal", &extal_clk), CLKDEV_CON_ID("pll_clk", &pll_clk), /* DIV4 clocks */ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]), CLKDEV_CON_ID("du_clk", &div4_clks[DIV4_DU]), CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]), CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]), CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]), CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), /* MSTP32 clocks */ CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[MSTP029]), CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[MSTP028]), CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP027]), CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP026]), CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]), CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP024]), CLKDEV_CON_ID("ssi3_fck", &mstp_clks[MSTP023]), CLKDEV_CON_ID("ssi2_fck", &mstp_clks[MSTP022]), CLKDEV_CON_ID("ssi1_fck", &mstp_clks[MSTP021]), CLKDEV_CON_ID("ssi0_fck", &mstp_clks[MSTP020]), CLKDEV_CON_ID("hac1_fck", &mstp_clks[MSTP017]), CLKDEV_CON_ID("hac0_fck", &mstp_clks[MSTP016]), CLKDEV_CON_ID("i2c1_fck", &mstp_clks[MSTP015]), CLKDEV_CON_ID("i2c0_fck", &mstp_clks[MSTP014]), CLKDEV_ICK_ID("tmu_fck", "sh_tmu.0", &mstp_clks[MSTP008]), CLKDEV_ICK_ID("tmu_fck", "sh_tmu.1", &mstp_clks[MSTP008]), CLKDEV_ICK_ID("tmu_fck", "sh_tmu.2", &mstp_clks[MSTP008]), CLKDEV_ICK_ID("tmu_fck", "sh_tmu.3", &mstp_clks[MSTP009]), CLKDEV_ICK_ID("tmu_fck", "sh_tmu.4", &mstp_clks[MSTP009]), CLKDEV_ICK_ID("tmu_fck", "sh_tmu.5", &mstp_clks[MSTP009]), CLKDEV_ICK_ID("tmu_fck", "sh_tmu.6", &mstp_clks[MSTP010]), CLKDEV_ICK_ID("tmu_fck", "sh_tmu.7", &mstp_clks[MSTP010]), CLKDEV_ICK_ID("tmu_fck", "sh_tmu.8", &mstp_clks[MSTP010]), CLKDEV_ICK_ID("tmu_fck", "sh_tmu.9", &mstp_clks[MSTP011]), CLKDEV_ICK_ID("tmu_fck", "sh_tmu.10", &mstp_clks[MSTP011]), CLKDEV_ICK_ID("tmu_fck", "sh_tmu.11", &mstp_clks[MSTP011]), CLKDEV_CON_ID("sdif1_fck", &mstp_clks[MSTP005]), CLKDEV_CON_ID("sdif0_fck", &mstp_clks[MSTP004]), CLKDEV_CON_ID("hspi_fck", 
&mstp_clks[MSTP002]), CLKDEV_CON_ID("usb_fck", &mstp_clks[MSTP112]), CLKDEV_CON_ID("pcie2_fck", &mstp_clks[MSTP110]), CLKDEV_CON_ID("pcie1_fck", &mstp_clks[MSTP109]), CLKDEV_CON_ID("pcie0_fck", &mstp_clks[MSTP108]), CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]), CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]), CLKDEV_CON_ID("du_fck", &mstp_clks[MSTP103]), CLKDEV_CON_ID("ether_fck", &mstp_clks[MSTP102]), }; int __init arch_clk_init(void) { int i, ret = 0; for (i = 0; i < ARRAY_SIZE(clks); i++) ret |= clk_register(clks[i]); for (i = 0; i < ARRAY_SIZE(lookups); i++) clkdev_add(&lookups[i]); if (!ret) ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks), &div4_table); if (!ret) ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR); return ret; }
gpl-2.0
dewadg/mako-kernel
arch/m68k/platform/coldfire/device.c
4424
6782
/* * device.c -- common ColdFire SoC device support * * (C) Copyright 2011, Greg Ungerer <gerg@uclinux.org> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/spi/spi.h> #include <linux/gpio.h> #include <asm/traps.h> #include <asm/coldfire.h> #include <asm/mcfsim.h> #include <asm/mcfuart.h> #include <asm/mcfqspi.h> /* * All current ColdFire parts contain from 2, 3 or 4 UARTS. */ static struct mcf_platform_uart mcf_uart_platform_data[] = { { .mapbase = MCFUART_BASE0, .irq = MCF_IRQ_UART0, }, { .mapbase = MCFUART_BASE1, .irq = MCF_IRQ_UART1, }, #ifdef MCFUART_BASE2 { .mapbase = MCFUART_BASE2, .irq = MCF_IRQ_UART2, }, #endif #ifdef MCFUART_BASE3 { .mapbase = MCFUART_BASE3, .irq = MCF_IRQ_UART3, }, #endif { }, }; static struct platform_device mcf_uart = { .name = "mcfuart", .id = 0, .dev.platform_data = mcf_uart_platform_data, }; #ifdef CONFIG_FEC /* * Some ColdFire cores contain the Fast Ethernet Controller (FEC) * block. It is Freescale's own hardware block. Some ColdFires * have 2 of these. 
*/ static struct resource mcf_fec0_resources[] = { { .start = MCFFEC_BASE0, .end = MCFFEC_BASE0 + MCFFEC_SIZE0 - 1, .flags = IORESOURCE_MEM, }, { .start = MCF_IRQ_FECRX0, .end = MCF_IRQ_FECRX0, .flags = IORESOURCE_IRQ, }, { .start = MCF_IRQ_FECTX0, .end = MCF_IRQ_FECTX0, .flags = IORESOURCE_IRQ, }, { .start = MCF_IRQ_FECENTC0, .end = MCF_IRQ_FECENTC0, .flags = IORESOURCE_IRQ, }, }; static struct platform_device mcf_fec0 = { .name = "fec", .id = 0, .num_resources = ARRAY_SIZE(mcf_fec0_resources), .resource = mcf_fec0_resources, }; #ifdef MCFFEC_BASE1 static struct resource mcf_fec1_resources[] = { { .start = MCFFEC_BASE1, .end = MCFFEC_BASE1 + MCFFEC_SIZE1 - 1, .flags = IORESOURCE_MEM, }, { .start = MCF_IRQ_FECRX1, .end = MCF_IRQ_FECRX1, .flags = IORESOURCE_IRQ, }, { .start = MCF_IRQ_FECTX1, .end = MCF_IRQ_FECTX1, .flags = IORESOURCE_IRQ, }, { .start = MCF_IRQ_FECENTC1, .end = MCF_IRQ_FECENTC1, .flags = IORESOURCE_IRQ, }, }; static struct platform_device mcf_fec1 = { .name = "fec", .id = 1, .num_resources = ARRAY_SIZE(mcf_fec1_resources), .resource = mcf_fec1_resources, }; #endif /* MCFFEC_BASE1 */ #endif /* CONFIG_FEC */ #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) /* * The ColdFire QSPI module is an SPI protocol hardware block used * on a number of different ColdFire CPUs. 
*/ static struct resource mcf_qspi_resources[] = { { .start = MCFQSPI_BASE, .end = MCFQSPI_BASE + MCFQSPI_SIZE - 1, .flags = IORESOURCE_MEM, }, { .start = MCF_IRQ_QSPI, .end = MCF_IRQ_QSPI, .flags = IORESOURCE_IRQ, }, }; static int mcf_cs_setup(struct mcfqspi_cs_control *cs_control) { int status; status = gpio_request(MCFQSPI_CS0, "MCFQSPI_CS0"); if (status) { pr_debug("gpio_request for MCFQSPI_CS0 failed\n"); goto fail0; } status = gpio_direction_output(MCFQSPI_CS0, 1); if (status) { pr_debug("gpio_direction_output for MCFQSPI_CS0 failed\n"); goto fail1; } status = gpio_request(MCFQSPI_CS1, "MCFQSPI_CS1"); if (status) { pr_debug("gpio_request for MCFQSPI_CS1 failed\n"); goto fail1; } status = gpio_direction_output(MCFQSPI_CS1, 1); if (status) { pr_debug("gpio_direction_output for MCFQSPI_CS1 failed\n"); goto fail2; } status = gpio_request(MCFQSPI_CS2, "MCFQSPI_CS2"); if (status) { pr_debug("gpio_request for MCFQSPI_CS2 failed\n"); goto fail2; } status = gpio_direction_output(MCFQSPI_CS2, 1); if (status) { pr_debug("gpio_direction_output for MCFQSPI_CS2 failed\n"); goto fail3; } #ifdef MCFQSPI_CS3 status = gpio_request(MCFQSPI_CS3, "MCFQSPI_CS3"); if (status) { pr_debug("gpio_request for MCFQSPI_CS3 failed\n"); goto fail3; } status = gpio_direction_output(MCFQSPI_CS3, 1); if (status) { pr_debug("gpio_direction_output for MCFQSPI_CS3 failed\n"); gpio_free(MCFQSPI_CS3); goto fail3; } #endif return 0; fail3: gpio_free(MCFQSPI_CS2); fail2: gpio_free(MCFQSPI_CS1); fail1: gpio_free(MCFQSPI_CS0); fail0: return status; } static void mcf_cs_teardown(struct mcfqspi_cs_control *cs_control) { #ifdef MCFQSPI_CS3 gpio_free(MCFQSPI_CS3); #endif gpio_free(MCFQSPI_CS2); gpio_free(MCFQSPI_CS1); gpio_free(MCFQSPI_CS0); } static void mcf_cs_select(struct mcfqspi_cs_control *cs_control, u8 chip_select, bool cs_high) { switch (chip_select) { case 0: gpio_set_value(MCFQSPI_CS0, cs_high); break; case 1: gpio_set_value(MCFQSPI_CS1, cs_high); break; case 2: gpio_set_value(MCFQSPI_CS2, 
cs_high); break; #ifdef MCFQSPI_CS3 case 3: gpio_set_value(MCFQSPI_CS3, cs_high); break; #endif } } static void mcf_cs_deselect(struct mcfqspi_cs_control *cs_control, u8 chip_select, bool cs_high) { switch (chip_select) { case 0: gpio_set_value(MCFQSPI_CS0, !cs_high); break; case 1: gpio_set_value(MCFQSPI_CS1, !cs_high); break; case 2: gpio_set_value(MCFQSPI_CS2, !cs_high); break; #ifdef MCFQSPI_CS3 case 3: gpio_set_value(MCFQSPI_CS3, !cs_high); break; #endif } } static struct mcfqspi_cs_control mcf_cs_control = { .setup = mcf_cs_setup, .teardown = mcf_cs_teardown, .select = mcf_cs_select, .deselect = mcf_cs_deselect, }; static struct mcfqspi_platform_data mcf_qspi_data = { .bus_num = 0, .num_chipselect = 4, .cs_control = &mcf_cs_control, }; static struct platform_device mcf_qspi = { .name = "mcfqspi", .id = 0, .num_resources = ARRAY_SIZE(mcf_qspi_resources), .resource = mcf_qspi_resources, .dev.platform_data = &mcf_qspi_data, }; #endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ static struct platform_device *mcf_devices[] __initdata = { &mcf_uart, #ifdef CONFIG_FEC &mcf_fec0, #ifdef MCFFEC_BASE1 &mcf_fec1, #endif #endif #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) &mcf_qspi, #endif }; /* * Some ColdFire UARTs let you set the IRQ line to use. */ static void __init mcf_uart_set_irq(void) { #ifdef MCFUART_UIVR /* UART0 interrupt setup */ writeb(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1, MCF_MBAR + MCFSIM_UART1ICR); writeb(MCF_IRQ_UART0, MCFUART_BASE0 + MCFUART_UIVR); mcf_mapirq2imr(MCF_IRQ_UART0, MCFINTC_UART0); /* UART1 interrupt setup */ writeb(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI2, MCF_MBAR + MCFSIM_UART2ICR); writeb(MCF_IRQ_UART1, MCFUART_BASE1 + MCFUART_UIVR); mcf_mapirq2imr(MCF_IRQ_UART1, MCFINTC_UART1); #endif } static int __init mcf_init_devices(void) { mcf_uart_set_irq(); platform_add_devices(mcf_devices, ARRAY_SIZE(mcf_devices)); return 0; } arch_initcall(mcf_init_devices);
gpl-2.0
bgcngm/802Xtreem
drivers/mtd/nand/diskonchip.c
4936
50687
/* * drivers/mtd/nand/diskonchip.c * * (C) 2003 Red Hat, Inc. * (C) 2004 Dan Brown <dan_brown@ieee.org> * (C) 2004 Kalev Lember <kalev@smartlink.ee> * * Author: David Woodhouse <dwmw2@infradead.org> * Additional Diskonchip 2000 and Millennium support by Dan Brown <dan_brown@ieee.org> * Diskonchip Millennium Plus support by Kalev Lember <kalev@smartlink.ee> * * Error correction code lifted from the old docecc code * Author: Fabrice Bellard (fabrice.bellard@netgem.com) * Copyright (C) 2000 Netgem S.A. * converted to the generic Reed-Solomon library by Thomas Gleixner <tglx@linutronix.de> * * Interface to generic NAND code for M-Systems DiskOnChip devices */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/rslib.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <asm/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/doc2000.h> #include <linux/mtd/partitions.h> #include <linux/mtd/inftl.h> #include <linux/module.h> /* Where to look for the devices? */ #ifndef CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS #define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0 #endif static unsigned long __initdata doc_locations[] = { #if defined (__alpha__) || defined(__i386__) || defined(__x86_64__) #ifdef CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000, 0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000, 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000, 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000, 0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000, #else /* CONFIG_MTD_DOCPROBE_HIGH */ 0xc8000, 0xca000, 0xcc000, 0xce000, 0xd0000, 0xd2000, 0xd4000, 0xd6000, 0xd8000, 0xda000, 0xdc000, 0xde000, 0xe0000, 0xe2000, 0xe4000, 0xe6000, 0xe8000, 0xea000, 0xec000, 0xee000, #endif /* CONFIG_MTD_DOCPROBE_HIGH */ #else #warning Unknown architecture for DiskOnChip. 
No default probe locations defined #endif 0xffffffff }; static struct mtd_info *doclist = NULL; struct doc_priv { void __iomem *virtadr; unsigned long physadr; u_char ChipID; u_char CDSNControl; int chips_per_floor; /* The number of chips detected on each floor */ int curfloor; int curchip; int mh0_page; int mh1_page; struct mtd_info *nextdoc; }; /* This is the syndrome computed by the HW ecc generator upon reading an empty page, one with all 0xff for data and stored ecc code. */ static u_char empty_read_syndrome[6] = { 0x26, 0xff, 0x6d, 0x47, 0x73, 0x7a }; /* This is the ecc value computed by the HW ecc generator upon writing an empty page, one with all 0xff for data. */ static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 }; #define INFTL_BBT_RESERVED_BLOCKS 4 #define DoC_is_MillenniumPlus(doc) ((doc)->ChipID == DOC_ChipID_DocMilPlus16 || (doc)->ChipID == DOC_ChipID_DocMilPlus32) #define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil) #define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k) static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask); static void doc200x_select_chip(struct mtd_info *mtd, int chip); static int debug = 0; module_param(debug, int, 0); static int try_dword = 1; module_param(try_dword, int, 0); static int no_ecc_failures = 0; module_param(no_ecc_failures, int, 0); static int no_autopart = 0; module_param(no_autopart, int, 0); static int show_firmware_partition = 0; module_param(show_firmware_partition, int, 0); #ifdef CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE static int inftl_bbt_write = 1; #else static int inftl_bbt_write = 0; #endif module_param(inftl_bbt_write, int, 0); static unsigned long doc_config_location = CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS; module_param(doc_config_location, ulong, 0); MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip"); /* Sector size for HW ECC */ #define SECTOR_SIZE 512 /* The sector bytes are packed into 
NB_DATA 10 bit words */ #define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / 10) /* Number of roots */ #define NROOTS 4 /* First consective root */ #define FCR 510 /* Number of symbols */ #define NN 1023 /* the Reed Solomon control structure */ static struct rs_control *rs_decoder; /* * The HW decoder in the DoC ASIC's provides us a error syndrome, * which we must convert to a standard syndrome usable by the generic * Reed-Solomon library code. * * Fabrice Bellard figured this out in the old docecc code. I added * some comments, improved a minor bit and converted it to make use * of the generic Reed-Solomon library. tglx */ static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc) { int i, j, nerr, errpos[8]; uint8_t parity; uint16_t ds[4], s[5], tmp, errval[8], syn[4]; memset(syn, 0, sizeof(syn)); /* Convert the ecc bytes into words */ ds[0] = ((ecc[4] & 0xff) >> 0) | ((ecc[5] & 0x03) << 8); ds[1] = ((ecc[5] & 0xfc) >> 2) | ((ecc[2] & 0x0f) << 6); ds[2] = ((ecc[2] & 0xf0) >> 4) | ((ecc[3] & 0x3f) << 4); ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2); parity = ecc[1]; /* Initialize the syndrome buffer */ for (i = 0; i < NROOTS; i++) s[i] = ds[0]; /* * Evaluate * s[i] = ds[3]x^3 + ds[2]x^2 + ds[1]x^1 + ds[0] * where x = alpha^(FCR + i) */ for (j = 1; j < NROOTS; j++) { if (ds[j] == 0) continue; tmp = rs->index_of[ds[j]]; for (i = 0; i < NROOTS; i++) s[i] ^= rs->alpha_to[rs_modnn(rs, tmp + (FCR + i) * j)]; } /* Calc syn[i] = s[i] / alpha^(v + i) */ for (i = 0; i < NROOTS; i++) { if (s[i]) syn[i] = rs_modnn(rs, rs->index_of[s[i]] + (NN - FCR - i)); } /* Call the decoder library */ nerr = decode_rs16(rs, NULL, NULL, 1019, syn, 0, errpos, 0, errval); /* Incorrectable errors ? */ if (nerr < 0) return nerr; /* * Correct the errors. The bitpositions are a bit of magic, * but they are given by the design of the de/encoder circuit * in the DoC ASIC's. 
*/ for (i = 0; i < nerr; i++) { int index, bitpos, pos = 1015 - errpos[i]; uint8_t val; if (pos >= NB_DATA && pos < 1019) continue; if (pos < NB_DATA) { /* extract bit position (MSB first) */ pos = 10 * (NB_DATA - 1 - pos) - 6; /* now correct the following 10 bits. At most two bytes can be modified since pos is even */ index = (pos >> 3) ^ 1; bitpos = pos & 7; if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) { val = (uint8_t) (errval[i] >> (2 + bitpos)); parity ^= val; if (index < SECTOR_SIZE) data[index] ^= val; } index = ((pos >> 3) + 1) ^ 1; bitpos = (bitpos + 10) & 7; if (bitpos == 0) bitpos = 8; if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) { val = (uint8_t) (errval[i] << (8 - bitpos)); parity ^= val; if (index < SECTOR_SIZE) data[index] ^= val; } } } /* If the parity is wrong, no rescue possible */ return parity ? -EBADMSG : nerr; } static void DoC_Delay(struct doc_priv *doc, unsigned short cycles) { volatile char dummy; int i; for (i = 0; i < cycles; i++) { if (DoC_is_Millennium(doc)) dummy = ReadDOC(doc->virtadr, NOP); else if (DoC_is_MillenniumPlus(doc)) dummy = ReadDOC(doc->virtadr, Mplus_NOP); else dummy = ReadDOC(doc->virtadr, DOCStatus); } } #define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1) /* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */ static int _DoC_WaitReady(struct doc_priv *doc) { void __iomem *docptr = doc->virtadr; unsigned long timeo = jiffies + (HZ * 10); if (debug) printk("_DoC_WaitReady...\n"); /* Out-of-line routine to wait for chip response */ if (DoC_is_MillenniumPlus(doc)) { while ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) { if (time_after(jiffies, timeo)) { printk("_DoC_WaitReady timed out.\n"); return -EIO; } udelay(1); cond_resched(); } } else { while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) { if (time_after(jiffies, timeo)) { printk("_DoC_WaitReady timed out.\n"); return -EIO; } udelay(1); 
cond_resched(); } } return 0; } static inline int DoC_WaitReady(struct doc_priv *doc) { void __iomem *docptr = doc->virtadr; int ret = 0; if (DoC_is_MillenniumPlus(doc)) { DoC_Delay(doc, 4); if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) /* Call the out-of-line routine to wait */ ret = _DoC_WaitReady(doc); } else { DoC_Delay(doc, 4); if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) /* Call the out-of-line routine to wait */ ret = _DoC_WaitReady(doc); DoC_Delay(doc, 2); } if (debug) printk("DoC_WaitReady OK\n"); return ret; } static void doc2000_write_byte(struct mtd_info *mtd, u_char datum) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; if (debug) printk("write_byte %02x\n", datum); WriteDOC(datum, docptr, CDSNSlowIO); WriteDOC(datum, docptr, 2k_CDSN_IO); } static u_char doc2000_read_byte(struct mtd_info *mtd) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; u_char ret; ReadDOC(docptr, CDSNSlowIO); DoC_Delay(doc, 2); ret = ReadDOC(docptr, 2k_CDSN_IO); if (debug) printk("read_byte returns %02x\n", ret); return ret; } static void doc2000_writebuf(struct mtd_info *mtd, const u_char *buf, int len) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; if (debug) printk("writebuf of %d bytes: ", len); for (i = 0; i < len; i++) { WriteDOC_(buf[i], docptr, DoC_2k_CDSN_IO + i); if (debug && i < 16) printk("%02x ", buf[i]); } if (debug) printk("\n"); } static void doc2000_readbuf(struct mtd_info *mtd, u_char *buf, int len) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; if (debug) printk("readbuf of %d bytes: ", len); for (i = 0; i < len; i++) { buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i); } } static void doc2000_readbuf_dword(struct mtd_info *mtd, u_char *buf, int len) { struct 
nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; if (debug) printk("readbuf_dword of %d bytes: ", len); if (unlikely((((unsigned long)buf) | len) & 3)) { for (i = 0; i < len; i++) { *(uint8_t *) (&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i); } } else { for (i = 0; i < len; i += 4) { *(uint32_t *) (&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i); } } } static int doc2000_verifybuf(struct mtd_info *mtd, const u_char *buf, int len) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; for (i = 0; i < len; i++) if (buf[i] != ReadDOC(docptr, 2k_CDSN_IO)) return -EFAULT; return 0; } static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; uint16_t ret; doc200x_select_chip(mtd, nr); doc200x_hwcontrol(mtd, NAND_CMD_READID, NAND_CTRL_CLE | NAND_CTRL_CHANGE); doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE); doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); /* We can't use dev_ready here, but at least we wait for the * command to complete */ udelay(50); ret = this->read_byte(mtd) << 8; ret |= this->read_byte(mtd); if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) { /* First chip probe. 
See if we get same results by 32-bit access */ union { uint32_t dword; uint8_t byte[4]; } ident; void __iomem *docptr = doc->virtadr; doc200x_hwcontrol(mtd, NAND_CMD_READID, NAND_CTRL_CLE | NAND_CTRL_CHANGE); doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE); doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); udelay(50); ident.dword = readl(docptr + DoC_2k_CDSN_IO); if (((ident.byte[0] << 8) | ident.byte[1]) == ret) { printk(KERN_INFO "DiskOnChip 2000 responds to DWORD access\n"); this->read_buf = &doc2000_readbuf_dword; } } return ret; } static void __init doc2000_count_chips(struct mtd_info *mtd) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; uint16_t mfrid; int i; /* Max 4 chips per floor on DiskOnChip 2000 */ doc->chips_per_floor = 4; /* Find out what the first chip is */ mfrid = doc200x_ident_chip(mtd, 0); /* Find how many chips in each floor. */ for (i = 1; i < 4; i++) { if (doc200x_ident_chip(mtd, i) != mfrid) break; } doc->chips_per_floor = i; printk(KERN_DEBUG "Detected %d chips per floor.\n", i); } static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this) { struct doc_priv *doc = this->priv; int status; DoC_WaitReady(doc); this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); DoC_WaitReady(doc); status = (int)this->read_byte(mtd); return status; } static void doc2001_write_byte(struct mtd_info *mtd, u_char datum) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; WriteDOC(datum, docptr, CDSNSlowIO); WriteDOC(datum, docptr, Mil_CDSN_IO); WriteDOC(datum, docptr, WritePipeTerm); } static u_char doc2001_read_byte(struct mtd_info *mtd) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; //ReadDOC(docptr, CDSNSlowIO); /* 11.4.5 -- delay twice to allow extended length cycle */ DoC_Delay(doc, 2); ReadDOC(docptr, ReadPipeInit); //return ReadDOC(docptr, Mil_CDSN_IO); return ReadDOC(docptr, 
LastDataRead); } static void doc2001_writebuf(struct mtd_info *mtd, const u_char *buf, int len) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; for (i = 0; i < len; i++) WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i); /* Terminate write pipeline */ WriteDOC(0x00, docptr, WritePipeTerm); } static void doc2001_readbuf(struct mtd_info *mtd, u_char *buf, int len) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; /* Start read pipeline */ ReadDOC(docptr, ReadPipeInit); for (i = 0; i < len - 1; i++) buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff)); /* Terminate read pipeline */ buf[i] = ReadDOC(docptr, LastDataRead); } static int doc2001_verifybuf(struct mtd_info *mtd, const u_char *buf, int len) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; /* Start read pipeline */ ReadDOC(docptr, ReadPipeInit); for (i = 0; i < len - 1; i++) if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) { ReadDOC(docptr, LastDataRead); return i; } if (buf[i] != ReadDOC(docptr, LastDataRead)) return i; return 0; } static u_char doc2001plus_read_byte(struct mtd_info *mtd) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; u_char ret; ReadDOC(docptr, Mplus_ReadPipeInit); ReadDOC(docptr, Mplus_ReadPipeInit); ret = ReadDOC(docptr, Mplus_LastDataRead); if (debug) printk("read_byte returns %02x\n", ret); return ret; } static void doc2001plus_writebuf(struct mtd_info *mtd, const u_char *buf, int len) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; if (debug) printk("writebuf of %d bytes: ", len); for (i = 0; i < len; i++) { WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i); if (debug && i < 16) printk("%02x ", buf[i]); } if (debug) printk("\n"); } static void 
doc2001plus_readbuf(struct mtd_info *mtd, u_char *buf, int len) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; if (debug) printk("readbuf of %d bytes: ", len); /* Start read pipeline */ ReadDOC(docptr, Mplus_ReadPipeInit); ReadDOC(docptr, Mplus_ReadPipeInit); for (i = 0; i < len - 2; i++) { buf[i] = ReadDOC(docptr, Mil_CDSN_IO); if (debug && i < 16) printk("%02x ", buf[i]); } /* Terminate read pipeline */ buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead); if (debug && i < 16) printk("%02x ", buf[len - 2]); buf[len - 1] = ReadDOC(docptr, Mplus_LastDataRead); if (debug && i < 16) printk("%02x ", buf[len - 1]); if (debug) printk("\n"); } static int doc2001plus_verifybuf(struct mtd_info *mtd, const u_char *buf, int len) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int i; if (debug) printk("verifybuf of %d bytes: ", len); /* Start read pipeline */ ReadDOC(docptr, Mplus_ReadPipeInit); ReadDOC(docptr, Mplus_ReadPipeInit); for (i = 0; i < len - 2; i++) if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) { ReadDOC(docptr, Mplus_LastDataRead); ReadDOC(docptr, Mplus_LastDataRead); return i; } if (buf[len - 2] != ReadDOC(docptr, Mplus_LastDataRead)) return len - 2; if (buf[len - 1] != ReadDOC(docptr, Mplus_LastDataRead)) return len - 1; return 0; } static void doc2001plus_select_chip(struct mtd_info *mtd, int chip) { struct nand_chip *this = mtd->priv; struct doc_priv *doc = this->priv; void __iomem *docptr = doc->virtadr; int floor = 0; if (debug) printk("select chip (%d)\n", chip); if (chip == -1) { /* Disable flash internally */ WriteDOC(0, docptr, Mplus_FlashSelect); return; } floor = chip / doc->chips_per_floor; chip -= (floor * doc->chips_per_floor); /* Assert ChipEnable and deassert WriteProtect */ WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect); this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); doc->curchip = chip; doc->curfloor = floor; } 
/*
 * doc200x_select_chip - select a flash chip on DiskOnChip 2000/Millennium.
 * Translates the flat chip number into floor+chip, deasserting CE around
 * the change per datasheet section 11.4.4.
 */
static void doc200x_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	int floor = 0;

	if (debug)
		printk("select chip (%d)\n", chip);

	if (chip == -1)
		return;

	floor = chip / doc->chips_per_floor;
	chip -= (floor * doc->chips_per_floor);

	/* 11.4.4 -- deassert CE before changing chip */
	doc200x_hwcontrol(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);

	WriteDOC(floor, docptr, FloorSelect);
	WriteDOC(chip, docptr, CDSNDeviceSelect);

	doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);

	doc->curchip = chip;
	doc->curfloor = floor;
}

/* Control bits the NAND layer is allowed to toggle through hwcontrol. */
#define CDSN_CTRL_MSK (CDSN_CTRL_CE | CDSN_CTRL_CLE | CDSN_CTRL_ALE)

/*
 * doc200x_hwcontrol - NAND cmd_ctrl hook for DoC 2000/Millennium.
 * Updates the cached CDSNControl latch when NAND_CTRL_CHANGE is set
 * (with the mandated 4-NOP settle delay), then writes the command/address
 * byte through the chip-specific write-byte helper.
 */
static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;

	if (ctrl & NAND_CTRL_CHANGE) {
		doc->CDSNControl &= ~CDSN_CTRL_MSK;
		doc->CDSNControl |= ctrl & CDSN_CTRL_MSK;
		if (debug)
			printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl);
		WriteDOC(doc->CDSNControl, docptr, CDSNControl);
		/* 11.4.3 -- 4 NOPs after CSDNControl write */
		DoC_Delay(doc, 4);
	}
	if (cmd != NAND_CMD_NONE) {
		if (DoC_is_2000(doc))
			doc2000_write_byte(mtd, cmd);
		else
			doc2001_write_byte(mtd, cmd);
	}
}

/*
 * doc2001plus_command - full cmdfunc replacement for the Millennium Plus.
 * The Plus has no cmd_ctrl interface; commands, addresses and the ALE/CLE
 * handshakes all go through dedicated registers, and every command or
 * address burst must be followed by two WritePipeTerm accesses to flush
 * the write pipeline.
 */
static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;

	/*
	 * Must terminate write pipeline before sending any commands
	 * to the device.
	 */
	if (command == NAND_CMD_PAGEPROG) {
		WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
		WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
	}

	/*
	 * Write out the command to the device.  For SEQIN the matching
	 * read command is issued first to position the device's internal
	 * pointer (OOB / lower half / upper half of the page).
	 */
	if (command == NAND_CMD_SEQIN) {
		int readcmd;

		if (column >= mtd->writesize) {
			/* OOB area */
			column -= mtd->writesize;
			readcmd = NAND_CMD_READOOB;
		} else if (column < 256) {
			/* First 256 bytes --> READ0 */
			readcmd = NAND_CMD_READ0;
		} else {
			column -= 256;
			readcmd = NAND_CMD_READ1;
		}
		WriteDOC(readcmd, docptr, Mplus_FlashCmd);
	}
	WriteDOC(command, docptr, Mplus_FlashCmd);
	WriteDOC(0, docptr, Mplus_WritePipeTerm);
	WriteDOC(0, docptr, Mplus_WritePipeTerm);

	if (column != -1 || page_addr != -1) {
		/* Serially input address */
		if (column != -1) {
			/* Adjust columns for 16 bit buswidth */
			if (this->options & NAND_BUSWIDTH_16)
				column >>= 1;
			WriteDOC(column, docptr, Mplus_FlashAddress);
		}
		if (page_addr != -1) {
			WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
			WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
			/* One more address cycle for higher density devices */
			if (this->chipsize & 0x0c000000) {
				WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
				printk("high density\n");
			}
		}
		WriteDOC(0, docptr, Mplus_WritePipeTerm);
		WriteDOC(0, docptr, Mplus_WritePipeTerm);
		/* deassert ALE */
		if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 ||
		    command == NAND_CMD_READOOB || command == NAND_CMD_READID)
			WriteDOC(0, docptr, Mplus_FlashControl);
	}

	/*
	 * program and erase have their own busy handlers
	 * status and sequential in needs no delay
	 */
	switch (command) {

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
	case NAND_CMD_SEQIN:
	case NAND_CMD_STATUS:
		return;

	case NAND_CMD_RESET:
		if (this->dev_ready)
			break;
		/* No busy pin: delay, then poll the status register until
		   the ready bit (0x40) comes up. */
		udelay(this->chip_delay);
		WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd);
		WriteDOC(0, docptr, Mplus_WritePipeTerm);
		WriteDOC(0, docptr, Mplus_WritePipeTerm);
		while (!(this->read_byte(mtd) & 0x40)) ;
		return;

		/* This applies to read commands */
	default:
		/*
		 * If we don't have access to the busy pin, we apply the given
		 * command delay
		 */
		if (!this->dev_ready) {
			udelay(this->chip_delay);
			return;
		}
	}

	/* Apply this short delay always to ensure that we do wait tWB in
	 * any case on any machine. */
	ndelay(100);
	/* wait until command is processed */
	while (!this->dev_ready(mtd)) ;
}

/*
 * doc200x_dev_ready - NAND dev_ready hook; returns non-zero when the flash
 * ready/busy line reads ready.  Implements the datasheet's 11.4.2 NOP
 * requirements before (and, on the classic parts, after) sampling FR/B#.
 */
static int doc200x_dev_ready(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;

	if (DoC_is_MillenniumPlus(doc)) {
		/* 11.4.2 -- must NOP four times before checking FR/B# */
		DoC_Delay(doc, 4);
		if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
			if (debug)
				printk("not ready\n");
			return 0;
		}
		if (debug)
			printk("was ready\n");
		return 1;
	} else {
		/* 11.4.2 -- must NOP four times before checking FR/B# */
		DoC_Delay(doc, 4);
		if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
			if (debug)
				printk("not ready\n");
			return 0;
		}
		/* 11.4.2 -- Must NOP twice if it's ready */
		DoC_Delay(doc, 2);
		if (debug)
			printk("was ready\n");
		return 1;
	}
}

static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
{
	/* This is our last resort if we couldn't find or create a BBT.  Just
	   pretend all blocks are good.
	 */
	return 0;
}

/*
 * doc200x_enable_hwecc - arm the DoC 2000/Millennium hardware ECC engine
 * for a read or a write pass (reset, then enable with the R/W direction
 * bit as appropriate).
 */
static void doc200x_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;

	/* Prime the ECC engine */
	switch (mode) {
	case NAND_ECC_READ:
		WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
		WriteDOC(DOC_ECC_EN, docptr, ECCConf);
		break;
	case NAND_ECC_WRITE:
		WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
		WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
		break;
	}
}

/*
 * doc2001plus_enable_hwecc - same as doc200x_enable_hwecc but using the
 * Millennium Plus register layout.
 */
static void doc2001plus_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;

	/* Prime the ECC engine */
	switch (mode) {
	case NAND_ECC_READ:
		WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
		WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
		break;
	case NAND_ECC_WRITE:
		WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
		WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
		break;
	}
}

/* This code is only called on write */
/*
 * doc200x_calculate_ecc - read the 6 ECC syndrome bytes computed by the
 * hardware for the page just written, after flushing the data pipeline
 * with the chip-appropriate dummy accesses.  Always returns 0.
 */
static int doc200x_calculate_ecc(struct mtd_info *mtd, const u_char *dat, unsigned char *ecc_code)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	int i;
	int emptymatch = 1;

	/* flush the pipeline */
	if (DoC_is_2000(doc)) {
		WriteDOC(doc->CDSNControl & ~CDSN_CTRL_FLASH_IO, docptr, CDSNControl);
		WriteDOC(0, docptr, 2k_CDSN_IO);
		WriteDOC(0, docptr, 2k_CDSN_IO);
		WriteDOC(0, docptr, 2k_CDSN_IO);
		WriteDOC(doc->CDSNControl, docptr, CDSNControl);
	} else if (DoC_is_MillenniumPlus(doc)) {
		WriteDOC(0, docptr, Mplus_NOP);
		WriteDOC(0, docptr, Mplus_NOP);
		WriteDOC(0, docptr, Mplus_NOP);
	} else {
		WriteDOC(0, docptr, NOP);
		WriteDOC(0, docptr, NOP);
		WriteDOC(0, docptr, NOP);
	}

	for (i = 0; i < 6; i++) {
		if (DoC_is_MillenniumPlus(doc))
			ecc_code[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
		else
			ecc_code[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
		if (ecc_code[i] != empty_write_ecc[i])
			emptymatch = 0;
	}
	if (DoC_is_MillenniumPlus(doc))
		WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
	else
		WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
#if 0
	/* If emptymatch=1, we might have an all-0xff data buffer.  Check. */
	if (emptymatch) {
		/* Note: this somewhat expensive test should not be triggered
		   often.  It could be optimized away by examining the data in
		   the writebuf routine, and remembering the result. */
		for (i = 0; i < 512; i++) {
			if (dat[i] == 0xff)
				continue;
			emptymatch = 0;
			break;
		}
	}
	/* If emptymatch still =1, we do have an all-0xff data buffer.
	   Return all-0xff ecc value instead of the computed one, so
	   it'll look just like a freshly-erased page. */
	if (emptymatch)
		memset(ecc_code, 0xff, 6);
#endif
	return 0;
}

/*
 * doc200x_correct_data - NAND ecc.correct hook.
 * Flushes the read pipeline, checks the ECC error flag (bit 7 of the
 * status/conf register), and on error runs the Reed-Solomon decoder —
 * unless the syndrome, stored ECC and data all match a freshly-erased
 * (all-0xff) page, in which case the "error" is suppressed.  Returns the
 * number of corrected bit errors, 0, or a negative ECC failure code from
 * doc_ecc_decode (optionally suppressed by the no_ecc_failures module
 * parameter).  @isnull is unused.
 */
static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *isnull)
{
	int i, ret = 0;
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	uint8_t calc_ecc[6];
	volatile u_char dummy;	/* volatile: the reads are for their side effect */
	int emptymatch = 1;

	/* flush the pipeline */
	if (DoC_is_2000(doc)) {
		dummy = ReadDOC(docptr, 2k_ECCStatus);
		dummy = ReadDOC(docptr, 2k_ECCStatus);
		dummy = ReadDOC(docptr, 2k_ECCStatus);
	} else if (DoC_is_MillenniumPlus(doc)) {
		dummy = ReadDOC(docptr, Mplus_ECCConf);
		dummy = ReadDOC(docptr, Mplus_ECCConf);
		dummy = ReadDOC(docptr, Mplus_ECCConf);
	} else {
		dummy = ReadDOC(docptr, ECCConf);
		dummy = ReadDOC(docptr, ECCConf);
		dummy = ReadDOC(docptr, ECCConf);
	}

	/* Error occurred ? */
	if (dummy & 0x80) {
		for (i = 0; i < 6; i++) {
			if (DoC_is_MillenniumPlus(doc))
				calc_ecc[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
			else
				calc_ecc[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
			if (calc_ecc[i] != empty_read_syndrome[i])
				emptymatch = 0;
		}
		/* If emptymatch=1, the read syndrome is consistent with an
		   all-0xff data and stored ecc block.  Check the stored ecc. */
		if (emptymatch) {
			for (i = 0; i < 6; i++) {
				if (read_ecc[i] == 0xff)
					continue;
				emptymatch = 0;
				break;
			}
		}
		/* If emptymatch still =1, check the data block. */
		if (emptymatch) {
			/* Note: this somewhat expensive test should not be
			   triggered often.  It could be optimized away by
			   examining the data in the readbuf routine, and
			   remembering the result. */
			for (i = 0; i < 512; i++) {
				if (dat[i] == 0xff)
					continue;
				emptymatch = 0;
				break;
			}
		}
		/* If emptymatch still =1, this is almost certainly a
		   freshly-erased block, in which case the ECC will not come
		   out right.  We'll suppress the error and tell the caller
		   everything's OK.  Because it is. */
		if (!emptymatch)
			ret = doc_ecc_decode(rs_decoder, dat, calc_ecc);
		if (ret > 0)
			printk(KERN_ERR "doc200x_correct_data corrected %d errors\n", ret);
	}
	if (DoC_is_MillenniumPlus(doc))
		WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
	else
		WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
	if (no_ecc_failures && mtd_is_eccerr(ret)) {
		printk(KERN_ERR "suppressing ECC failure\n");
		ret = 0;
	}
	return ret;
}

//u_char mydatabuf[528];

/* The strange out-of-order .oobfree list below is a (possibly unneeded)
 * attempt to retain compatibility.  It used to read:
 *	.oobfree = { {8, 8} }
 * Since that leaves two bytes unusable, it was changed.  But the following
 * scheme might affect existing jffs2 installs by moving the cleanmarker:
 *	.oobfree = { {6, 10} }
 * jffs2 seems to handle the above gracefully, but the current scheme seems
 * safer.  The only problem with it is that any code that parses oobfree must
 * be able to handle out-of-order segments.
 */
static struct nand_ecclayout doc200x_oobinfo = {
	.eccbytes = 6,
	.eccpos = {0, 1, 2, 3, 4, 5},
	.oobfree = {{8, 8}, {6, 2}}
};

/* Find the (I)NFTL Media Header, and optionally also the mirror media
   header.  On successful return, buf will contain a copy of the media
   header for further processing.  id is the string to scan for, and will
   presumably be either "ANAND" or "BNAND".  If findmirror=1, also look for
   the mirror media header.  The page #s of the found media headers are
   placed in mh0_page and mh1_page in the DOC private structure.
 */
static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const char *id, int findmirror)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	unsigned offs;
	int ret;
	size_t retlen;

	/* Scan one page per erase block for the media header signature. */
	for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
		ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
		if (retlen != mtd->writesize)
			continue;
		if (ret) {
			/* ECC errors are reported but not fatal here. */
			printk(KERN_WARNING "ECC error scanning DOC at 0x%x\n", offs);
		}
		if (memcmp(buf, id, 6))
			continue;
		printk(KERN_INFO "Found DiskOnChip %s Media Header at 0x%x\n", id, offs);
		if (doc->mh0_page == -1) {
			doc->mh0_page = offs >> this->page_shift;
			if (!findmirror)
				return 1;
			continue;
		}
		doc->mh1_page = offs >> this->page_shift;
		return 2;
	}
	if (doc->mh0_page == -1) {
		printk(KERN_WARNING "DiskOnChip %s Media Header not found.\n", id);
		return 0;
	}
	/* Only one mediaheader was found.  We want buf to contain a
	   mediaheader on return, so we'll have to re-read the one we found. */
	offs = doc->mh0_page << this->page_shift;
	ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
	if (retlen != mtd->writesize) {
		/* Insanity.  Give up. */
		printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
		return 0;
	}
	return 1;
}

/*
 * nftl_partscan - locate the NFTL media header and derive the MTD
 * partition layout from it.  Fills at most 2 entries of @parts and
 * returns the number of partitions, or 0 on failure.
 */
static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	int ret = 0;
	u_char *buf;
	struct NFTLMediaHeader *mh;
	const unsigned psize = 1 << this->page_shift;
	int numparts = 0;
	unsigned blocks, maxblocks;
	int offs, numheaders;

	buf = kmalloc(mtd->writesize, GFP_KERNEL);
	if (!buf) {
		printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
		return 0;
	}
	if (!(numheaders = find_media_headers(mtd, buf, "ANAND", 1)))
		goto out;
	mh = (struct NFTLMediaHeader *)buf;

	/* Media header fields are stored little-endian on flash. */
	le16_to_cpus(&mh->NumEraseUnits);
	le16_to_cpus(&mh->FirstPhysicalEUN);
	le32_to_cpus(&mh->FormattedSize);

	printk(KERN_INFO "    DataOrgID        = %s\n"
			 "    NumEraseUnits    = %d\n"
			 "    FirstPhysicalEUN = %d\n"
			 "    FormattedSize    = %d\n"
			 "    UnitSizeFactor   = %d\n",
		mh->DataOrgID, mh->NumEraseUnits,
		mh->FirstPhysicalEUN, mh->FormattedSize,
		mh->UnitSizeFactor);

	blocks = mtd->size >> this->phys_erase_shift;
	maxblocks = min(32768U, mtd->erasesize - psize);

	if (mh->UnitSizeFactor == 0x00) {
		/* Auto-determine UnitSizeFactor.  The constraints are:
		   - There can be at most 32768 virtual blocks.
		   - There can be at most (virtual block size - page size)
		     virtual blocks (because MediaHeader+BBT must fit in 1).
		 */
		mh->UnitSizeFactor = 0xff;
		while (blocks > maxblocks) {
			blocks >>= 1;
			maxblocks = min(32768U, (maxblocks << 1) + psize);
			mh->UnitSizeFactor--;
		}
		printk(KERN_WARNING "UnitSizeFactor=0x00 detected.  Correct value is assumed to be 0x%02x.\n", mh->UnitSizeFactor);
	}

	/* NOTE: The lines below modify internal variables of the NAND and MTD
	   layers; variables which have already been configured by nand_scan.
	   Unfortunately, we didn't know before this point what these values
	   should be.  Thus, this code is somewhat dependent on the exact
	   implementation of the NAND layer.  */
	if (mh->UnitSizeFactor != 0xff) {
		this->bbt_erase_shift += (0xff - mh->UnitSizeFactor);
		mtd->erasesize <<= (0xff - mh->UnitSizeFactor);
		printk(KERN_INFO "Setting virtual erase size to %d\n", mtd->erasesize);
		blocks = mtd->size >> this->bbt_erase_shift;
		maxblocks = min(32768U, mtd->erasesize - psize);
	}

	if (blocks > maxblocks) {
		printk(KERN_ERR "UnitSizeFactor of 0x%02x is inconsistent with device size.  Aborting.\n", mh->UnitSizeFactor);
		goto out;
	}

	/* Skip past the media headers. */
	offs = max(doc->mh0_page, doc->mh1_page);
	offs <<= this->page_shift;
	offs += mtd->erasesize;

	if (show_firmware_partition == 1) {
		parts[0].name = " DiskOnChip Firmware / Media Header partition";
		parts[0].offset = 0;
		parts[0].size = offs;
		numparts = 1;
	}

	parts[numparts].name = " DiskOnChip BDTL partition";
	parts[numparts].offset = offs;
	parts[numparts].size = (mh->NumEraseUnits - numheaders) << this->bbt_erase_shift;

	offs += parts[numparts].size;
	numparts++;

	if (offs < mtd->size) {
		parts[numparts].name = " DiskOnChip Remainder partition";
		parts[numparts].offset = offs;
		parts[numparts].size = mtd->size - offs;
		numparts++;
	}

	ret = numparts;
 out:
	kfree(buf);
	return ret;
}

/* This is a stripped-down copy of the code in inftlmount.c */
/*
 * inftl_partscan - locate the INFTL media header and derive the MTD
 * partition layout from its (up to 4) partition records.  Fills at most
 * 5 entries of @parts and returns the partition count, or 0 on failure.
 */
static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	int ret = 0;
	u_char *buf;
	struct INFTLMediaHeader *mh;
	struct INFTLPartition *ip;
	int numparts = 0;
	int blocks;
	int vshift, lastvunit = 0;
	int i;
	int end = mtd->size;

	/* Reserve space for a writeable BBT at the end of the device. */
	if (inftl_bbt_write)
		end -= (INFTL_BBT_RESERVED_BLOCKS << this->phys_erase_shift);

	buf = kmalloc(mtd->writesize, GFP_KERNEL);
	if (!buf) {
		printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
		return 0;
	}

	if (!find_media_headers(mtd, buf, "BNAND", 0))
		goto out;
	/* INFTL keeps the mirror header 4KiB after the primary. */
	doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift);
	mh = (struct INFTLMediaHeader *)buf;

	le32_to_cpus(&mh->NoOfBootImageBlocks);
	le32_to_cpus(&mh->NoOfBinaryPartitions);
	le32_to_cpus(&mh->NoOfBDTLPartitions);
	le32_to_cpus(&mh->BlockMultiplierBits);
	le32_to_cpus(&mh->FormatFlags);
	le32_to_cpus(&mh->PercentUsed);

	printk(KERN_INFO "    bootRecordID          = %s\n"
			 "    NoOfBootImageBlocks   = %d\n"
			 "    NoOfBinaryPartitions  = %d\n"
			 "    NoOfBDTLPartitions    = %d\n"
			 "    BlockMultiplerBits    = %d\n"
			 "    FormatFlgs            = %d\n"
			 "    OsakVersion           = %d.%d.%d.%d\n"
			 "    PercentUsed           = %d\n",
		mh->bootRecordID, mh->NoOfBootImageBlocks,
		mh->NoOfBinaryPartitions, mh->NoOfBDTLPartitions,
		mh->BlockMultiplierBits, mh->FormatFlags,
		((unsigned char *) &mh->OsakVersion)[0] & 0xf,
		((unsigned char *) &mh->OsakVersion)[1] & 0xf,
		((unsigned char *) &mh->OsakVersion)[2] & 0xf,
		((unsigned char *) &mh->OsakVersion)[3] & 0xf,
		mh->PercentUsed);

	vshift = this->phys_erase_shift + mh->BlockMultiplierBits;

	blocks = mtd->size >> vshift;
	if (blocks > 32768) {
		printk(KERN_ERR "BlockMultiplierBits=%d is inconsistent with device size.  Aborting.\n", mh->BlockMultiplierBits);
		goto out;
	}

	blocks = doc->chips_per_floor << (this->chip_shift - this->phys_erase_shift);
	if (inftl_bbt_write && (blocks > mtd->erasesize)) {
		printk(KERN_ERR "Writeable BBTs spanning more than one erase block are not yet supported.  FIX ME!\n");
		goto out;
	}

	/* Scan the partitions */
	for (i = 0; (i < 4); i++) {
		ip = &(mh->Partitions[i]);
		le32_to_cpus(&ip->virtualUnits);
		le32_to_cpus(&ip->firstUnit);
		le32_to_cpus(&ip->lastUnit);
		le32_to_cpus(&ip->flags);
		le32_to_cpus(&ip->spareUnits);
		le32_to_cpus(&ip->Reserved0);

		printk(KERN_INFO "    PARTITION[%d] ->\n"
			"        virtualUnits    = %d\n"
			"        firstUnit       = %d\n"
			"        lastUnit        = %d\n"
			"        flags           = 0x%x\n"
			"        spareUnits      = %d\n",
			i, ip->virtualUnits, ip->firstUnit,
			ip->lastUnit, ip->flags,
			ip->spareUnits);

		if ((show_firmware_partition == 1) &&
		    (i == 0) && (ip->firstUnit > 0)) {
			parts[0].name = " DiskOnChip IPL / Media Header partition";
			parts[0].offset = 0;
			parts[0].size = mtd->erasesize * ip->firstUnit;
			numparts = 1;
		}

		if (ip->flags & INFTL_BINARY)
			parts[numparts].name = " DiskOnChip BDK partition";
		else
			parts[numparts].name = " DiskOnChip BDTL partition";
		parts[numparts].offset = ip->firstUnit << vshift;
		parts[numparts].size = (1 + ip->lastUnit - ip->firstUnit) << vshift;
		numparts++;
		if (ip->lastUnit > lastvunit)
			lastvunit = ip->lastUnit;
		if (ip->flags & INFTL_LAST)
			break;
	}
	lastvunit++;
	if ((lastvunit << vshift) < end) {
		parts[numparts].name = " DiskOnChip Remainder partition";
		parts[numparts].offset = lastvunit << vshift;
		parts[numparts].size = end - parts[numparts].offset;
		numparts++;
	}
	ret = numparts;
 out:
	kfree(buf);
	return ret;
}

/*
 * nftl_scan_bbt - scan_bbt hook for NFTL-formatted devices: locate the
 * media headers, point the BBT descriptors at the pages following them,
 * run the generic BBT scan and register the device (plus its derived
 * partitions unless no_autopart is set).
 */
static int __init nftl_scan_bbt(struct mtd_info *mtd)
{
	int ret, numparts;
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	struct mtd_partition parts[2];

	memset((char *)parts, 0, sizeof(parts));
	/* On NFTL, we have to find the media headers before we can read the
	   BBTs, since they're stored in the media header eraseblocks.
	 */
	numparts = nftl_partscan(mtd, parts);
	if (!numparts)
		return -EIO;
	/* BBT lives on the page right after each media header. */
	this->bbt_td->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
				NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
				NAND_BBT_VERSION;
	this->bbt_td->veroffs = 7;
	this->bbt_td->pages[0] = doc->mh0_page + 1;
	if (doc->mh1_page != -1) {
		this->bbt_md->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
					NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
					NAND_BBT_VERSION;
		this->bbt_md->veroffs = 7;
		this->bbt_md->pages[0] = doc->mh1_page + 1;
	} else {
		/* No mirror header found -> no mirror BBT. */
		this->bbt_md = NULL;
	}

	/* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
	   At least as nand_bbt.c is currently written. */
	if ((ret = nand_scan_bbt(mtd, NULL)))
		return ret;
	mtd_device_register(mtd, NULL, 0);
	if (!no_autopart)
		mtd_device_register(mtd, parts, numparts);
	return 0;
}

/*
 * inftl_scan_bbt - scan_bbt hook for INFTL-formatted devices.  Sets up
 * the BBT descriptors (absolute page 2 on Millennium Plus; last-block
 * "MSYS_BBT"/"TBB_SYSM" patterns otherwise), runs the generic scan, then
 * partitions from the INFTL media header and registers the device.
 */
static int __init inftl_scan_bbt(struct mtd_info *mtd)
{
	int ret, numparts;
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	struct mtd_partition parts[5];

	if (this->numchips > doc->chips_per_floor) {
		printk(KERN_ERR "Multi-floor INFTL devices not yet supported.\n");
		return -EIO;
	}

	if (DoC_is_MillenniumPlus(doc)) {
		this->bbt_td->options = NAND_BBT_2BIT | NAND_BBT_ABSPAGE;
		if (inftl_bbt_write)
			this->bbt_td->options |= NAND_BBT_WRITE;
		this->bbt_td->pages[0] = 2;
		this->bbt_md = NULL;
	} else {
		this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
		if (inftl_bbt_write)
			this->bbt_td->options |= NAND_BBT_WRITE;
		this->bbt_td->offs = 8;
		this->bbt_td->len = 8;
		this->bbt_td->veroffs = 7;
		this->bbt_td->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
		this->bbt_td->reserved_block_code = 0x01;
		this->bbt_td->pattern = "MSYS_BBT";

		this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
		if (inftl_bbt_write)
			this->bbt_md->options |= NAND_BBT_WRITE;
		this->bbt_md->offs = 8;
		this->bbt_md->len = 8;
		this->bbt_md->veroffs = 7;
		this->bbt_md->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
		this->bbt_md->reserved_block_code = 0x01;
		this->bbt_md->pattern = "TBB_SYSM";
	}

	/* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
	   At least as nand_bbt.c is currently written. */
	if ((ret = nand_scan_bbt(mtd, NULL)))
		return ret;

	memset((char *)parts, 0, sizeof(parts));
	numparts = inftl_partscan(mtd, parts);
	/* At least for now, require the INFTL Media Header.  We could probably
	   do without it for non-INFTL use, since all it gives us is
	   autopartitioning, but I want to give it more thought. */
	if (!numparts)
		return -EIO;
	mtd_device_register(mtd, NULL, 0);
	if (!no_autopart)
		mtd_device_register(mtd, parts, numparts);
	return 0;
}

/*
 * doc2000_init - hook up the DoC 2000 variants of the NAND callbacks.
 * Returns the number of chips nand_scan() should probe for.
 */
static inline int __init doc2000_init(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;

	this->read_byte = doc2000_read_byte;
	this->write_buf = doc2000_writebuf;
	this->read_buf = doc2000_readbuf;
	this->verify_buf = doc2000_verifybuf;
	this->scan_bbt = nftl_scan_bbt;

	doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
	doc2000_count_chips(mtd);
	mtd->name = "DiskOnChip 2000 (NFTL Model)";
	return (4 * doc->chips_per_floor);
}

/*
 * doc2001_init - hook up the Millennium variants of the NAND callbacks
 * and distinguish a true Millennium from the newer INFTL-model DoC 2000
 * units that share the ASIC.  Returns the chip count for nand_scan().
 */
static inline int __init doc2001_init(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;

	this->read_byte = doc2001_read_byte;
	this->write_buf = doc2001_writebuf;
	this->read_buf = doc2001_readbuf;
	this->verify_buf = doc2001_verifybuf;

	ReadDOC(doc->virtadr, ChipID);
	ReadDOC(doc->virtadr, ChipID);
	ReadDOC(doc->virtadr, ChipID);
	if (ReadDOC(doc->virtadr, ChipID) != DOC_ChipID_DocMil) {
		/* It's not a Millennium; it's one of the newer
		   DiskOnChip 2000 units with a similar ASIC.
		   Treat it like a Millennium, except that it
		   can have multiple chips.
		 */
		doc2000_count_chips(mtd);
		mtd->name = "DiskOnChip 2000 (INFTL Model)";
		this->scan_bbt = inftl_scan_bbt;
		return (4 * doc->chips_per_floor);
	} else {
		/* Bog-standard Millennium */
		doc->chips_per_floor = 1;
		mtd->name = "DiskOnChip Millennium";
		this->scan_bbt = nftl_scan_bbt;
		return 1;
	}
}

/*
 * doc2001plus_init - hook up the Millennium Plus callbacks.  The Plus
 * bypasses cmd_ctrl entirely and supplies its own cmdfunc/select_chip.
 */
static inline int __init doc2001plus_init(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;

	this->read_byte = doc2001plus_read_byte;
	this->write_buf = doc2001plus_writebuf;
	this->read_buf = doc2001plus_readbuf;
	this->verify_buf = doc2001plus_verifybuf;
	this->scan_bbt = inftl_scan_bbt;
	this->cmd_ctrl = NULL;
	this->select_chip = doc2001plus_select_chip;
	this->cmdfunc = doc2001plus_command;
	this->ecc.hwctl = doc2001plus_enable_hwecc;

	doc->chips_per_floor = 1;
	mtd->name = "DiskOnChip Millennium Plus";
	return 1;
}

/*
 * doc_probe - probe one physical address for a DiskOnChip, identify the
 * ASIC family, weed out aliases of already-registered devices, then
 * allocate and register the mtd/nand/doc_priv bundle.  Returns 0 on
 * success or a negative errno.
 */
static int __init doc_probe(unsigned long physadr)
{
	unsigned char ChipID;
	struct mtd_info *mtd;
	struct nand_chip *nand;
	struct doc_priv *doc;
	void __iomem *virtadr;
	unsigned char save_control;
	unsigned char tmp, tmpb, tmpc;
	int reg, len, numchips;
	int ret = 0;

	virtadr = ioremap(physadr, DOC_IOREMAP_LEN);
	if (!virtadr) {
		printk(KERN_ERR "Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n", DOC_IOREMAP_LEN, physadr);
		return -EIO;
	}

	/* It's not possible to cleanly detect the DiskOnChip - the
	 * bootup procedure will put the device into reset mode, and
	 * it's not possible to talk to it without actually writing
	 * to the DOCControl register. So we store the current contents
	 * of the DOCControl register's location, in case we later decide
	 * that it's not a DiskOnChip, and want to put it back how we
	 * found it.
	 */
	save_control = ReadDOC(virtadr, DOCControl);

	/* Reset the DiskOnChip ASIC */
	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);

	/* Enable the DiskOnChip ASIC */
	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);

	ChipID = ReadDOC(virtadr, ChipID);

	switch (ChipID) {
	case DOC_ChipID_Doc2k:
		reg = DoC_2k_ECCStatus;
		break;
	case DOC_ChipID_DocMil:
		reg = DoC_ECCConf;
		break;
	case DOC_ChipID_DocMilPlus16:
	case DOC_ChipID_DocMilPlus32:
	case 0:
		/* Possible Millennium Plus, need to do more checks */
		/* Possibly release from power down mode */
		for (tmp = 0; (tmp < 4); tmp++)
			ReadDOC(virtadr, Mplus_Power);

		/* Reset the Millennium Plus ASIC */
		tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
		WriteDOC(tmp, virtadr, Mplus_DOCControl);
		WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);

		mdelay(1);
		/* Enable the Millennium Plus ASIC */
		tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
		WriteDOC(tmp, virtadr, Mplus_DOCControl);
		WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
		mdelay(1);

		ChipID = ReadDOC(virtadr, ChipID);

		switch (ChipID) {
		case DOC_ChipID_DocMilPlus16:
			reg = DoC_Mplus_Toggle;
			break;
		case DOC_ChipID_DocMilPlus32:
			printk(KERN_ERR "DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
			/* fall through to the -ENODEV path */
		default:
			ret = -ENODEV;
			goto notfound;
		}
		break;

	default:
		ret = -ENODEV;
		goto notfound;
	}
	/* Check the TOGGLE bit in the ECC register: it must alternate on
	   successive reads on a real DiskOnChip. */
	tmp = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
	tmpb = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
	tmpc = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
	if ((tmp == tmpb) || (tmp != tmpc)) {
		printk(KERN_WARNING "Possible DiskOnChip at 0x%lx failed TOGGLE test, dropping.\n", physadr);
		ret = -ENODEV;
		goto notfound;
	}

	for (mtd = doclist; mtd; mtd = doc->nextdoc) {
		unsigned char oldval;
		unsigned char newval;
		nand = mtd->priv;
		doc = nand->priv;
		/* Use the alias resolution register to determine if this is
		   in fact the same DOC aliased to a new address.  If writes
		   to one chip's alias resolution register change the value on
		   the other chip, they're the same chip. */
		if (ChipID == DOC_ChipID_DocMilPlus16) {
			oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
			newval = ReadDOC(virtadr, Mplus_AliasResolution);
		} else {
			oldval = ReadDOC(doc->virtadr, AliasResolution);
			newval = ReadDOC(virtadr, AliasResolution);
		}
		if (oldval != newval)
			continue;
		if (ChipID == DOC_ChipID_DocMilPlus16) {
			WriteDOC(~newval, virtadr, Mplus_AliasResolution);
			oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
			WriteDOC(newval, virtadr, Mplus_AliasResolution);	// restore it
		} else {
			WriteDOC(~newval, virtadr, AliasResolution);
			oldval = ReadDOC(doc->virtadr, AliasResolution);
			WriteDOC(newval, virtadr, AliasResolution);	// restore it
		}
		newval = ~newval;
		if (oldval == newval) {
			printk(KERN_DEBUG "Found alias of DOC at 0x%lx to 0x%lx\n", doc->physadr, physadr);
			goto notfound;
		}
	}

	printk(KERN_NOTICE "DiskOnChip found at 0x%lx\n", physadr);

	/* One allocation carries mtd_info, nand_chip, doc_priv and the two
	   BBT descriptors, laid out back to back. */
	len = sizeof(struct mtd_info) +
	      sizeof(struct nand_chip) + sizeof(struct doc_priv) + (2 * sizeof(struct nand_bbt_descr));
	mtd = kzalloc(len, GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "DiskOnChip kmalloc (%d bytes) failed!\n", len);
		ret = -ENOMEM;
		goto fail;
	}

	nand = (struct nand_chip *) (mtd + 1);
	doc = (struct doc_priv *) (nand + 1);
	nand->bbt_td = (struct nand_bbt_descr *) (doc + 1);
	nand->bbt_md = nand->bbt_td + 1;

	mtd->priv = nand;
	mtd->owner = THIS_MODULE;

	nand->priv = doc;
	nand->select_chip = doc200x_select_chip;
	nand->cmd_ctrl = doc200x_hwcontrol;
	nand->dev_ready = doc200x_dev_ready;
	nand->waitfunc = doc200x_wait;
	nand->block_bad = doc200x_block_bad;
	nand->ecc.hwctl = doc200x_enable_hwecc;
	nand->ecc.calculate = doc200x_calculate_ecc;
	nand->ecc.correct = doc200x_correct_data;

	nand->ecc.layout = &doc200x_oobinfo;
	nand->ecc.mode = NAND_ECC_HW_SYNDROME;
	nand->ecc.size = 512;
	nand->ecc.bytes = 6;
	nand->ecc.strength = 2;
	nand->bbt_options = NAND_BBT_USE_FLASH;

	doc->physadr = physadr;
	doc->virtadr = virtadr;
	doc->ChipID = ChipID;
	doc->curfloor = -1;
	doc->curchip = -1;
	doc->mh0_page = -1;
	doc->mh1_page = -1;
	doc->nextdoc = doclist;

	if (ChipID == DOC_ChipID_Doc2k)
		numchips = doc2000_init(mtd);
	else if (ChipID == DOC_ChipID_DocMilPlus16)
		numchips = doc2001plus_init(mtd);
	else
		numchips = doc2001_init(mtd);

	if ((ret = nand_scan(mtd, numchips))) {
		/* DBB note: i believe nand_release is necessary here, as
		   buffers may have been allocated in nand_base.  Check with
		   Thomas. FIX ME! */
		/* nand_release will call mtd_device_unregister, but we
		   haven't yet added it.  This is handled without incident by
		   mtd_device_unregister, as far as I can tell. */
		nand_release(mtd);
		kfree(mtd);
		goto fail;
	}

	/* Success! */
	doclist = mtd;
	return 0;

 notfound:
	/* Put back the contents of the DOCControl register, in case it's not
	   actually a DiskOnChip.  */
	WriteDOC(save_control, virtadr, DOCControl);
 fail:
	iounmap(virtadr);
	return ret;
}

/*
 * release_nanddoc - tear down every probed device: release the NAND/MTD
 * resources, unmap the I/O window and free the combined allocation.
 */
static void release_nanddoc(void)
{
	struct mtd_info *mtd, *nextmtd;
	struct nand_chip *nand;
	struct doc_priv *doc;

	for (mtd = doclist; mtd; mtd = nextmtd) {
		nand = mtd->priv;
		doc = nand->priv;

		nextmtd = doc->nextdoc;
		nand_release(mtd);
		iounmap(doc->virtadr);
		kfree(mtd);
	}
}

/*
 * init_nanddoc - module init: create the shared Reed-Solomon decoder and
 * probe either the configured address or the standard probe-address list.
 */
static int __init init_nanddoc(void)
{
	int i, ret = 0;

	/* We could create the decoder on demand, if memory is a concern.
	 * This way we have it handy, if an error happens
	 *
	 * Symbolsize is 10 (bits)
	 * Primitive polynomial is x^10+x^3+1
	 * first consecutive root is 510
	 * primitive element to generate roots = 1
	 * generator polynomial degree = 4
	 */
	rs_decoder = init_rs(10, 0x409, FCR, 1, NROOTS);
	if (!rs_decoder) {
		printk(KERN_ERR "DiskOnChip: Could not create a RS decoder\n");
		return -ENOMEM;
	}

	if (doc_config_location) {
		printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location);
		ret = doc_probe(doc_config_location);
		if (ret < 0)
			goto outerr;
	} else {
		for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
			doc_probe(doc_locations[i]);
		}
	}
	/* No banner message any more. Print a message if no DiskOnChip
	   found, so the user knows we at least tried. */
	if (!doclist) {
		printk(KERN_INFO "No valid DiskOnChip devices found\n");
		ret = -ENODEV;
		goto outerr;
	}
	return 0;
 outerr:
	free_rs(rs_decoder);
	return ret;
}

static void __exit cleanup_nanddoc(void)
{
	/* Cleanup the nand/DoC resources */
	release_nanddoc();

	/* Free the reed solomon resources */
	if (rs_decoder) {
		free_rs(rs_decoder);
	}
}

module_init(init_nanddoc);
module_exit(cleanup_nanddoc);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("M-Systems DiskOnChip 2000, Millennium and Millennium Plus device driver");
gpl-2.0